index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
56,998 | meoke/pangtreevis | refs/heads/master | /dash_app/components/consensustree.py | from collections import deque
from typing import Dict, List, Tuple, Set, Any
import math
from ..layout.colors import colors
import plotly.graph_objs as go
import pandas as pd
from pangtreebuild.consensus.ConsensusTree import ConsensusNodeID
from pangtreebuild.output.PangenomeJSON import PangenomeJSON, ConsensusNode
import networkx as nx
from networkx.readwrite import json_graph
def get_consensustree_dict(jsonpangenome: PangenomeJSON) -> Dict:
    """Build the consensus tree for *jsonpangenome* and serialise it to a plain dict."""
    return tree_to_dict(get_consensustree(jsonpangenome))
def get_consensustree(jsonpangenome: PangenomeJSON) -> nx.DiGraph:
    """Convert the consensuses of *jsonpangenome* into a directed networkx tree.

    Each consensus becomes a node (keyed by its id) carrying the attributes the
    visualisation needs; parent->child edges are weighted by subtree size.
    """
    graph = nx.DiGraph()
    ordered_consensuses = sorted(jsonpangenome.consensuses,
                                 key=lambda c: c.consensus_node_id)
    for consensus in ordered_consensuses:
        graph.add_node(
            consensus.consensus_node_id,
            name=consensus.name,
            comp=consensus.comp_to_all_sequences,
            sequences_ids=consensus.sequences_int_ids,
            show_in_table=True,
            hidden=False,
            children_consensuses=consensus.children,
            # mincomp=consensus.mincomp ** (1/jsonpangenome.program_parameters.p),
            mincomp=consensus.mincomp,
            is_leaf=not consensus.children,
        )
        if consensus.parent is not None:
            graph.add_edge(consensus.parent,
                           consensus.consensus_node_id,
                           weight=len(consensus.sequences_int_ids))
    return graph
def tree_to_dict(tree_graph: nx.DiGraph) -> Dict:
    """Serialise the consensus tree to networkx tree-data (root is always node 0)."""
    serialised = json_graph.tree_data(tree_graph, root=0)
    return serialised
def dict_to_tree(tree_data: Dict) -> nx.DiGraph:
    """Rebuild a consensus tree graph from its tree-data dict form."""
    rebuilt = json_graph.tree_graph(tree_data)
    return rebuilt
def get_node_id_to_y_pos(tree: nx.DiGraph) -> Dict[ConsensusNodeID, int]:
    """Assign a vertical position (0-100 scale) to every node of the consensus tree.

    Leaves are spread evenly along the y axis; each internal node is placed
    midway between its outermost children.  Positions are resolved bottom-up
    with a worklist: a parent is positioned only once all its children are.
    """
    node_id_to_y = {}
    leafs_ids = []
    # Leaves are the nodes without children.
    for node_id in tree.nodes:
        if not tree.nodes[node_id]['children_consensuses']:
            leafs_ids.append(node_id)
    leafs_count = len(leafs_ids)
    min_y = 0
    max_y = 100
    # Even spacing with a margin at both ends of the [0, 100] range.
    leaf_distance = (max_y - min_y) / (leafs_count + 1)
    for i, leaf_id in enumerate(leafs_ids):
        node_id_to_y[leaf_id] = leaf_distance * (i + 1)
    nodes_to_process = deque(leafs_ids)
    while nodes_to_process:
        processed_child_id = nodes_to_process.pop()
        # Find the parent by scanning the children attribute of every node
        # (the graph edges also encode this, but the attribute is authoritative here).
        parents = [node_id
                   for node_id in tree.nodes
                   if processed_child_id in tree.nodes[node_id]['children_consensuses']]
        if parents:
            parent_id = parents[0]
        else:
            break  # no parent -> the root was processed, everything is placed
        siblings = tree.nodes[parent_id]['children_consensuses']
        all_siblings_set = all([s in node_id_to_y.keys() for s in siblings])
        if all_siblings_set:
            # All siblings already have positions - drop them from the queue
            # so the parent is computed only once.
            for s in siblings:
                if s in nodes_to_process:
                    nodes_to_process.remove(s)
        else:
            # Some siblings are still unplaced: queue them ahead and retry
            # this child later.
            for s in siblings:
                if s not in node_id_to_y.keys() and s not in nodes_to_process:
                    nodes_to_process.appendleft(s)
            nodes_to_process.appendleft(processed_child_id)
            continue
        # The parent sits midway between its topmost and bottommost child.
        siblings_positions = [y for node_id, y in node_id_to_y.items() if node_id in siblings]
        left_child_pos = min(siblings_positions)
        right_child_pos = max(siblings_positions)
        node_id_to_y[parent_id] = (right_child_pos + left_child_pos) / 2
        nodes_to_process.append(parent_id)
    return node_id_to_y
def get_consensustree_graph(tree: nx.DiGraph, slider_value: float, leaf_info_value: str, full_consensustable: pd.DataFrame) -> go.Figure:
    """Assemble the full consensus-tree figure: edges, nodes, cutoff line and leaf labels."""
    node_id_to_y = get_node_id_to_y_pos(tree)
    node_ids = list(range(len(node_id_to_y)))
    mincomp_labels = [format(tree.nodes[node_id]["mincomp"], '.4f') for node_id in node_ids]
    hover_labels = [f'{mincomp_label}' for mincomp_label in mincomp_labels]
    node_labels = [f"{node_id}" for node_id in node_ids]
    # x = mincomp value, y = precomputed tree layout position.
    positions = [(tree.nodes[node_id]["mincomp"], node_id_to_y[node_id])
                 for node_id in node_ids]
    figure_data = [
        get_tree_lines_graph(positions, tree),
        get_tree_nodes_graph(positions, hover_labels),
        get_line_graph(slider_value),
        get_leaves_text_graph(positions, tree, leaf_info_value, full_consensustable),
    ]
    figure_layout = dict(
        title='Consensuses Tree',
        annotations=get_tree_nodes_annotations(positions, node_labels),
        font=dict(size=12),
        showlegend=False,
        xaxis=go.layout.XAxis(dict(range=[0, 1.2], showline=False, zeroline=False,
                                   showgrid=False, showticklabels=False,)),
        yaxis=go.layout.YAxis(dict(range=[0, 100], showline=False, zeroline=False,
                                   showgrid=False, showticklabels=False,)),
        margin=dict(l=20, r=10, b=0, t=0),
        hovermode='closest',
        plot_bgcolor=colors['transparent'],
        autosize=True,
    )
    return go.Figure(data=figure_data, layout=figure_layout)
def get_line_graph(slider_value: float) -> go.Scatter:
    """Vertical cutoff line at the slider position, spanning the whole y range."""
    return go.Scatter(
        x=[slider_value] * 2,
        y=[0, 100],
        mode='lines',
        line=dict(color=colors['accent']),
    )
def get_tree_nodes_graph(positions: List[Tuple[float, float]], labels_on_hover: List[str]) -> go.Scatter:
    """White circular markers for all tree nodes, with mincomp shown on hover."""
    xs = [position[0] for position in positions]
    ys = [position[1] for position in positions]
    return go.Scatter(
        x=xs,
        y=ys,
        mode='markers',
        name='',
        textposition='top left',
        marker=dict(symbol='circle',
                    size=20,
                    color='rgba(255, 255, 255, 1)',
                    line=dict(color='rgba(49, 55, 21, 1)',
                              width=1),
                    ),
        text=labels_on_hover,
        hoverinfo='text',
        opacity=1)
def get_tree_lines_graph(positions: List[Tuple[float, float]], tree: nx.DiGraph) -> go.Scatter:
    """One scatter trace drawing all tree edges (None separates the segments)."""
    xs = []
    ys = []
    for source, target in tree.edges:
        xs.extend((positions[source][0], positions[target][0], None))
        ys.extend((positions[source][1], positions[target][1], None))
    return go.Scatter(x=xs,
                      y=ys,
                      mode='lines',
                      line=dict(color='rgba(49, 55, 21, 1)', width=2),
                      hoverinfo='none')
def get_tree_nodes_annotations(positions: List[Tuple[float, float]], labels: List[str]) -> List[Dict]:
    """One text annotation (the node id label) per tree node position."""
    annotations = []
    for i, (x, y) in enumerate(positions):
        annotations.append({'x': x,
                            'y': y,
                            'text': f"{labels[i]}",
                            'showarrow': False})
    return annotations
def get_leaf_label(sequences_ids: List[int], leaf_info_value: str, full_consensustable: pd.DataFrame) -> str:
    """Return a comma-separated list of the unique *leaf_info_value* metadata
    values for the sequences whose "ID" is in *sequences_ids*.

    The values are sorted by their string form so the label is deterministic;
    the original joined a bare ``set``, which yields an arbitrary order between
    interpreter runs.
    """
    matching_rows = full_consensustable.loc[full_consensustable["ID"].isin(sequences_ids)]
    unique_values = set(matching_rows[leaf_info_value])
    return ", ".join(sorted(str(value) for value in unique_values))
def get_leaves_text_graph(positions: List[Tuple[float, float]], tree: nx.DiGraph, leaf_info_value: str,
                          full_consensustable: pd.DataFrame) -> go.Scatter:
    """Metadata text labels placed slightly to the right of every leaf node."""
    xs = []
    ys = []
    leaf_labels = []
    for node_id in range(len(tree.nodes)):
        if not tree.nodes[node_id]['is_leaf']:
            continue
        node_x, node_y = positions[node_id]
        xs.append(node_x + 0.02)  # small offset so the text clears the marker
        ys.append(node_y)
        leaf_labels.append(get_leaf_label(sequences_ids=tree.nodes[node_id]['sequences_ids'],
                                          leaf_info_value=leaf_info_value,
                                          full_consensustable=full_consensustable))
    return go.Scatter(
        x=xs,
        y=ys,
        text=leaf_labels,
        mode='text+markers',
        textposition='middle right',
        hoverinfo='none',
        marker=dict(symbol='line-ew-open',
                    size=3,
                    color='black',
                    line=dict(color='rgb(50,50,50)', width=0),
                    )
    )
def get_leaf_info_dropdown_options(metadata: List[str]) -> List[Dict[str, str]]:
    """Dash dropdown options: every metadata column becomes its own label/value pair."""
    options = []
    for column_name in metadata:
        options.append({'label': column_name, 'value': column_name})
    return options
def get_offspring_ids(tree: nx.DiGraph, current_node_id: ConsensusNodeID) -> List[ConsensusNodeID]:
    """Collect the ids of all descendants of *current_node_id* (depth-first, right-to-left)."""
    pending = deque(tree.nodes[current_node_id]['children_consensuses'])
    offspring_ids = []
    while pending:
        descendant_id = pending.pop()
        offspring_ids.append(descendant_id)
        pending.extend(tree.nodes[descendant_id]['children_consensuses'])
    return offspring_ids
56,999 | meoke/pangtreevis | refs/heads/master | /dash_app/callbacks/consensustree.py | from dash.dependencies import Input, Output
from dash_app.components import consensustable
from dash_app.components import tools
from dash_app.layout.layout_ids import *
from dash_app.components import consensustree
from dash_app.server import app
@app.callback(
    Output(id_full_consensustree_hidden, 'children'),
    [Input(id_pangenome_hidden, 'children')]
)
def update_consensustree_hidden(jsonified_pangenome):
    """Rebuild the serialised consensus tree whenever the pangenome store changes."""
    if not jsonified_pangenome:
        return []
    pangenome = tools.unjsonify_jsonpangenome(jsonified_pangenome)
    tree_dict = consensustree.get_consensustree_dict(pangenome)
    return tools.jsonify_builtin_types(tree_dict)
@app.callback(
    Output(id_current_consensustree_hidden, 'children'),
    [Input(id_full_consensustree_hidden, 'children')]
)
def update_current_tree_state(jsonified_full_consensustree):
    """Initialise the 'current tree' store as a normalised copy of the full tree.

    The dict -> graph -> dict round trip keeps both stores in the same form.
    """
    if not jsonified_full_consensustree:
        return []
    full_tree_data = tools.unjsonify_builtin_types(jsonified_full_consensustree)
    full_tree = consensustree.dict_to_tree(full_tree_data)
    current_tree_data = consensustree.tree_to_dict(full_tree)
    return tools.jsonify_builtin_types(current_tree_data)
@app.callback(
    Output(id_consensus_tree_graph, 'figure'),
    [Input(id_current_consensustree_hidden, 'children'),
     Input(id_consensus_tree_slider, 'value'),
     Input(id_leaf_info_dropdown, 'value'),
     Input(id_full_consensustable_hidden, 'children')])
def to_consensustree_graph(jsonified_current_consensustree, slider_value, leaf_info, jsonified_full_consensustable):
    """Redraw the consensus-tree figure for the current tree / slider / leaf-info state."""
    if not jsonified_current_consensustree or not jsonified_full_consensustable:
        return {}
    current_tree = consensustree.dict_to_tree(
        tools.unjsonify_builtin_types(jsonified_current_consensustree))
    consensustable_df = tools.unjsonify_df(jsonified_full_consensustable)
    return consensustree.get_consensustree_graph(current_tree, slider_value, leaf_info, consensustable_df)
@app.callback(
    Output(id_consensus_node_details_header, 'children'),
    [Input(id_consensus_tree_graph, 'clickData')]
)
def to_consensus_node_details_header(tree_click_data):
    """Show the id of the clicked consensus node in the details header."""
    if not tree_click_data:
        return []
    node_id = tree_click_data['points'][0]['pointIndex']
    return f"Consensus {node_id}"
@app.callback(
    Output(id_consensus_node_details_table_hidden, 'children'),
    [Input(id_consensus_tree_graph, 'clickData'),
     Input(id_full_consensustable_hidden, 'children'),
     Input(id_full_consensustree_hidden, 'children')]
)
def to_consensus_node_details_table(tree_click_data, jsonified_full_consensustable, jsonified_consensustree):
    """Serialise the details table for the consensus node clicked in the tree."""
    if not jsonified_full_consensustable or not tree_click_data:
        return []
    node_id = tree_click_data['points'][0]['pointIndex']
    consensustable_df = tools.unjsonify_df(jsonified_full_consensustable)
    tree = consensustree.dict_to_tree(
        tools.unjsonify_builtin_types(jsonified_consensustree))
    details_df = consensustable.get_consensus_details_df(node_id, consensustable_df, tree)
    return tools.jsonify_df(details_df)
@app.callback(
    Output(id_consensus_node_details_table, 'data'),
    [Input(id_consensus_node_details_table_hidden, 'children')]
)
def to_consensusnode_details_content(jsonified_consensus_details_table):
    """Feed the node-details DataTable rows from its hidden serialised store."""
    if not jsonified_consensus_details_table:
        return []
    details_df = tools.unjsonify_df(jsonified_consensus_details_table)
    return details_df.to_dict("rows")
@app.callback(
    Output(id_consensus_node_details_distribution, 'src'),
    [Input(id_consensus_tree_graph, 'clickData'),
     Input(id_full_consensustable_hidden, 'children')]
)
def to_consensus_node_details_distribution(tree_click_data, jsonified_full_consensustable):
    """Render the sequence-distribution image for the clicked consensus node."""
    if not jsonified_full_consensustable or not tree_click_data:
        return ""
    node_id = tree_click_data['points'][0]['pointIndex']
    consensustable_df = tools.unjsonify_df(jsonified_full_consensustable)
    return consensustable.get_node_distribution_fig(node_id, consensustable_df)
@app.callback(
    Output(id_consensus_node_details_table, 'columns'),
    [Input(id_consensus_node_details_table_hidden, 'children')]
)
def update_columns(jsonified_consensus_details_table):
    """Derive the node-details DataTable column definitions from its store."""
    if not jsonified_consensus_details_table:
        return [{}]
    details_df = tools.unjsonify_df(jsonified_consensus_details_table)
    return [{"name": column, "id": column} for column in details_df.columns]
@app.callback(
    Output(id_consensus_tree_container, 'style'),
    [Input(id_current_consensustree_hidden, 'children')])
def show_consensus_tree_container(jsonified_current_consensustree):
    """Hide the consensus-tree container until a tree is available."""
    return {'display': 'block' if jsonified_current_consensustree else 'none'}
@app.callback(
    Output('tree_info', 'style'),
    [Input(id_consensus_tree_graph, 'clickData')])
def show_consensus_tree_info(click_data):
    """Reveal the tree-info panel only after a node has been clicked."""
    return {'display': 'block' if click_data else 'none'}
@app.callback(
    Output(id_leaf_info_dropdown, 'options'),
    [Input(id_full_consensustable_hidden, 'children')])
def to_consensustree_leaf_info_options_dropdown(jsonified_full_consensustable):
    """Populate the leaf-info dropdown with the table's metadata columns."""
    if not jsonified_full_consensustable:
        return []
    consensustable_df = tools.unjsonify_df(jsonified_full_consensustable)
    metadata = consensustable.get_metadata_list(consensustable_df)
    return consensustree.get_leaf_info_dropdown_options(metadata)
| {"/dash_app/callbacks/visualisation.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py"], "/dash_app/layout/pages.py": ["/dash_app/layout/layout_ids.py"], "/run.py": ["/dash_app/app.py"], "/dash_app/callbacks/consensustree.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/app.py": ["/dash_app/server.py"], "/dash_app/callbacks/consensustable.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/callbacks/mafgraph.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py"], "/dash_app/callbacks/pangtreebuild.py": ["/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py", "/dash_app/server.py"], "/dash_app/callbacks/poagraph.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py", "/dash_app/app.py"]} |
57,000 | meoke/pangtreevis | refs/heads/master | /dash_app/app.py | import dash_bootstrap_components as dbc
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import dash_html_components as html
from .server import app
from .layout import layout_ids, pages
# Application-wide configuration, executed once at import time.
app.title = 'PangtreeVis'
# app.css.config.serve_locally = True
# app.scripts.config.serve_locally = True
# External stylesheets: Font Awesome icons + the Bootswatch FLATLY Bootstrap theme.
external_css = [
    'https://use.fontawesome.com/releases/v5.8.1/css/all.css',
    dbc.themes.FLATLY
]
for css in external_css:
    app.css.append_css({"external_url": css})
# Callbacks reference components created dynamically by display_page(),
# so Dash must not validate them against the initial layout.
app.config.suppress_callback_exceptions = True
draw_poagraph = True
# Top-level layout: URL router + sticky navbar + placeholder filled by display_page().
app.layout = html.Div(
    [dcc.Location(id=layout_ids.id_url, refresh=False),
     dbc.Navbar(
         [
             html.A(
                 # Brand: logo + application name, linking back to the index page.
                 dbc.Row(
                     [
                         dbc.Col(html.Img(src="assets/favicon.ico", height="30px")),
                         dbc.Col(dbc.NavbarBrand("Pangtree", className="ml-2")),
                     ],
                     align="center",
                     no_gutters=True,
                 ),
                 href="/#",
             ),
             # Burger button shown on small screens; toggled by toggle_navbar_collapse().
             dbc.NavbarToggler(id="navbar-toggler"),
             dbc.Collapse(dbc.Row(children=[
                 dbc.Col(dbc.NavLink("Tools", href="/tools")),
                 dbc.Col(dbc.NavLink("Package", href="/package")),
                 dbc.Col(dbc.NavLink("Contact", href="/contact")),
             ],
                 no_gutters=True,
                 className="ml-auto flex-nowrap mt-3 mt-md-0",
                 align="center"), id="navbar-collapse", navbar=True)
         ],
         sticky="top",
     ),
     html.Div(id=layout_ids.id_page_content)])
@app.callback(
    Output("navbar-collapse", "is_open"),
    [Input("navbar-toggler", "n_clicks")],
    [State("navbar-collapse", "is_open")],
)
def toggle_navbar_collapse(n, is_open):
    """Flip the navbar open/closed on burger clicks; keep state when n is falsy."""
    return (not is_open) if n else is_open
@app.callback(Output(layout_ids.id_page_content, 'children'),
              [Input(layout_ids.id_url, 'pathname')])
def display_page(pathname):
    """Route the URL path to the matching page layout (index by default)."""
    routes = {
        '/tools': pages.tools,
        '/package': pages.package,
        '/contact': pages.contact,
    }
    page_builder = routes.get(pathname, pages.index)
    return page_builder()
from .callbacks import consensustable
from .callbacks import consensustree
from .callbacks import mafgraph
from .callbacks import poagraph
from .callbacks import pangtreebuild
from .callbacks import visualisation | {"/dash_app/callbacks/visualisation.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py"], "/dash_app/layout/pages.py": ["/dash_app/layout/layout_ids.py"], "/run.py": ["/dash_app/app.py"], "/dash_app/callbacks/consensustree.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/app.py": ["/dash_app/server.py"], "/dash_app/callbacks/consensustable.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/callbacks/mafgraph.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py"], "/dash_app/callbacks/pangtreebuild.py": ["/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py", "/dash_app/server.py"], "/dash_app/callbacks/poagraph.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py", "/dash_app/app.py"]} |
57,001 | meoke/pangtreevis | refs/heads/master | /dash_app/components/tools.py | import os
import shutil
from datetime import datetime
import json
from pathlib import Path
from typing import Any, Optional
from base64 import b64decode
import pandas as pd
from pangtreebuild.output import PangenomeJSON
import uuid
from io import StringIO
def unjsonify_jsonpangenome(jsonified_pangenome: str) -> PangenomeJSON:
    """Deserialise a PangenomeJSON object from its JSON string form."""
    pangenome = PangenomeJSON.str_to_PangenomeJSON(jsonified_pangenome)
    return pangenome
def jsonify_builtin_types(data: Any) -> str:
    """Serialise builtin Python data structures to a JSON string."""
    serialised = json.dumps(data)
    return serialised
def unjsonify_builtin_types(jsonified_data: str) -> Any:
    """Parse a JSON string back into builtin Python data structures."""
    parsed = json.loads(jsonified_data)
    return parsed
def jsonify_df(df: pd.DataFrame) -> str:
    """Serialise a DataFrame to its JSON string form (default pandas orient)."""
    serialised = df.to_json()
    return serialised
def unjsonify_df(jsonified_df: str) -> pd.DataFrame:
    """Rebuild a DataFrame from the JSON string produced by jsonify_df."""
    rebuilt = pd.read_json(jsonified_df)
    return rebuilt
def decode_content(content: str) -> str:
    """Decode a Dash upload payload ("<header>,<base64 data>") into ASCII text."""
    if not content:
        return ''
    encoded = content.split(',')[1]
    return b64decode(encoded).decode('ascii')
def decode_zip_content(content: str) -> bytes:
    """Decode a Dash upload payload ("<header>,<base64 data>") into raw bytes.

    Fixes the original annotation, which declared ``-> str`` although the
    function returns ``bytes``; the empty-input branch now returns ``b''``
    (still falsy for callers) so the return type is consistent.
    """
    if not content:
        return b''
    encoded = content.split(',')[1]
    return b64decode(encoded)
def create_output_dir() -> Path:
    """Create (and return) a unique directory for one user session's temporary data.

    The directory lives under ``users_temp_data/`` at the repository root and is
    named ``<timestamp>_<uuid>`` so concurrent sessions cannot collide.
    """
    parent_output_dir = Path(os.path.abspath(os.path.join(os.path.dirname(__file__)))).joinpath("../../users_temp_data/")
    unique_suffix = str(uuid.uuid4()).replace("-", "_")
    dir_name = "_".join([get_current_time(), unique_suffix])
    output_dir_path = parent_output_dir.joinpath(dir_name)
    create_dir(output_dir_path)
    return output_dir_path
def get_cwd() -> Path:
    """Return the current working directory as a Path object."""
    return Path.cwd()
def get_current_time() -> str:
    """Return the current date and time formatted as MM_DD__HH_MM_SS."""
    now = datetime.now()
    return now.strftime('%m_%d__%H_%M_%S')
def get_child_dir(parent_dir_path: Path, child_dir_name: str) -> Path:
    """Create a child directory under *parent_dir_path* and return its path."""
    child_dir_path = get_child_path(parent_dir_path, child_dir_name)
    create_dir(child_dir_path)
    return child_dir_path
def create_dir(dir_path: Path):
    """Create the directory at *dir_path* (parent must exist; raises if it already exists)."""
    dir_path.mkdir()
def get_child_path(output_dir: Path, file_name: str) -> Path:
    """Return the path of *file_name* inside *output_dir* (no filesystem access)."""
    return output_dir / file_name
def save_to_file(filecontent: str, filename: Path, mode: Optional[str] = 'w') -> None:
    """Write *filecontent* to *filename* using the given open mode."""
    with open(filename, mode) as output_file:
        output_file.write(filecontent)
def read_file_to_stream(path: Path):
    """Read the whole file at *path* into an in-memory StringIO stream."""
    with open(path) as in_file:
        return StringIO(in_file.read())
def dir_to_zip(dir_name: Path) -> Path:
    """Zip the directory *dir_name* next to itself and return the archive path."""
    shutil.make_archive(dir_name, 'zip', dir_name)
    return Path(str(dir_name) + ".zip")
def remove_file(path: Path) -> None:
    """Delete the file at *path* from disk."""
    os.remove(path)
57,002 | meoke/pangtreevis | refs/heads/master | /dash_app/callbacks/consensustable.py | from dash.dependencies import Input, Output, State
from dash_app.components import consensustable, consensustree
from dash_app.components import tools
from dash_app.layout.layout_ids import *
from dash_app.server import app
@app.callback(
    Output(id_full_consensustable_hidden, 'children'),
    [Input(id_pangenome_hidden, 'children')]
)
def update_full_consensustable_hidden(jsonified_pangenome):
    """Recompute and store the full consensus table when the pangenome changes."""
    if not jsonified_pangenome:
        return []
    pangenome = tools.unjsonify_jsonpangenome(jsonified_pangenome)
    full_table_df = consensustable.get_full_table_data(pangenome)
    return tools.jsonify_df(full_table_df)
@app.callback(
    Output(id_partial_consensustable_hidden, 'children'),
    [Input(id_full_consensustable_hidden, 'children'),
     Input(id_full_consensustree_hidden, 'children'),
     Input(id_consensus_tree_slider, 'value')]
)
def update_partial_table_data(jsonified_full_consensustable: str, jsonified_tree: str, slider_value: float):
    """Store the consensus table restricted to nodes above the slider threshold."""
    if not jsonified_full_consensustable or not jsonified_tree:
        return []
    full_table_df = tools.unjsonify_df(jsonified_full_consensustable)
    tree = consensustree.dict_to_tree(
        tools.unjsonify_builtin_types(jsonified_tree))
    filtered_table = consensustable.remove_smaller_than_slider(full_table_df,
                                                              tree,
                                                              slider_value)
    return tools.jsonify_df(filtered_table)
@app.callback(
    Output(id_consensuses_table, 'data'),
    [Input(id_partial_consensustable_hidden, 'children')]
)
def to_consensustable_content(jsonified_partial_consensustable):
    """Feed the consensuses DataTable rows from the partial-table store."""
    if not jsonified_partial_consensustable:
        return []
    partial_table_df = tools.unjsonify_df(jsonified_partial_consensustable)
    return partial_table_df.to_dict("rows")
@app.callback(
    Output(id_consensuses_table, 'columns'),
    [Input(id_partial_consensustable_hidden, 'children')]
)
def update_columns(jsonified_partial_consensustable):
    """Derive the consensuses DataTable column definitions from its store."""
    if not jsonified_partial_consensustable:
        return [{}]
    partial_table_df = tools.unjsonify_df(jsonified_partial_consensustable)
    return [{"name": column, "id": column} for column in partial_table_df.columns]
@app.callback(
    Output(id_consensuses_table, 'style_data_conditional'),
    [Input(id_partial_consensustable_hidden, 'children')],
    [State(id_full_consensustree_hidden, 'children')]
)
def color_consensuses_table_cells(jsonified_partial_consensustable, jsonified_consensus_tree):
    """Compute conditional cell styling for the consensuses table."""
    if not jsonified_partial_consensustable or not jsonified_consensus_tree:
        return []
    partial_table_df = tools.unjsonify_df(jsonified_partial_consensustable)
    tree = consensustree.dict_to_tree(
        tools.unjsonify_builtin_types(jsonified_consensus_tree))
    return consensustable.get_cells_styling(tree, partial_table_df)
@app.callback(
    Output("consensus_table_container", 'style'),
    [Input(id_full_consensustable_hidden, 'children')])
def show_consensus_tree_container(jsonified_current_consensustree):
    """Show the table container only once the full consensus table exists."""
    return {'display': 'block' if jsonified_current_consensustree else 'none'}
| {"/dash_app/callbacks/visualisation.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py"], "/dash_app/layout/pages.py": ["/dash_app/layout/layout_ids.py"], "/run.py": ["/dash_app/app.py"], "/dash_app/callbacks/consensustree.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/app.py": ["/dash_app/server.py"], "/dash_app/callbacks/consensustable.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/callbacks/mafgraph.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py"], "/dash_app/callbacks/pangtreebuild.py": ["/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py", "/dash_app/server.py"], "/dash_app/callbacks/poagraph.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py", "/dash_app/app.py"]} |
57,003 | meoke/pangtreevis | refs/heads/master | /dash_app/components/poagraph.py | import pickle
from ..components import tools
from flask import Flask, session
from ..layout.colors import colors
import colorsys
from typing import List, Dict, Tuple, Set, Optional, Any, Union
import math
import pandas as pd
from pangtreebuild.output.PangenomeJSON import PangenomeJSON, Sequence
import plotly.graph_objs as go
# Cytoscape element payloads: {'data': {...}, 'position': {...}, 'classes': str}.
CytoscapeNode = Dict[str, Union[str, Dict[str, Any]]]
CytoscapeEdge = Dict[str, Union[str, Dict[str, Any]]]
def HSVToRGB(h, s, v):
    """Convert an HSV colour (all components in [0, 1]) to a 0-255 integer RGB triple."""
    red, green, blue = colorsys.hsv_to_rgb(h, s, v)
    return (int(255 * red), int(255 * green), int(255 * blue))
def get_distinct_colors(n):
    """Return *n* fully saturated colours spread evenly across the hue circle."""
    hue_step = 1.0 / (n + 1)
    return [HSVToRGB(index * hue_step, 1.0, 1.0) for index in range(n)]
def get_poagraph_stylesheet():
    """Cytoscape stylesheet for the poagraph view.

    Class naming convention: ``s_*`` styles sequence elements, ``c_*`` styles
    consensus elements; ``*_short``/``*_aligned`` variants tweak edge shape.
    """
    return [
        {
            'selector': 'node',
            'style': {
                'background-color': 'white',
            }
        },
        {
            # Sequence node: labelled with its base, centered text.
            'selector': '.s_node',
            'style': {
                'background-color': colors['background'],
                # 'border-color': 'green',
                # 'border-width': '0.5px',
                'content': 'data(label)',
                'height': '10px',
                'width': '10px',
                'text-halign': 'center',
                'text-valign': 'center',
                'font-size': '5px',
                # 'shape': 'circle',
            }
        },
        {
            # Consensus node: slightly smaller and translucent.
            'selector': '.c_node',
            'style': {
                'height': '7px',
                'width': '7px',
                'opacity': 0.5
            }
        },
        {
            'selector': 'edge',
            'style': {
            }
        },
        {
            # Sequence edge: width encodes usage, arrow shows direction.
            'selector': '.s_edge',
            'style': {
                'width': 'data(weight)',
                'target-arrow-shape': 'triangle',
                'arrow-scale': 0.5,
                'curve-style': 'bezier'
            }
        },
        {
            # Consensus edge: translucent haystack curve, width from weight.
            'selector': '.c_edge',
            'style': {
                'opacity': 0.5,
                'curve-style': 'haystack',
                'haystack-radius': 0.3,
                'width': 'data(weight)',
                # 'label': 'data(label)'
            }
        },
        {
            'selector': '.c2',
            'style': {
                'line-color': 'red',
            }
        },
        {
            'selector': '.c1',
            'style': {
                'line-color': 'green',
            }
        },
        {
            'selector': '.c_short',
            'style': {
                'curve-style': 'haystack',
            }
        },
        {
            'selector': '.s_short',
            'style': {
                'curve-style': 'haystack',
            }
        },
        {
            # Dashed link between vertically aligned (same-column) nodes.
            'selector': '.s_edge_aligned',
            'style': {
                'line-style': 'dashed',
                'width': '10'
            }
        },
    ]
def _get_pangenome_graph_x_range_faster(max_column_id: int) -> Tuple:
return (-2, min(max_column_id + 2, 2000))
def get_pangenome_figure_faster(jsonpangenome: PangenomeJSON) -> go.Figure:
    """Overview figure: the pangenome "cut width" (number of distinct edges
    crossing each column boundary) with a highlighted draggable window."""
    def get_columns_cut_width(jsonpangenome) -> List[int]:
        # One set of crossing edges per column boundary.
        # NOTE(review): assumes column ids form the contiguous range
        # 0..len(col_ids)-1 - TODO confirm against pangtreebuild output.
        col_ids = set([node.column_id for node in jsonpangenome.nodes])
        columns_cut_widths = [set() for _ in col_ids]
        for sequence in jsonpangenome.sequences:
            for path in sequence.nodes_ids:
                for i in range(len(path)-1):
                    current = path[i]
                    next = path[i+1]  # NOTE(review): shadows the builtin `next`
                    current_col = jsonpangenome.nodes[current].column_id
                    next_col = jsonpangenome.nodes[next].column_id
                    # Register the edge at every column boundary it spans.
                    for k in range(next_col-1, current_col-1, -1):
                        columns_cut_widths[k].add((current, next))
        return [len(x) for x in columns_cut_widths]
    def get_cut_width_trace(columns_cut_width: List[int]) -> go.Scattergl:
        # WebGL line trace - one y value per column boundary.
        return go.Scattergl(
            x=[*range(len(columns_cut_width))],
            y=columns_cut_width,
            hoverinfo='skip',
            mode='lines',
            marker=dict(
                color=colors["accent"]
            ),
            name="Pangenome Cut Width"
        )
    columns_cut_width = get_columns_cut_width(jsonpangenome)
    pangenome_trace = get_cut_width_trace(columns_cut_width)
    pangenome_x_range = _get_pangenome_graph_x_range_faster(len(columns_cut_width)-1)
    max_y = max(columns_cut_width)
    y_range = [0, max_y + 1]
    return go.Figure(
        data=[pangenome_trace],
        layout=go.Layout(
            dragmode='pan',
            yaxis=dict(
                range=y_range,
                fixedrange=True,
                tickvals=[i for i in range(max_y + 1)]
            ),
            xaxis=dict(
                range=[pangenome_x_range[0], pangenome_x_range[1]],
                showgrid=False,
                zeroline=False,
                showline=False,
                title="Drag the chart to the right or left to see details of the highlighted pangenome region."
            ),
            # Rectangle highlighting the middle 30% window whose elements are
            # rendered in detail by get_poagraph_elements_faster().
            shapes=[
                {
                    'type': 'rect',
                    'xref': 'paper',
                    'yref': 'paper',
                    'x0': 0.3,
                    'y0': 0,
                    'x1': 0.6,
                    'y1': 1,
                    'line': {
                        'color': colors["dark_background"],
                        'width': 1,
                    }
                }
            ]
        )
    )
def remove_elements_data_faster(elements_cache_info):
    """Placeholder for cache cleanup; the file removal is currently disabled."""
    # tools.remove_file(elements_cache_info)
    pass
def update_cached_poagraph_elements_faster(user_session_elements_id, jsonpangenome: PangenomeJSON):
    """Compute poagraph Cytoscape elements and pickle them under
    *user_session_elements_id*.

    Node positions are laid out column by column; runs of single-successor /
    single-predecessor nodes ("continuous paths") are packed closer together.
    The cached dict contains:
      - "sn": Cytoscape sequence nodes,
      - "e":  Cytoscape edges keyed by source node id,
      - "cw": node ids per column.
    """
    def get_y(column_id, node_id):
        # First node of a column sits at y=0; later ones stack below it.
        if column_id not in cols_occupancy:
            cols_occupancy[column_id] = {node_id: 0}
            columns[column_id] = [node_id]
            return 0
        else:
            y = max([*cols_occupancy[column_id].values()]) + node_y_distance
            cols_occupancy[column_id][node_id] = y
            columns[column_id].append(node_id)
            return y

    def get_continuous_paths() -> List[List[int]]:
        # A continuous path is a maximal chain in which each node has exactly
        # one successor and that successor has exactly one predecessor.
        continuous_paths: List[List[int]] = []
        for from_node_id, to_node_ids in edges.items():
            followers = list(set(to_node_ids))
            if len(followers) == 1 and len(set(edges_reverted[followers[0]])) == 1:
                path_was_extended = False
                for continuous_path in continuous_paths:
                    if continuous_path[-1] == from_node_id:
                        continuous_path.append(followers[0])
                        path_was_extended = True
                        break
                if not path_was_extended:
                    continuous_paths.append([from_node_id, followers[0]])
        return continuous_paths

    def find_out_y(continuous_path):
        # Lowest y at which the whole path fits without colliding with nodes
        # already placed in the involved columns.
        columns_occupied_y = [cols_occupancy[jsonpangenome.nodes[node_id].column_id]
                              for node_id in continuous_path]
        y_candidate = 0
        while True:
            if any([y_candidate == y and node_id not in continuous_path
                    for co in columns_occupied_y for node_id, y in co.items()]):
                y_candidate += node_y_distance
            else:
                for node_id in continuous_path:
                    cols_occupancy[jsonpangenome.nodes[node_id].column_id][node_id] = y_candidate
                return y_candidate

    def get_cytoscape_node(id, label, x, y, cl, sequences_ids, consensus_ids) -> CytoscapeNode:
        return {'data': {'id': id,
                         'label': f"{label}",
                         'sequences_ids': sequences_ids,
                         'consensus_ids': consensus_ids},
                'position': {'x': x, 'y': y},
                'classes': cl}

    def get_cytoscape_edge(source, target, weight, cl) -> CytoscapeEdge:
        return {'data': {'label': cl, 'source': source, 'target': target, 'weight': weight}, 'classes': cl}

    def get_poagraph_elements() -> Tuple[List[CytoscapeNode], Dict[int, List[CytoscapeEdge]]]:
        sequences_nodes = [get_cytoscape_node(id=node_id,
                                              label=node_info[3],  # node base
                                              x=node_info[0],
                                              y=node_info[1],
                                              cl='s_node',
                                              sequences_ids=nodes_to_sequences[node_id],
                                              consensus_ids=[])
                           for node_id, node_info in enumerate(nodes)]
        all_edges = {}
        for src_node_id, targets in edges.items():
            targets_unique = set(targets)
            # Edge weight (drawn width) scales with the number of sequence
            # paths using the edge.
            all_edges[src_node_id] = [get_cytoscape_edge(src_node_id,
                                                         t,
                                                         math.log10(targets.count(t) + 1),
                                                         's_edge')
                                      for t in targets_unique]
            for t in targets_unique:
                # Zero-weight duplicate marks edges that skip columns.
                if jsonpangenome.nodes[t].column_id != jsonpangenome.nodes[src_node_id].column_id + 1:
                    all_edges[src_node_id].append(get_cytoscape_edge(source=src_node_id,
                                                                    target=t,
                                                                    weight=0,
                                                                    cl='s_edge'))
        for i, node in enumerate(nodes):
            # BUGFIX: aligned edges must target node[2] (the aligned_to node
            # id); the original used node[3], which is the base character and
            # is never a valid node id.
            if node[2] is not None:
                aligned_edge = get_cytoscape_edge(source=i,
                                                 target=node[2],
                                                 weight=1,
                                                 cl='s_edge_aligned')
                all_edges.setdefault(i, []).append(aligned_edge)
        return sequences_nodes, all_edges

    nodes = [None] * len(jsonpangenome.nodes)  # id ~ (x, y, aligned_to, base, column_id)
    nodes_to_sequences = dict()  # id ~ [sequences ids]
    cols_occupancy: Dict[int, Dict[int, int]] = dict()  # column_id ~ {node_id: y}
    columns = [None] * (max([n.column_id for n in jsonpangenome.nodes]) + 1)  # column_id ~ [nodes ids]
    edges = dict()  # node_id ~ [successor node ids, one entry per traversal]
    edges_reverted = dict()  # node_id ~ [predecessor node ids]
    node_width = 10
    node_y_distance = node_width * 1.5
    continuous_path_nodes_distance = node_width * 2 / 3

    for sequence in jsonpangenome.sequences:
        for path in sequence.nodes_ids:
            for i in range(len(path) - 1):
                current_node_id = path[i]
                current_node = jsonpangenome.nodes[current_node_id]
                next_node_id = path[i + 1]
                if current_node_id in nodes_to_sequences:
                    nodes_to_sequences[current_node_id].append(sequence.sequence_int_id)
                    if current_node_id in edges:
                        edges[current_node_id].append(next_node_id)
                    else:
                        edges[current_node_id] = [next_node_id]
                else:
                    # First visit: register the node and place it in its column.
                    nodes_to_sequences[current_node_id] = [sequence.sequence_int_id]
                    x = current_node.column_id * node_y_distance
                    col_id = current_node.column_id
                    y = get_y(col_id, current_node_id)
                    nodes[current_node_id] = (x, y, current_node.aligned_to, current_node.base, col_id)
                    edges[current_node_id] = [next_node_id]
                if next_node_id in edges_reverted:
                    edges_reverted[next_node_id].append(current_node_id)
                else:
                    edges_reverted[next_node_id] = [current_node_id]
            # The loop above stops one short of the path end, so the last node
            # is handled separately (it has no outgoing edge in this path).
            last_node_id = path[-1]
            last_node = jsonpangenome.nodes[last_node_id]
            if last_node_id in nodes_to_sequences:
                nodes_to_sequences[last_node_id].append(sequence.sequence_int_id)
            else:
                nodes_to_sequences[last_node_id] = [sequence.sequence_int_id]
                x = last_node.column_id * node_y_distance
                col_id = last_node.column_id
                y = get_y(col_id, last_node_id)
                # BUGFIX: use last_node here - the original read current_node,
                # which is stale at this point (and unbound for 1-node paths).
                nodes[last_node_id] = (x, y, last_node.aligned_to, last_node.base, col_id)

    # Pack each continuous path horizontally around its midpoint and give the
    # whole path a single, collision-free y.
    continuous_paths = get_continuous_paths()
    for continuous_path in continuous_paths:
        first_node_x = nodes[continuous_path[0]][0]
        last_node_x = nodes[continuous_path[-1]][0]
        middle_point = first_node_x + (last_node_x - first_node_x) / 2
        new_first_node_x = middle_point - 2 / 3 * len(
            continuous_path) // 2 * node_width + node_width / 3
        node_x = new_first_node_x
        path_y = find_out_y(continuous_path)
        for node_id in continuous_path:
            nodes[node_id] = (node_x, path_y, nodes[node_id][2], nodes[node_id][3], nodes[node_id][4])
            node_x += continuous_path_nodes_distance

    sequences_nodes, edges = get_poagraph_elements()
    cached_elements = {"sn": sequences_nodes,
                       "e": edges,
                       "cw": columns}
    with open(user_session_elements_id, 'wb') as cache_file:
        pickle.dump(cached_elements, cache_file)
def get_poagraph_elements_faster(elements_cache_info, relayout_data):
    """Read a visible slice of cached poagraph elements for redrawing.

    Args:
        elements_cache_info: path to the pickle produced by
            update_cached_poagraph_elements_faster ({"sn": nodes, "e": edges
            keyed by source node id, "cw": node ids per column}).
        relayout_data: plotly relayout dict; when it carries an x-axis range
            that range selects the columns, otherwise a default window around
            30%-60% of the columns is shown.

    Returns:
        A list of node elements followed by edge elements.
    """
    with open(elements_cache_info, 'rb') as i:
        poagraph_elements = pickle.load(i)
    max_column_id = len(poagraph_elements["cw"]) + 1
    try:
        min_x = int(relayout_data['xaxis.range[0]'])
        max_x = int(relayout_data['xaxis.range[1]'])
        # Shift/clamp the requested window; at most ~30% of all columns are
        # rendered at once to keep the cytoscape graph responsive.
        min_x = max(0, min_x + int(max_column_id * 0.3))
        max_x = min(min_x + int(0.3 * max_column_id), max_column_id)
    except KeyError:
        # No explicit range in the relayout event (e.g. autorange) -> default window.
        min_x = int(0.3 * max_column_id)
        max_x = int(0.6 * max_column_id)
    c_to_n = poagraph_elements["cw"]
    nodes_ids_to_display = [n for nodes_ids in c_to_n[min_x:max_x + 1] for n in nodes_ids]
    if nodes_ids_to_display:
        # Node ids are assigned column-by-column, so the visible ids form a
        # contiguous range and can be sliced directly.
        nodes = poagraph_elements["sn"][min(nodes_ids_to_display): max(nodes_ids_to_display) + 1]
        edges = [e for src in nodes_ids_to_display for e in poagraph_elements["e"][src]]
    else:
        nodes = []
        edges = []
    return nodes + edges
| {"/dash_app/callbacks/visualisation.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py"], "/dash_app/layout/pages.py": ["/dash_app/layout/layout_ids.py"], "/run.py": ["/dash_app/app.py"], "/dash_app/callbacks/consensustree.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/app.py": ["/dash_app/server.py"], "/dash_app/callbacks/consensustable.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/callbacks/mafgraph.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py"], "/dash_app/callbacks/pangtreebuild.py": ["/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py", "/dash_app/server.py"], "/dash_app/callbacks/poagraph.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py", "/dash_app/app.py"]} |
57,004 | meoke/pangtreevis | refs/heads/master | /dash_app/callbacks/mafgraph.py | from dash_app.components import tools
from ..server import app
from dash.dependencies import Input, Output, State
from ..layout.layout_ids import *
from ..components import mafgraph
@app.callback(Output(id_mafgraph_graph, 'elements'),
              [Input(id_pangenome_hidden, 'children')],
              [State(id_mafgraph_graph, 'elements')])
def show_input_vis(jsonified_pangenome, mafgraph_elements):
    """Populate the mafgraph component from the hidden jsonified pangenome."""
    if not jsonified_pangenome:
        return []
    pangenome = tools.unjsonify_jsonpangenome(jsonified_pangenome)
    nodes, edges = mafgraph.get_graph_elements(pangenome)
    # Nodes first, then edges, in a single elements list.
    return [*nodes, *edges]
@app.callback(
    Output(id_mafgraph_graph, 'style'),
    [Input(id_mafgraph_graph, 'elements')],
    [State(id_mafgraph_graph, 'style')])
def show_mafgraph(mafgraph_elements, mafgraph_style):
    """Reveal the mafgraph component once it has elements to show.

    Fix: the previous version mutated the Dash State dict in place (State
    arguments should be treated as read-only) and raised TypeError when the
    style had never been set.  A copy is returned instead.
    """
    style = dict(mafgraph_style) if mafgraph_style else {}
    if mafgraph_elements:
        style['visibility'] = 'visible'
    return style
57,005 | meoke/pangtreevis | refs/heads/master | /dash_app/callbacks/pangtreebuild.py | import io
import os
from io import StringIO
from pathlib import Path
from typing import Dict, List
import dash_html_components as html
import flask
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from pangtreebuild.consensus.input_types import Blosum, Hbmin, Stop, P
from pangtreebuild.datamodel.DataType import DataType
from pangtreebuild.datamodel.fasta_providers.ConstSymbolProvider import ConstSymbolProvider
from pangtreebuild.datamodel.fasta_providers.FromFile import FromFile
from pangtreebuild.datamodel.fasta_providers.FromNCBI import FromNCBI
from pangtreebuild.datamodel.input_types import Maf, Po, MissingSymbol, MetadataCSV
from pangtreebuild.output.PangenomeJSON import to_json
from dash_app.components import tools
from dash_app.components import pangtreebuild
from dash_app.layout.layout_ids import *
from dash_app.layout.pages import get_task_description_layout
from dash_app.server import app
def get_success_info(message):
    """Build the children for a success notification: check icon + message."""
    icon = html.I(className="fas fa-check-circle correct")
    text = html.P(message, style={"display": "inline", "margin-left": "10px"})
    return [icon, text]
def get_error_info(message):
    """Build the children for an error notification: warning icon + message."""
    icon = html.I(className="fas fa-exclamation-circle incorrect")
    text = html.P(message, style={"display": "inline", "margin-left": "10px"})
    return [icon, text]
# Metadata Validation
@app.callback(Output(id_metadata_upload_state, 'data'),
              [Input(id_metadata_upload, 'contents')],
              [State(id_metadata_upload, 'filename'),
               State(id_session_state, 'data')])
def validate_metadata_file(file_content, file_name, session_state):
    """Validate an uploaded metadata CSV and store the outcome in the store."""
    if file_content is None or file_name is None:
        return None
    decoded = tools.decode_content(file_content)
    error_message = pangtreebuild.metadata_file_is_valid(decoded, file_name)
    # An empty error string means the file validated cleanly.
    return {"is_correct": len(error_message) == 0,
            "filename": file_name,
            "error": error_message}
@app.callback(Output(id_metadata_upload_state_info, 'children'),
              [Input(id_metadata_upload_state, 'data')])
def show_validation_result(upload_state_data):
    """Render the metadata-upload validation outcome as icon + message.

    Fix: the success message previously hard-coded "(unknown)" and the
    extracted filename was never used.
    """
    if not upload_state_data:
        return []
    if upload_state_data["is_correct"]:
        filename = upload_state_data["filename"]
        return get_success_info(f"File {filename} is uploaded.")
    return get_error_info(upload_state_data["error"])
# Multialignment validation
@app.callback(Output(id_multialignment_upload_state, 'data'),
              [Input(id_multialignment_upload, 'contents')],
              [State(id_multialignment_upload, 'filename')])
def validate_multialignment_file(file_content, file_name):
    """Validate an uploaded multialignment (MAF/PO) file.

    Fix: renamed from ``validate_metadata_file`` — the module defined two
    functions with that name, so this one shadowed the metadata validator at
    module level.  Dash registers callbacks at decoration time, so the rename
    does not change the callback wiring.
    """
    if file_content is None or file_name is None:
        return None
    decoded = tools.decode_content(file_content)
    error_message = pangtreebuild.multialignment_file_is_valid(decoded, file_name)
    return {"is_correct": len(error_message) == 0,
            "filename": file_name,
            "error": error_message}
@app.callback(Output(id_multialignment_upload_state_info, 'children'),
              [Input(id_multialignment_upload_state, 'data')])
def show_multialignment_validation_result(upload_state_data):
    """Render the multialignment-upload validation outcome.

    Fix: the success message previously hard-coded "(unknown)" instead of
    interpolating the uploaded filename.
    """
    if not upload_state_data:
        return []
    if upload_state_data["is_correct"]:
        filename = upload_state_data["filename"]
        return get_success_info(f"File {filename} is uploaded.")
    return get_error_info(upload_state_data["error"])
# MAF specific parameters toggling
@app.callback(Output(id_maf_specific_params, 'is_open'),
              [Input(id_multialignment_upload_state, 'data')])
def toggle_maf_specific_params(multialignment_upload_state_data):
    """Show the MAF-only options when the uploaded alignment is a MAF file."""
    state = multialignment_upload_state_data
    return state is not None and "maf" in state["filename"]
@app.callback(Output(id_missing_symbol_param, 'is_open'),
              [Input(id_fasta_provider_choice, 'value')])
def toggle_mising_symbol_param(fasta_provider_choice):
    """Show the missing-symbol input only for the "Symbol" fasta provider."""
    # NOTE(review): function name has a typo ("mising"); kept as-is to avoid
    # renaming a public module-level name.
    return fasta_provider_choice == "Symbol"
@app.callback(Output(id_fasta_upload_param, 'is_open'),
              [Input(id_fasta_provider_choice, 'value')])
def toggle_fasta_upload_param(fasta_provider_choice):
    """Show the fasta upload widget only for the "File" fasta provider."""
    return fasta_provider_choice == "File"
# FASTA VALIDATION
@app.callback(Output(id_fasta_upload_state, 'data'),
              [Input(id_fasta_upload, 'contents'),
               Input(id_session_dir, 'data')],
              [State(id_fasta_upload, 'filename')])
def validate_fasta_file(file_content, session_dir, file_name):
    """Persist the uploaded FASTA (plain or zipped) to the session dir and validate it."""
    if file_content is None or file_name is None or session_dir is None:
        return None
    # Zip archives are decoded and saved as binary; plain FASTA as text.
    is_zip = "zip" in file_name
    if is_zip:
        decoded = tools.decode_zip_content(file_content)
    else:
        decoded = tools.decode_content(file_content)
    fasta_path = tools.get_child_path(Path(session_dir), file_name)
    if is_zip:
        tools.save_to_file(decoded, fasta_path, 'wb')
    else:
        tools.save_to_file(decoded, fasta_path)
    error_message = pangtreebuild.fasta_file_is_valid(fasta_path)
    return {"is_correct": len(error_message) == 0,
            "filename": file_name,
            "error": error_message}
@app.callback(Output(id_fasta_upload_state_info, 'children'),
              [Input(id_fasta_upload_state, 'data')])
def show_fasta_validation_result(upload_state_data):
    """Render the FASTA-upload validation outcome.

    Fix: the success message previously hard-coded "(unknown)" instead of
    interpolating the uploaded filename.
    """
    if not upload_state_data:
        return []
    if upload_state_data["is_correct"]:
        filename = upload_state_data["filename"]
        return get_success_info(f"File {filename} is uploaded.")
    return get_error_info(upload_state_data["error"])
# Blosum Validation
@app.callback(Output(id_blosum_upload_state, 'data'),
              [Input(id_blosum_upload, 'contents'),
               Input(id_missing_symbol_input, 'value'),
               Input(id_fasta_provider_choice, "value")],
              [State(id_blosum_upload, 'filename')])
def validate_blosum_file(file_content, missing_symbol, fasta_provider_choice, file_name):
    """Validate the uploaded BLOSUM matrix — or the bundled default one —
    optionally together with the missing-nucleotide/protein symbol.

    Fix: the old leading guard returned None whenever no file was uploaded,
    which made the "default BLOSUM file" branch below unreachable; the
    default matrix was never validated against the chosen symbol.
    """
    # The symbol only matters when the "Symbol" fasta provider is selected.
    if fasta_provider_choice == "Symbol" and missing_symbol != "":
        symbol = missing_symbol
    else:
        symbol = None
    if file_content is None:
        # No upload: validate the default matrix shipped with pangtreebuild.
        blosum_file_content = tools.read_file_to_stream(pangtreebuild.get_default_blosum_path())
        file_source_info = "default BLOSUM file"
    else:
        blosum_file_content = StringIO(tools.decode_content(file_content))
        file_source_info = f"provided BLOSUM file: {file_name}"
    error_message = pangtreebuild.blosum_file_is_valid(blosum_file_content, symbol)
    if len(error_message) == 0:
        symbol_info = f"It contains symbol for missing nucleotides/proteins: {symbol}." if symbol else ""
        validation_message = f"The {file_source_info} is correct. " + symbol_info
        return {"is_correct": True,
                "filename": file_name,
                "symbol": symbol,
                "validation_message": validation_message}
    else:
        validation_message = f"Error in {file_source_info} or symbol for missing nucleotides/proteins: {symbol}. " \
            f"Reason: {error_message}"
        return {"is_correct": False,
                "filename": file_name,
                "symbol": symbol,
                "validation_message": validation_message}
@app.callback(Output(id_blosum_upload_state_info, 'children'),
              [Input(id_blosum_upload_state, 'data')])
def show_blosum_validation_result(blosum_upload_state_data):
    """Render the BLOSUM validation outcome.

    Fixes: renamed from ``show_validation_result`` (which shadowed the
    metadata callback of the same name at module level) and reuses the
    get_success_info/get_error_info helpers instead of duplicating their
    markup.  The rendered children are identical.
    """
    if not blosum_upload_state_data:
        return []
    validation_message = blosum_upload_state_data["validation_message"]
    if blosum_upload_state_data["is_correct"]:
        return get_success_info(validation_message)
    return get_error_info(validation_message)
# POA specific parameters toggling
@app.callback(Output(id_poa_specific_params, 'is_open'),
              [Input(id_consensus_algorithm_choice, 'value')])
def toggle_poa_specific_params(consensus_algorithm_choice):
    """Show the POA-only options when the "poa" consensus algorithm is chosen."""
    return consensus_algorithm_choice == "poa"
# TREE specific parameters toggling
@app.callback(Output(id_tree_specific_params, 'is_open'),
              [Input(id_consensus_algorithm_choice, 'value')])
def toggle_tree_specific_params(consensus_algorithm_choice):
    """Show the tree-only options when the "tree" consensus algorithm is chosen."""
    return consensus_algorithm_choice == "tree"
# HANDLE SESSION DIR
@app.callback(Output(id_session_dir, 'data'),
              [Input(id_fasta_upload, 'contents')],
              [State(id_session_dir, 'data')])
def create_output_dir(_, session_dir):
    """Lazily create the per-session output directory on first fasta upload."""
    if session_dir is None:
        session_dir = str(tools.create_output_dir())
    return session_dir
# EXAMPLE DATASETS
@app.callback(
    Output("ebola_collapse", "is_open"),
    [Input("collapse-ebola-button", "n_clicks")],
    [State("ebola_collapse", "is_open")],
)
def toggle_ebola_example_collapse(ebola_btn_clicks, is_open):
    """Flip the ebola example panel on each button click; no-op before the first click."""
    return (not is_open) if ebola_btn_clicks else is_open
@app.callback(
    Output("simulated_collapse", "is_open"),
    [Input("collapse_simulated_button", "n_clicks")],
    [State("simulated_collapse", "is_open")],
)
def toggle_collapse(simulated_btn_clicks, is_open):
    """Flip the simulated example panel on each button click; no-op before the first click."""
    return (not is_open) if simulated_btn_clicks else is_open
# RUN PROCESSING
@app.callback(
    Output(id_session_state, 'data'),
    [Input(id_pang_button, 'n_clicks')],
    [State(id_session_state, 'data'),
     State(id_session_dir, 'data'),
     State(id_data_type, "value"),
     State(id_multialignment_upload, "contents"),
     State(id_multialignment_upload, "filename"),
     State(id_fasta_provider_choice, "value"),
     State(id_fasta_upload, "contents"),
     State(id_fasta_upload, "filename"),
     State(id_missing_symbol_input, "value"),
     State(id_blosum_upload, "contents"),
     State(id_blosum_upload, "filename"),
     State(id_consensus_algorithm_choice, "value"),
     State(id_output_configuration, "values"),
     State(id_metadata_upload, "contents"),
     State(id_metadata_upload, "filename"),
     State(id_hbmin_input, "value"),
     State(id_stop_input, "value"),
     State(id_p_input, "value")],
)
def run_pangenome(run_processing_btn_click,
                  session_state: Dict,
                  session_dir: str,
                  datatype: str,
                  multialignment_content: str,
                  multialignment_filename: str,
                  fasta_provider_choice: str,
                  fasta_content: str,
                  fasta_filename: str,
                  missing_symbol: str,
                  blosum_contents: str,
                  blosum_filename: str,
                  consensus_choice: str,
                  output_config: List[str],
                  metadata_content: str,
                  metadata_filename: str,
                  hbmin_value: float,
                  stop_value: float,
                  p_value: float):
    """Run the full pangtreebuild pipeline for the inputs gathered from the UI.

    Builds the pangtreebuild input objects (multialignment, fasta provider,
    BLOSUM matrix, metadata, consensus parameters), runs the build into a
    fresh timestamped directory inside the session dir, zips the results,
    and returns the new session state:
    ``{"last_output_zip": ..., "jsonpangenome": ..., "error": ""}`` on
    success, or a dict whose ``"error"`` is non-empty on failure.
    """
    # NOTE(review): in recent Dash versions n_clicks starts as None, not 0 —
    # confirm this guard still suppresses the initial spurious invocation.
    if run_processing_btn_click == 0:
        raise PreventUpdate()
    if session_state is None:
        session_state = {}
    if session_dir is None:
        session_dir = tools.create_output_dir()
    else:
        session_dir = Path(session_dir)
    # Each run gets its own timestamped subdirectory of the session dir.
    current_processing_output_dir_name = tools.get_child_path(session_dir, tools.get_current_time())
    tools.create_dir(current_processing_output_dir_name)
    # The multialignment format is inferred from the filename.
    if "maf" in multialignment_filename:
        multialignment = Maf(StringIO(tools.decode_content(multialignment_content)), filename=multialignment_filename)
    elif "po" in multialignment_filename:
        multialignment = Po(StringIO(tools.decode_content(multialignment_content)), filename=multialignment_filename)
    else:
        session_state["error"] = "Cannot create Poagraph. Only MAF and PO files are supported."
        return session_state
    missing_symbol = MissingSymbol(missing_symbol) if missing_symbol != "" else MissingSymbol()
    fasta_path = None
    # Choose where missing sequence fragments come from: NCBI download, an
    # uploaded FASTA (plain or zipped), or a constant filler symbol.
    if fasta_provider_choice == "NCBI":
        fasta_provider = FromNCBI(use_cache=True)
    elif fasta_provider_choice == "File":
        fasta_path = tools.get_child_path(current_processing_output_dir_name, fasta_filename).resolve()
        save_mode = "wb" if "zip" in fasta_filename else "w"
        if "zip" in fasta_filename:
            fasta_decoded_content = tools.decode_zip_content(fasta_content)
        else:
            fasta_decoded_content = tools.decode_content(fasta_content)
        tools.save_to_file(fasta_decoded_content, fasta_path, save_mode)
        fasta_provider = FromFile(fasta_path)
    else:
        fasta_provider = ConstSymbolProvider(missing_symbol)
    # Fall back to the BLOSUM matrix bundled with pangtreebuild when none was uploaded.
    if not blosum_contents:
        blosum_path = pangtreebuild.get_default_blosum_path()
        blosum_contents = tools.read_file_to_stream(blosum_path)
    else:
        blosum_path = tools.get_child_path(current_processing_output_dir_name, blosum_filename)
        blosum_contents = tools.decode_content(blosum_contents)
        tools.save_to_file(blosum_contents, blosum_path)
        blosum_contents = StringIO(blosum_contents)
    blosum = Blosum(blosum_contents, blosum_path)
    metadata = MetadataCSV(StringIO(tools.decode_content(metadata_content)), metadata_filename) if metadata_content else None
    # NOTE(review): fasta_path= below receives the upload *filename*, not the
    # resolved fasta_path computed above — confirm this is intended.
    pangenomejson = pangtreebuild.run_pangtreebuild(output_dir=current_processing_output_dir_name,
                                                    datatype=DataType[datatype],
                                                    multialignment=multialignment,
                                                    fasta_provider=fasta_provider,
                                                    blosum=blosum,
                                                    consensus_choice=consensus_choice,
                                                    output_po=True if "po" in output_config else False,
                                                    output_fasta=True if "fasta" in output_config else False,
                                                    missing_symbol=missing_symbol,
                                                    metadata=metadata,
                                                    hbmin=Hbmin(hbmin_value) if hbmin_value else None,
                                                    stop=Stop(stop_value) if stop_value else None,
                                                    p=P(p_value) if p_value else None,
                                                    fasta_path=fasta_filename if fasta_filename else None)
    pangenome_json_str = to_json(pangenomejson)
    # Zip the whole run directory; the last two path components identify the
    # session and the run for the download endpoint.
    current_processing_output_zip = tools.dir_to_zip(current_processing_output_dir_name)
    current_processing_short_name = "/".join(str(current_processing_output_zip).split("/")[-2:])
    return {"last_output_zip": current_processing_short_name,
            "jsonpangenome": pangenome_json_str,
            "error": ""}
# DOWNLOAD RESULTS
@app.callback(Output(id_download_processing_result, "href"),
              [Input(id_session_state, 'data')])
def update_download_result_content(session_state_data):
    """Build the download href for the latest processing result zip.

    Returns an empty href when no result zip has been produced yet.
    (Idiom fix: ``"x" not in d`` instead of ``not "x" in d``.)
    """
    if session_state_data is None:
        raise PreventUpdate()
    if "last_output_zip" not in session_state_data:
        return ""
    return f'/export/pang?n={session_state_data["last_output_zip"]}'
@app.server.route('/export/pang')
def export_pang_result_zip():
    """Serve a previously generated result zip as a file download.

    The ``n`` query argument is a "<session>/<run>.zip" path relative to the
    users_temp_data directory.  Security fix: ``n`` is client-controlled and
    was joined to the base directory unchecked, allowing path traversal
    (e.g. ``n=../../etc/passwd``); the resolved path is now required to stay
    inside users_temp_data.
    """
    zip_short_path = flask.request.args.get('n')
    if not zip_short_path:
        flask.abort(404)
    base_dir = Path(os.path.abspath(os.path.dirname(__file__))).joinpath(
        "../../users_temp_data/").resolve()
    zip_full_path = base_dir.joinpath(zip_short_path).resolve()
    # Reject any path that escapes the results directory.
    if base_dir not in zip_full_path.parents:
        flask.abort(404)
    with open(zip_full_path, 'rb') as f:
        data = io.BytesIO(f.read())
    data.seek(0)
    # NOTE(review): assumes n always has the form "<session>/<file>" — an
    # argument without a slash would raise IndexError here, as before.
    result_id = zip_short_path.split("/")[1]
    return flask.send_file(
        data,
        mimetype='application/zip',
        attachment_filename=f'result_{result_id}',
        as_attachment=True,
        cache_timeout=0
    )
@app.callback(Output(id_poapangenome_result, "is_open"),
              [Input(id_session_state, 'data')])
def open_poapangenome_result(session_state_data):
    """Open the result panel once a processed pangenome is in session state."""
    return session_state_data is not None and "jsonpangenome" in session_state_data
@app.callback(Output(id_poapangenome_result_description, "children"),
              [Input(id_session_state, 'data')])
def get_poapangenome_result_description(session_state_data):
    """Render the task-description layout for the processed pangenome."""
    if session_state_data is None or "jsonpangenome" not in session_state_data:
        return []
    pangenome = tools.unjsonify_jsonpangenome(session_state_data["jsonpangenome"])
    return get_task_description_layout(pangenome)
@app.callback(Output(id_result_icon, "className"),
              [Input(id_session_state, 'data')])
def get_result_icon_classname(session_state_data):
    """Pick the CSS class for the result icon (success check vs. error cross).

    Fix: renamed — this function reused the name
    ``get_poapangenome_result_description`` and silently shadowed the
    callback above at module level.  Dash callbacks are registered at
    decoration time, so the rename does not affect wiring.
    """
    if session_state_data is None or "jsonpangenome" not in session_state_data:
        return ""
    if session_state_data["error"]:
        return "fas fa-times-circle incorrect"
    return "fas fa-check-circle correct"
| {"/dash_app/callbacks/visualisation.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py"], "/dash_app/layout/pages.py": ["/dash_app/layout/layout_ids.py"], "/run.py": ["/dash_app/app.py"], "/dash_app/callbacks/consensustree.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/app.py": ["/dash_app/server.py"], "/dash_app/callbacks/consensustable.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/callbacks/mafgraph.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py"], "/dash_app/callbacks/pangtreebuild.py": ["/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py", "/dash_app/server.py"], "/dash_app/callbacks/poagraph.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py", "/dash_app/app.py"]} |
57,006 | meoke/pangtreevis | refs/heads/master | /dash_app/callbacks/poagraph.py | from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from dash_app.components import poagraph
import dash
from ..components import tools, visualisation
from ..layout.layout_ids import *
from ..server import app
from ..app import draw_poagraph
@app.callback(Output(id_full_pangenome_graph, 'figure'),
              [Input(id_pangenome_upload, 'contents')])
def get_full_pangenome_graph(pangenome_upload_contents):
    """Build the full-pangenome overview figure from an uploaded pangenome."""
    if not (pangenome_upload_contents and draw_poagraph):
        raise PreventUpdate()
    jsonpangenome = visualisation.read_pangenome_upload(pangenome_upload_contents)
    return poagraph.get_pangenome_figure_faster(jsonpangenome)
@app.callback(Output(id_visualisation_session_info, 'data'),
              [Input(id_pangenome_upload, 'contents')])
def update_pangenome_hash(pangenome_upload_contents):
    """Store a hash of the uploaded pangenome to identify its cached data."""
    if not pangenome_upload_contents:
        raise PreventUpdate()
    return visualisation.get_hash(pangenome_upload_contents)
@app.callback(Output(id_elements_cache_info, 'data'),
              [Input(id_visualisation_session_info, 'data')],
              [State(id_elements_cache_info, 'data')])
def update_elements_cache_info(visualisation_session_info, elements_cache_info):
    """Rotate the on-disk element cache: drop the previous cache file (if
    any) and derive a fresh cache path from the session hash."""
    if not (visualisation_session_info and draw_poagraph):
        raise PreventUpdate()
    if elements_cache_info:
        poagraph.remove_elements_data_faster(elements_cache_info)
    new_elem_cache_info = visualisation.get_elem_cache_info(int(visualisation_session_info))
    return str(new_elem_cache_info)
@app.callback(Output(id_poagraph, 'elements'),
              [Input(id_elements_cache_info, 'data'),
               Input(id_full_pangenome_graph, 'relayoutData')],
              [State(id_pangenome_upload, 'contents'),
               State(id_poagraph, 'elements')])
def get_poagraph_elements(elements_cache_info, relayout_data, pangenome_upload_contents, poagraph_elements):
    """Refresh the poagraph elements.

    Triggered either by a new pangenome upload (rebuild the on-disk element
    cache, then redraw) or by zoom/pan on the overview graph (just re-read
    the visible slice from the existing cache).
    """
    def new_pangenome_loaded(trigger):
        # True when the callback fired because the cache path changed,
        # i.e. a new pangenome was uploaded.
        return trigger['prop_id'] == id_elements_cache_info+'.data'
    def ignore_trigger():
        # Nothing to do until every input exists and poagraph drawing is enabled.
        return not dash.callback_context.triggered or\
            not elements_cache_info or\
            not pangenome_upload_contents or\
            not relayout_data or\
            not draw_poagraph
    def cache_new_poagraph_elements():
        # Parse the upload and (re)write the pickled element cache.
        jsonpangenome = visualisation.read_pangenome_upload(pangenome_upload_contents)
        poagraph.update_cached_poagraph_elements_faster(elements_cache_info, jsonpangenome)
    def read_poagraph_elements_to_redraw():
        # Slice of cached elements for the currently visible x-range.
        return poagraph.get_poagraph_elements_faster(elements_cache_info, relayout_data)
    if ignore_trigger():
        raise PreventUpdate
    trigger = dash.callback_context.triggered[0]
    if new_pangenome_loaded(trigger):
        cache_new_poagraph_elements()
    return read_poagraph_elements_to_redraw()
@app.callback(Output(id_full_pangenome_graph, 'style'),
              [Input(id_full_pangenome_graph, 'figure')],
              [State(id_full_pangenome_graph, 'style')])
def expand_graph(fig, s):
    """Make the full-pangenome graph visible once a figure is available.

    Fix: the previous version mutated the Dash State dict in place; State
    arguments should be treated as read-only, so a copy is returned.
    """
    if not fig:
        raise PreventUpdate()
    style = dict(s) if s else {}
    style["visibility"] = "visible"
    return style
57,007 | meoke/pangtreevis | refs/heads/master | /dash_app/server.py | from flask import Flask, session
from flask_session import Session
from dash import Dash
# Flask server hosting the Dash application.
server = Flask('pangenome')
# Server-side session backend selector, picked up via config.from_object below.
SESSION_TYPE = 'filesystem'
server.config.from_object(__name__)
# NOTE(review): `Session` is imported above but never initialised with
# `Session(server)` — server-side sessions appear inactive; confirm.
# NOTE(review): hard-coded secret key checked into source control; load it
# from the environment for any non-local deployment.
server.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
app = Dash(__name__, server=server)
| {"/dash_app/callbacks/visualisation.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py"], "/dash_app/layout/pages.py": ["/dash_app/layout/layout_ids.py"], "/run.py": ["/dash_app/app.py"], "/dash_app/callbacks/consensustree.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/app.py": ["/dash_app/server.py"], "/dash_app/callbacks/consensustable.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py"], "/dash_app/callbacks/mafgraph.py": ["/dash_app/server.py", "/dash_app/layout/layout_ids.py"], "/dash_app/callbacks/pangtreebuild.py": ["/dash_app/layout/layout_ids.py", "/dash_app/layout/pages.py", "/dash_app/server.py"], "/dash_app/callbacks/poagraph.py": ["/dash_app/layout/layout_ids.py", "/dash_app/server.py", "/dash_app/app.py"]} |
57,008 | swipswaps/freefall-1 | refs/heads/master | /freefall/utilities.py | """
utilities.py
A collection of utility functions to support simulations.
"""
import math
def find_vx_vy(*, speed, angle):
    """Decompose a speed and launch angle into Cartesian velocity components.

    Args:
        speed: launch speed (m/s).
        angle: launch angle above the horizontal, in degrees.

    Returns:
        (vx, vy) tuple of the horizontal and vertical components.
    """
    theta = math.radians(angle)
    return speed * math.cos(theta), speed * math.sin(theta)
# This function is from the article Generate Floating Point Range in Python
# by Meenakshi Agarwal
def float_range(A, L=None, D=None):
    """Yield floats from A (inclusive) to L (exclusive) in steps of D.

    Mirrors the builtin ``range`` for floating point values:
    ``float_range(stop)`` or ``float_range(start, stop[, step])``.
    ``float_range(4)`` yields 0.0, 1.0, 2.0, 3.0.  A negative step counts
    down while the current value is greater than the stop value.

    Fixes: ``is None`` instead of ``== None`` for the sentinel checks, and a
    docstring that actually matches the generator's behaviour.

    Note: values are produced by repeated addition, so tiny floating point
    error can accumulate for steps that are not exactly representable
    (e.g. 0.1), and a step of 0 loops forever — as in the original.
    """
    if L is None:
        # Single-argument form: A is actually the stop value.
        L = A + 0.0
        A = 0.0
    if D is None:
        D = 1.0
    while True:
        if D > 0 and A >= L:
            break
        elif D < 0 and A <= L:
            break
        yield A
        A = A + D
| {"/examples/frc_powercell_shooter.py": ["/freefall/falling_objects.py", "/freefall/simulators.py", "/freefall/utilities.py"], "/examples/frc_powercell_drop.py": ["/freefall/falling_objects.py", "/freefall/simulators.py"]} |
57,009 | swipswaps/freefall-1 | refs/heads/master | /examples/frc_powercell_shooter.py | from freefall.falling_objects import frc_power_cell
from freefall.simulators import simulate_earth_surface
from freefall.simulators import terminate_vy_less_zero
from freefall.utilities import find_vx_vy, float_range
import matplotlib.pyplot as plt
X_INITIAL = 0  # m, launch x position
Y_INITIAL = 27 / 40  # m, launch height
SPEED = 5  # m/s, fixed launch speed for the angle sweep
ANGLE = 50  # degrees, fixed launch angle for the speed sweep
fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
# Plot the trajectory over several values of launch speed (angle held fixed)
ax0.set(
    title="Power Cell Trajectory by Speed", xlabel="Distance (m)", ylabel="Height (m)"
)
ax0.grid()
for i in float_range(2, 8, 0.1):
    # Run the simulation for launch speed i (m/s); stop once vy turns negative
    vx_initial, vy_initial = find_vx_vy(speed=i, angle=ANGLE)
    results = simulate_earth_surface(
        frc_power_cell,
        X_INITIAL,
        Y_INITIAL,
        vx_initial,
        vy_initial,
        terminator=terminate_vy_less_zero,
    )
    # Plot the results
    ax0.plot(results.x, results.y)
# Plot the trajectory over several values of launch angle (speed held fixed)
ax1.set(title="Power Cell Trajectory by Angle", xlabel="Distance (m)")
ax1.grid()
for i in float_range(10, 90, 2):
    # Run the simulation for launch angle i (degrees)
    vx_initial, vy_initial = find_vx_vy(speed=SPEED, angle=i)
    results = simulate_earth_surface(
        frc_power_cell,
        X_INITIAL,
        Y_INITIAL,
        vx_initial,
        vy_initial,
        terminator=terminate_vy_less_zero,
    )
    # Plot the results
    ax1.plot(results.x, results.y)
# Display the graph
plt.show()
| {"/examples/frc_powercell_shooter.py": ["/freefall/falling_objects.py", "/freefall/simulators.py", "/freefall/utilities.py"], "/examples/frc_powercell_drop.py": ["/freefall/falling_objects.py", "/freefall/simulators.py"]} |
57,010 | swipswaps/freefall-1 | refs/heads/master | /freefall/simulators.py | """
simulators.py
A collection of simulators and termination functions.
"""
from collections import namedtuple
# Trajectory record returned by the simulators: parallel lists of position
# (x, y), velocity (vx, vy), acceleration (ax, ay) and simulator time t.
SimResult = namedtuple("SimResult", ["x", "y", "vx", "vy", "ax", "ay", "t"])
def terminate_vy_less_zero(x, y, vx, vy, ax, ay, t):
    """Stop the simulation once the most recent y velocity turns negative
    (i.e. the object has passed the apex of its trajectory)."""
    latest_vy = vy[-1]
    return latest_vy < 0
def terminate_y_less_zero(x, y, vx, vy, ax, ay, t):
    """Returns True if the most recent y position is less than zero (the
    object has fallen below the ground), otherwise returns False"""
    return y[-1] < 0
def simulate_earth_surface(
    target,
    x_initial,
    y_initial,
    vx_initial=0,
    vy_initial=0,
    epsilon=0.001,
    gravity=9.81,
    terminator=terminate_y_less_zero,
):
    """
    Simulates the motion of an FallingObject moving through the air near the Earth's surface
    and returns a SimResult object with the FallingObject's trajectory.
    Inputs:
        target (FallingObject type): Represents the object moving through the air
        x_initial, y_initial (float or int): The position of the falling object at t=0
        vx_initial, vy_initial (optional float or int): The velocity of the falling object at
            t=0 (default is 0)
        epsilon (optional float): The size of the time step (default is 1 ms)
        gravity (optional float): The acceleration of gravity in the negative y direction
            (default is 9.81 m/s)
        terminator (optional): A function that tells the simulator when it is time to stop
    Outputs:
        SimResult type object: Contains the results of the simulation
    This simulator uses leapfrog integration:
    acceleration:  [t=0.0] ------> [t=1.0] ------> [t=2.0] ------> [t=3.0]
    velocity:           [t=0.5] ------> [t=1.5] ------> [t=2.5]------> [t=3.5]
    position:      [t=0.0] ------> [t=1.0] ------> [t=2.0] ------> [t=3.0]
    The simulator assumes that gravity and air density are constant. All parameters are
    specified in metric units (meters, kilograms, seconds, etc.).
    The drag model is quadratic: |F_drag| = target.drag * v**2, applied
    independently per axis and always opposing the direction of motion.
    """
    # Simulation results are stored in lists. Each list represents a state variable.
    x = list()  # position in the x direction
    y = list()  # position in the y direction
    vx = list()  # velocity in the x direction
    vy = list()  # velocity in the y direction
    ax = list()  # acceleration in the x direction
    ay = list()  # acceleration in the y direction
    t = list()  # simulator time (i.e. a kind of clock)
    # Setup the initial values of the simulations.  Velocities are advanced a
    # half step so that they stay staggered from positions (leapfrog scheme).
    x.append(x_initial)  # at t=0
    y.append(y_initial)  # at t=0
    ax.append(0)  # at t=0
    ay.append(-gravity)  # at t=0
    vx.append(vx_initial + (epsilon / 2.0) * ax[-1])  # at t=0.5*epsilon
    vy.append(vy_initial + (epsilon / 2.0) * ay[-1])  # at t=0.5*epsilon
    t.append(0)  # t is zero at t=0
    # Loop through the simulation
    while True:
        # Find the next position values of the target
        x.append(x[-1] + epsilon * vx[-1])
        y.append(y[-1] + epsilon * vy[-1])
        # Since aerodynamic drag only slows down targets and never speeds them up, the
        # direction of the drag force depends on the direction of travel. Find the direction
        # in the x direction.
        if vx[-1] >= 0:
            direction = -1
        else:
            direction = 1
        # Apply the drag force (in the correct direction) to the target
        a = direction * vx[-1] ** 2 * target.drag / target.mass
        ax.append(a)
        # Repeat for the y direction
        if vy[-1] >= 0:
            direction = -1
        else:
            direction = 1
        # This time apply the drag force and the effect of gravity
        a = direction * vy[-1] ** 2 * target.drag / target.mass - gravity
        ay.append(a)
        # Find the new values of velocity.  Note: ax[-1]/ay[-1] are the
        # accelerations appended just above — this ordering is what makes the
        # integration leapfrog rather than plain Euler; do not reorder.
        vx.append(vx[-1] + epsilon * ax[-1])
        vy.append(vy[-1] + epsilon * ay[-1])
        # Store the current simulator time
        t.append(t[-1] + epsilon)
        # Use the terminator function decide if the simulation is complete.
        if terminator(x, y, vx, vy, ax, ay, t):
            break
    # Return the results which are packaged neatly in a SimResults object
    return SimResult(x=x, y=y, vx=vx, vy=vy, ax=ax, ay=ay, t=t)
| {"/examples/frc_powercell_shooter.py": ["/freefall/falling_objects.py", "/freefall/simulators.py", "/freefall/utilities.py"], "/examples/frc_powercell_drop.py": ["/freefall/falling_objects.py", "/freefall/simulators.py"]} |
57,011 | swipswaps/freefall-1 | refs/heads/master | /setup.py | from setuptools import setup, find_packages
import re

# Read the package version out of freefall/__init__.py without importing the
# package.  BUG FIX: the original used a bare open(...).read(), leaking the
# file handle; a with-block closes it deterministically.
with open("freefall/__init__.py") as fh:
    version = re.search(
        r'^__VERSION__\s*=\s*"(.*)"', fh.read(), re.M
    ).group(1)

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="freefall",
    version=version,
    packages=find_packages(),
    url="https://github.com/cwstryker/freefall/",
    author="Chadwick Stryker",
    author_email="cwsaccts@stryker5.org",
    description="A Python package module for simulating falling objects with aerodynamic drag.",
    long_description=long_description,
    # The long description is Markdown (README.md); without this PyPI would
    # render it as plain text.
    long_description_content_type="text/markdown",
    python_requires=">=3.6",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Education",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
    ],
)
| {"/examples/frc_powercell_shooter.py": ["/freefall/falling_objects.py", "/freefall/simulators.py", "/freefall/utilities.py"], "/examples/frc_powercell_drop.py": ["/freefall/falling_objects.py", "/freefall/simulators.py"]} |
57,012 | swipswaps/freefall-1 | refs/heads/master | /freefall/__init__.py | """
A Python package module for simulating falling objects with simple aerodynamic drag.
Developed by FIRST Robotics Competition Team 6343 - Steel Ridge Robotics
Strong
Trustworthy
Empowering
Effective
Leadership
"""
# Package version; setup.py extracts this with a regex, so keep the
# simple double-quoted string-literal form.
__VERSION__ = "1.0.0b1"
| {"/examples/frc_powercell_shooter.py": ["/freefall/falling_objects.py", "/freefall/simulators.py", "/freefall/utilities.py"], "/examples/frc_powercell_drop.py": ["/freefall/falling_objects.py", "/freefall/simulators.py"]} |
57,013 | swipswaps/freefall-1 | refs/heads/master | /freefall/falling_objects.py | """
falling_objects.py
A collection of objects with defined mass and drag properties.
"""
from collections import namedtuple

# Value type pairing an object's mass with its aerodynamic drag coefficient
# (drag is in kg/m per the example plots; mass presumably kg -- confirm).
FallingObject = namedtuple("FallingObject", ["mass", "drag"])

# A POWER CELL game piece from the 2020 FIRST Robotics Competition (Infinite Recharge)
frc_power_cell = FallingObject(mass=0.15, drag=0.0022)
| {"/examples/frc_powercell_shooter.py": ["/freefall/falling_objects.py", "/freefall/simulators.py", "/freefall/utilities.py"], "/examples/frc_powercell_drop.py": ["/freefall/falling_objects.py", "/freefall/simulators.py"]} |
57,014 | swipswaps/freefall-1 | refs/heads/master | /examples/frc_powercell_drop.py | from freefall.falling_objects import frc_power_cell
from freefall.simulators import simulate_earth_surface
import matplotlib.pyplot as plt
# Initial conditions: drop from rest at ~6.17 m.
X_INITIAL = 0  # m
Y_INITIAL = 6.172  # m
VX_INITIAL = 0  # m/s
VY_INITIAL = 0  # m/s

# Run the surface-of-Earth simulation for the power cell.
results = simulate_earth_surface(
    frc_power_cell, X_INITIAL, Y_INITIAL, VX_INITIAL, VY_INITIAL
)

# Plot the results (height vs. simulator time)
fig, ax = plt.subplots()
ax.plot(results.t, results.y)

# Format and annotate the graph
ax.grid()
ax.text(0.01, 0.4, f"Drag Coefficient = {frc_power_cell.drag:.5} kg/m")
ax.text(0.01, 0.1, f"Flight Time = {results.t[-1]:.4} s")
plt.xlabel("Time (s)")
plt.ylabel("Height (m)")
plt.title("Simulation of Power Cell Free Fall")

# Display the graph
plt.show()
| {"/examples/frc_powercell_shooter.py": ["/freefall/falling_objects.py", "/freefall/simulators.py", "/freefall/utilities.py"], "/examples/frc_powercell_drop.py": ["/freefall/falling_objects.py", "/freefall/simulators.py"]} |
57,025 | breqdev/wchat | refs/heads/master | /weschat-server.py | import socket, select
# Listening TCP socket for the chat server.
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow quick restart
server_sock.bind(("0.0.0.0", 8888))
server_sock.listen(16)

CONNECTIONS = [server_sock]  # all live sockets: the server plus handshaken clients
UNAMES = {}  # peer address -> chosen username
class Room:
    """A chat room: tracks member peer-addresses and relays lines to them."""

    def __init__(self, name):
        self.name = name
        self.users = []  # peer addresses (host, port) currently in the room

    def admit(self, user):
        """Add a user (peer address) to the room and greet them."""
        self.users.append(user)
        self.send(user, "You have entered the room " + self.name)

    def expel(self, user):
        """Notify a user and remove them from the room (no-op if absent)."""
        try:
            self.send(user, "You have left the room " + self.name)
            self.users.remove(user)
        except ValueError:
            pass  # user was not a member

    def recv(self, message):
        """Relay a message to every member of the room."""
        # Iterate a snapshot: send() may expel dead members mid-loop, and
        # mutating self.users while iterating it would skip members.
        for user in list(self.users):
            self.send(user, message)

    def lookupSocket(self, user):
        """Map a peer address back to its live socket, or None if gone."""
        for connection in CONNECTIONS:
            try:
                if connection.getpeername() == user:
                    return connection
            except OSError:
                pass  # listening socket or already-closed connection
        return None

    def send(self, user, message):
        """Send one line to a user; on failure, drop them from the room."""
        sock = self.lookupSocket(user)
        if sock is None:
            return  # no live socket for this address
        try:
            sock.send((message + "\n").encode("UTF-8"))
        except OSError:
            sock.close()
            self.expel(user)
            # BUG FIX: the original removed `user` (an address tuple) from
            # CONNECTIONS, which holds sockets, raising ValueError out of
            # the except handler; remove the dead socket instead.
            if sock in CONNECTIONS:
                CONNECTIONS.remove(sock)

    def lookupUser(self, user):
        """Return True if the user is a member of this room."""
        return user in self.users

    def autoExpel(self):
        """Drop members with no live socket; close the room when empty."""
        # Iterate a copy: removing from self.users while iterating it
        # silently skips the element after each removal.
        for user in list(self.users):
            if self.lookupSocket(user) is None:
                self.users.remove(user)
        if len(self.users) == 0 and self.name != "hall":  # Hall never closes
            rooms.remove(self)
def broadcast(sock, message):
    """Send message to every connected client, pruning dead sockets.

    The `sock` parameter (the originating socket) is accepted for
    interface compatibility but is not used.
    """
    payload = bytes(message, "UTF-8")
    # BUG FIX: the original iterated CONNECTIONS directly while removing
    # from it, which skips the entry following each removal; iterate a
    # snapshot instead.
    for s in list(CONNECTIONS):
        if s is server_sock:
            continue  # never write to the listening socket
        try:
            s.send(payload)
        except OSError:
            try:  # Sometimes it can't be closed...
                s.close()
                CONNECTIONS.remove(s)
            except (OSError, ValueError):
                pass
def name(addr):
    """Display name for a peer: its registered username, else str(addr)."""
    try:
        return UNAMES[addr]
    except KeyError:
        return str(addr)
# The hall is the default room every client joins after handshaking;
# Room.autoExpel never closes it even when empty.
hall = Room("hall")
rooms = [hall]  # registry of every open room
def roomLookup(name):
    """Find an open room by name; None if no such room exists."""
    matches = (candidate for candidate in rooms if candidate.name == name)
    return next(matches, None)
def lookupUser(user):
    """Return the room the user currently occupies, or None."""
    return next((candidate for candidate in rooms if user in candidate.users), None)
## COMMANDS
class Commander:
    """Parses and executes the slash-commands clients may send."""

    def __init__(self):
        # Command prefix -> bound handler method.
        self.COMMANDS = {
            "/uname": self.uname,
            "/join": self.join,
            "/help": self.help,
            "/room": self.room
        }

    def uname(self, data, addr, sock):
        """Register a username for this peer address ('/uname <name>')."""
        uname = data[7:].strip("\r\n")  # len("/uname ") == 7
        UNAMES[addr] = uname

    def join(self, data, addr, sock):
        """Move the user into the named room, creating it if needed."""
        currentRoom = lookupUser(addr)
        currentRoom.expel(addr)
        rname = data[6:].strip("\r\n")  # len("/join ") == 6
        room = roomLookup(rname)
        if room is None:
            room = Room(rname)
            rooms.append(room)
        room.admit(addr)

    def help(self, data, addr, sock):
        """Send the help menu directly to the requesting socket."""
        helpmessage = '''WesChat help menu:
/uname [username]: Change your username
/join [room]: Join a chatroom
/help: Display this help
/room: Display the current room name
'''
        sock.send(helpmessage.encode("UTF-8"))

    def room(self, data, addr, sock):
        """Tell the user which room they are currently in."""
        room = lookupUser(addr)
        # BUG FIX: the original assigned a fallback string and then
        # unconditionally accessed .name on it, raising AttributeError
        # whenever the lookup failed.
        if room is None:
            msg = "Error looking up room\n"
        else:
            msg = "You are in the room " + room.name + "\n"
        sock.send(msg.encode("UTF-8"))

    def command(self, data, addr, sock):
        """Dispatch raw input to the first handler whose prefix matches."""
        for command in self.COMMANDS.keys():
            if data.startswith(command):
                self.COMMANDS[command](data, addr, sock)
commander = Commander()
##

WAITING = []  # sockets that have connected but not yet handshaken
HANDSHAKE = "Ni"  # magic string a client must send before being admitted

try:
    while True:
        # Periodic housekeeping: drop dead members, close empty rooms.
        for room in rooms:
            room.autoExpel()
        # Poll (zero timeout) every socket, including pre-handshake ones.
        read_sockets = select.select(CONNECTIONS+WAITING, [], [], 0)[0]
        for sock in read_sockets:
            if sock == server_sock:
                # New TCP connection: park it in WAITING until it handshakes.
                sfd, addr = server_sock.accept()
                #CONNECTIONS.append(sfd)
                WAITING.append(sfd)
                #hall.admit(addr)
                print("Client "+name(addr)+" connected, awaiting handshake")
                #hall.recv("<server> Client "+name(addr)+" connected")
            else:
                try:
                    data = sock.recv(4096)
                    addr = sock.getpeername()
                    if sock in WAITING:
                        if data.decode() == HANDSHAKE:
                            # Promote to a full client and drop into the hall.
                            print(name(addr)+" handshaked")
                            WAITING.remove(sock)
                            hall.admit(addr)
                            hall.recv("Client "+name(addr)+" connected")
                            CONNECTIONS.append(sock)
                        else:
                            # Wrong handshake: abandon the socket.
                            #print("Received "+data.decode()+" not "+HANDSHAKE)
                            WAITING.remove(sock)
                            continue
                    # NOTE(review): on a successful handshake execution falls
                    # through here, so the handshake bytes are also relayed
                    # as a normal "Ni" chat message -- confirm intended.
                    if data:
                        data = data.decode()
                        if data.startswith("/"):
                            commander.command(data, addr, sock)
                        else:
                            # Plain text: relay to the sender's current room.
                            msg = "<"+name(addr)+"> "+data
                            #print(msg)
                            userRoom = lookupUser(addr)
                            userRoom.recv(msg)
                            #print(hall.users)
                            #print(lookupUser(addr))
                            #print(addr in hall.users)
                            #hall.recv(msg)
                except Exception as e:
                    # Any failure while servicing a client counts as a disconnect.
                    print(e)
                    print("Client "+name(addr)+" disconnected")
                    try: # sometimes closing/removing will fail
                        sock.close()
                        CONNECTIONS.remove(sock)
                        hall.recv("<server> Client "+name(addr)+" disconnected")
                        try:
                            del UNAMES[addr]
                        except KeyError:
                            pass
                        continue
                    except:
                        continue
finally:
    server_sock.close()
| {"/weschat.py": ["/weschat_api.py"]} |
57,026 | breqdev/wchat | refs/heads/master | /weschat.py | from weschat_api import WesChat
import tkinter
import json
import os

window = tkinter.Tk()
window.title("WesChat")

# Let the user pick a theme file from ./themes (JSON with a "font" entry).
files = os.listdir("themes")
print("Choose a theme")
for i, file in enumerate(files):
    print(i, file)
num = int(input("Enter the theme number: "))
with open("themes/"+files[num]) as file:
    theme = json.loads(file.read())

font = tuple(theme["font"])  # tkinter expects the font spec as a tuple

wc = None  # WesChat client instance; created lazily by connect()
def connect():
    """Create the chat client from the host/password entry fields.

    Rebinds the module-level `wc` so post() and the message-poll loop
    can use the connection.
    """
    # Only `wc` is assigned here; the original also declared hostEntry and
    # passEntry global, which is unnecessary for reads.
    global wc
    wc = WesChat(hostEntry.get(), passEntry.get())
def post(blah=None):
    """Send the compose box's text to the server and clear the box.

    `blah` absorbs the event object when invoked via the <Return> binding.
    """
    # Neither wc nor postEntry is assigned, so the original's
    # `global wc, postEntry` declaration was unnecessary.
    wc.post(postEntry.get())
    postEntry.delete(0, tkinter.END)
# Widget construction (all widgets share the theme font).
connectBtn = tkinter.Button(window, text="Connect", command=connect,
                            font=font)
postBtn = tkinter.Button(window, text="Post", command=post,
                         font=font)
hostLabel = tkinter.Label(window, text="Server IP:", font=font)
passLabel = tkinter.Label(window, text="Password:", font=font)
hostEntry = tkinter.Entry(window, font=font)
passEntry = tkinter.Entry(window, font=font, show="*")  # mask the handshake/password
postEntry = tkinter.Entry(window, font=font)
msgLabel = tkinter.Text(window, font=font, wrap=tkinter.WORD)  # chat transcript

# Grid layout: connection controls on top, transcript in the middle,
# compose row at the bottom.
connectBtn.grid(row=0, column=2, sticky=tkinter.E+tkinter.N)
postBtn.grid(row=3, column=2, sticky=tkinter.E, rowspan=2)
hostLabel.grid(row=0, column=0, sticky=tkinter.W+tkinter.N)
passLabel.grid(row=1, column=0, sticky=tkinter.W)
hostEntry.grid(row=0, column=1, sticky=tkinter.W+tkinter.E+tkinter.N)
passEntry.grid(row=1, column=1, sticky=tkinter.W+tkinter.E+tkinter.N)
postEntry.bind("<Return>", post)  # Enter key also posts
postEntry.grid(row=3, column=0, columnspan=2, sticky=tkinter.W+tkinter.E)
msgLabel.grid(row=2, column=0, columnspan=3, sticky=tkinter.W)
window.rowconfigure(0, weight=1)
window.columnconfigure(1, weight=1)

# Hand-rolled event loop: pump tkinter, then poll the server for new text.
while True:
    window.update()
    try:
        msgLabel.insert(tkinter.END, wc.get_messages())
    except:
        pass  # wc is None before connect, or no message is pending
    msgLabel.see(tkinter.END)  # keep the transcript scrolled to the bottom
| {"/weschat.py": ["/weschat_api.py"]} |
57,027 | breqdev/wchat | refs/heads/master | /weschat_api.py | import socket
from select import select
import time
class WesChat:
    """Minimal TCP client for the WesChat protocol.

    Connecting sends the server's handshake string, waits briefly, then
    registers a username with the "/uname" command.
    """

    def __init__(self, host, handshake, port=8888, name=None):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((host, port))
        self.post(handshake)  # server admits us only after the handshake
        # Idiom fix: `is None` instead of `== None` for the None check.
        if name is None:
            name = socket.gethostname()
        time.sleep(1)  # give the server a moment to process the handshake
        self.post("/uname " + name)

    def post(self, message):
        """Send a message (or slash-command) to the server."""
        self.s.send(message.encode())

    def get_messages(self):
        """Non-blocking read: return pending text, or None if nothing waits."""
        if select([self.s], [], [], 0)[0] == [self.s]:
            return self.s.recv(4096).decode()

    def wait_for_message(self):
        """Block until the server sends something, then return it."""
        select([self.s], [], [])
        return self.s.recv(4096).decode()
| {"/weschat.py": ["/weschat_api.py"]} |
57,042 | romanishikov/VocaLights | refs/heads/main | /VocaLights.py | import VoiceCommands as VC # Module for retrieving voice input and giving output
import lifxlan as lx
import phue
import time
import sys
import re
from threading import Thread
# Brand identifiers used as keys throughout configuration.
LIFX_BRAND = "lifx"
PHUE_BRAND = "phue"

# Per-brand list of parameters configure_lights accepts; any other
# parameter is dropped for that brand.
PARAMS = {
    LIFX_BRAND: ["light_names", "mac_addresses", "ip_addresses", "default_colors", "default_brightness",
                 "max_brightness", "min_brightness", "brightness_rate", "color_rate", "flash_rate",
                 "colorama_rate", "disco_rate", "flicker_rate"],
    PHUE_BRAND: ["light_names", "light_ids", "ip_addresses", "default_colors", "default_brightness",
                 "max_brightness", "min_brightness", "flash_rate",
                 "colorama_rate", "disco_rate", "flicker_rate"]
}

# Fallback values for omitted parameters.  Note the different brightness
# scales: LifX uses 0-65535, PhilipsHue uses 0-254 (see Lights docstring).
DEFAULTS = {
    LIFX_BRAND: {
        "light_names": "light 1",
        "default_colors": "GOLD",
        "default_brightness": 32500,
        "max_brightness": 65000,
        "min_brightness": 32500,
        "brightness_rate": 3000,
        "color_rate": 3000,
        "flash_rate": 3,
        "colorama_rate": 3,
        "disco_rate": 0.1,
        "flicker_rate": 0.03,
    },
    PHUE_BRAND: {
        "light_names": "light 1",
        "light_ids": [1],
        "default_colors": "GOLD",
        "default_brightness": 254,
        "max_brightness": 254,
        "min_brightness": 5,
        "flash_rate": 1,
        "colorama_rate": 3,
        "disco_rate": 0.1,
        "flicker_rate": 0.03,
    }
}

# Spoken confirmation used by Activation._voice_response for each
# recognised command.
SPEECH_RESPONSES = {
    "turn on": "turning on",
    "turn off": "turning off",
    "change color": "changing color",
    "dim": "dimming",
    "raise": "raising",
    "colorama on": "turning colorama on",
    "colorama off": "turning colorama off",
    "disco on": "activating disco",
    "disco off": "stopping disco",
    "flicker on": "activating flicker",
    "flicker off": "stopping flicker",
    "flash on": "activating flash",
    "flash off": "stopping flash",
}
class Lights:
    """
    A configuration class that logs light objects and stores them for later use.

    Required to pass the brand parameter which corresponds to the type of light (e.g. lifx, phue).
    All other parameters with the exceptions of ip_addresses and/or mac_addresses (see below)
    will be given default values as outlined in the global variables above. The user can specify
    any parameter values they like as long as they are the appropriate datatype.

    The parameters are as follows:

    - ip_addresses: Corresponds to the ip address associated with a
            a) light bulb (LifX) and/or
            b) bridge (PhilipsHue)
        At minimum an IP address is required to set up the current brands (lifx also requires mac address).
        * Subtype String. Must be either a list/tuple or single string (e.g. '198.221.1.111')

    - light_names: The name(s) of the light(s) the user will refer to (e.g. 'bathroom light').
        * Subtype String. Must be either a list/tuple or single string (comma separation is not registered)

    - light_ids: Unique to PhilipsHue. This refers to the ID number of the light when it was setup (e.g. 1, 2, 3, etc.)
        * Subtype Integer. Must be a small integer or a list/tuple of small integer(s)

    - mac_addresses: Unique to LifX. To connect to a light bulb both mac address and ip address must be specified.
        * Subtype String. Must be either a list/tuple or single string (e.g. 'D0:12:34:56:78:90')

    - default_colors: Specifies the color of the light when the program is first run.
        Colors available are: red, orange, yellow, blue, green, cyan, purple, pink, white, and gold.
        This parameter is not case sensitive as all values are converted to lowercase upon config.
        * Subtype String. Must be either a list/tuple or single string (one value defaults all lights to that color)

    - default_brightness: Specifies how bright a light will be on runtime.
        Lifx uses a different range than PhilipsHue. Max brightness for LifX is 65535
        while max brightness for PhilipsHue is 254. 0 is the lowest for both.
        * Subtype Integer. Must be either a list/tuple or single integer

    - max_brightness: Specifies the upper brightness range when executing commands such as 'raise lights' or 'flash'.
        * Subtype Integer. Must be either a list/tuple or single integer

    - min_brightness: Specifies the lower brightness range when executing commands such as 'dim lights' or 'flash'.
        * Subtype Integer. Must be either a list/tuple or single integer

    - brightness_rate: Unique for LifX. Specifies how fast a light is raised or dimmed (in ms).
        * Subtype Integer. Must be either a list/tuple or single integer

    - color_rate: Unique for LifX. Specifies how fast a light's color will change (in ms).
        * Subtype Integer. Must be either a list/tuple or single integer

    - flash_rate: Specifies the speed that the light(s) will flash in and out (in s, specific to the flash command).
        * Subtype Integer. Must be either a list/tuple or single integer

    - colorama_rate: Specifies the speed that the light(s) smoothly transition into new colors (in s).
        * Subtype Integer. Must be either a list/tuple or single integer

    - disco_rate: Similar to colorama except without smooth transition and at a faster rate (in s).
        * Subtype Integer. Must be either a list/tuple or single integer (decimal)

    - flicker_rate: Similar to flash except without smooth transition and at a faster rate (in s).
        * Subtype Integer. Must be either a list/tuple or single integer (decimal)
    """
    def __init__(self):
        # Configured controller instances (LifX / PhilipsHue); populated by
        # configure_lights and consumed by LightAPI.
        self.light_objects = []
def configure_lights(self, brand, ip_addresses, light_names=None, light_ids=None, mac_addresses=None,
default_colors=None, default_brightness=None, max_brightness=None, min_brightness=None,
brightness_rate=None, color_rate=None, flash_rate=None, colorama_rate=None,
disco_rate=None, flicker_rate=None):
if len([light_names]) != len([ip_addresses]):
print("WARNING: Number of lights and addresses do not match which may affect processing speed of requests.")
settings = {
"ip_addresses": ip_addresses,
"light_names": light_names,
"light_ids": light_ids,
"mac_addresses": mac_addresses,
"default_colors": default_colors,
"default_brightness": default_brightness,
"max_brightness": max_brightness,
"min_brightness": min_brightness,
"brightness_rate": brightness_rate,
"color_rate": color_rate,
"flash_rate": flash_rate,
"colorama_rate": colorama_rate,
"disco_rate": disco_rate,
"flicker_rate": flicker_rate
}
for key in list(settings.keys()):
if key not in PARAMS[brand]:
del settings[key]
continue
if None in [settings[key]]: # Get the default setting if none specified
settings[key] = DEFAULTS[brand][key]
if not isinstance(settings[key], (tuple, list)): # Convert
settings[key] = [settings[key]]
if len(settings[key]) != len(settings["light_names"]):
settings[key] = settings[key] * len(settings["light_names"])
params = list(settings.values())
if brand == "lifx":
try:
if None in settings["mac_addresses"]:
raise Exception("Insufficient parameters passed for 'mac_addresses'. "
"Make sure MAC addresses are included for all lights.")
lifx = self.LifX(*params)
self.light_objects.append(lifx)
except Exception as Ex:
print("Connection to LifX could not be established: " + str(Ex))
if brand == "phue":
try:
if None in settings["light_ids"]:
raise Exception("Insufficient parameters passed for 'light_ids'. "
"Make sure each light has it's associated id assigned.")
philips = self.PhilipsHue(*params)
self.light_objects.append(philips)
except Exception as Ex:
print("Connection to phue could not be established: " + str(Ex))
class LightAPI:
"""
Works with all the Light subclasses in order to execute them
at once based on the command passed.
The class takes in the subclasses (e.g. LifX, PhilipsHue) configured
by the user and stores them in a dictionary where they can be run
together once all other requests have been completed.
"""
def __init__(self, light_objects):
self.light_names = {}
self.light_objects = light_objects
for obj in self.light_objects:
self.light_names[obj] = GlobalOps(obj).get_light_names()
def run_commands(self, words):
words = words.lower() # Consistency across commands
requested_lights = []
# Look for any light mentioned by name
for obj in self.light_objects:
for name in self.light_names[obj]:
if name in words:
requested_lights.append(obj)
if len(requested_lights) == 0: # If not light specified, default to all lights
requested_lights = self.light_objects
responses = []
for obj in requested_lights:
response = obj.process_command(words)
responses.append(response)
return responses
    class LifX:
        """Controller for LifX bulbs (driven over the LAN via lifxlan).

        Builds one lx.Light per configured bulb and maps spoken commands to
        Light method calls; looping effects (flash/disco/colorama/flicker)
        run on daemon threads toggled via threadVars.
        """
        def __init__(self, ip_addresses, light_names, mac_addresses, default_colors, default_brightness,
                     max_brightness, min_brightness, brightness_rate, color_rate,
                     flash_rate, colorama_rate, disco_rate, flicker_rate):
            self.LIGHT_NAMES = light_names
            # Color values according to lifxlan.Light module specifications
            self.LX_COLORS = {"red": [65535, 65535, 65535, 3500], "orange": [6500, 65535, 65535, 3500],
                              "yellow": [9000, 65535, 65535, 3500], "green": [16173, 65535, 65535, 3500],
                              "cyan": [29814, 65535, 65535, 3500], "blue": [43634, 65535, 65535, 3500],
                              "purple": [50486, 65535, 65535, 3500], "pink": [58275, 65535, 47142, 3500],
                              "white": [58275, 0, 65535, 5500], "gold": [58275, 0, 65535, 2500]}
            # Light commands along with their respective methods and arguments.
            # Spec forms interpreted by process_command: dict = color choice,
            # list = threaded effect [start?, name, period, phases, rate],
            # "dynaInt<N>" = default brightness target.
            self.LX_COMMANDS = {
                "turn on": {"set_power": "on"},  # Turn power on
                "turn off": {"set_power": "off"},  # Turn power off
                "change color": {"set_color": self.LX_COLORS},  # Change color of bulb
                "dim": {"set_brightness": "dynaInt" + str(min_brightness[0]), "rate": brightness_rate[0]},
                # Lower the brightness
                "raise": {"set_brightness": "dynaInt" + str(max_brightness[0]), "rate": brightness_rate[0]},
                # Raise brightness
                "colorama on": {"set_color": [True, "colorama", colorama_rate[0], self.LX_COLORS, color_rate[0]]},
                # Casual color
                "colorama off": {"set_color": [False, "colorama"]},
                "disco on": {"set_color": [True, "disco", disco_rate[0], self.LX_COLORS, disco_rate[0]]},
                # Intense color array
                "disco off": {"set_color": [False, "disco"]},
                "flash on": {"set_brightness": [True, "flash", flash_rate[0],
                                                {"in": max_brightness[0], "out": min_brightness[0]},
                                                brightness_rate[0]]},
                "flash off": {"set_brightness": [False, "flash"]},
                "flicker on": {"set_power": [True, "flicker", flicker_rate[0], {"on": "on", "off": "off"}, 0]},
                "flicker off": {"set_power": [False, "flicker"]},
            }
            self.lights = {}  # Stores lx.Light objects
            self.lightThreads = {}  # For purposes of multi-threading functions
            self.threadVars = {}  # Dynamic True/False values to trigger threaded function
            for i, name in enumerate(light_names):
                self.lights[name] = lx.Light(mac_addresses[i], ip_addresses[i])  # Set the Light objects
                self.lights[name].set_color(getattr(lx, default_colors[i].upper()))  # Set the default color
                self.lights[name].set_brightness(default_brightness[i])  # Set the default brightness

        def process_command(self, words):
            """Match `words` against LX_COMMANDS and execute on the named
            (or all) lights; returns a dict keyed SUCCESS/INFO/ERROR."""
            args = []
            light_name = None
            for name in self.LIGHT_NAMES:
                if name in words:
                    light_name = name
                    break
            # If no light name was specified, default to all lights
            if light_name:
                lx_names = [light_name]
            else:
                lx_names = self.LIGHT_NAMES
            args.append(lx_names)
            try:
                # Go through each command and see if it matches the spoken words
                for cmd, value in self.LX_COMMANDS.items():
                    if cmd in words:
                        # NOTE(review): for "dim"/"raise" this loop also visits
                        # the "rate" entry, whose spec is an int, making
                        # `"dynaInt" in specs` raise TypeError and take the
                        # ERROR path -- confirm whether dim/raise ever succeed.
                        for method, specs in value.items():
                            args.append(method)
                            if isinstance(specs, dict):  # Specific to color adjustment
                                for spec in words.split():
                                    if spec in specs.keys():
                                        args.append([specs[spec]])
                                        break
                            elif isinstance(specs, list):  # Specific to continuous functions
                                for name in lx_names:  # If no light name specified default to all
                                    if specs[0]:
                                        global_cmd = specs[1]
                                        self.threadVars[specs[1]] = True
                                        self.lightThreads[name] = Thread(target=getattr(GlobalOps(self), global_cmd),
                                                                         args=(args, self.LX_COMMANDS[cmd]),
                                                                         daemon=True)
                                        self.lightThreads[name].start()
                                    else:
                                        self.threadVars[specs[1]] = False
                            elif "dynaInt" in specs:  # Specific to brightness adjustment
                                try:
                                    # Spoken percentage -> lifx scale (100% ~= 65000).
                                    args.append([int(int(re.findall(r'\d+', words)[-1]) * 650), value["rate"]])
                                except IndexError:  # Without specifying percent, dynamically dim or raise the lights
                                    args.append([int(re.findall(r'\d+', specs)[-1]), value["rate"]])
                            else:
                                args.append([specs])  # Power on/off
                        if len(args) >= 3:
                            self.execute_command(args)
                        return {"SUCCESS": {cmd: lx_names}, "Class": LIFX_BRAND}
                return {"INFO": "Voice command '" + str(words) + "' does not exist.", "Class": LIFX_BRAND}
            except Exception as Ex:
                return {"ERROR": str(Ex), "Class": LIFX_BRAND}

        def execute_command(self, args):
            # args = [light-names, Light-method-name, method-arguments].
            for name in args[0]:
                getattr(self.lights[name], args[1])(*args[2])

        def run_thread(self, args, elements):
            # Loop a multi-phase effect (flash in/out, color cycle, ...)
            # until its flag is cleared by the matching "... off" command.
            while self.threadVars[elements[args[1]][1]]:
                for key, val in elements[args[1]][3].items():
                    self.execute_command(args + [[val] + [elements[args[1]][4]]])
                    time.sleep(elements[args[1]][2])
                if not self.threadVars[elements[args[1]][1]]:
                    break
    class PhilipsHue:
        """Controller for Philips Hue bulbs, driven through a phue.Bridge."""
        def __init__(self, ip_addresses, light_names, light_ids, default_colors,
                     default_brightness, max_brightness, min_brightness,
                     flash_rate, colorama_rate, disco_rate, flicker_rate):
            self.LIGHT_NAMES = light_names
            self.PHUE_LIGHT_IDS = {}  # light name -> bridge light id
            # All bulbs are controlled through the first bridge address.
            self.bridge = phue.Bridge(ip_addresses[0])
            self.lights = {}
            self.lightThreads = {}  # threaded looping effects, keyed by light id
            self.threadVars = {}  # effect name -> keep-running flag
            # CIE xy chromaticity coordinates for each supported color name.
            self.PHUE_COLORS = {"red": [1, 0], "orange": [0.55, 0.4], "yellow": [0.45, 0.47],
                                "green": [0, 1], "cyan": [0.196, 0.252], "blue": [0, 0],
                                "purple": [0.285, 0.202], "pink": [0.36, 0.23],
                                "white": [0.31, 0.316], "gold": [0.4, 0.35]}
            # Spoken command -> bridge attribute that set_light() will change.
            self.PHUE_COMMANDS = {
                "turn on": "on",  # Turn power on
                "turn off": "on",  # Turn power off
                "change color": "xy",  # Change color of bulb
                "dim": "bri",  # Lower the light brightness
                "raise": "bri",  # Raise the light brightness
                "colorama on": "xy",  # Casual array of colors
                "colorama off": "xy",
                "disco on": "xy",  # Intense array of colors
                "disco off": "xy",
                "flicker on": "on",  # Malfunctioning paradigm
                # NOTE(review): "flicker off" maps to attribute "off", unlike
                # every other power command which uses "on" -- confirm against
                # the bridge API.
                "flicker off": "off",
                "flash on": "bri",  # Smooth signal
                "flash off": "bri",
            }
            # Spoken command -> value spec: True/False switch, color table,
            # threaded-effect list, or "dynaInt<N>" brightness target.
            self.PHUE_KEYWORDS = {
                "change color": self.PHUE_COLORS,
                "turn on": True,
                "colorama on": [True, "colorama", colorama_rate[0], self.PHUE_COLORS],
                "disco on": [True, "disco", disco_rate[0], self.PHUE_COLORS],
                "flicker on": [True, "flicker", flicker_rate[0], {"on": True, "off": False}],
                "flash on": [True, "flash", flash_rate[0], {"in": max_brightness[0], "out": min_brightness[0]}],
                "turn off": False,
                "colorama off": [False, "colorama"],
                "disco off": [False, "disco"],
                "flicker off": [False, "flicker"],
                "flash off": [False, "flash"],
                "dim": "dynaInt" + str(min_brightness[0]),
                "raise": "dynaInt" + str(max_brightness[0]),
            }
            # Set the defaults
            for i, name in enumerate(light_names):
                self.PHUE_LIGHT_IDS[name] = light_ids[i]
                isOn = self.bridge.get_light(light_ids[i])["state"]["on"]
                if not isOn:
                    self.bridge.set_light(light_ids[i], "on", True)  # Can only alter when on
                    self.bridge.set_light(light_ids[i], "xy", self.PHUE_COLORS[default_colors[i].lower()])
                    self.bridge.set_light(light_ids[i], "on", False)
                else:
                    self.bridge.set_light(light_ids[i], "xy", self.PHUE_COLORS[default_colors[i].lower()])
                self.bridge.set_light(light_ids[i], "bri", default_brightness[i])

        def process_command(self, words):
            """Match `words` against PHUE_COMMANDS and apply via the bridge;
            returns a dict keyed SUCCESS/INFO/ERROR."""
            args = []
            light_id = None
            for name in self.LIGHT_NAMES:
                if name in words:
                    light_id = self.PHUE_LIGHT_IDS[name]
                    break
            if light_id:
                ids = [light_id]
            else:  # If no light name was specified, default to all lights
                ids = list(self.PHUE_LIGHT_IDS.values())
            args.append(ids)
            try:  # Go through each command and see if it matches the spoken words
                for cmd, value in self.PHUE_COMMANDS.items():
                    if cmd in words:
                        args.append(value)
                        if isinstance(self.PHUE_KEYWORDS[cmd], dict):  # For color related functions
                            for spec in words.split():
                                if spec in self.PHUE_KEYWORDS[cmd].keys():
                                    args.append(self.PHUE_KEYWORDS[cmd][spec])
                                    break
                        elif isinstance(self.PHUE_KEYWORDS[cmd], list):  # For looped functions
                            for lid in ids:  # If no light name specified default to all
                                if self.PHUE_KEYWORDS[cmd][0]:
                                    global_cmd = self.PHUE_KEYWORDS[cmd][1]  # Get function name in GlobalOps class
                                    self.threadVars[self.PHUE_KEYWORDS[cmd][1]] = True
                                    self.lightThreads[lid] = Thread(target=getattr(GlobalOps(self), global_cmd),
                                                                    args=(args, self.PHUE_KEYWORDS[cmd]), daemon=True)
                                    self.lightThreads[lid].start()
                                else:
                                    self.threadVars[self.PHUE_KEYWORDS[cmd][1]] = False
                        elif "dynaInt" in str(self.PHUE_KEYWORDS[cmd]):  # For brightness adjustment
                            try:
                                # Spoken percentage -> Hue scale (100% ~= 254).
                                args.append(int(int(re.findall(r'\d+', words)[-1]) * 2.54))
                            except IndexError:  # Without specifying percent, dynamically dim or raise the lights
                                args.append(int(re.findall(r'\d+', self.PHUE_KEYWORDS[cmd])[-1]))
                        else:
                            args.append(self.PHUE_KEYWORDS[cmd])  # For main switch
                        if len(args) >= 3:
                            response = self.execute_command(args)  # PhilipsHue returns error responses
                            if list(response[0][0])[0] == "error":
                                raise Exception(response[0][0]["error"]["description"])
                        return {"SUCCESS": {cmd: ids}, "Class": PHUE_BRAND}
                return {"INFO": "Voice command '" + str(words) + "' does not exist.", "Class": PHUE_BRAND}
            except Exception as Ex:
                return {"ERROR": str(Ex), "Class": PHUE_BRAND}

        def execute_command(self, args):
            # args = [light ids, attribute, value]; the bridge returns
            # per-light result dicts that callers inspect for errors.
            return self.bridge.set_light(*args)

        def run_thread(self, args, elements):
            # Loop a multi-phase effect until its flag is cleared.
            while self.threadVars[elements[1]]:
                for key, val in elements[3].items():
                    result = self.execute_command(args + [val])
                    for i, light in enumerate(args[0]):  # Unique for phue, check status to make sure error is captured
                        result_status = list(result[i][0].keys())[0]
                        if result_status == "error":  # Exit thread for the light(s) that are non-responsive to requests
                            print(f"Light {i + 1} error: " + str(result[i][0][result_status]["description"]))
                            return
                    time.sleep(elements[2])
                if not self.threadVars[elements[1]]:
                    break
class GlobalOps:
    """
    Operations shared across the Lights controller subclasses (LifX,
    PhilipsHue, ...).  Every looped effect hands control back to the
    owning controller's run_thread, which differs per brand.
    """

    def __init__(self, LightObj):
        self.LightObj = LightObj  # the controller instance being driven

    def _delegate(self, args, elements):
        # Common body for all four effect entry points.
        self.LightObj.run_thread(args, elements)

    # The effect entry points are behaviourally identical delegators, so
    # they simply alias the shared implementation.
    colorama = _delegate
    disco = _delegate
    flash = _delegate
    flicker = _delegate

    def get_light_names(self):
        """Names of the lights this controller manages."""
        return self.LightObj.LIGHT_NAMES
class Activation(Lights):
    """
    Using the Lights object and its subclasses we initialize our program and
    set up speech recognition and speech response.

    Before running the main function, ensure all light objects have been configured
    using the configure_lights method inherited from the Lights object. When setup is complete
    then the run() function below can be used. The voice_response parameter can be set to True
    in order to receive a response back from the machine on the status of a completed request.
    """
    def __init__(self, pause_threshold=0.5):
        super().__init__()
        self.vIn = VC.CommandInputs(pause_threshold)  # microphone -> text
        self.vOut = VC.CommandOutputs()  # text -> spoken audio

    def run(self, voice_response=False, debug=False):
        # Main loop: listen, dispatch to the lights, optionally speak the outcome.
        if len(self.light_objects) == 0:
            raise Exception("ERROR: No lights have been configured for usage. To set up lights "
                            "use configure_lights and pass it the type of light (e.g. lifx, phue), "
                            "the light names, and any additional customizable parameters listed. ")
        light_api = self.LightAPI(self.light_objects)
        while True:
            words = self.vIn.get_voice_input()
            if "exit voice" in words:
                sys.exit(0)  # spoken escape hatch terminates the program
            elif words == "Audio not understood":
                continue  # recognizer failed; listen again
            response = light_api.run_commands(words)
            if debug:
                print(response)
            if voice_response:
                self._voice_response(response)

    def _voice_response(self, response):
        # Summarise per-controller results and speak each distinct outcome.
        speech_responses = set()  # Get unique speech responses
        quantity = 0  # Count how many lights were affected
        for result in response:
            if list(result)[0] == "SUCCESS":
                command = list(result["SUCCESS"])[0]
                quantity += len(list(result["SUCCESS"].values())[0])  # Count successful light calls
                noun = "lights" if quantity > 1 else list(result["SUCCESS"].values())[0][0]  # Get light name
                speech_responses.add(SPEECH_RESPONSES[command] + " " + noun)
            elif list(result)[0] == "INFO":
                speech_responses.add(result["INFO"])
            elif list(result)[0] == "ERROR":
                speech_responses.add(result["ERROR"])
        for res in speech_responses:
            self.vOut.speak(res)
| {"/VocaLights.py": ["/VoiceCommands.py"]} |
57,043 | romanishikov/VocaLights | refs/heads/main | /VoiceCommands.py | import speech_recognition as sr # Module for getting microphone audio to text
import pyttsx3 # Module for computer speaking back to user
class CommandInputs:
    """Turns microphone audio into text via Google Speech Recognition."""
    def __init__(self, pause_threshold=0.5):
        self.recognizer = sr.Recognizer()
        self.pause_duration = pause_threshold  # Time it gives to register a phrase once completed (in seconds)

    def get_voice_input(self):
        """Listen for one phrase and return its transcription, or an
        explanatory error string when recognition fails."""
        # obtain audio from the microphone
        with sr.Microphone() as source:
            print("Say something!")
            self.recognizer.pause_threshold = self.pause_duration
            audio = self.recognizer.listen(source)
        # recognize speech using Google Speech Recognition (network call)
        try:
            return self.recognizer.recognize_google(audio)
        except sr.UnknownValueError:
            return "Audio not understood"
        except sr.RequestError as e:
            return "Could not request results from Google Speech Recognition service; {0}".format(e)
class CommandOutputs:
    """Speaks responses aloud via text-to-speech (SAPI5, i.e. Windows-only)."""
    def __init__(self):
        self.engine = pyttsx3.init('sapi5')  # NOTE: sapi5 driver exists only on Windows
        self.voices = self.engine.getProperty('voices')
        self.engine.setProperty('voice', self.voices[0].id)  # first installed voice

    def speak(self, audio):
        """Queue the text and block until it has been spoken."""
        self.engine.say(audio)
        self.engine.runAndWait()
| {"/VocaLights.py": ["/VoiceCommands.py"]} |
57,054 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /forward_kinetics.py | import numpy as np
import argparse
from urdf_parser_py.urdf import URDF
URDF_PATH = "./locobot_description_v3.urdf"
'''
Helper Functions
'''
import numpy as np
import argparse
from urdf_parser_py.urdf import URDF
from math import *
F_path = "./locobot_description_v3.urdf"
def getWristPose(joint_angle_list, joint_names, axis_table, joint_table):
    """Forward kinematics: chain each joint's fixed URDF transform with its
    axis-angle rotation and return the 4x4 wrist pose in the base frame."""
    pose = np.eye(4)
    for idx, joint in enumerate(joint_names):
        origin = joint_table[joint]
        # Fixed transform from the URDF joint origin (rpy rotation + xyz offset).
        fixed = np.eye(4)
        fixed[:3, :3] = E_to_R(*origin.rpy)
        fixed[:3, -1] = origin.xyz
        pose = pose.dot(fixed)
        # Rotation about the joint axis by the commanded angle.
        ax_x, ax_y, ax_z = axis_table[joint]
        joint_rot = np.eye(4)
        joint_rot[:3, :3] = A_to_R(joint_angle_list[idx], ax_x, ax_y, ax_z)
        pose = pose.dot(joint_rot)
    return pose
def getWristJacobian(joint_angle, joint_names, joint_table, axis_table, wrist_pose):
    """Build the 6x5 geometric Jacobian of the wrist point.

    Rows 0-2 hold the linear-velocity part, rows 3-5 the angular part.
    `wrist_pose` should be the pose returned by getWristPose for the same
    joint angles, since its translation is used as the wrist position.
    """
    T = np.eye(4)
    position = wrist_pose[:3,-1]
    Jacobian = np.zeros((6,5))
    for i,j in enumerate(joint_names):
        new = np.eye(4)
        TF_M = np.eye(4)
        # Calculate transform matrix
        xyz = joint_table[j].xyz
        r, p, y = joint_table[j].rpy
        TF_M[:3, :3] = E_to_R(r, p, y)
        TF_M[:3, -1] = xyz
        # Apply transform matrix
        T = T.dot(TF_M)
        # Note: y (pitch's sibling) is deliberately reused for the axis
        # z-component here; the rpy values were already consumed above.
        x,y,z = axis_table[j]
        new[:3,:3] = A_to_R(joint_angle[i],x,y,z)
        T = T.dot(new)
        # Joint axis expressed in the base frame (after this joint's rotation).
        b1 = T[:3,:3].dot(np.array(axis_table[j]))
        # Vector from this joint's origin to the wrist point.
        b2 = np.array(position)-T[:3,-1]
        Jacobian[-3:, i] = b1
        # Linear-velocity contribution: axis x (wrist - joint origin).
        Jacobian[:3,i] = np.cross(b1,b2)
    return Jacobian
def A_to_R(angle, x, y, z):
    """Rodrigues formula: rotation matrix for `angle` radians about the
    axis (x, y, z) (assumed unit length — TODO confirm callers normalize)."""
    c = cos(angle)
    s = sin(angle)
    t = 1 - c
    return np.array([
        [c + x * x * t,     x * y * t - z * s, x * z * t + y * s],
        [y * x * t + z * s, c + y * y * t,     y * z * t - x * s],
        [z * x * t - y * s, z * y * t + x * s, c + z * z * t],
    ])
def E_to_R(r, p, y):
    """Convert roll/pitch/yaw Euler angles to a rotation matrix
    (ZYX order: R = Rz(yaw) . Ry(pitch) . Rx(roll)).

    Fix: the yaw matrix previously used +sin(y) in BOTH off-diagonal
    entries, which is not a valid rotation (determinant cos^2 - sin^2);
    the standard Rz has -sin(y) at row 0, col 1 — matching
    RPY_to_Rotation in SAT.py.
    """
    rot_z = np.array([[cos(y), -sin(y), 0],
                      [sin(y), cos(y), 0],
                      [0, 0, 1]])
    rot_y = np.array([[cos(p), 0, sin(p)],
                      [0, 1, 0],
                      [-sin(p), 0, cos(p)]])
    rot_x = np.array([[1, 0, 0],
                      [0, cos(r), -sin(r)],
                      [0, sin(r), cos(r)]])
    return rot_z.dot(rot_y.dot(rot_x))
def get_cuboid_config():
    """Load the arm's joint chain from the URDF and print the wrist pose
    and Jacobian for the configuration in `joint_angles`.

    NOTE(review): `joint_angles` is left empty, so getWristPose will index
    past it for any non-empty chain — the length assert is commented out;
    confirm the intended angles before use.
    """
    joint_angles = []
    # assert len(joint_angles) == 5, "Incorrect number of joints specified."
    joint_table, axis_table = {}, {}
    robot = URDF.from_xml_file(F_path)
    # Joints (links=False) along the chain from the arm base to the gripper.
    joint_names = robot.get_chain('arm_base_link','gripper_link',links=False)
    for i in joint_names:
        joint_info = robot.joint_map[i]
        joint_table[i] = joint_info.origin
        axis_table[i] = joint_info.axis
    wrist_pose = getWristPose(joint_angles, joint_names, axis_table, joint_table)
    jacobian = getWristJacobian(joint_angles, joint_names, joint_table, axis_table, wrist_pose)
    print("Wrist pose: {}".format(np.array_str(np.array(wrist_pose), precision=3)))
    print("Jacobian: {}".format(np.array_str(np.array(jacobian), precision=3)))
def getJointH(theta, axis):
    """Homogeneous transform for a joint rotation.

    Currently a stub: both inputs are ignored and the 4x4 identity is
    returned.
    """
    return np.identity(4, dtype=float)
def getURDFData():
    """Parse the URDF and return, per joint of the arm chain, its axis,
    origin position (xyz) and origin orientation (rpy) as numpy arrays."""
    robot = URDF.from_xml_file(URDF_PATH)
    chain = robot.get_chain('arm_base_link', 'gripper_link', links=False)
    axes = []
    positions = []
    orientations = []
    for name in chain:
        joint = robot.joint_map[name]
        axes.append(np.array(joint.axis))
        positions.append(np.array(joint.origin.xyz))
        orientations.append(np.array(joint.origin.rpy))
    return axes, positions, orientations
def getHmatrixList(position, orientation):
    """Return one 4x4 homogeneous matrix per joint, carrying only the
    translation part in the last column.

    NOTE(review): `orientation` is length-checked but never applied — the
    rotation sub-block stays identity. Confirm whether the rpy values
    should be folded in here.
    """
    assert len(position) == len(orientation)
    matrices = []
    for pos in position:
        H = np.eye(4)
        H[:3, -1] = pos
        matrices.append(H)
    return matrices
| {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,055 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /Dijkstra.py | def dijsktra(samples, edges, edge_length, start):
visited = {start: 0}
path = {}
nodes = set(samples)
while nodes:
cur_node = None
for node in nodes:
if node in visited:
if cur_node is None:
cur_node = node
elif visited[node] < visited[cur_node]:
cur_node = node
if cur_node is None:
break
nodes.remove(cur_node)
current_weight = visited[cur_node]
for edge_node in edges[cur_node]:
weight = current_weight + edge_length[(cur_node, edge_node)]
if edge_node not in visited or weight < visited[edge_node]:
visited[edge_node] = weight
path[edge_node] = cur_node
return path
| {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,056 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /SAT.py | import numpy as np
import math
from plot_cuboid import *
import time
'''
Transform the roll pitch yaw to rotation matrix
'''
def RPY_to_Rotation(RPY_list):
    """Rotation matrix from [roll, pitch, yaw]
    (ZYX convention: R = Rz(yaw) . Ry(pitch) . Rx(roll))."""
    roll, pitch, yaw = RPY_list[0], RPY_list[1], RPY_list[2]
    cy, sy = math.cos(yaw), math.sin(yaw)
    cp, sp = math.cos(pitch), math.sin(pitch)
    cr, sr = math.cos(roll), math.sin(roll)
    Rz = np.array([
        [cy, -sy, 0],
        [sy, cy, 0],
        [0, 0, 1]
    ])
    Ry = np.array([
        [cp, 0, sp],
        [0, 1, 0],
        [-sp, 0, cp]
    ])
    Rx = np.array([
        [1, 0, 0],
        [0, cr, -sr],
        [0, sr, cr]
    ])
    return Rz.dot(Ry.dot(Rx))
# the cubes do not collide with each other in one axis only when the max of one cube is smaller than another cube
# under the same axis
def collide_check(ref_min, ref_max, _min, _max):
    """Return True when the intervals [ref_min, ref_max] and [_min, _max]
    overlap on this axis; disjoint intervals mean no collision is possible
    along it."""
    return not (ref_min > _max or ref_max < _min)
# Detect collision in each dimension
def collision_detect(ref_corner, cuboid_corner):
    """Axis-aligned overlap test between two (8, 3) corner arrays.

    True only when the extents of the two corner sets overlap on the x, y
    and z axes simultaneously.
    """
    per_axis = []
    for axis in range(3):
        lo = np.min(cuboid_corner[:, axis])
        hi = np.max(cuboid_corner[:, axis])
        ref_lo = np.min(ref_corner[:, axis])
        ref_hi = np.max(ref_corner[:, axis])
        per_axis.append(collide_check(ref_lo, ref_hi, lo, hi))
    return (per_axis[0] and per_axis[1] and per_axis[2])
def get_cuboid_config():
    """Return the sequence of target arm configurations.

    Each row is [5 joint angles in radians, 2 gripper finger offsets in
    meters].

    Fix: removed a leftover `time.sleep(120)` that stalled every caller
    for two minutes before returning this constant table.
    """
    deg_to_rad = np.pi / 180.
    return [[-80 * deg_to_rad, 0, 0, 0, 0, -0.03, -0.03],
            [-70 * deg_to_rad, 0 * deg_to_rad, -60 * deg_to_rad, 0, 20 * deg_to_rad, -0.03, -0.03],
            [50 * deg_to_rad, 0 * deg_to_rad, -50 * deg_to_rad, 10 * deg_to_rad, 10 * deg_to_rad, -0.03, -0.03],
            [85 * deg_to_rad, 20 * deg_to_rad, -10 * deg_to_rad, 0, 15 * deg_to_rad, -0.03, -0.03],
            [20 * deg_to_rad, 30 * deg_to_rad, -0 * deg_to_rad, -10 * deg_to_rad, 4 * deg_to_rad, -0.03, -0.03],
            [0 * deg_to_rad, 60. * deg_to_rad, -75 * deg_to_rad, -75 * deg_to_rad, 0, -0.03, -0.03],
            ]
def Check_Collision(cuboid_ref, cuboid):
    """Separating-axis (SAT-style) collision test between two oriented
    cuboids. Each argument is a dict with "Origin" (center), "Orientation"
    (rpy) and "Dimension" (full extents). Returns True when no tested
    projection axis separates the two corner sets.

    NOTE(review): corners are transformed with `corners.dot(R)`, i.e. R is
    applied transposed to the row vectors — confirm this matches the
    convention of RPY_to_Rotation. Cross-product axes are not normalized
    and can be zero vectors for parallel edges, making those tests vacuous.
    """
    # Sign pattern generating the 8 corners of a box from its half-extents.
    T_matrix = np.array([[1,1,1],[1,-1,1],[-1,-1,1],[-1,1,1],[1,1,-1],[1,-1,-1],[-1,-1,-1],[-1,1,-1]])
    Projection_matrix = np.array([[1,0,0],[0,1,0],[0,0,1]]) # Here stores the information of the projection axis
    # Calculate all possible projection axis for both cubes in respect to the base frame
    Projection_axis = []
    Rotation_ref = RPY_to_Rotation(cuboid_ref["Orientation"])
    Rotation_cub = RPY_to_Rotation(cuboid["Orientation"])
    PA_ref = Projection_matrix.dot( Rotation_ref)
    PA_cub = Projection_matrix.dot(Rotation_cub)
    Projection_axis.append(PA_ref)
    Projection_axis.append(PA_cub)
    # Cross products of each ref face axis with each other-cube face axis
    # (the remaining 9 SAT candidate axes).
    for i in range(3):
        base_axis = PA_ref[:,i].reshape(3)
        PA = np.zeros((3,3))
        for j in range(3):
            a = np.cross(base_axis, PA_cub[:,j].reshape(3))
            PA[:,j] = a.reshape(3)
        Projection_axis.append(PA)
    # Rotate each corner point relative to cube's center (do not consider the position relative to base frame's origin)
    cuboid_corner_initial = np.array(
        [cuboid["Dimension"][0] / 2, cuboid["Dimension"][1] / 2, cuboid["Dimension"][2] / 2])
    cuboid_corner_dimension = np.tile(cuboid_corner_initial, (8, 1))
    cuboid_corner = cuboid_corner_dimension * T_matrix
    # Rotate each corner point relative to cube's center (do not consider the position relative to base frame's origin)
    ref_corner_initial = np.array(
        [cuboid_ref["Dimension"][0] / 2, cuboid_ref["Dimension"][1] / 2, cuboid_ref["Dimension"][2] / 2])
    ref_corner_dimension = np.tile(ref_corner_initial, (8, 1))
    ref_corner = ref_corner_dimension * T_matrix
    # Add origin to get the absolute cordinates of each corner point
    ref_corners = ref_corner.dot( Rotation_ref) + np.array(cuboid_ref["Origin"])
    cub_corners = cuboid_corner.dot( Rotation_cub )+ np.array(cuboid["Origin"])
    # Uncomment below to plot current position of two cubes
    # plot_linear_cube(ref_corners, cub_corners, color='red')
    # Overlap must hold on every candidate axis set for a collision.
    Collision_or_not = True
    for PA in Projection_axis:
        cuboid_corner_new = cub_corners.dot( PA.T)
        ref_corner_new = ref_corners.dot(PA.T)
        Collision_Decision = collision_detect(ref_corner_new, cuboid_corner_new)
        Collision_or_not = Collision_Decision and Collision_or_not
    return Collision_or_not
def collosion_detect(cuboid_1, cuboid_2):
    """Convenience wrapper: SAT collision test with cuboid_1 as the
    reference cuboid."""
    return Check_Collision(cuboid_1, cuboid_2)  # In reference of cuboid1
def main():
    """Smoke test: two axis-aligned boxes touching at x = 3; prints the
    SAT collision result."""
    cuboid_1 = {"Origin": [0, 0, 0], "Orientation": [0, 0, 0], "Dimension": [3, 1, 2]}
    cuboid_2 = {"Origin": [3, 0, 0], "Orientation": [0, 0, 0], "Dimension": [3, 1, 1]}
    print(collosion_detect(cuboid_1,cuboid_2))

if __name__ == '__main__':
    main() | {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,057 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /test.py | int A[MAX],N;
// Lomuto partition over the global array A: uses A[high] as the pivot,
// moves smaller-or-equal elements to the front, places the pivot at its
// final sorted position and returns that index.
int partitions(int low, int high)
{
    int pivot = A[high];
    int store = low - 1;
    for (int scan = low; scan < high; scan++)
    {
        if (A[scan] <= pivot)
        {
            store++;
            swap(A[store], A[scan]);
        }
    }
    swap(A[store + 1], A[high]);
    return store + 1;
}
// Quickselect over the global array A: returns the kth smallest element
// (1-based) within A[left..right].
// NOTE(review): an out-of-range kth never matches and loops forever —
// confirm callers guarantee 1 <= kth <= right - left + 1.
int selection_algorithm(int left, int right, int kth)
{
    while (true)
    {
        int pivotIndex = partitions(left, right); // pivot's final sorted position
        int leftLen = pivotIndex - left + 1;      // elements up to and including the pivot
        if (kth == leftLen)
            return A[pivotIndex];
        if (kth < leftLen)
        {
            right = pivotIndex - 1;
        }
        else
        {
            kth -= leftLen;
            left = pivotIndex + 1;
        }
    }
} | {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,058 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /Random_sample.py | import numpy as np
from Robot_construct import *
from check_collision import *
def Random_sample():
    """Draw a random collision-free 5-joint configuration in (-pi/2, pi/2).

    Fix: the recursive retry's result was dropped (the inner
    `Random_sample()` call had no `return`), so the function returned None
    whenever the first draw collided. The retry is now a loop, which also
    avoids unbounded recursion in cluttered scenes.
    """
    while True:
        sample_joint_config = (np.random.rand(5) - 0.5) * np.pi
        robot = Robot_construct(sample_joint_config)
        if not check_collision(robot):
            return sample_joint_config
| {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,059 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /PRM.py | import numpy as np
from Random_sample import *
from calculate_dist import *
from local_planner import *
def PRM(n_samples, K):
    """Build a probabilistic roadmap of n_samples configurations,
    connecting each new sample to up to K nearest collision-free
    neighbours.

    Returns (samples, edges, edge_length): samples is a list of
    joint-configuration tuples; edges maps each sample to its connected
    neighbours; edge_length maps (a, b) pairs to their distance.

    Fixes: the original sliced a plain list with numpy syntax
    (`samples[:i,:]` raises TypeError), used numpy arrays as dict keys
    (unhashable), and never created edge lists for the start/end nodes
    before appending to them. Configurations are now stored as hashable
    tuples and converted to arrays at the numeric call sites.
    """
    deg_to_rad = np.pi / 180.
    start = (-80. * deg_to_rad, 0., 0., 0., 0.)
    end = (0., 60 * deg_to_rad, -75 * deg_to_rad, -75 * deg_to_rad, 0.)
    samples = [start, end]
    edges = {start: [], end: []}
    edge_length = {}
    for i in range(2, n_samples):
        X = tuple(Random_sample())
        samples.append(X)
        # Distances from X to every previously added sample.
        dist = calculate_dist(np.asarray(X), np.asarray(samples[:i]))
        order = np.argsort(dist)
        edges[X] = []
        # Try to connect X to its K nearest neighbours.
        for rank in range(min(K, len(dist))):
            j = order[rank]
            neighbor = samples[j]
            if local_planner(np.asarray(X), np.asarray(neighbor)):
                edges[X].append(neighbor)
                edges[neighbor].append(X)
                edge_length[(X, neighbor)] = dist[j]
                edge_length[(neighbor, X)] = dist[j]
    return samples, edges, edge_length
| {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,060 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /robot_control.py | # Import system libraries
import argparse
import os
import sys
# Modify the following lines if you have problems importing the V-REP utilities
cwd = os.getcwd()
sys.path.append(cwd)
sys.path.append(os.path.join(cwd, 'lib'))
sys.path.append(os.path.join(cwd, 'utilities'))
# Import application libraries
import numpy as np
import vrep_utils as vu
from SAT import *
# Import any other libraries you might want to use ############################
import matplotlib.pyplot as plt
###############################################################################
class ArmController:
    """PID velocity controller driving the arm joints toward a target
    configuration, keeping a per-step history for later plotting."""

    def __init__(self):
        # PID state: latest error vector and accumulated (integral) error.
        self.err_cur = np.zeros(7)
        self.err_acc = np.zeros(7)
        # Gains: proportional, integral, derivative.
        self.kp = 10.0
        self.ki = 0.0
        self.kd = 0.2
        # Convergence threshold on the total absolute joint error.
        self.rate = 1e-3
        # Count of consecutive converged control steps.
        self.num = 0
        self.history = {'timestamp': [],
                        'joint_feedback': [],
                        'joint_target': [],
                        'ctrl_commands': []}
        self._target_joint_positions = None

    def set_target_joint_positions(self, target_joint_positions):
        """Set the target joint vector (must be vu.N_ARM_JOINTS long)."""
        assert len(target_joint_positions) == vu.N_ARM_JOINTS, \
            'Expected target joint positions to be length {}, but it was length {} instead.'.format(
                len(target_joint_positions), vu.N_ARM_JOINTS)
        self._target_joint_positions = target_joint_positions

    def calculate_commands_from_feedback(self, timestamp, sensed_joint_positions):
        """Return PID joint-velocity commands for the current feedback and
        record the step in the history."""
        assert self._target_joint_positions, \
            'Expected target joint positions to be set, but it was not.'
        err = np.array(self._target_joint_positions) - np.array(sensed_joint_positions)
        self.err_cur = err
        self.err_acc += self.err_cur
        # Derivative term from the time since the previous sample (or
        # since t=0 on the very first step).
        if len(self.history['timestamp']) != 0:
            err_d = err / (timestamp - self.history['timestamp'][-1])
        else:
            err_d = err / timestamp
        ctrl_commands = self.kp * err + self.kd * err_d + self.ki * self.err_acc
        # Do not modify the following variables
        # append time history
        self.history['timestamp'].append(timestamp)
        self.history['joint_feedback'].append(sensed_joint_positions)
        self.history['joint_target'].append(self._target_joint_positions)
        self.history['ctrl_commands'].append(ctrl_commands)
        return ctrl_commands

    def has_stably_converged_to_target(self):
        """True once the total absolute error has stayed below the
        threshold for more than 150 consecutive control steps.

        Fixes: the error total is now a sum of ABSOLUTE values (signed
        errors could cancel and report false convergence), and the
        non-converged branches return False explicitly instead of
        falling off the end with None.
        """
        if np.sum(np.abs(self.err_cur)) <= self.rate:
            self.num += 1
            if self.num > 150:
                self.num = 0
                return True
            return False
        self.num = 0
        return False
# Cuboid specs (dicts with "Origin"/"Orientation"/"Dimension") populated by
# main() from the simulator: arm link cuboids and static obstacle cuboids.
link_cuboid_spec = []
obstacle_cuboid_spec = []
def main(args):
    """Drive the simulated arm through the configured joint targets with
    the PID ArmController, then plot the joint trajectories.

    Side effects: connects to a running V-REP instance, populates the
    module-level link/obstacle cuboid spec lists, and blocks on a
    matplotlib window at the end.
    """
    # Connect to V-REP
    print('Connecting to V-REP...')
    clientID = vu.connect_to_vrep()
    print('Connected.')
    # Reset simulation in case something was running
    vu.reset_sim(clientID)
    # Initial control inputs are zero
    vu.set_arm_joint_target_velocities(clientID, np.zeros(vu.N_ARM_JOINTS))
    # Despite the name, this sets the maximum allowable joint force
    vu.set_arm_joint_forces(clientID, 50. * np.ones(vu.N_ARM_JOINTS))
    # One step to process the above settings
    vu.step_sim(clientID)
    deg_to_rad = np.pi / 180.
    # Collision cuboids of each arm link, queried from the simulator.
    link_cuboid_list = ("arm_base_link_joint_collision_cuboid", "shoulder_link_collision_cuboid",
                        "elbow_link_collision_cuboid", "forearm_link_collision_cuboid",
                        "wrist_link_collision_cuboid", "gripper_link_collision_cuboid",
                        "finger_r_collision_cuboid", "finger_l_collision_cuboid")
    for link_cuboid in link_cuboid_list:
        d = vu.get_handle_by_name(clientID, link_cuboid)
        link_spec = {}
        link_spec["Origin"] = vu.get_object_position(clientID, d)
        link_spec["Orientation"] = vu.get_object_orientation(clientID, d)
        link_spec["Dimension"] = vu.get_object_bounding_box(clientID, d)
        link_cuboid_spec.append(link_spec)
    # Static obstacle cuboids in the scene.
    obstacle_cuboid_list = ("cuboid_0", "cuboid_1",
                            "cuboid_2", "cuboid_3",
                            "cuboid_4", "cuboid_5")
    for obstacle_cuboid in obstacle_cuboid_list:
        d = vu.get_handle_by_name(clientID, obstacle_cuboid)
        obstacle_spec = {}
        obstacle_spec["Origin"] = vu.get_object_position(clientID, d)
        obstacle_spec["Orientation"] = vu.get_object_orientation(clientID, d)
        obstacle_spec["Dimension"] = vu.get_object_bounding_box(clientID, d)
        obstacle_cuboid_spec.append(obstacle_spec)
    print("done")
    joint_targets = get_cuboid_config()
    # Instantiate controller
    controller = ArmController()
    # Iterate through target joint positions
    for target in joint_targets:
        # Set new target position
        controller.set_target_joint_positions(target)
        steady_state_reached = False
        while not steady_state_reached:
            timestamp = vu.get_sim_time_seconds(clientID)
            # print('Simulation time: {} sec'.format(timestamp))
            # Get current joint positions
            sensed_joint_positions = vu.get_arm_joint_positions(clientID)
            # Calculate commands
            commands = controller.calculate_commands_from_feedback(timestamp, sensed_joint_positions)
            # Send commands to V-REP
            vu.set_arm_joint_target_velocities(clientID, commands)
            # Print current joint positions (comment out if you'd like)
            vu.step_sim(clientID, 1)
            # Determine if we've met the condition to move on to the next point
            steady_state_reached = controller.has_stably_converged_to_target()
    vu.stop_sim(clientID)
    # Post simulation cleanup -- save results to a pickle, plot time histories, etc #####
    # Fill this out here (optional) or in your own script
    # If you use a separate script, don't forget to include it in the deliverables
    # ...
    #####################################################################################
    # One subplot per joint: sensed vs. target angle over time
    # (x-axis assumes 0.05 s per control step — TODO confirm sim dt).
    plt.figure()
    for i in range(7):
        plt.subplot(2, 4, i + 1)
        plt.title('Joint %d' % (i+1))
        plt.xlabel('Time')
        plt.ylabel('Joint angle')
        a1 = np.array(controller.history['joint_feedback'])[:, i]
        a2 = np.array(controller.history['joint_target'])[:, i]
        plt.plot(0.05 * np.arange(len(a1)), a1)
        plt.plot(0.05 * np.arange(len(a2)), a2)
    plt.show()
if __name__ == "__main__":
    # No CLI options are defined yet; the parser exists for future flags.
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    main(args)
| {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,061 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /check_collision.py | import numpy as np
# from SAT import collosion_detect
# True if collides
# check if the bounding box collides or not
def check_collision(robot):
global obstacle_cuboid_spec
for i in range(len(robot)):
for j in range(len(obstacle_cuboid_spec)):
if collosion_detect(i,j):
return True
return False | {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,062 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /calculate_dist.py | import numpy as np
import scipy.spatial.distance as ssd
def calculate_dist(X, sample_list):
dist = np.zeros(sample_list.shape[0])
for i in range(sample_list.shape[0]):
dist[i] = ssd.euclidean(X, sample_list[i])
return dist | {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,063 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /local_planner.py | import numpy as np
from Robot_construct import *
from check_collision import *
def local_planner(X, Y):
delta = X-Y
n_samples = np.ceil(np.sum(abs(delta)) / 0.03).astype(int)
for i in range(n_samples):
sample = Y + (i/n_samples)*delta
robot = Robot_construct(sample)
if check_collision(robot):
return False
return True | {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,064 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /Robot_construct.py | import numpy as np
# just return all the bounding box of arms
def Robot_construct(sample_joint_config):
    """Placeholder: should return the bounding-box cuboids of every arm
    link at the given joint configuration. Currently returns None."""
    return None
| {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,065 | ZhihaoZhu/Robot-Control-Motion-Planning | refs/heads/master | /plot_cuboid.py | from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def plot_linear_cube(PS1, PS2, color='red'):
    """Plot two cuboids as 3D wireframes in one figure: PS1 in `color`,
    PS2 in blue. Each PSx is an (8, 3) corner array (face 0-3 then face
    4-7, matching the T_matrix corner order). Blocks on plt.show().

    NOTE(review): the vertical edges reuse the first face's x/y for both
    endpoints, so the drawing is only correct when those edges are
    axis-aligned. The second wireframe passes alpha=2, which newer
    matplotlib versions reject (alpha must be in [0, 1]) — confirm.
    """
    fig = plt.figure()
    ax = Axes3D(fig)
    # First cuboid outline.
    point_set = PS1
    xx = point_set[:,0].reshape(-1).tolist()
    yy = point_set[:,1].reshape(-1).tolist()
    zz = point_set[:,2].reshape(-1).tolist()
    kwargs = {'alpha': 1, 'color': color}
    ax.plot3D(xx, yy, zz, **kwargs)
    # Close the two face loops, then draw the four vertical edges.
    ax.plot3D([point_set[0,0], point_set[3,0]], [point_set[0,1], point_set[3,1]], [point_set[0,2], point_set[3,2]], **kwargs)
    ax.plot3D([point_set[4,0], point_set[7,0]], [point_set[4,1], point_set[7,1]], [point_set[4,2], point_set[7,2]], **kwargs)
    for i in range(4):
        ax.plot3D([point_set[i,0], point_set[i,0]], [point_set[i,1], point_set[i,1]], [point_set[i,2], point_set[4+i,2]], **kwargs)
    # Second cuboid outline (always blue).
    point_set = PS2
    xx = point_set[:,0].reshape(-1).tolist()
    yy = point_set[:,1].reshape(-1).tolist()
    zz = point_set[:,2].reshape(-1).tolist()
    kwargs = {'alpha': 2, 'color': 'blue'}
    ax.plot3D(xx, yy, zz, **kwargs)
    ax.plot3D([point_set[0, 0], point_set[3, 0]], [point_set[0, 1], point_set[3, 1]],
              [point_set[0, 2], point_set[3, 2]], **kwargs)
    ax.plot3D([point_set[4, 0], point_set[7, 0]], [point_set[4, 1], point_set[7, 1]],
              [point_set[4, 2], point_set[7, 2]], **kwargs)
    for i in range(4):
        ax.plot3D([point_set[i, 0], point_set[i, 0]], [point_set[i, 1], point_set[i, 1]],
                  [point_set[i, 2], point_set[4 + i, 2]], **kwargs)
    plt.title('Cube')
    plt.show()
| {"/SAT.py": ["/plot_cuboid.py"], "/Random_sample.py": ["/Robot_construct.py", "/check_collision.py"], "/PRM.py": ["/Random_sample.py", "/calculate_dist.py", "/local_planner.py"], "/robot_control.py": ["/SAT.py"], "/local_planner.py": ["/Robot_construct.py", "/check_collision.py"]} |
57,072 | BryceLuna/Fraud_Detection | refs/heads/master | /Models_Eval.py | import pandas as pd
import numpy as np
import cPickle as pickle
from Load_Data import split_data, resample_data, standardize_variables
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def evaluate_model(model, X, y):
    """Predict on X with a fitted classifier and return sklearn's text
    classification report against the true labels y."""
    predictions = model.predict(X)
    return classification_report(y, predictions)
def main():
    # Placeholder entry point; the evaluation pipeline below runs directly
    # under the __main__ guard instead.
    pass
if __name__ == '__main__':
    #main()
    # Column indices into the cleaned frame (acct_type already dropped):
    # numeric features to standardize vs. categorical/dummy features.
    numerical_lst = [0,4,6,11,12] #starts at zero - dropped acct_type
    categorical_lst = [1,2,3,5,7,8,9,10,13,14,15,16,17,18,19,20] #prob (21) left out
    df = pd.read_pickle('data/df_clean.pkl')
    # NOTE(review): cPickle and text-mode 'r' pickle reads are Python 2
    # idioms; under Python 3 this needs pickle with mode 'rb'.
    with open('data/y_prob.pkl','r') as f:
        y_prob = pickle.load(f)
    # NLP-derived fraud probability becomes an extra feature column.
    df['fraud_prob'] = y_prob
    X_train, X_test, y_train, y_test = split_data(df)
    X_train_resampled, y_train_resampled = resample_data(X_train, y_train, categorical_lst)
    X_train_re_std, X_test_re_std = standardize_variables(X_train_resampled, X_test, numerical_lst)
    # Refit each model with its previously searched best hyper-parameters,
    # then score on the held-out test split.
    with open('models/logistic_searched.pkl','r') as l:
        logistic_params = pickle.load(l)
    logistic_model = LogisticRegression(**logistic_params.best_params_)
    logistic_model.fit(X_train_re_std, y_train_resampled)
    # NOTE(review): logistic is trained on standardized features but
    # evaluated on the raw X_test — confirm X_test_re_std was intended.
    logistic_report = evaluate_model(logistic_model, X_test, y_test)
    with open('models/randomForest_searched.pkl','r') as b:
        rf_params = pickle.load(b)
    rf_model = RandomForestClassifier(n_estimators=200, n_jobs=3, **rf_params.best_params_)
    rf_model.fit(X_train_resampled, y_train_resampled)
    rf_report = evaluate_model(rf_model, X_test, y_test)
    with open('models/boosting_searched.pkl','r') as b:
        boosting_params = pickle.load(b)
    boosting_model = GradientBoostingClassifier(n_estimators = 200, **boosting_params.best_params_)
    boosting_model.fit(X_train_resampled, y_train_resampled)
    boosting_report = evaluate_model(boosting_model, X_test, y_test)
57,073 | BryceLuna/Fraud_Detection | refs/heads/master | /Search_Models_Params.py | import pandas as pd
import numpy as np
import cPickle as pickle
from Load_Data import split_data, resample_data, standardize_variables
from scipy.stats import randint as sp_randint
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
'''
-LogisticRegressionCV for searching over Cs
-Consider using class_weight and an intercept to avoid scaling and
resampling for Logistic Regression
'''
def parameter_search(model, X, y, params, metric, n=10):
    """Run a randomized hyper-parameter search (n draws, scored with
    `metric`) over the given distributions and return the fitted
    RandomizedSearchCV object."""
    searcher = RandomizedSearchCV(model, param_distributions=params,
                                  scoring=metric, n_jobs=3, n_iter=n)
    searcher.fit(X, y)
    return searcher
def main():
    """Randomized hyper-parameter search for three fraud classifiers
    (logistic regression, random forest, gradient boosting), persisting
    each fitted search object under models/."""
    # Column indices into the cleaned frame: numeric features to
    # standardize vs. categorical/dummy features for SMOTE rounding.
    numerical_lst = [0,4,6,11,12] #starts at zero - dropped acct_type
    categorical_lst = [1,2,3,5,7,8,9,10,13,14,15,16,17,18,19,20] #prob (21) left out
    df = pd.read_pickle('data/df_clean.pkl')
    # NOTE(review): text-mode 'r'/'w' pickle handles are Python 2 idioms.
    with open('data/y_prob.pkl','r') as f:
        y_prob = pickle.load(f)
    df['fraud_prob'] = y_prob
    X_train, X_test, y_train, y_test = split_data(df)
    X_train_resampled, y_train_resampled = resample_data(X_train, y_train, categorical_lst)
    X_train_re_std, X_test_re_std = standardize_variables(X_train_resampled, X_test, numerical_lst)
    #Logistic Regression
    logistic_params = {"C":[1e-4,1e-3,1e-2,1e-1,1,1e2,1e3,1e4]}
    logistic = LogisticRegression()
    logistic_searched = parameter_search(\
        logistic, X_train_re_std, y_train_resampled, logistic_params, 'f1', 6)
    with open('models/logistic_searched.pkl','w') as l:
        pickle.dump(logistic_searched, l)
    #Random Forest
    forest = RandomForestClassifier(n_estimators=200, n_jobs=3)
    forest_params = {"max_depth": [3, 4, None],
                     "max_features": sp_randint(1, 15),
                     "min_samples_split": sp_randint(2, 11),
                     "min_samples_leaf": sp_randint(1, 20),
                     "bootstrap": [True, False],
                     "criterion": ["gini", "entropy"]}
    forest_searched = parameter_search(\
        forest, X_train_resampled, y_train_resampled, forest_params, 'f1')
    with open('models/randomForest_searched.pkl','w') as f:
        pickle.dump(forest_searched, f)
    #Gradient Boosting
    boosting = GradientBoostingClassifier(n_estimators=200)
    gradient_params = {"max_depth": [1, 2, 3],
                       "max_features": sp_randint(1, 15),
                       "learning_rate": [.1, .2, .5],
                       "min_samples_split": sp_randint(2, 11),
                       "min_samples_leaf": sp_randint(1, 20)}
    boosting_searched = parameter_search(\
        boosting, X_train_resampled, y_train_resampled, gradient_params, 'f1')
    with open('models/boosting_searched.pkl','w') as b:
        pickle.dump(boosting_searched, b)

if __name__ == '__main__':
    main()
| {"/Models_Eval.py": ["/Load_Data.py"], "/Search_Models_Params.py": ["/Load_Data.py"]} |
57,074 | BryceLuna/Fraud_Detection | refs/heads/master | /NLP.py | import pandas as pd
import numpy as np
import cPickle as pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.cross_validation import train_test_split
def build_model(df):
    """Fit a TF-IDF + multinomial naive-Bayes fraud classifier on the
    event descriptions; returns (fitted vectorizer, fitted model).

    The 80/20 split uses random_state=123, matching the split in
    Load_Data.split_data (see the comment there: "Split in the same way
    you did for NLP").
    """
    descriptions = df['description']
    labels = df['acct_type']
    X_train, X_test, y_train, y_test = train_test_split(
        descriptions, labels, test_size=0.2, random_state=123)
    vectorizer = TfidfVectorizer(stop_words='english')
    train_matrix = vectorizer.fit_transform(X_train)
    model = MultinomialNB(alpha=.01)
    model.fit(train_matrix, y_train)
    return vectorizer, model
def generate_nlp_prob(vectorizer, model, df_text):
    """Return P(fraud) for every row of df_text['description'] using the
    fitted vectorizer and naive-Bayes model."""
    features = vectorizer.transform(df_text['description'])
    # Column 1 of predict_proba is the positive (fraud) class probability.
    return model.predict_proba(features)[:, 1]
def main():
    """Train the description-text fraud model and persist the vectorizer,
    the naive-Bayes model, and the per-row fraud probabilities to disk."""
    df = pd.read_pickle('data/df_text.pkl')
    vectorizer, mnb_model = build_model(df)
    # NOTE(review): text-mode 'w' pickle writes are Python 2 idioms; use
    # 'wb' under Python 3.
    with open('models/vectorizer.pkl','w') as v:
        pickle.dump(vectorizer,v)
    with open('models/mnb_model.pkl','w') as m:
        pickle.dump(mnb_model,m)
    # Probabilities are computed over the whole frame (train + test rows).
    with open('data/y_prob.pkl','w') as f:
        pickle.dump(generate_nlp_prob(vectorizer, mnb_model, df),f)

if __name__ == '__main__':
    main()
| {"/Models_Eval.py": ["/Load_Data.py"], "/Search_Models_Params.py": ["/Load_Data.py"]} |
57,075 | BryceLuna/Fraud_Detection | refs/heads/master | /Data_Cleaning.py | import pandas as pd
from bs4 import BeautifulSoup
from unidecode import unidecode
'''
Notes:
'''
#Data Cleaning
def clean_data(data_frame,columns_lst):
    """Return a modeling-ready copy of the raw events frame.

    Drops the columns in `columns_lst`, binarizes the fraud target,
    engineers flag and duration features, and one-hot encodes the
    remaining categoricals. The input frame is not mutated (a copy is
    taken first).
    """
    #clean all the varibles then change their datatypes then do dummies
    #Dropping features that aren't obviously useful
    df = data_frame.copy()
    df.drop(columns_lst,axis=1,inplace=1)
    #Target Variable: 1 for any acct_type containing 'fraud', else 0.
    df['acct_type'] = df['acct_type'].map(lambda x: 0 if 'fraud' not in x else 1)
    #Flag if the country variable doesn't match the venue_country
    #Note:will not flag if both are null
    # NOTE(review): NaN != NaN evaluates True, so rows where both are null
    # DO get flagged, contrary to the note above — confirm intent.
    df['diff_country'] = (df['venue_country'] != df['country']).apply(lambda x: 0 if x == False else 1)
    df.drop('venue_country',inplace=1,axis=1)
    #df.country.fillna(value='unknown',inplace=True)
    #Flag if country wasn't available, check if this variable is predictive
    #df['country_nan'] = df.country.isnull().astype(int)
    #This might not be necessary because empty string countries would likely get filted below
    #df['country'] = df.country.map(lambda x: 'empty_string' if x == '' else str(x))
    #df['country_unknown'] = df.country.map(lambda x: 1 if x =='unknown' else 0)
    #dropping obscure countries, get the top 4 most common countries
    countries_lst = df.country.value_counts()[:4].index.values
    df['country'] = df.country.map(lambda x: 'minor' if x not in countries_lst else 'major')
    # Missing delivery methods become their own -1 category.
    df.delivery_method.fillna(value=-1,inplace=1)
    df['delivery_method'] = df.delivery_method.astype(int)
    #flag specific email domains
    df['hotmail'] = df['email_domain'].str.contains('hotmail').apply(lambda x: 0 if x == False else 1)
    df['yahoo'] = df['email_domain'].str.contains('yahoo').apply(lambda x: 0 if x == False else 1)
    df['live'] = df['email_domain'].str.contains('live').apply(lambda x: 0 if x == False else 1)
    df.drop('email_domain', axis=1,inplace=1)
    #Transforming time variables - from Unix Time
    df['user_created'] = pd.to_datetime(df['user_created'], unit='s')
    df['event_created'] = pd.to_datetime(df['event_created'], unit='s')
    df['event_start'] = pd.to_datetime(df['event_start'], unit='s')
    # Day counts: account age at event start, and listing lead time.
    df['user_create_to_start'] = (df['event_start'] - df['user_created']).dt.days
    df['create_to_start'] = (df['event_start'] - df['event_created']).dt.days
    df.drop(['event_created','event_start','user_created'], axis=1,inplace=1)
    df.org_facebook.fillna(value=-1,inplace=1)
    # Binary flag: 1 only when org_facebook is exactly 0.
    df['org_facebook'] = df.org_facebook.map(lambda x: 1 if x == 0 else 0).astype(int)
    #Flag if there is no org name
    df['org_nameQ'] = df.org_name.map(lambda x: 0 if x == '' else 1)
    df.drop('org_name',axis=1,inplace=1)
    df['payout_type'] = df.payout_type.map(lambda x: 'unknown' if x =='' else x)
    #flag if sale_duration doesn't exist
    df['sale_duration_nan'] = df.sale_duration.isnull().astype(int)
    df.drop('sale_duration',axis=1,inplace=1)
    #flag if there isn't a venue name
    df.venue_name.fillna(value='',inplace=1)
    df['venue_nameQ'] = df.venue_name.map(lambda x: 0 if x =='' else 1)
    df.drop('venue_name',axis=1,inplace=1)
    df = pd.get_dummies(df,\
    columns=['country','delivery_method','payout_type'],drop_first=1)
    #delivery method 1 is negatively correlated with method 0
    df.drop('delivery_method_1',axis=1,inplace=1)
    return df
def clean_description(txt):
    """Strip HTML tags from an event description and transliterate the
    remaining text to plain ASCII."""
    parsed = BeautifulSoup(txt, 'html.parser')
    return unidecode(parsed.text)
#.translate(None, string.punctuation) sklearn will ignore punctuation by default
def main():
    """Clean the raw events dump and pickle both the numeric feature frame
    and the text (description) frame for downstream modelling."""
    original_df = pd.read_json('data/train_new.json')
    # Columns dropped wholesale by clean_data before feature engineering.
    columns = ['approx_payout_date','channels','gts','has_header',\
    'listed','name','name_length','org_desc','ticket_types','venue_latitude',\
    'venue_longitude','object_id','org_twitter','payee_name','num_order',\
    'previous_payouts','show_map','sale_duration2','user_type','venue_address',\
    'venue_state','description','event_end','event_published','currency']
    df = clean_data(original_df,columns)
    # The free-text descriptions are cleaned separately and kept with the
    # label so the NLP pipeline can be trained on its own pickle.
    df_text = pd.DataFrame(original_df['description'].apply(clean_description))
    df_text['acct_type'] = df['acct_type']
    #df_text = original_df['description'].apply(clean_description)
    df_text.to_pickle('data/df_text.pkl')
    df.to_pickle('data/df_clean.pkl')
    #df.to_csv("C:/Users/Anon/Desktop/clean_df.csv",index=False)
if __name__ == '__main__':
    main()
| {"/Models_Eval.py": ["/Load_Data.py"], "/Search_Models_Params.py": ["/Load_Data.py"]} |
57,076 | BryceLuna/Fraud_Detection | refs/heads/master | /Load_Data.py | import pandas as pd
import numpy as np
import cPickle as pickle
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.cross_validation import train_test_split
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import StandardScaler
def split_data(df):
    """Split *df* into train/test features and labels.

    Returns (X_train, X_test, y_train, y_test).  The fixed random_state
    keeps the split identical to the one used for the NLP pipeline.
    """
    target = df['acct_type']
    features = df.drop(['acct_type'], axis=1)
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=123)
    return X_train, X_test, y_train, y_test
def resample_data(X, y, categorical_lst):
    """Up-sample the minority class with SMOTE.

    SMOTE interpolates between samples, which produces fractional values in
    categorical columns; those columns (indices in *categorical_lst*) are
    rounded back to valid category codes.
    """
    sampler = SMOTE(kind='regular')
    X_res, y_res = sampler.fit_sample(X, y)
    X_res[:, categorical_lst] = np.round(X_res[:, categorical_lst])
    return X_res, y_res
def standardize_variables(X_train, X_test, numerical_lst):
    """Standardize the numerical columns (indices in *numerical_lst*).

    The scaler is fit on the training data only and then applied to the
    test data, so no test-set statistics leak into training.
    """
    scaled_train = np.copy(X_train)
    scaled_test = np.copy(X_test)
    scaler = StandardScaler()
    scaled_train[:, numerical_lst] = scaler.fit_transform(scaled_train[:, numerical_lst])
    scaled_test[:, numerical_lst] = scaler.transform(scaled_test[:, numerical_lst])
    return scaled_train, scaled_test
| {"/Models_Eval.py": ["/Load_Data.py"], "/Search_Models_Params.py": ["/Load_Data.py"]} |
57,084 | iliar1987/PrisonersDilemmaFractals | refs/heads/master | /PD_fractals2.py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 04 23:22:07 2015
@author: Ilia
"""
from PD_fractals1 import *
fname_prefix = 'combined'
# Prisoner's-dilemma payoffs; b is the temptation to defect.
b=1.8
P = 0.01
T = b
S = 0
R = 1
#payoff_mat = np.array([[P,T],[S,R]])
SetPayoffMat(P,T,S,R)
# Two lattices side by side: the second is f times finer and is advanced f
# steps per coarse step so the two animations stay comparable.
N1 = 100
M1 = 100
f=3
N2 = N1*f
M2 = M1*f
N=[N1,N2]
M=[M1,M2]
indices1 = make_indices(N1,M1)
indices2 = make_indices(N2,M2)
indices=[indices1,indices2]
# All cooperators with a defecting seed in the middle (1x1 resp. 3x3).
# NOTE: floor division is required - N1/2 is a float in Python 3 and floats
# are not valid array indices; np.int was removed in NumPy 1.24, the
# builtin int is the documented replacement.
starting_lattice1 = np.ones((N1,M1),dtype=int)
starting_lattice1[N1//2,M1//2] = 0
central_squares = np.arange(N2//2-1,N2//2+2,dtype=int).reshape((1,3))
starting_lattice2 = np.ones((N2,M2),dtype=int)
starting_lattice2[central_squares.transpose(),central_squares] = 0
starting_lattices=[starting_lattice1,starting_lattice2]
fname_suffix = 'center'
num_neighbors = 4
disp_pop = False
if num_neighbors == 4:
    iteration_m = iterate_4_neighbors
else:
    iteration_m = iterate_8_neighbors
num_generations = 120
from matplotlib import animation
FFMpegWriter = animation.writers['ffmpeg']
writer = FFMpegWriter(fps=25)
fig = plt.figure()
ax1 = plt.subplot(1,2,1)
ax2 = plt.subplot(1,2,2)
ax=[ax1,ax2]
import copy
L = copy.copy(starting_lattices)
pyoffs=[None]*2
img_h = [None]*2
for i in range(len(L)):
    pyoffs[i] = CalcTotalPayoffs(L[i],indices[i],iteration_m)
    if not disp_pop:
        img_h[i] = ax[i].imshow(pyoffs[i],interpolation='none',clim=[0,b*(num_neighbors+1)*0.7])
    else:
        img_h[i] = plt.imshow(L[i],interpolation='none')
if not disp_pop:
    plt.colorbar(img_h[i],ax=ax)
fname = '%s_%d_%d_%d_%d_%s_b%.2f_%s.mp4' % \
    (fname_prefix,N[0],M[0],num_generations,num_neighbors,'pop' if disp_pop else '',b,fname_suffix)
with writer.saving(fig,fname,150):
    writer.grab_frame()
    for gen in range(num_generations):
        if gen%10 == 0:
            print( gen)
        L[0] = SingleGeneration(L[0],indices[0],iteration_m)
        # Advance the fine lattice f times per coarse generation.
        for _ in range(f):
            L[1] = SingleGeneration(L[1],indices[1],iteration_m)
        for i in range(len(L)):
            if not disp_pop:
                pyoffs[i] = CalcTotalPayoffs(L[i],indices[i],iteration_m)
                img_h[i].set_data(pyoffs[i])
            else:
                img_h[i].set_data(L[i])
            # NOTE(review): a frame is grabbed after each subplot update
            # (twice per generation), as in the original - confirm intended.
            writer.grab_frame()
| {"/PD_fractals2.py": ["/PD_fractals1.py"]} |
57,085 | iliar1987/PrisonersDilemmaFractals | refs/heads/master | /PD_fractals1.py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 03 16:11:00 2015
Based on 'Evolutionary games and spatial chaos by Martin et al.'
@author: Ilia
"""
import numpy as np
from matplotlib import pyplot as plt
def SetPayoffMat(P_, T_, S_, R_):
    """Set the module-global 2x2 payoff matrix ``[[P, T], [S, R]]``.

    Indexed as ``payoff_mat[own_move, opponent_move]`` with moves 0/1
    (named after the prisoner's-dilemma payoffs: Punishment, Temptation,
    Sucker, Reward).  Also returns the matrix for convenience; callers that
    ignore the return value are unaffected.
    """
    global payoff_mat
    payoff_mat = np.array([[P_, T_], [S_, R_]])
    return payoff_mat
def make_indices(N, M):
    """Build wrap-around (toroidal) neighbour index maps for an N x M grid.

    Returns a dict of 1-D integer arrays: 'left'/'hcenter'/'right' of
    length M and 'up'/'vcenter'/'down' of length N.  'left' maps column c
    to c+1 (mod M), 'right' to c-1 (mod M); the vertical maps are analogous.
    """
    cols = np.arange(M)
    rows = np.arange(N)
    return {
        'left': (cols + 1) % M,
        'right': (cols - 1) % M,
        'hcenter': cols,
        'up': (rows + 1) % N,
        'down': (rows - 1) % N,
        'vcenter': rows,
    }
def reindex(lattice, ind1, ind2):
    """Return *lattice* sampled on the outer product of row indices *ind1*
    and column indices *ind2* (shape: len(ind1) x len(ind2))."""
    return lattice[np.ix_(ind1, ind2)]
def CalcPayoffs(lattice, ind1, ind2):
    """Per-cell payoff against the single neighbour selected by the
    (ind1, ind2) shift maps, using the module-global payoff matrix."""
    neighbours = reindex(lattice, ind1, ind2)
    return payoff_mat[lattice, neighbours]
def iterate_8_neighbors():
    """Yield the 9 (vertical, horizontal) shift-name pairs of the Moore
    neighbourhood, including the cell itself ('vcenter', 'hcenter')."""
    verticals = ('up', 'vcenter', 'down')
    horizontals = ('left', 'hcenter', 'right')
    for v in verticals:
        for h in horizontals:
            yield (v, h)
def iterate_4_neighbors():
    """Yield the von Neumann neighbourhood shift-name pairs plus the cell
    itself ('vcenter', 'hcenter'), in the original fixed order."""
    yield from (
        ('up', 'hcenter'),
        ('down', 'hcenter'),
        ('vcenter', 'left'),
        ('vcenter', 'right'),
        ('vcenter', 'hcenter'),
    )
def CalcTotalPayoffs(lattice,indices,neighbors_iter = iterate_8_neighbors):
    """Sum each cell's payoff over every neighbour shift yielded by
    *neighbors_iter*, using the maps in *indices* (see make_indices)."""
    total = np.zeros(lattice.shape)
    for vname, hname in neighbors_iter():
        total += CalcPayoffs(lattice, indices[vname], indices[hname])
    return total
#CalcTotalPayoffs(lattice,indices)
def SingleGeneration(lattice,indices,neighbors_iter = iterate_8_neighbors):
    """Advance the spatial game by one generation.

    Every cell adopts the strategy of the best-scoring cell in its
    neighbourhood (itself included - both iterators yield the centre).
    Ties are broken by np.argmax, i.e. by the order in which
    *neighbors_iter* yields the shift pairs.
    """
    N = lattice.shape[0]
    M = lattice.shape[1]
    payoffs = CalcTotalPayoffs(lattice,indices,neighbors_iter = neighbors_iter)
    all_indices=[]
    all_indices.extend((vind,hind) for vind,hind in neighbors_iter())
    # Stack, per shift z, the payoff and the strategy seen from each cell.
    all_payoff_array = np.zeros(lattice.shape+(len(all_indices),))
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    all_lattices_array = np.zeros(lattice.shape+(len(all_indices),),dtype=int)
    for z,(vind,hind) in enumerate(all_indices):
        ind1 = indices[vind]
        ind2 = indices[hind]
        all_payoff_array[:,:,z] = reindex(payoffs,ind1,ind2)
        all_lattices_array[:,:,z] = reindex(lattice,ind1,ind2)
    # Index of the best-paying shift per cell ...
    max_indices = np.argmax(all_payoff_array,axis=2)
    # ... and the strategy that cell played becomes the new strategy.
    new_lattice = all_lattices_array[np.arange(N).reshape((N,1)),np.arange(M),max_indices]
    return new_lattice
from scipy import sparse
def MakeRandLattice(N, M, avg_density):
    """Random N x M 0/1 lattice with roughly *avg_density* fraction of ones.

    ``sparse.rand`` draws uniform values in (0, 1] at the requested density;
    ``ceil`` maps every non-zero entry to 1.
    """
    # np.int was removed in NumPy 1.24; the builtin int yields the same
    # platform default integer dtype.
    return np.array(np.ceil(sparse.rand(N, M, avg_density).toarray()), dtype=int)
if __name__ == '__main__':
    # --- Demo 1: still images after num_generations steps ---------------
    b=1.9
    P = 0.01
    T = b
    S = 0
    R = 1
    payoff_mat = np.array([[P,T],[S,R]])
    N = 35
    M = 200
    indices = make_indices(N,M)
    # All cooperators with one central defector.  Floor division is
    # required: N/2 is a float in Python 3 and floats are not valid
    # indices; np.int was removed in NumPy 1.24 (use builtin int).
    starting_lattice = np.ones((N,M),dtype=int)
    starting_lattice[N//2,M//2] = 0
    #iteration_m = iterate_4_neighbors
    iteration_m = iterate_8_neighbors
    L = starting_lattice
    num_generations = 200
    for gen in range(num_generations):
        if gen%10 == 0:
            print (gen)
        L = SingleGeneration(L,indices,iteration_m)
    plt.figure()
    m=plt.imshow(L,interpolation='none')
    pyoffs = CalcTotalPayoffs(L,indices,iteration_m)
    plt.figure()
    plt.imshow(pyoffs,interpolation='none',clim=[0,10])
    plt.colorbar()
    # --- Demo 2: animated movie written through ffmpeg ------------------
    b=1.8
    P = 0.01
    T = b
    S = 0
    R = 1
    payoff_mat = np.array([[P,T],[S,R]])
    N = 200
    M = 200
    indices = make_indices(N,M)
    starting_lattice = np.ones((N,M),dtype=int)
    starting_lattice[N//2,M//2] = 0
    fname_suffix = 'center'
    #starting_lattice = np.ones((N,M),dtype=int)
    #starting_lattice[N//2,M-1] = 0
    #starting_lattice[N//2,0] = 0
    #fname_suffix = 'wave'
    #starting_lattice = MakeRandLattice(N,M,0.9)
    #fname_suffix='rand0.9'
    num_neighbors = 8
    disp_pop = False
    if num_neighbors == 4:
        iteration_m = iterate_4_neighbors
    else:
        iteration_m = iterate_8_neighbors
    num_generations = 1000
    from matplotlib import animation
    FFMpegWriter = animation.writers['ffmpeg']
    writer = FFMpegWriter(fps=25)
    fig = plt.figure()
    L = starting_lattice
    pyoffs = CalcTotalPayoffs(L,indices,iteration_m)
    if not disp_pop:
        img_h = plt.imshow(pyoffs,interpolation='none',clim=[0,b*(num_neighbors+1)*0.7])
        plt.colorbar()
    else:
        img_h = plt.imshow(L,interpolation='none')
    with writer.saving(fig,'out2_%d_%d_%d_%d_%s_b%.2f_%s.mp4' % \
            (N,M,num_generations,num_neighbors,'pop' if disp_pop else '',b,fname_suffix)
            ,100):
        writer.grab_frame()
        for gen in range(num_generations):
            if gen%10 == 0:
                print( gen)
            L = SingleGeneration(L,indices,iteration_m)
            if not disp_pop:
                pyoffs = CalcTotalPayoffs(L,indices,iteration_m)
                img_h.set_data(pyoffs)
            else:
                img_h.set_data(L)
            writer.grab_frame()
| {"/PD_fractals2.py": ["/PD_fractals1.py"]} |
57,092 | rongtou/ErlSmart | refs/heads/master | /core/job.py | import subprocess
import json
import os
import logging
from urllib import request, parse
import ErlSmart.core.global_vars as gv
def add_index_job(file_path: str):
    """Queue *file_path* for indexing; files without an .erl suffix are
    ignored."""
    if not file_path.endswith(".erl"):
        return
    gv.pool().submit(do_index, file_path)
def del_index_job(file_path: str, is_dir: bool):
    """Queue removal of *file_path* (a file, or a whole directory when
    *is_dir* is true) from the index."""
    gv.pool().submit(do_del, file_path, is_dir)
def do_index(file_path: str):
    """Parse *file_path* via the local Erlang parse server and hand the
    result to the index writer.

    Files whose on-disk mtime already matches the indexed timestamp are
    skipped; files the parser cannot analyse are logged and ignored.
    """
    modified = int(os.path.getmtime(file_path))
    if not gv.index_reader().is_file_need_update(file_path, modified):
        return
    query = parse.urlencode({'path': file_path})
    response = request.urlopen(r"http://127.0.0.1:48437/?%s" % query)
    payload = json.loads(response.read().decode('utf-8'))
    if payload['code'] != "parse_file_error":
        gv.index_writer().add_req("index", (file_path, payload['data']))
    else:
        logging.warning("can not parser: %s", file_path)
def do_del(file_path: str, is_dir: bool):
    # Worker task: forward the delete request to the single writer thread.
    gv.index_writer().add_req("del", (file_path, is_dir))
| {"/core/monitor.py": ["/core/job.py", "/core/utils.py"], "/ErlSmart.py": ["/core/main.py", "/core/utils.py", "/core/smart_goto.py"], "/core/index.py": ["/core/utils.py"], "/core/main.py": ["/core/monitor.py", "/core/index.py", "/core/job.py", "/core/utils.py"]} |
57,093 | rongtou/ErlSmart | refs/heads/master | /core/smart_goto.py | import sublime
import os
import re
import ErlSmart.core.global_vars as gv
ERLANG_EXTENSIONS = ['.erl', '.hrl', '.xrl', '.yrl']
class SmartGoto(object):
    """Goto-definition for Erlang calls: resolves mod:fun via the SQLite
    index, falling back to Sublime's built-in symbol lookup."""
    def __init__(self, view):
        self.view = view
        self.window = view.window()
        # Candidate (mod, fun, arity, line, path) tuples shown in the panel.
        self.options = []
    def run(self, kind, point):
        """Jump to the definition of the call under *point*; *kind* is the
        symbol-kind prefix ('Function', 'Type', ...) for the fallback."""
        (module, funcname, is_local) = self.get_mod_fun(point)
        self.options = gv.index_reader().find_fun(module, funcname)
        option_len = len(self.options)
        if option_len == 1:
            # Single hit: open file:line directly.
            self.window.open_file('{0}:{1}'.format(self.options[0][4], self.options[0][3]), sublime.ENCODED_POSITION)
        elif option_len > 1:
            # Multiple arities/definitions: let the user pick.
            self.window.show_quick_panel(
                list(map(lambda o: "{}:{}/{} {}:{}".format(o[0], o[1], o[2], o[4], o[3]), self.options)), self.on_done)
        else:
            # No function hit: jump to the module file if it is indexed.
            ret = gv.index_reader().find_mod(module)
            if ret:
                self.window.open_file('{0}:0'.format(ret[0]), sublime.ENCODED_POSITION)
            else:
                # no exact matches, no module info: search for just the name
                matches = lookup_symbol(self.window, kind + ': ' + funcname)
                if matches:
                    locations = [loc for loc in matches if loc_is_module(loc, module)]
                    fname, display_fname, rowcol = locations[0]
                    row, col = rowcol
                    self.window.open_file('{0}:{1}'.format(fname, row), sublime.ENCODED_POSITION)
                    pass
                else:
                    self.window.run_command('goto_definition', {'symbol': kind + ': ' + funcname})
    def get_mod_fun(self, point):
        """Return (module, function, is_local) for the call under *point*;
        calls without an explicit module resolve to the current file's module."""
        mod_name = file_module_name(self.view.file_name())
        expclass = sublime.CLASS_WORD_END | sublime.CLASS_WORD_START
        word_sep = ' \"\t\n(){}[]+-*/=>,.;'
        call = self.view.substr(self.view.expand_by_class(point, expclass, word_sep))
        # Split on ':' while tolerating quoted atoms around it.
        match = re.split('\'?:\'?', call)
        # TODO: handle case when module is macro
        if len(match) == 2:
            return (match[0], match[1], match[0] == mod_name)
        else:
            return (mod_name, match[0], True)
    def on_done(self, index):
        # Quick-panel callback; index is -1 when the panel was cancelled.
        if index >= 0:
            self.window.open_file('{0}:{1}'.format(self.options[index][4], self.options[index][3]),
                                  sublime.ENCODED_POSITION)
def file_module_name(filename):
    """Erlang module name derived from *filename*, or None when the file
    does not have an Erlang source extension."""
    base = os.path.basename(filename)
    mod_name, ext = os.path.splitext(base)
    return mod_name if ext in ERLANG_EXTENSIONS else None
def lookup_symbol(window, symbol):
    """Look up *symbol* in the window's symbol indexes.

    Open-file results override index results for the same file, while the
    ordering of files from the index is preserved; open-file results with
    no index entry are appended at the end.  Symbols shorter than three
    non-blank characters yield no results.
    """
    if len(symbol.strip()) < 3:
        return []
    index_locations = window.lookup_symbol_in_index(symbol)
    open_file_locations = window.lookup_symbol_in_open_files(symbol)
    open_fnames = {loc[0] for loc in open_file_locations}
    merged = []
    consumed = set()  # filenames whose open-file entries were already merged
    for loc in index_locations:
        fname = loc[0]
        if fname in open_fnames:
            if fname not in consumed:
                merged.extend(ofl for ofl in open_file_locations if ofl[0] == fname)
                consumed.add(fname)
        else:
            merged.append(loc)
    merged.extend(ofl for ofl in open_file_locations if ofl[0] not in consumed)
    return merged
def loc_is_module(loc, expected):
    """True when the location's file resolves to the Erlang module *expected*."""
    module = file_module_name(loc[0])
    return module is not None and module == expected
| {"/core/monitor.py": ["/core/job.py", "/core/utils.py"], "/ErlSmart.py": ["/core/main.py", "/core/utils.py", "/core/smart_goto.py"], "/core/index.py": ["/core/utils.py"], "/core/main.py": ["/core/monitor.py", "/core/index.py", "/core/job.py", "/core/utils.py"]} |
57,094 | rongtou/ErlSmart | refs/heads/master | /core/monitor.py | import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from .job import add_index_job, del_index_job
from .utils import get_folders, adjust_path
class Monitor(object):
    """Watches the open project folders and schedules (re)index jobs when
    files change, via a single watchdog Observer thread."""
    def __init__(self):
        self.__observer = Observer()
        # path -> watchdog watch handle, one per monitored folder
        self.__watches = {}
    def start(self):
        """Begin watching every currently open folder."""
        for folder in get_folders():
            self.add_path(folder)
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # portable spelling (available since Python 2.6).
        if not self.__observer.is_alive():
            self.__observer.start()
    def add_path(self, path):
        """Start watching *path* recursively.

        Returns False when the path is already being watched, True otherwise.
        """
        if path in self.__watches:
            return False
        event_handler = ErlFileEventHandler()
        self.__watches[path] = self.__observer.schedule(event_handler, path, recursive=True)
        return True
    def remove_path(self, path):
        """Stop watching *path*."""
        self.__observer.unschedule(self.__watches.pop(path))
    def update_paths(self, paths):
        """Synchronize the watch set with *paths*.

        Returns the list of paths that were newly added; watches for paths
        no longer present are removed.
        """
        added = [path for path in paths if self.add_path(path)]
        for stale in list(set(self.__watches.keys()).difference(set(paths))):
            self.remove_path(stale)
        return added
    def shutdown(self):
        """Stop the observer thread and wait for it to terminate."""
        if self.__observer.is_alive():
            self.__observer.stop()
            self.__observer.join()
class ErlFileEventHandler(FileSystemEventHandler):
    """Translates watchdog filesystem events into index add/delete jobs."""
    def on_moved(self, event):
        what = 'directory' if event.is_directory else 'file'
        logging.debug("Moved %s: from %s to %s", what, event.src_path, event.dest_path)
        # Only the vanished source is handled here; the destination shows
        # up through its own created/modified events.
        del_index_job(adjust_path(event.src_path), event.is_directory)
    def on_created(self, event):
        what = 'directory' if event.is_directory else 'file'
        logging.debug("Created %s: %s", what, event.src_path)
        if event.is_directory:
            return
        add_index_job(adjust_path(event.src_path))
    def on_deleted(self, event):
        what = 'directory' if event.is_directory else 'file'
        logging.debug("Deleted %s: %s", what, event.src_path)
        del_index_job(adjust_path(event.src_path), event.is_directory)
    def on_modified(self, event):
        what = 'directory' if event.is_directory else 'file'
        logging.debug("Modified %s: %s", what, event.src_path)
        if event.is_directory:
            return
        add_index_job(adjust_path(event.src_path))
| {"/core/monitor.py": ["/core/job.py", "/core/utils.py"], "/ErlSmart.py": ["/core/main.py", "/core/utils.py", "/core/smart_goto.py"], "/core/index.py": ["/core/utils.py"], "/core/main.py": ["/core/monitor.py", "/core/index.py", "/core/job.py", "/core/utils.py"]} |
57,095 | rongtou/ErlSmart | refs/heads/master | /ErlSmart.py | import os
import re
import sublime
import sublime_plugin
from .core.main import startup, shutdown, scan
from .core.utils import get_folders
from .core.smart_goto import SmartGoto
import ErlSmart.core.global_vars as gv
def plugin_loaded():
    # Sublime API hook, called once the plugin host is ready.  chdir to the
    # plugin directory so relative paths (index/, parserv/) resolve.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    startup()
def plugin_unloaded():
    # Sublime API hook: stop the watcher thread and the worker pool.
    shutdown()
class ErlListener(sublime_plugin.EventListener):
    """Global listener: serves completions from the index and keeps the
    watched-folder set in sync with the open windows."""
    def on_query_completions(self, view, prefix, locations):
        """Completions for Erlang buffers.

        After ':' the preceding word is treated as a module and its exported
        functions are offered; otherwise module names plus erlang-module
        functions are offered for lowercase prefixes longer than one char.
        """
        if not view.match_selector(locations[0], "source.erlang"):
            return None
        # Character immediately before the completion prefix.
        point = locations[0] - len(prefix) - 1
        letter = view.substr(point)
        if letter == ':':
            module_name = view.substr(view.word(point))
            completions = gv.index_reader().get_completions(module_name, prefix)
            if completions:
                return (
                    completions,
                    sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
                )
            else:
                # Suppress Sublime's word completions even with no hits.
                return (
                    [],
                    sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
                )
        else:
            if re.match('^[0-9a-z_]+$', prefix) and len(prefix) > 1:
                return gv.index_reader().get_mods(prefix) + gv.index_reader().get_completions('erlang', prefix)
            else:
                return None
    def on_load(self, view):
        # handle open new project or folder
        if gv.monitor():
            all_folders = gv.monitor().update_paths(get_folders())
            if all_folders:
                sublime.set_timeout_async(lambda: scan(all_folders), 100)
    def on_window_command(self, window, command_name, args):
        # print("windows ", command_name, args)
        if command_name == 'remove_folder':
            # Folder removed from the project: stop watching it and purge
            # its files from the index.
            for path in args['dirs']:
                gv.monitor().remove_path(path)
                gv.index_writer().add_req("del", (path, True))
# (symbol-kind prefix, scope selector) pairs used by SmartGotoCommand to
# classify the token under the cursor via sublime.score_selector.
PREFIX_MAP = [
    ('Function', 'meta.function.erlang'),
    ('Function', 'meta.function.module.erlang'),
    ('Function', 'entity.name.function.erlang'),
    ('Function', 'entity.name.function.definition.erlang'),
    ('Type', 'storage.type.erlang'),
    ('Type', 'storage.type.module.erlang'),
    ('Type', 'storage.type.definition.erlang'),
    ('Record', 'storage.type.record.erlang'),
    ('Record', 'storage.type.record.definition.erlang'),
    ('Macro', 'keyword.other.macro.erlang'),
    ('Module', 'entity.name.type.class.module.erlang'),
    ('Yecc Rule', 'entity.name.token.unquoted.yecc'),
    ('Yecc Rule', 'entity.name.token.quoted.yecc')
]
class SmartGotoCommand(sublime_plugin.WindowCommand):
    """'smart_goto' command: classify the symbol under the cursor by syntax
    scope, then dispatch to the index-based or built-in goto-definition."""
    def run(self):
        view = sublime.active_window().active_view()
        point = view.sel()[0].begin()
        scope = view.scope_name(point)
        symbol = view.substr(view.word(point))
        # Pick the PREFIX_MAP entry whose selector best matches the scope.
        scores = map(lambda s: sublime.score_selector(scope, s[1]), PREFIX_MAP)
        (maxscore, match) = max(zip(scores, PREFIX_MAP), key=lambda z: z[0])
        kind = match[0]
        if maxscore == 0:
            # Nothing matched: fall back to a bare-symbol lookup.
            gotosym = symbol
        elif kind == 'Macro':
            # Drop the leading '?' of macro uses.
            gotosym = kind + ': ' + strip_before('?', symbol)
        elif kind == 'Record':
            # Drop the leading '#' of record uses.
            gotosym = kind + ': ' + strip_before('#', symbol)
        elif kind == 'Function':
            return SmartGoto(view).run(kind, point)
        elif kind == 'Type':
            return SmartGoto(view).run(kind, point)
        else:
            gotosym = kind + ': ' + symbol
        sublime.active_window().run_command('goto_definition', {'symbol': gotosym})
def strip_before(char, s):
    """Return the part of *s* after the first occurrence of *char*.

    When *char* is absent the whole string is returned (str.find yields -1,
    so the slice starts at 0).
    """
    return s[s.find(char) + 1:]
| {"/core/monitor.py": ["/core/job.py", "/core/utils.py"], "/ErlSmart.py": ["/core/main.py", "/core/utils.py", "/core/smart_goto.py"], "/core/index.py": ["/core/utils.py"], "/core/main.py": ["/core/monitor.py", "/core/index.py", "/core/job.py", "/core/utils.py"]} |
57,096 | rongtou/ErlSmart | refs/heads/master | /core/index.py | import sqlite3
import ErlSmart.core.global_vars as gv
import queue
import hashlib
import logging
import os
import platform
import traceback
from threading import Thread
from .utils import path_in_cur_folders
index_file = "index/index.db"
CREATE_FOLDER_SQL = '''
create table if not exists file_path (
fid varchar(32) not null,
path varchar(256) not null,
updated_at int unsigned not null,
primary key(fid)
);
'''
CREATE_ERL_FILE_SQL = '''
create table if not exists erl_file (
fid varchar(256) not null,
mod varchar(128) not null,
fun varchar(128) not null,
arity tinyint not null,
line int unsigned not null,
args varchar(256) not null,
exported bool not null,
primary key(fid, mod, fun, arity, line)
);
'''
def init_index():
    """Create the reader/writer singletons and ensure the tables exist."""
    reader = IndexReader()
    gv.set_index_reader(reader)
    reader.create_table()
    writer = IndexWriter()
    gv.set_index_writer(writer)
class IndexReader(object):
    """Read side of the index: a small pool of SQLite connections shared by
    the query helpers for completions, goto-definition and freshness checks."""
    def __init__(self):
        if not os.path.exists("index"):
            os.makedirs("index")
        self.__pool_size = 5
        self.__pool = queue.Queue(self.__pool_size)
        self._create_con()
    def _create_con(self):
        # Pre-open all connections; WAL mode lets the writer thread commit
        # without blocking these readers.
        for i in range(self.__pool_size):
            con = sqlite3.connect(index_file, check_same_thread=False)
            con.execute('pragma journal_mode=wal')
            self.__pool.put(con)
    def get_con(self) -> sqlite3.Connection:
        """Borrow a connection from the pool (blocks when all are in use)."""
        return self.__pool.get()
    def release_con(self, con):
        """Return a borrowed connection to the pool."""
        self.__pool.put(con)
    def create_table(self):
        """Create both index tables if they do not exist yet."""
        con = self.get_con()
        con.execute(CREATE_FOLDER_SQL)
        con.execute(CREATE_ERL_FILE_SQL)
        self.release_con(con)
    def is_file_need_update(self, path: str, modified: int) -> bool:
        """True when *path* is unknown to the index or its stored mtime
        differs from *modified*."""
        ret = None
        con = self.get_con()
        cur = con.cursor()
        try:
            cur.execute("select updated_at from file_path where path=?", (path,))
            ret = cur.fetchone()
        except sqlite3.Error:
            traceback.print_exc()
        finally:
            self.release_con(con)
        if ret is None:
            return True
        else:
            return ret[0] != modified
    def get_completions(self, mod, fun):
        """Sublime completion entries for exported functions of *mod* whose
        name starts with *fun*."""
        completions = []
        con = self.get_con()
        cur = con.cursor()
        ret = []
        try:
            cur.execute(
                "select path, fun, arity, args from erl_file e join file_path f on e.fid = f.fid where mod=? and exported = 1 and fun like ? order by fun, arity",
                (mod, fun + "%"))
            ret = cur.fetchall()
        except sqlite3.Error:
            traceback.print_exc()
        finally:
            self.release_con(con)
        for (path, fun, arity, args) in ret:
            if path_in_cur_folders(path):
                arglist = args.split(", ")
                # Build a snippet with one tab-stop per argument, e.g.
                # "foo(${1:A}, ${2:B})$3".
                param_list = ['${{{0}:{1}}}'.format(i + 1, arglist[i]) for i in range(arity)]
                param_str = ', '.join(param_list)
                completion = '{0}({1})${2}'.format(fun, param_str, arity + 1)
                completions.append(['{}/{}\tMethod'.format(fun, arity), completion])
        return completions
    def get_mods(self, mod):
        """Module-name completion entries for the prefix *mod*."""
        completions = []
        con = self.get_con()
        cur = con.cursor()
        ret = []
        try:
            # Bind the prefix instead of concatenating it into the SQL text:
            # same results, but immune to quoting/injection problems.
            cur.execute(
                "select path, mod from file_path as f left join (select fid, mod from erl_file group by fid, mod) as e on e.fid = f.fid where mod like ?",
                (mod + "%",))
            ret = cur.fetchall()
        except sqlite3.Error:
            traceback.print_exc()
        finally:
            self.release_con(con)
        for (path, mod) in ret:
            if path_in_cur_folders(path):
                completions.append(['{}\tModule'.format(mod), mod])
        return completions
    def get_paths(self):
        """All file paths currently present in the index."""
        paths = []
        con = self.get_con()
        cur = con.cursor()
        ret = []
        try:
            cur.execute("select path from file_path")
            ret = cur.fetchall()
        except sqlite3.Error:
            traceback.print_exc()
        finally:
            self.release_con(con)
        for (p,) in ret:
            paths.append(p)
        return paths
    def find_fun(self, mod, fun):
        """(mod, fun, arity, line, path) tuples for every definition of
        *mod*:*fun*, restricted to currently relevant folders."""
        options = []
        ret = []
        con = self.get_con()
        cur = con.cursor()
        try:
            cur.execute(
                "select mod, fun, arity, line, path from erl_file e join file_path f on e.fid = f.fid where mod=? and fun = ? order by arity",
                (mod, fun))
            ret = cur.fetchall()
        except sqlite3.Error:
            traceback.print_exc()
        finally:
            self.release_con(con)
        for (mod, fun, arity, line, path) in ret:
            if path_in_cur_folders(path):
                options.append((mod, fun, arity, line, path))
        return options
    def find_mod(self, mod):
        """First relevant (path,) tuple defining module *mod*, or None."""
        con = self.get_con()
        cur = con.cursor()
        ret = []
        try:
            cur.execute(
                "select path from erl_file e join file_path f on e.fid = f.fid where mod=?", (mod,))
            ret = cur.fetchall()
        except sqlite3.Error:
            traceback.print_exc()
        finally:
            self.release_con(con)
        for (path,) in ret:
            if path_in_cur_folders(path):
                return (path,)
        return None
class IndexWriter(Thread):
    """Single writer thread: serializes all index mutations through a queue
    so only one connection ever writes to the SQLite database."""
    def __init__(self):
        super(IndexWriter, self).__init__()
        self.__con = sqlite3.connect(index_file, check_same_thread=False)
        self.__con.execute('pragma journal_mode=wal')
        self.__cur = self.__con.cursor()
        self.__reqs = queue.Queue()
        # run() below never returns; daemonize so this worker cannot keep
        # the interpreter alive at shutdown.
        self.daemon = True
        self.start()
    def run(self):
        while True:
            op, param = self.__reqs.get()
            if op == "index":
                self.add_index(param)
            else:
                self.del_index(param)
    def add_index(self, param):
        """Insert or refresh the rows for one parsed file.

        *param* is (path, parse_obj); parse_obj is the parse server's JSON
        dict ({'mod': ..., 'func': [...]}) or "" when there is nothing to add.
        """
        path, parse_obj = param
        modified = int(os.path.getmtime(path))
        try:
            self.__cur.execute("select updated_at from file_path where path=?", (path,))
            ret = self.__cur.fetchone()
            if ret is None:
                need_update = True
            else:
                need_update = ret[0] != modified
            if not need_update:
                return
            fid = make_id(path)
            if ret is None:
                logging.debug("index insert %s", path)
                self.__cur.execute("insert into file_path(fid, path, updated_at) values(?,?,?)", (fid, path, modified))
            else:
                logging.debug("index update %s", path)
                self.__cur.execute("update file_path set updated_at = ? where fid = ?", (modified, fid))
                self.__cur.execute("delete from erl_file where fid=?", (fid,))
            if parse_obj != "":
                mod = parse_obj['mod']
                for funobj in parse_obj['func']:
                    logging.debug("index %s %s", mod, funobj['name'])
                    self.__cur.execute(
                        "insert into erl_file(fid, mod, fun, arity, line, args, exported) values(?,?,?,?,?,?,?)",
                        [fid, mod, funobj['name'], funobj['arity'], funobj['line'],
                         ", ".join(funobj['args']), funobj['exported']])
            self.__con.commit()
        except Exception:
            # watchdog sometimes fires the modified event before the file
            # is fully written; the parse result then lacks 'mod' - roll back
            logging.info("add_index_error, path %s", path)
            self.__con.rollback()
    def del_index(self, param):
        """Remove one file - or every file under a directory - from both
        tables.  *param* is (path, is_dir)."""
        path, is_dir = param
        logging.debug("del index %s %s", path, is_dir)
        try:
            if not is_dir:
                root, ext = os.path.splitext(path)
                if ext != ".erl":
                    return
                fid = make_id(path)
                self.__cur.execute("delete from file_path where fid=?", (fid,))
                self.__cur.execute("delete from erl_file where fid=?", (fid,))
                self.__con.commit()
            else:
                if platform.system() == "Windows":
                    prefix = path + "\\"
                else:
                    prefix = path + "/"
                # Bind the prefix instead of splicing it into the SQL text;
                # paths containing a quote previously broke the statement.
                self.__cur.execute("select fid from file_path where path like ?", (prefix + "%",))
                rets = self.__cur.fetchall()
                if len(rets) > 0:
                    for ret in rets:
                        self.__cur.execute("delete from file_path where fid=?", (ret[0],))
                        self.__cur.execute("delete from erl_file where fid = ?", (ret[0],))
                    self.__con.commit()
        except sqlite3.Error:
            self.__con.rollback()
            traceback.print_exc()
    def add_req(self, op, param):
        """Thread-safe entry point: queue an ('index'|'del', param) request."""
        self.__reqs.put((op, param))
def make_id(path: str):
    """Return the hex MD5 digest of *path*, used as a stable file id."""
    return hashlib.md5(path.encode("utf-8")).hexdigest()
| {"/core/monitor.py": ["/core/job.py", "/core/utils.py"], "/ErlSmart.py": ["/core/main.py", "/core/utils.py", "/core/smart_goto.py"], "/core/index.py": ["/core/utils.py"], "/core/main.py": ["/core/monitor.py", "/core/index.py", "/core/job.py", "/core/utils.py"]} |
57,097 | rongtou/ErlSmart | refs/heads/master | /core/global_vars.py |
_global_dict = {}
def put(name, value):
    """Store *value* under *name* in the shared registry."""
    # Mutating the dict does not rebind the module-level name, so no
    # ``global`` declaration is needed.
    _global_dict[name] = value
def get(name, default=None):
    """Return the value stored under *name*, or *default* when absent."""
    return _global_dict.get(name, default)
# Convenience accessors for the well-known singletons kept in the registry.
# Each set_* is called once during plugin startup.
def index_reader():
    return get('index_reader')
def set_index_reader(reader):
    put('index_reader', reader)
def index_writer():
    return get('index_writer')
def set_index_writer(writer):
    put('index_writer', writer)
def monitor():
    return get('monitor')
def set_monitor(monitor):
    put('monitor', monitor)
def pool():
    return get('pool')
def set_pool(pool):
    put('pool', pool)
def erl_lib():
    # Path of the Erlang/OTP lib directory, set during the initial scan.
    return get('erl_lib')
def set_erl_lib(path):
    put('erl_lib', path)
| {"/core/monitor.py": ["/core/job.py", "/core/utils.py"], "/ErlSmart.py": ["/core/main.py", "/core/utils.py", "/core/smart_goto.py"], "/core/index.py": ["/core/utils.py"], "/core/main.py": ["/core/monitor.py", "/core/index.py", "/core/job.py", "/core/utils.py"]} |
57,098 | rongtou/ErlSmart | refs/heads/master | /core/utils.py | import sublime
import platform
import ErlSmart.core.global_vars as gv
def get_folders():
    """All folders open in any Sublime window, with normalized paths."""
    folders = []
    for window in sublime.windows():
        folders.extend(window.folders())
    return [adjust_path(folder) for folder in folders]
def adjust_path(path: str):
    """Normalize *path* for comparisons.

    On Windows, forward slashes become backslashes and the drive letter is
    capitalized; on other platforms the path is returned unchanged.
    """
    if platform.system() != "Windows":
        return path
    parts = path.replace("/", "\\").split("\\")
    parts[0] = parts[0].capitalize()
    return "\\".join(parts)
def path_in_cur_folders(path):
    """True when *path* lies under the Erlang lib dir or any open folder."""
    folders = [gv.erl_lib()] + sublime.active_window().folders()
    return any(path.startswith(folder) for folder in folders)
| {"/core/monitor.py": ["/core/job.py", "/core/utils.py"], "/ErlSmart.py": ["/core/main.py", "/core/utils.py", "/core/smart_goto.py"], "/core/index.py": ["/core/utils.py"], "/core/main.py": ["/core/monitor.py", "/core/index.py", "/core/job.py", "/core/utils.py"]} |
57,099 | rongtou/ErlSmart | refs/heads/master | /core/main.py | import sublime
import logging
import os
import platform
import subprocess
import threading
import ErlSmart.core.global_vars as gv
from concurrent.futures import ThreadPoolExecutor
from .monitor import Monitor
from .index import init_index
from .job import add_index_job, del_index_job
from .utils import get_folders, adjust_path
def startup():
    # Plugin bootstrap.  Order matters: logging first, then the index the
    # jobs write to, the parse server the jobs call, the worker pool, and
    # only then the filesystem watcher and the initial scan.
    init_log()
    init_index()
    start_parserv()
    init_pool()
    start_monitor()
    scan_file()
    del_outdated_index()
def shutdown():
    # Stop the file watcher before the pool so no new jobs are submitted.
    gv.monitor().shutdown()
    gv.pool().shutdown()
def init_log():
    # Plugin-wide logging; WARNING keeps the Sublime console quiet.
    logging.basicConfig(level=logging.WARNING,
                        format='%(asctime)s - %(levelname)s : %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
def start_parserv():
    # The parse server call blocks until the erl process exits, so run it
    # on a dedicated thread.
    t = threading.Thread(target=start_parserv2, name='ParserServer')
    t.start()
def start_parserv2():
    """Launch the Erlang parse server and block until it exits.

    The server provides the local HTTP endpoint that do_index uses to parse
    .erl files into JSON.  The command line is identical on all platforms;
    only the process options differ.
    """
    cmd = ['erl', '-boot', 'start_sasl', '-noshell', '-noinput',
           '-pa', 'parserv/_build/default/lib/parserv/ebin',
           '-pa', 'parserv/_build/default/lib/cowboy/ebin',
           '-pa', 'parserv/_build/default/lib/cowlib/ebin',
           '-pa', 'parserv/_build/default/lib/jsx/ebin',
           '-pa', 'parserv/_build/default/lib/ranch/ebin',
           '-s', 'parserv_main']
    if platform.system() == "Windows":
        # On Windows all stdio is detached and shell=True is used, as in
        # the original implementation - presumably to avoid a console
        # window / resolve erl via the shell; confirm before changing.
        subprocess.Popen(cmd, stdout=subprocess.DEVNULL,
                         stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL,
                         shell=True).communicate()
    else:
        subprocess.Popen(cmd).communicate()
def start_monitor():
    # Watch the open folders for file changes and keep the index in sync.
    monitor = Monitor()
    monitor.start()
    gv.set_monitor(monitor)
def init_pool():
    # A single worker keeps index jobs strictly serialized.
    gv.set_pool(ThreadPoolExecutor(1))
def scan_file():
    # Resolve the Erlang/OTP lib directory via escript so the standard
    # library is indexed too, then scan it together with the open folders.
    erl_lib = adjust_path(subprocess.getoutput("escript core/erl_lib.erl"))
    gv.set_erl_lib(erl_lib)
    all_folders = [erl_lib] + get_folders()
    sublime.set_timeout_async(lambda: scan(all_folders), 100)
def scan(all_folders: list):
    """Walk every folder in *all_folders* and queue an index job for each
    file found (non-.erl files are filtered out by add_index_job)."""
    for root in all_folders:
        for dirpath, _dirs, filenames in os.walk(root):
            for name in filenames:
                add_index_job(os.path.join(dirpath, name))
def del_outdated_index():
    # Defer to Sublime's async thread; scanning the whole index may be slow.
    sublime.set_timeout_async(lambda: del_outdated_index2(), 50)
def del_outdated_index2():
    # Queue removal for every indexed file that no longer exists on disk.
    paths = gv.index_reader().get_paths()
    for path in paths:
        if not os.path.exists(path):
            del_index_job(path, False)
| {"/core/monitor.py": ["/core/job.py", "/core/utils.py"], "/ErlSmart.py": ["/core/main.py", "/core/utils.py", "/core/smart_goto.py"], "/core/index.py": ["/core/utils.py"], "/core/main.py": ["/core/monitor.py", "/core/index.py", "/core/job.py", "/core/utils.py"]} |
57,145 | michalburdzy/python_tutorial | refs/heads/master | /ankieta/models.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.timezone import now
# Create your models here.
class Pytanie(models.Model):
    """Poll question ("pytanie") with its publication timestamp."""
    text_pytania = models.CharField(max_length=200)
    data_publikacji = models.DateTimeField(default=now, editable=False)
    def siemanko(self):
        # Debug helper returning a Polish greeting with the question id.
        return 'Siemanko z pytania nr : {}'.format(self.id)
    def __str__(self):
        return self.text_pytania
    class Meta:
        # Newest questions first by default.
        ordering = (['-data_publikacji'])
    def __unicode__(self):
        # NOTE(review): Python 2 only - `unicode` is undefined on Python 3,
        # where __str__ above is used instead; confirm target interpreter.
        return unicode(self.text_pytania)
class Odpowiedz(models.Model):
pytanie = models.ForeignKey(Pytanie, on_delete=models.CASCADE)
odpowiedz = models.CharField(max_length=200)
glosy = models.IntegerField(default=0)
def __str__(self):
return self.odpowiedz
| {"/ankieta/views.py": ["/ankieta/models.py"]} |
57,146 | michalburdzy/python_tutorial | refs/heads/master | /ankieta/views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse
from .models import Pytanie
from django.template import loader
# Create your views here.
def index(request):
pytania_od_najnowszego = Pytanie.objects.order_by('data_publikacji')[:5]
# return HttpResponse(', '.join([p.text_pytania for p in pytania_od_najnowszego]))
template = loader.get_template('ankieta/index.html')
context = {
'lista_pytan': pytania_od_najnowszego
}
return HttpResponse(template.render(context, request))
def detail(request, question_id):
return HttpResponse('Szukasz pytania z id %s' % question_id)
def results(request, question_id):
response = 'Szukasz rezultatu pytania z id %s'
return HttpResponse(response % question_id)
def vote(request, question_id):
return HttpResponse('Głosujesz na pytanie z id %s' % question_id)
| {"/ankieta/views.py": ["/ankieta/models.py"]} |
57,151 | darkb1ade/RealizedVolPrediction_Kaggle | refs/heads/develop | /model/XGBoost.py | from model.BaseModel import BaseModel
# Visuallize
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn import model_selection
# Modeling
from xgboost import XGBRegressor
from helper import utils
import optuna
class LGBM(BaseModel):
"""
This is lightGBM class. Insert list of str contain feature column here.
Output column should be named "output"
To run, use lgbm.train_and_test() <- recommend
"""
def __init__(self, feature_column=None):
super().init()
# Parameters of Light GBM
self.param = {
'tree_method': 'gpu_hist',
'lambda': 1,
'alpha': 1,
'colsample_bytree': 1.0,
'subsample': 1.0,
'learning_rate': 0.01,
'n_estimators': 1000,
'max_depth': 20,
'random_state': 2020,
'min_child_weight': 300
}
# seed0 = 2000
# self.params_lgbm = {
# 'objective': 'rmse',
# 'boosting_type': 'gbdt',
# 'max_depth': -1,
# 'max_bin': 100,
# 'min_data_in_leaf': 500,
# 'learning_rate': 0.05,
# 'subsample': 0.72,
# 'subsample_freq': 4,
# 'feature_fraction': 0.5,
# 'lambda_l1': 0.5,
# 'lambda_l2': 1.0,
# 'categorical_column': [0],
# 'seed': seed0,
# 'feature_fraction_seed': seed0,
# 'bagging_seed': seed0,
# 'drop_seed': seed0,
# 'data_random_seed': seed0,
# 'n_jobs': -1,
# 'verbose': -1}
# k-folds Ensemble Training
self.n_folds = 5
self.n_rounds = 10000
# Get feature name (Not sure if this work), Data must have column name of following features and output is 'target'
# features = [col for col in X_train.columns if col not in {"time_id", "target", "row_id"}]
if feature_column is None:
self.features = ['stock_id', 'log_return1', 'log_return2', 'trade_log_return1'] # Need to change
else:
self.features = feature_column
self.output_feature = "output"
# Define loss function for lightGBM training
def feval_RMSPE(self, preds, train_data):
labels = train_data.get_label()
return 'RMSPE', round(utils.rmspe(y_true=labels, y_pred=preds), 5), False
def train(self, X_train, y_train, X_val, y_val, param=None):
if param is None:
param = self.param
# training
model = XGBRegressor(**param)
model.fit(X_train, y_train, eval_set=[(X_val, y_val)], early_stopping_rounds=100, verbose=False)
# Prediction w/ validation data
preds_val = model.predict(X_val[self.features])
# train.loc[val_index, pred_name] = preds_val
# RMSPE calculation
score = round(utils.rmspe(y_true=y_val, y_pred=preds_val), 5)
return score, preds_val, model
def test(self, model, test_input):
# Prediction w/ validation data
test_preds = model.predict(test_input[self.features]).clip(0, 1e10)
return test_preds
# Combine train and test, with KFold CV
def train_and_test(self, train_input, test_input, param=None):
"""
:param train_input: pd array. Contain both feature data and "output" data
:param test_input: pd array. Contain feature data to test
:return: test_prediction (??): predicted output data of 'test_input'
"""
cv_trial = 1
kf = model_selection.KFold(n_splits=self.n_folds, shuffle=True, random_state=15)
# Create out of folds array
oof_predictions = np.zeros(train_input.shape[0])
# Create test array to store predictions
test_predictions = np.zeros(test_input.shape[0])
for train_index, val_index in kf.split(range(len(train_input))):
print(f'CV trial : {cv_trial} /{self.n_folds}')
# Divide dataset into train and validation data such as Cross Validation
X_train = train_input.loc[train_index, self.features]
y_train = train_input.loc[train_index, self.output_feature].values
X_val = train_input.loc[val_index, self.features]
y_val = train_input.loc[val_index, self.output_feature].values
score, preds_val, model = self.train(X_train, y_train, X_val, y_val, param)
test_preds = self.test(model, test_input)
oof_predictions[val_index] = preds_val
test_predictions += test_preds / self.n_folds
cv_trial += 1
rmspe_score = self.rmspe(train_input[self.output_feature], oof_predictions)
print(f'Our out of folds RMSPE is {rmspe_score}')
return test_predictions, rmspe_score
def optimize_params(self, train_input, test_input):
def objective(trial):
param = {
'tree_method': 'gpu_hist',
'lambda': trial.suggest_loguniform('lambda', 1e-3, 10.0),
'alpha': trial.suggest_loguniform('alpha', 1e-3, 10.0),
'colsample_bytree': trial.suggest_categorical('colsample_bytree',
[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]),
'subsample': trial.suggest_categorical('subsample', [0.4, 0.5, 0.6, 0.7, 0.8, 1.0]),
'learning_rate': trial.suggest_categorical('learning_rate',
[0.008, 0.009, 0.01, 0.012, 0.014, 0.016, 0.018, 0.02]),
'n_estimators': trial.suggest_int('n_estimators', 500, 3000),
'max_depth': trial.suggest_categorical('max_depth', [5, 7, 9, 11, 13, 15, 17, 20]),
'random_state': trial.suggest_categorical('random_state', [24, 48, 2020]),
'min_child_weight': trial.suggest_int('min_child_weight', 1, 300)}
# params = {"objective": "reg:squarederror",
# "eval_metric": "rmse",
# "tree_method": "hist",
# "grow_policy": "lossguide",
# 'silent': 1,
# "seed": 1,
# "colsample_bytree": 1,
# "subsample": 1,
# 'max_leaves': 31, # lossguideの場合、当該項目の設定が必要(default: 0)
# "max_depth": trial.suggest_int('max_depth', 2, 12),
# "eta": trial.suggest_loguniform('eta', 10e-2, 1),
# "alpha": trial.suggest_uniform('alpha', 0.0, 1.0),
# "lambda": trial.suggest_uniform('lambda', 0.0, 1.0)}
print(params)
_, score = self.train_and_test(train_input,test_input,params)
return score
opt = optuna.create_study(
direction='minimize',
sampler=optuna.samplers.RandomSampler(seed=1))
opt.optimize(objective,
n_trials=self.ntrial)
trial = opt.best_trial
params = self.params_lgbm.copy()
# params.update(**params, **trial.params)
for key, value in trial.params.items():
print('"{}" : {}'.format(key, value))
params[key] = value
test_predictions , score = self.train_and_test(train_input,test_input,params)
return test_predictions, score
if __name__ == "__main__":
lgbm = LGBM() | {"/model/XGBoost.py": ["/model/BaseModel.py"], "/helper/dataset.py": ["/model/features.py"], "/model/LGBM.py": ["/model/BaseModel.py"]} |
57,152 | darkb1ade/RealizedVolPrediction_Kaggle | refs/heads/develop | /helper/dataset.py | from model.features import *
import pandas as pd
import yaml
import glob
import os
from IPython.display import display
#os.chdir('C:/Users/Darkblade/Documents/Kaggle/RealizedVolPrediction_Kaggle')
print('current path', os.getcwd())
class DataLoader():
def __init__(self, mode):
self.conf = yaml.load(open('config/main.yaml'), Loader=yaml.FullLoader)
self.mode = mode
self.book_path = sorted(glob.glob(f"{self.conf['path']}/book_{mode}.parquet/*/*"))
self.trade_path = sorted(glob.glob(f"{self.conf['path']}/trade_{mode}.parquet/*/*"))
self.book_path = [l.replace('\\','/') for l in self.book_path]
self.book_path = [l.replace('//','/') for l in self.book_path]
self.trade_path = [l.replace('\\','/') for l in self.trade_path]
self.trade_path = [l.replace('//','/') for l in self.trade_path]
def get_each_parquet(self, i, show = False): # mode = 'test', 'train'
# i = int(i) if not isinstance(i, int) else i
# assert self.book_path[i].split('=')[-1].split('/')[0]==self.trade_path[i].split('=')[-1].split('/')[0], 'book and trade file not correspondence'
# book_path = glob.glob(f"{self.conf['path']}/book_{mode}.parquet/stock_id={i}/*")[0]
# stock_id = self.book_path[i].split('=')[-1].split('/')[0]
print('stock_id:', i)
book_path = glob.glob(f"{self.conf['path']}/book_{self.mode}.parquet/stock_id={i}/*")
trade_path = glob.glob(f"{self.conf['path']}/trade_{self.mode}.parquet/stock_id={i}/*")
# print(book_path)
# print(trade_path)
assert len(book_path)!=0 or len(trade_path)!=0, f"can't find stock id {i}"
dfBook = pd.read_parquet(book_path[0])
dfTrade = pd.read_parquet(trade_path[0])
dfBook0 = dfBook.copy()
dfTrade0 = dfTrade.copy()
if show:
display(dfBook.head())
display(dfTrade.head())
dfVol, dfBook_inter = self._cal_features(dfBook)
dfVol['stock_id'] = i
cols = dfVol.columns.tolist()
cols = cols[-1:] + cols[:-1]
dfVol = dfVol[cols]
dfTrade, dfTrade_inter = self._cal_features(dfTrade, flag = 'o')
for df in self._cal_features_time_series(dfVol.time_id, dfBook, show = False):
dfVol = pd.merge(dfVol, df, on=["time_id"])
dfVol = pd.merge(dfVol, dfTrade, on=["time_id"])
print('result')
display(dfVol)
return dfVol, dfBook_inter, dfTrade_inter, dfBook0, dfTrade0 # return result after process and raw input
def _cal_features_time_series(self, t, dfBook, show = False):
tmp = {} # dict window_size: list of feature
for k, v in self.conf['features'].items():
#print(v)
for w in v['Ex_win_size']: # if not blank
if w in tmp:
tmp[w].append(v['name'])
else:
tmp[w] = [v['name']]
for w, cols in tmp.items():
yield get_time_series(t, dfBook, cols, w, show = show)
def _cal_features(self, dfBook, flag = 'b', show = False):
for k, v in self.conf['features'].items():
if k[0]==flag:
if v.get('func') is not None:
eval(v.get('func'))(dfBook)
log_run(dfBook, v['name'])
dfBook = dfBook.dropna()
col_name = dfBook.columns.tolist()
col_name = [a for a in col_name if 'logReturn_' in a]
#col_name = ['logReturn_'+v['name'] for k, v in self.conf['features'].items() if k[0]==flag ] #cal_vol
dfVol = cal_vol(dfBook, col_name)
# display result
if show:
display(dfBook)
display(dfVol)
return dfVol, dfBook
def get_all_parquet(self, show = False): # mode = 'test', 'train'
df_volRes = pd.DataFrame()
#df_tradeRes = pd.DataFrame()
for lb, lt in zip(self.book_path, self.trade_path):
assert lb.split('=')[-1][0]==lt.split('=')[-1][0], 'book and trade file not correspondence'
stock_id = lb.split('=')[-1].split('/')[0]
print('Reading stock id:', stock_id)
dfVol, _, _, _, _ = self.get_each_parquet(stock_id)
# dfBook = pd.read_parquet(lb)
# dfTrade = pd.read_parquet(lt)
# if show:
# display(dfBook.head())
# display(dfTrade.head())
# print('feature calculation')
# dfVol, _ = self._cal_features(dfBook)
# dfVol['stock_id'] = stock_id
# cols = dfVol.columns.tolist()
# cols = cols[-1:] + cols[:-1]
# dfVol = dfVol[cols]
# dfTrade, _ = self._cal_features(dfTrade, flag = 'o')
# dfVol = pd.merge(dfVol, dfTrade, on=["time_id"])
# display(dfVol)
# display(dfBook)
# print('time series feature calculation')
# for df in self._cal_features_time_series(dfVol.time_id, dfBook, show = False):
# dfVol = pd.merge(dfVol, df, on=["time_id"])
df_volRes = pd.concat([df_volRes, dfVol])
if show:
display(df_volRes)
return df_volRes
def get_gt(self):
return pd.read_csv(f"{self.conf['path']}/{self.mode}.csv") | {"/model/XGBoost.py": ["/model/BaseModel.py"], "/helper/dataset.py": ["/model/features.py"], "/model/LGBM.py": ["/model/BaseModel.py"]} |
57,153 | darkb1ade/RealizedVolPrediction_Kaggle | refs/heads/develop | /model/cali_proc.py | import lightgbm as lgb
import pandas as pd
import numpy as np
import xgboost as xgb
import pickle
from sklearn.model_selection import KFold
import optuna
from sklearn.metrics import mean_squared_error
import os
import matplotlib.pyplot as plt
import logging
def straight_throught_forecast(x_train, y_train, x_test, y_test,conf): #conf = config['interp']['model']
os.makedirs('train_log', exist_ok=True)
logging.basicConfig(filename=f"train_log/xgb_train.log", filemode='w',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s: %(message)s', level=logging.INFO)
log = logging.getLogger()
NFOLD = conf['NFOLD']
NTRIAL = conf['NTRIAL']
def objective(trial):
# ==== 定義パラメータ ====
params = {"objective": "reg:squarederror",
"eval_metric": "rmse",
"tree_method": "hist",
"grow_policy": "lossguide",
'silent': 1,
"seed": 1,
"colsample_bytree": 1,
"subsample": 1,
'max_leaves': 31, # lossguideの場合、当該項目の設定が必要(default: 0)
"max_depth": trial.suggest_int('max_depth', 2, 12),
"eta": trial.suggest_loguniform('eta', 10e-2, 1),
"alpha": trial.suggest_uniform('alpha', 0.0, 1.0),
"lambda": trial.suggest_uniform('lambda', 0.0, 1.0)}
if int(0.95 * 2 ** params['max_depth']) > 31:
params['max_leaves'] = int(0.95 * 2 ** params['max_depth'])
print(params)
scores = 0
folds = KFold(n_splits=NFOLD, shuffle=True)
for fold_n, (train_index, test_index) in enumerate(folds.split(x_train)):
dtrain = xgb.DMatrix(data=x_train[train_index], label=y_train[train_index])
dvalid = xgb.DMatrix(data=x_train[test_index], label=y_train[test_index])
xgb_reg = xgb.train(params,
dtrain=dtrain,
num_boost_round=10 ** 6,
early_stopping_rounds=100,
evals=[(dvalid, 'validation')],
verbose_eval=10 ** 3)
score = xgb_reg.best_score
print("{}Fold: {}".format(fold_n,
score))
log.info("{}Fold: {}".format(fold_n,
score))
scores += score / NFOLD
print("RMSE:", scores)
return scores
# scoreの最大化は"maximize"。最小化の場合は"minimize"
opt = optuna.create_study(
direction='minimize',
sampler=optuna.samplers.RandomSampler(seed=1))
opt.optimize(objective,
n_trials=NTRIAL)
trial = opt.best_trial
params = {"objective": "reg:squarederror",
"eval_metric": "rmse",
"tree_method": "hist",
"grow_policy": "lossguide",
'silent': 1,
"seed": 1,
"colsample_bytree": 1,
"subsample": 1,
"max_depth": 0,
"eta": 0,
"alpha": 0,
"lambda": 0, }
# params.update(**params, **trial.params)
for key, value in trial.params.items():
print('"{}" : {}'.format(key, value))
params[key] = value
if int(0.95 * 2 ** params['max_depth']) > 31:
params['max_leaves'] = int(0.95 * 2 ** params['max_depth'])
folds = KFold(n_splits=NFOLD, shuffle=True)
train_index, valid_index = list(folds.split(x_train))[-1]
dtrain = xgb.DMatrix(data=x_train[train_index], label=y_train[train_index])
dvalid = xgb.DMatrix(data=x_train[valid_index], label=y_train[valid_index])
_xgb_reg = xgb.train(
params,
dtrain=dtrain,
num_boost_round=10 ** 6,
early_stopping_rounds=100,
evals=[(dvalid, 'last_valid'), ],
verbose_eval=1000)
xgb_reg = xgb.train(params,
xgb.DMatrix(data=x_train,
label=y_train),
num_boost_round=_xgb_reg.best_ntree_limit)
predicted = xgb_reg.predict(xgb.DMatrix(data=x_test,
label=y_test))
# 結果出力
pred_df = pd.DataFrame(y_test)
pred_df['pred'] = predicted
pred_df.columns = ['true', 'pred']
return pred_df, xgb_reg, params, opt.best_trial
def straight_throught_forecast_lgb(X_train, y_train, X_test, y_test,conf): #conf = config['interp']['model']
os.makedirs('train_log', exist_ok=True)
logging.basicConfig(filename=f"train_log/xgb_train.log", filemode='w',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s: %(message)s', level=logging.INFO)
log = logging.getLogger()
NFOLD = conf['NFOLD']
NTRIAL = conf['NTRIAL']
def objective(trial):
# ==== 定義パラメータ ====
params = {
'verbose': -1,
'objective': 'regression',
'metric': 'rmse',
'max_bin': trial.suggest_int('max_bin', 1, 512),
'num_leaves': trial.suggest_int('num_leaves', 2, 512),
'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0),
'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0),
'num_leaves': trial.suggest_int('num_leaves', 2, 256),
'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 1, 50),
'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
'n_estimators': trial.suggest_int('n_estimators', 50, 1000),
"max_depth": trial.suggest_int('max_depth', 2, 12),
'sub_feature': trial.suggest_uniform('sub_feature', 0.0, 1.0),
'sub_row': trial.suggest_uniform('sub_row', 0.0, 1.0)
}
print(params)
scores = 0
folds = KFold(n_splits=NFOLD, shuffle=True)
for fold_n, (train_index, test_index) in enumerate(folds.split(X_train)):
dtrain = lgb.Dataset(X_train[train_index], label=y_train[train_index])
dvalid = lgb.Dataset(X_train[test_index], label=y_train[test_index])
booster = lgb.train(params,
dtrain,
num_boost_round=10 ** 6,
early_stopping_rounds=100,
valid_sets=dvalid,
verbose_eval=10 ** 3)
preds = booster.predict(X_train[test_index])
score = mean_squared_error(preds,y_train[test_index])
scores += score / NFOLD
return scores
opt = optuna.create_study(
direction='minimize',
sampler=optuna.samplers.RandomSampler(seed=1))
opt.optimize(objective,
n_trials=NTRIAL)
trial = opt.best_trial
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': ['rmse'],
'learning_rate': 0.005,
'feature_fraction': 0.9,
'bagging_fraction': 0.7,
'bagging_freq': 10,
'verbose': -1,
"max_depth": 8,
"num_leaves": 128,
"max_bin": 512,
"num_iterations": 1000,
"n_estimators": 1000
}
# params.update(**params, **trial.params)
for key, value in trial.params.items():
print('"{}" : {}'.format(key, value))
params[key] = value
if int(0.95 * 2 ** params['max_depth']) > 31:
params['max_leaves'] = int(0.95 * 2 ** params['max_depth'])
folds = KFold(n_splits=NFOLD, shuffle=True)
train_index, valid_index = list(folds.split(X_train))[-1]
dtrain = lgb.Dataset(X_train[train_index], label=y_train[train_index])
dvalid = lgb.Dataset(X_train[valid_index], label=y_train[valid_index])
gbm_reg = lgb.train(
params,
dtrain,
num_boost_round=10 ** 6,
early_stopping_rounds=100,
valid_sets=dvalid,
verbose_eval=1000)
print(f"PARAMETER: {params}")
gbm_reg = lgb.train(params,lgb.Dataset(X_train,label=y_train))
predicted = gbm_reg.predict(X_test)
# 結果出力
pred_df = pd.DataFrame(y_test)
pred_df['pred'] = predicted
pred_df.columns = ['true', 'pred']
return pred_df, gbm_reg, params, opt.best_trial
def train_no_optim(x_train, y_train):
# XGBoostのデフォルトパラメータ
default_xgb_params = {"objective": "reg:squarederror",
"eval_metric": "rmse",
"tree_method": "hist",
"grow_policy": "lossguide",
'verbose': 0,
"seed": 1,
"colsample_bytree": 1,
"subsample": 1,
"max_depth": 6,
'max_leaves': 31,
"eta": 0.3,
"alpha": 1.0,
"lambda": 0, }
dtrain = xgb.DMatrix(data=x_train, label=y_train)
xgb_reg = xgb.train(default_xgb_params,
dtrain=dtrain,
num_boost_round=10 ** 6,
early_stopping_rounds=1000,
verbose_eval=100)
return xgb_reg, default_xgb_params
| {"/model/XGBoost.py": ["/model/BaseModel.py"], "/helper/dataset.py": ["/model/features.py"], "/model/LGBM.py": ["/model/BaseModel.py"]} |
57,154 | darkb1ade/RealizedVolPrediction_Kaggle | refs/heads/develop | /model/BaseModel.py |
"""
Use this class as a base class for any
"""
class BaseModel:
def __init__(self):
self.ntrial = 10
def train(self):
return NotImplementedError
def detect(self):
return NotImplementedError
def train_and_test(self):
return NotImplementedError
def optimize_param(self, train):
return NotImplementedError
| {"/model/XGBoost.py": ["/model/BaseModel.py"], "/helper/dataset.py": ["/model/features.py"], "/model/LGBM.py": ["/model/BaseModel.py"]} |
57,155 | darkb1ade/RealizedVolPrediction_Kaggle | refs/heads/develop | /model/LGBM.py | from model.BaseModel import BaseModel
# Visuallize
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn import model_selection
# Modeling
import lightgbm as lgb
from helper import utils
import optuna
class LGBMModel(BaseModel):
"""
This is lightGBM class. Insert list of str contain feature column here.
Output column should be named "output"
To run, use lgbm.train_and_test() <- recommend
"""
def __init__(self, feature_column=None, output_column=None):
super().__init__()
# Parameters of Light GBM
self.params_lgbm = {
'task': 'train',
'boosting_type': 'gbdt',
'learning_rate': 0.01,
'objective': 'regression',
'metric': 'None',
'max_depth': -1,
'n_jobs': -1,
'feature_fraction': 0.7,
'bagging_fraction': 0.7,
'lambda_l2': 1,
'verbose': -1
# 'bagging_freq': 5
}
# seed0 = 2000
# self.params_lgbm = {
# 'objective': 'rmse',
# 'boosting_type': 'gbdt',
# 'max_depth': -1,
# 'max_bin': 100,
# 'min_data_in_leaf': 500,
# 'learning_rate': 0.05,
# 'subsample': 0.72,
# 'subsample_freq': 4,
# 'feature_fraction': 0.5,
# 'lambda_l1': 0.5,
# 'lambda_l2': 1.0,
# 'categorical_column': [0],
# 'seed': seed0,
# 'feature_fraction_seed': seed0,
# 'bagging_seed': seed0,
# 'drop_seed': seed0,
# 'data_random_seed': seed0,
# 'n_jobs': -1,
# 'verbose': -1}
# k-folds Ensemble Training
self.n_folds = 5
self.n_rounds = 10000
# Get feature name (Not sure if this work), Data must have column name of following features and output is 'target'
# features = [col for col in X_train.columns if col not in {"time_id", "target", "row_id"}]
if feature_column is None:
self.features = ['stock_id', 'log_return1', 'log_return2', 'trade_log_return1'] # Need to change
else:
self.features = feature_column
if output_column is None:
self.output_feature = "output"
else:
self.output_feature = output_column
# Define loss function for lightGBM training
def feval_RMSPE(self, preds, train_data):
labels = train_data.get_label()
return 'RMSPE', round(utils.rmspe(y_true=labels, y_pred=preds), 5), False
def train(self, train_data_raw, val_data_raw=None, param=None):
if val_data_raw is not None:
X_train = train_data_raw[self.features]
y_train = train_data_raw[self.output_feature].values
X_val = val_data_raw[self.features]
y_val = val_data_raw[self.output_feature].values
else:
kf = model_selection.KFold(n_splits=self.n_folds, shuffle=True, random_state=15)
for train_index, val_index in kf.split(range(len(train_data_raw))):
X_train = train_data_raw.loc[train_index, self.features]
X_val = train_data_raw.loc[val_index, self.features]
y_train = train_data_raw.loc[train_index, self.output_feature].values
y_val = train_data_raw.loc[val_index, self.output_feature].values
break
# Create dataset
train_data = lgb.Dataset(X_train, label=y_train, weight=1 / np.power(y_train, 2))
val_data = lgb.Dataset(X_val, label=y_val, weight=1 / np.power(y_val, 2))
# val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=cats, weight=1/np.power(y_val,2))
if param is None:
param = self.params_lgbm
# training
model = lgb.train(params=param,
num_boost_round=1300,
train_set=train_data,
valid_sets=[train_data, val_data],
verbose_eval=250,
early_stopping_rounds=50,
feval=self.feval_RMSPE)
# Prediction w/ validation data
preds_val = model.predict(X_val[self.features])
# train.loc[val_index, pred_name] = preds_val
# RMSPE calculation
score = round(utils.rmspe(y_true=y_val, y_pred=preds_val), 5)
# delete dataset
del train_data, val_data
return score, preds_val, model
def test(self, model, test_input):
# Prediction w/ validation data
test_preds = model.predict(test_input[self.features]).clip(0, 1e10)
return test_preds
# Combine train and test, with KFold CV
def train_and_test(self, train_input, test_input=None, param=None):
"""
:param train_input: pd array. Contain both feature data and "output" data
:param test_input: pd array. Contain feature data to test
:return: test_prediction (??): predicted output data of 'test_input'
"""
cv_trial = 1
kf = model_selection.KFold(n_splits=self.n_folds, shuffle=True, random_state=15)
# Create out of folds array
oof_predictions = np.zeros(train_input.shape[0])
# Create test array to store predictions
if test_input is not None:
test_predictions = np.zeros(test_input.shape[0])
else:
test_predictions = None
for train_index, val_index in kf.split(range(len(train_input))):
print(f'CV trial : {cv_trial} /{self.n_folds}')
# Divide dataset into train and validation data such as Cross Validation
# X_train = train_input.loc[train_index, self.features]
X_train = train_input.loc[train_index]
# y_train = train_input.loc[train_index, self.output_feature].values
X_val = train_input.loc[val_index]
# X_val = train_input.loc[val_index, self.features]
# y_val = train_input.loc[val_index, self.output_feature].values
score, preds_val, model = self.train(X_train, X_val, param)
oof_predictions[val_index] = preds_val
if test_input is not None:
test_preds = self.test(model, test_input)
test_predictions += test_preds / self.n_folds
cv_trial += 1
rmspe_score = utils.rmspe(train_input[self.output_feature], oof_predictions)
# print(f'Our out of folds RMSPE is {rmspe_score}')
lgb.plot_importance(model, max_num_features=20)
return test_predictions, rmspe_score
def optimize_params(self, train_input):
def objective(trial):
params = {
'task': 'train',
'boosting_type': 'gbdt',
'learning_rate': 0.01,
'objective': 'regression',
'metric': 'None',
'max_depth': -1,
'n_jobs': -1,
'feature_fraction': 0.7,
'bagging_fraction': 0.7,
'lambda_l2': trial.suggest_uniform('lambda', 0.0, 1.0),
'verbose': -1
# 'bagging_freq': 5
}
# params = {"objective": "reg:squarederror",
# "eval_metric": "rmse",
# "tree_method": "hist",
# "grow_policy": "lossguide",
# 'silent': 1,
# "seed": 1,
# "colsample_bytree": 1,
# "subsample": 1,
# 'max_leaves': 31, # lossguideの場合、当該項目の設定が必要(default: 0)
# "max_depth": trial.suggest_int('max_depth', 2, 12),
# "eta": trial.suggest_loguniform('eta', 10e-2, 1),
# "alpha": trial.suggest_uniform('alpha', 0.0, 1.0),
# "lambda": trial.suggest_uniform('lambda', 0.0, 1.0)}
print(params)
_, score = self.train_and_test(train_input, None, params)
return score
opt = optuna.create_study(
direction='minimize',
sampler=optuna.samplers.RandomSampler(seed=1))
opt.optimize(objective,
n_trials=self.ntrial)
trial = opt.best_trial
params = self.params_lgbm.copy()
# params.update(**params, **trial.params)
for key, value in trial.params.items():
print('"{}" : {}'.format(key, value))
params[key] = value
score, preds_val, model = self.train(train_input, param=params)
print("Optimzied param is", params)
return score, model
def optimize_params_and_test(self, train_input, test_input):
def objective(trial):
params = {
'task': 'train',
'boosting_type': 'gbdt',
'learning_rate': 0.01,
'objective': 'regression',
'metric': 'None',
'max_depth': -1,
'n_jobs': -1,
'feature_fraction': 0.7,
'bagging_fraction': 0.7,
'lambda_l2': trial.suggest_uniform('lambda', 0.0, 1.0),
'verbose': -1
# 'bagging_freq': 5
}
# params = {"objective": "reg:squarederror",
# "eval_metric": "rmse",
# "tree_method": "hist",
# "grow_policy": "lossguide",
# 'silent': 1,
# "seed": 1,
# "colsample_bytree": 1,
# "subsample": 1,
# 'max_leaves': 31, # lossguideの場合、当該項目の設定が必要(default: 0)
# "max_depth": trial.suggest_int('max_depth', 2, 12),
# "eta": trial.suggest_loguniform('eta', 10e-2, 1),
# "alpha": trial.suggest_uniform('alpha', 0.0, 1.0),
# "lambda": trial.suggest_uniform('lambda', 0.0, 1.0)}
print(params)
_, score = self.train_and_test(train_input, test_input, params)
return score
opt = optuna.create_study(
direction='minimize',
sampler=optuna.samplers.RandomSampler(seed=1))
opt.optimize(objective,
n_trials=self.ntrial)
trial = opt.best_trial
params = self.params_lgbm.copy()
# params.update(**params, **trial.params)
for key, value in trial.params.items():
print('"{}" : {}'.format(key, value))
params[key] = value
test_predictions, score = self.train_and_test(train_input, test_input, params)
print("Optimzied param is", params)
return test_predictions, score, params
if __name__ == "__main__":
lgbm = LGBMModel()
| {"/model/XGBoost.py": ["/model/BaseModel.py"], "/helper/dataset.py": ["/model/features.py"], "/model/LGBM.py": ["/model/BaseModel.py"]} |
57,156 | darkb1ade/RealizedVolPrediction_Kaggle | refs/heads/develop | /main.py | from helper.dataset import DataLoader
from IPython.display import display
import pandas as pd
import numpy as np
from model.LGBM import LGBMModel
import lightgbm as lgb
# dl = DataLoader('train')
#
# # getting each stock (sample)
# d1, _ = dl.get_each_parquet(0)
#
# # get groundtruth/target
# gt = dl.get_gt()
# print("Ground Truth")
# display(gt)
#
# # getting all stocks
# df, _ = dl.get_all_parquet()
# print("All stocks")
# display(df)
def main():
# get groundtruth/target
dl = DataLoader('train')
gt = dl.get_gt()
# getting each stock (sample)
'''
d1 = result dataframe
d_book_inter, d_trade_inter = intermediate dataframe
d_book, d_trade = initial dataframe
'''
#d1, d_book_inter, d_trade_inter, d_book, d_trade = dl.get_each_parquet(0) # ex: stock_id = 0
df = pd.read_csv("data/update10Sep_feature.csv")
dataset = pd.merge(df, gt, on=["stock_id", "time_id"])
feature_name = list(dataset.columns)
feature_name.remove("time_id")
feature_name.remove("target")
lgbm_model = LGBMModel(feature_name, "target")
train_df, test_df = np.split(dataset, [int(.8*len(dataset))])
score, model = lbm_model.optimize_params(train_df)
print(f"Score: {score}")
lgb.save(model, "data/lgb_optimized.txt")
# score, test_predictions, model = lgbm_model.train(train_df, test_df)
# print(f"Test prediction: {test_predictions}, Score: {score}")
# # test_predictions, rmspe_score = lgbm_model.train_and_test(train_df,test_df)
# test_predictions, rmspe_score, params = lgbm_model.optimize_params_and_test(train_df,test_df)
# print(f"Train and Test prediction: {test_predictions}, Score: {rmspe_score}")
if __name__ == "__main__":
main()
| {"/model/XGBoost.py": ["/model/BaseModel.py"], "/helper/dataset.py": ["/model/features.py"], "/model/LGBM.py": ["/model/BaseModel.py"]} |
57,157 | darkb1ade/RealizedVolPrediction_Kaggle | refs/heads/develop | /helper/utils.py | import numpy as np
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))) | {"/model/XGBoost.py": ["/model/BaseModel.py"], "/helper/dataset.py": ["/model/features.py"], "/model/LGBM.py": ["/model/BaseModel.py"]} |
57,158 | darkb1ade/RealizedVolPrediction_Kaggle | refs/heads/develop | /model/features.py | import pandas as pd
import numpy as np
def log_return(list_stock_prices):
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def cal_WAP1(dfBook):
dfBook['WAP1'] = (dfBook.bid_price1*dfBook.ask_size1+dfBook.ask_price1*dfBook.bid_size1)/(dfBook.bid_size1+dfBook.ask_size1)
def cal_WAP2(dfBook):
dfBook['WAP2'] = (dfBook.bid_price2*dfBook.ask_size2+dfBook.ask_price2*dfBook.bid_size2)/(dfBook.bid_size2+dfBook.ask_size2)
def cal_WAP3(dfBook):
dfBook['WAP3'] = (dfBook.bid_price1*dfBook.bid_size1+dfBook.ask_price1*dfBook.ask_size1)/(dfBook.bid_size1+dfBook.ask_size1)
def cal_WAP4(dfBook):
dfBook['WAP4'] = (dfBook.bid_price2*dfBook.bid_size2+dfBook.ask_price2*dfBook.ask_size2)/(dfBook.bid_size2+dfBook.ask_size2)
def log_run(dfBook, col):
#dfBook[f'time_w{w}'] = [f'{a}-{b}' for a, b in zip(dfBook.time_id, dfBook.seconds_in_bucket//w)]
dfBook[f'logReturn_{col}'] = dfBook.groupby(['time_id'])[col].apply(log_return)
def cal_vol(dfBook, col_names, group_col='time_id'):
dat = {'time_id': list(dfBook[group_col].unique())}
for col in col_names:
dat[col] = list(dfBook.groupby([group_col])[col].agg(realized_volatility))
return pd.DataFrame(dat)
def get_time_series(t, dfBook, cols, w, show = False): # w = window size, cols = list of feature name
    """Build per-time_id time-series features of windowed realized volatility.

    Splits each time_id bucket into sub-windows of `w` seconds, computes the
    realized volatility of the logReturn_* columns per sub-window, and lays the
    values out as one wide row per time_id (600//w columns per feature).
    Sub-windows missing at the end are padded with the mean of the observed ones.

    NOTE(review): `t` appears to be the iterable of time_ids to emit rows for,
    and it is shadowed by the inner `for t in ...` loop — confirm against callers.
    NOTE(review): `display` is an IPython/notebook builtin; `show=True` only
    works inside a notebook.
    """
    cols = [f'logReturn_{col}' for col in cols]
    # Grouping column by w: label "timeid-windowindex" per row
    dfBook['time_label'] =[f'{a}-{b}' for a, b in zip(dfBook.time_id, dfBook.seconds_in_bucket//w)]
    dfBook = dfBook.dropna()
    # Compute vol based on new group column
    a = cal_vol(dfBook, cols, 'time_label')
    # base time_id col: recover the original time_id from the "timeid-window" label
    time = [int(t.split('-')[0]) for t in list(a.time_id)]
    a.insert(0, "time_id0", time, True)
    if show: display(a.head())
    # time-series column names: one column per feature per sub-window offset
    col_name = []
    for col in cols:
        col_tmp = [f"{col}_w{w}_prev{i*w}" for i in range(600//w)]
        col_name.extend(col_tmp)
    # new dataframe, one row per requested time_id
    tmp = pd.DataFrame(columns = ['time_id'] + col_name)
    tmp['time_id'] = t #dfvol.time_id
    for t in list(tmp.time_id):
        b = a.loc[a.time_id0==t, cols].to_numpy()
        l_col = len(col_name)
        # column-major flatten: all windows of feature 0, then feature 1, ...
        list_b = list(b.flatten(order = 'F'))
        l_b = len(list_b)
        if l_col!= l_b:
            # pad missing windows with the mean of the observed values
            new_b = [sum(list_b) / l_b]*(l_col-l_b)
            new_b = list_b + new_b
        else: new_b = list_b
        tmp.loc[tmp.time_id==t,col_name] = new_b
    if show:
        display(tmp)
return tmp | {"/model/XGBoost.py": ["/model/BaseModel.py"], "/helper/dataset.py": ["/model/features.py"], "/model/LGBM.py": ["/model/BaseModel.py"]} |
57,159 | eliabonazza/DIA_Lab | refs/heads/master | /Matching/CD_UCB.py | import numpy as np
from scipy.optimize import linear_sum_assignment
from Matching.UCB_Matching import UCB_Matching
from Matching.CUSUM import CUSUM
class CD_UCB(UCB_Matching):
    """UCB matching learner with CUSUM change detection per arm.

    Keeps only the rewards observed since the last detected change
    ("valid" rewards) when computing means and confidence bounds, so the
    learner re-explores after a distribution shift.
    """

    def __init__(self, n_arms, n_rows, n_cols, M=100, eps=0.05, h=20, alpha=0.01):
        """M, eps, h are CUSUM parameters; alpha is the pure-exploration rate."""
        super().__init__(n_arms, n_rows, n_cols)
        # One independent change detector per arm.
        self.change_detection = [CUSUM(M, eps, h) for _ in range(n_arms)]
        # Rewards collected since the arm's last detected change.
        self.valid_rewards_per_arms = [[] for _ in range(n_arms)]
        # Round indices at which each arm's change was detected.
        self.detections = [[] for _ in range(n_arms)]
        self.alpha = alpha

    def pull_arm(self):
        """Return a (row_ind, col_ind) matching.

        With probability 1-alpha solve the assignment on the upper confidence
        bounds; with probability alpha return a random matching (forced
        exploration so changes on unplayed arms can still be detected).
        """
        if np.random.binomial(1, 1-self.alpha):
            upper_conf = self.empirical_means + self.confidence
            # Cap infinite bounds so linear_sum_assignment gets finite costs.
            upper_conf[np.isinf(upper_conf)] = 1e3
            row_ind, col_ind = linear_sum_assignment(-upper_conf.reshape(self.n_rows, self.n_cols))
            return row_ind, col_ind
        else:
            random_costs = np.random.randint(0, 10, size=(self.n_rows, self.n_cols))
            return linear_sum_assignment(random_costs)

    def update(self, pulled_arms, rewards):
        """Feed rewards to the detectors, resetting an arm's history on a change,
        then refresh means and confidence bounds from the valid rewards only."""
        self.t += 1
        pulled_arms_flat = np.ravel_multi_index(pulled_arms, (self.n_rows, self.n_cols))
        for pulled_arm, reward in zip(pulled_arms_flat, rewards):
            if self.change_detection[pulled_arm].update(reward):
                # Change detected: drop stale rewards and restart the detector.
                self.detections[pulled_arm].append(self.t)
                self.valid_rewards_per_arms[pulled_arm] = []
                self.change_detection[pulled_arm].reset()
            self.update_observations(pulled_arm, reward)
            self.empirical_means[pulled_arm] = np.mean(
                self.valid_rewards_per_arms[pulled_arm]
            )
        total_valid_samples = sum(
            [len(x) for x in self.valid_rewards_per_arms]
        )
        for a in range(self.n_arms):
            n_samples = len(self.valid_rewards_per_arms[a])
            if n_samples > 0:
                self.confidence[a] = (2 * np.log(total_valid_samples) / n_samples) ** 0.5
            else:
                # Unsampled (or just-reset) arms get infinite bonus.
                self.confidence[a] = np.inf
if __name__ == '__main__':
    # Demo: compare CD-UCB against plain UCB matching on a 3-phase
    # non-stationary environment, averaging regret over n_exp runs.
    import matplotlib.pyplot as plt
    from Pricing.NonStationaryEnvironment import Non_Stationary_Environment
    # Reward matrices for the three phases.
    p0 = np.array(
        [[0.25, 1, 0.25], [0.5, 0.25, 0.25], [0.25, 0.25, 1]]
    )
    p1 = np.array(
        [[1, 0.25, 0.25], [0.5, 0.25, 0.25], [0.25, 0.25, 1]]
    )
    p2 = np.array(
        [[1, 0.25, 0.25], [0.5, 1, 0.25], [0.25, 0.25, 1]]
    )
    P = [p0,p1,p2]
    n_exp = 10
    T = 3000
    regret_cd = np.zeros((n_exp, T))
    regret_ucb = np.zeros((n_exp, T))
    detections = [[] for _ in range(n_exp)]
    # CUSUM parameters for the CD learner.
    M = 100
    eps = 0.1
    h = np.log(T)*2
    for e in range(n_exp):
        print(e)
        e_UCB = Non_Stationary_Environment(p0.size, P, T)
        e_CD = Non_Stationary_Environment(p0.size, P, T)
        cd_learner = CD_UCB(p0.size, *p0.shape, M, eps, h)
        ucb_learner = UCB_Matching(p0.size, *p0.shape)
        rew_CD = []
        rew_UCB = []
        opt_rew = []
        for t in range(T):
            # Clairvoyant optimum for the current phase.
            p = P[int(t/e_UCB.phase_size)]
            opt = linear_sum_assignment(-p)
            opt_rew.append(p[opt].sum())
            pulled_arm = cd_learner.pull_arm()
            reward = e_CD.round(pulled_arm)
            cd_learner.update(pulled_arm, reward)
            rew_CD.append(reward.sum())
            pulled_arm = ucb_learner.pull_arm()
            reward = e_UCB.round(pulled_arm)
            ucb_learner.update(pulled_arm, reward)
            rew_UCB.append(reward.sum())
        regret_cd[e, :] = np.cumsum(opt_rew) - np.cumsum(rew_CD)
        regret_ucb[e, :] = np.cumsum(opt_rew) - np.cumsum(rew_UCB)
    plt.figure(0)
    plt.ylabel('Regret')
    plt.xlabel('t')
    plt.plot(np.mean(regret_cd, axis=0))
    plt.plot(np.mean(regret_ucb, axis=0))
    plt.legend(['CD-UCB','UCB'])
    plt.show()
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,160 | eliabonazza/DIA_Lab | refs/heads/master | /Matching/HungarianAlgorithm.py | """
suitable for dense graphs and it returns the minimal cost matching.
key idea:
if a number is added to all of the entries of any row or column
of a cost matrix, then an optimal assignment for the resulting cost
matrix is also an optimal assignment for the original cost matrix.
Thus we can compute the maximum-weight matching by minimizing a cost instead of
maximizing the initial weights: build a new matrix in which every entry is
replaced by (maximum entry - entry), and minimize over that.
"""
import numpy as np
from scipy.optimize import linear_sum_assignment
### STEP 1
# subtract to every element of each row the smallest value of the row
# thus every row will contain a 0
def step1(m):
    """Subtract each row's minimum from that row (in place), so every row has a 0."""
    m -= m.min(axis=1, keepdims=True)
### STEP 2
# subtract to every element of each column the smallest value of the column
# thus every columns will contain a 0
def step2(m):
    """Subtract each column's minimum from that column (in place), so every column has a 0."""
    m -= m.min(axis=0, keepdims=True)
### STEP 3
# find the minimal number of lines (row and columns) we have to draw in order to take all the zeros
# 1. find a start assignment covering as many tasks (y-columns) as possible
# 2. mark all rows having no assignment
# 3. mark all (unmarked) columns having zeros in newly marked row(s)
# 4. mark all rows having assignments in newly marked columns
# 5. repeat for all non-assigned rows
# 6. select marked columns and unmarked rows
def step3(m):
    """Find a minimal line cover of the zeros of a square matrix m.

    Returns (unmarked_rows, marked_columns): drawing a line through each
    unmarked row and each marked column covers every zero of m.
    """
    dim = m.shape[0]
    assigned = np.array([])
    assignments = np.zeros(m.shape, dtype=int)
    # Greedy initial assignment: take a zero whose row and column are both free.
    for i in range(dim):
        for j in range(dim):
            if m[i,j]==0 and np.sum(assignments[:,j])==0 and np.sum(assignments[i,:])==0:
                assignments[i,j] = 1
                assigned = np.append(assigned, i)
    # Return evenly spaced numbers over a specified interval.
    rows = np.linspace(0, dim-1, dim).astype(int)
    # Mark all rows with no assignment.
    marked_rows = np.setdiff1d(rows, assigned)
    new_marked_rows = marked_rows.copy()
    marked_columns = np.array([])
    # Alternately mark columns with zeros in marked rows, then rows with
    # assignments in those columns, until no new rows get marked.
    while(len(new_marked_rows)>0):
        new_marked_cols = np.array([], dtype=int)
        for nr in new_marked_rows:
            zeros_cols = np.argwhere(m[nr,:]==0).reshape(-1)
            new_marked_cols = np.append(new_marked_cols,
                                        np.setdiff1d(zeros_cols, marked_columns)
                                        )
        marked_columns = np.append(marked_columns, new_marked_cols)
        new_marked_rows = np.array([], dtype=int)
        for nc in new_marked_cols:
            new_marked_rows = np.append(new_marked_rows,
                                        np.argwhere(assignments[:,nc]==1).reshape(-1))
        marked_rows = np.unique(np.append(marked_rows, new_marked_rows))
    # The cover is: unmarked rows plus marked columns.
    unmarked_rows = np.setdiff1d(rows, marked_rows).astype(int)
    return unmarked_rows, np.unique(marked_columns)
### STEP 5
# find the smallest entry not covered by any line, subtract it from each row that is not crossed out,
# and then add it to each column that is crossed out
# go to ### STEP 3
def step5(m, covered_rows, covered_cols):
    """Adjust m (in place) when the zero cover is too small.

    Subtract the smallest uncovered entry from every uncovered row and add it
    to every covered column, then return m. Afterwards step3 is retried.
    """
    n_rows, n_cols = m.shape
    free_rows = np.setdiff1d(np.linspace(0, n_rows - 1, n_rows),
                             covered_rows).astype(int)
    free_cols = np.setdiff1d(np.linspace(0, n_cols - 1, n_cols),
                             covered_cols).astype(int)
    covered_rows = covered_rows.astype(int)
    covered_cols = covered_cols.astype(int)
    # Smallest entry not covered by any line (seeded with the global max).
    smallest = np.max(m)
    for r in free_rows:
        for c in free_cols:
            if m[r, c] < smallest:
                smallest = m[r, c]
    for r in free_rows:
        m[r, :] -= smallest
    for c in covered_cols:
        m[:, c] += smallest
    return m
def find_rows_single_zero(matrix):
    """Return (row, col) for the first row containing exactly one zero, else False."""
    for row in range(matrix.shape[0]):
        zero_cols = np.argwhere(matrix[row, :] == 0).reshape(-1)
        if len(zero_cols) == 1:
            return row, zero_cols[0]
    return False
def find_cols_single_zero(matrix):
    """Return (col, row) for the first column containing exactly one zero, else False.

    Note the ordering: the FIRST element of the tuple is the column index,
    mirroring the original implementation.
    """
    for col in range(matrix.shape[1]):
        zero_rows = np.argwhere(matrix[:, col] == 0).reshape(-1)
        if len(zero_rows) == 1:
            return col, zero_rows[0]
    return False
def assignment_single_zero_lines(m, assignment):
    """Assign every zero that is the only zero in its row or column.

    Each chosen zero is marked in `assignment` and then "crossed out" by
    incrementing m along its line so it is not picked again.

    NOTE(review): find_cols_single_zero returns (col, row), yet below `i` is
    used as a ROW index (`m[i, :]`) and the mark is `assignment[i, j]` — this
    looks transposed relative to the rows loop; confirm intended behavior
    before relying on the column pass.
    """
    # Rows with a single zero: take that zero, then cross out its column.
    val = find_rows_single_zero(m)
    while(val):
        i, j = val[0], val[1]
        m[i,j] += 1
        m[:,j] += 1
        assignment[i,j] = 1
        val = find_rows_single_zero(m)
    # Columns with a single zero (see NOTE above about index order).
    val = find_cols_single_zero(m)
    while (val):
        i, j = val[0], val[1]
        m[i, :] += 1
        m[i, j] += 1
        assignment[i, j] = 1
        val = find_cols_single_zero(m)
    return assignment
def first_zero(m):
    """Return (row, col) of the first zero entry of m in row-major order.

    Assumes m contains at least one zero; raises IndexError otherwise.
    Improvement: the zero positions are computed once instead of calling
    np.argwhere twice as the original did.
    """
    zeros = np.argwhere(m == 0)
    return zeros[0][0], zeros[0][1]
# 1. Find the first row with a single 0; mark this 0 by '1' to make the assignment
# 2. Mark all the zeros in the column of the marked zero
# 3. Do the same procedure for the columns
# 4. Repeat the procedure until there are no rows and columns with single zeros
# 5. If we have more rows or columns with more than one 0, then we have to choose
# one of the entries with value 0 and mark a cross in the cells of the remaining
# zeros in its row and column
# 6. Repeat the procedure until there are no unmarked 0s
def final_assignment(initial_matrix, m):
    """Extract the final zero assignment from the reduced matrix m.

    First takes all forced zeros (single zero in a row/column), then resolves
    the remaining zeros greedily, crossing out each chosen zero's row and
    column. Returns (assignment * initial_matrix, assignment): the first item
    keeps only the costs of the selected cells, the second is the 0/1 mask.
    m is modified in place.
    """
    assignment = np.zeros(m.shape, dtype=int)
    assignment = assignment_single_zero_lines(m, assignment)
    # Remaining ambiguous zeros: pick the first one, cross out its lines, repeat.
    while(np.sum(m==0)>0):
        i, j = first_zero(m)
        assignment[i,j] = 1
        m[i,:] += 1
        m[:,j] += 1
        assignment = assignment_single_zero_lines(m, assignment)
    return assignment*initial_matrix, assignment
def hungarian_algorithm(adj_matrix):
    """Minimal-cost assignment on a square cost matrix (Hungarian method).

    Returns (assignment * adj_matrix, assignment) as produced by
    final_assignment. adj_matrix itself is not modified (a copy is reduced).

    Fix: removed a leftover debug ``print('step5')`` that polluted stdout on
    every adjustment iteration.
    """
    m = adj_matrix.copy()
    step1(m)
    step2(m)
    # Minimum number of lines needed to cover all the zeros.
    n_lines = 0
    max_len = np.maximum(m.shape[0], m.shape[1])
    while n_lines != max_len:
        lines = step3(m)
        n_lines = len(lines[0]) + len(lines[1])
        # STEP 4: if the cover uses as many lines as the matrix dimension,
        # an optimal zero assignment exists and the loop ends; otherwise
        # adjust the matrix (STEP 5) and try again.
        if n_lines != max_len:
            step5(m, lines[0], lines[1])
    return final_assignment(adj_matrix, m)
if __name__ == '__main__':
    # Demo: solve a small 3x3 assignment problem and report the matching cost.
    a = np.array([
        [102,120,152],
        [152,139,174],
        [118,146,260]
    ])
    res = hungarian_algorithm(a)
    print("\nOptimal Matching:\n", res[1], "\nValue", np.sum(res[0]))
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,161 | eliabonazza/DIA_Lab | refs/heads/master | /Matching/UCB.py | import numpy as np
from Pricing.Learner import Learner
class UCB(Learner):
    """UCB1 bandit learner.

    Maintains a per-arm empirical mean plus an upper-confidence exploration
    bonus; unsampled arms have infinite confidence so every arm is tried
    at least once.
    """

    def __init__(self, n_arms):
        super().__init__(n_arms)
        self.empirical_means = np.zeros(n_arms)
        # np.inf guarantees each arm is pulled before its bound is trusted.
        self.confidence = np.array([np.inf] * n_arms)

    def pull_arm(self):
        """Return the arm with the highest upper confidence bound (random tie-break)."""
        upper_conf = self.empirical_means + self.confidence
        return np.random.choice(
            np.where(
                upper_conf == upper_conf.max()
            )[0]
        )

    def update(self, pulled_arm, reward):
        """Record the reward and refresh the means and confidence bounds.

        Fixes two defects of the previous version:
        * the empirical mean was averaged over the GLOBAL round counter ``t``
          instead of the number of pulls of the arm, biasing means toward 0
          whenever more than one arm exists;
        * the exploration bonus was ``(a * log t / n) ** 0.5`` — multiplying
          by the arm INDEX ``a`` instead of the UCB1 constant 2, so arm 0
          received a zero bonus and was never explored.
        """
        self.t += 1
        # Pulls of this arm before the current one (update_observations appends below).
        n_pulls = len(self.rewards_per_arm[pulled_arm])
        self.empirical_means[pulled_arm] = (
            self.empirical_means[pulled_arm] * n_pulls + reward
        ) / (n_pulls + 1)
        self.update_observations(pulled_arm, reward)
        for a in range(self.n_arms):
            n_samples = len(self.rewards_per_arm[a])
            if n_samples > 0:
                self.confidence[a] = (2 * np.log(self.t) / n_samples) ** 0.5
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,162 | eliabonazza/DIA_Lab | refs/heads/master | /SocialInfluence/lin_ucb_example.py | import numpy as np
import matplotlib.pyplot as plt
from LinearMabEnvironment import LinearMabEnvironment
from LinearUCBLearner import LinearUCBLearner
if __name__ == '__main__':
    # Demo: cumulative regret of LinUCB on a linear bandit environment,
    # averaged over n_experiments independent runs.
    n_arms = 10
    T = 1000
    n_experiments = 100
    lin_ucb_reward_per_experiment = []
    env = LinearMabEnvironment(n_arms=n_arms, dim=10)
    for e in range(n_experiments):
        lin_ucb_learner = LinearUCBLearner(arms_features=env.arms_features)
        for t in range(T):
            pulled_arm = lin_ucb_learner.pull_arm()
            reward = env.round(pulled_arm)
            lin_ucb_learner.update(pulled_arm, reward)
        lin_ucb_reward_per_experiment.append(lin_ucb_learner.collected_rewards)
    opt = env.opt()
    plt.figure(0)
    plt.ylabel("Regret")
    plt.xlabel("t")
    plt.plot(np.cumsum(
        np.mean(
            opt - lin_ucb_reward_per_experiment, axis=0)
        ), 'r'
    )
    plt.legend("LinearUCBLearner")
    plt.show()
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,163 | eliabonazza/DIA_Lab | refs/heads/master | /Pricing/example.py | import matplotlib.pyplot as plt
import numpy as np
from Pricing.NonStationaryEnvironment import Non_Stationary_Environment
from Pricing.SWTS_Learner import SWTS_Learner
from Pricing.Environment import Environment
from Pricing.TS_Learner import TS_Learner
from Pricing.GreedyLearner import GreedyLearner
def stationary():
    """Compare Thompson Sampling vs. a greedy learner on a stationary
    4-armed Bernoulli bandit and plot the average cumulative regret."""
    n_arms = 4
    p = np.array([0.15, 0.1, 0.1, 0.35])
    opt = p[3]  # best arm's success probability
    # horizon
    T = 300
    n_experiments = 100
    ts_rewards_per_experiment = []
    gr_rewards_per_experiment = []
    for e in range(n_experiments):
        env = Environment(n_arms=n_arms, probabilities=p)
        ts_learner = TS_Learner(n_arms)
        gr_learner = GreedyLearner(n_arms)
        for t in range(T):
            # ts
            pulled_arm = ts_learner.pull_arm()
            ts_reward = env.round(pulled_arm)
            ts_learner.update(pulled_arm, ts_reward)
            # gr
            pulled_arm = gr_learner.pull_arm()
            gr_reward = env.round(pulled_arm)
            gr_learner.update(pulled_arm, gr_reward)
        ts_rewards_per_experiment.append(ts_learner.collected_rewards)
        gr_rewards_per_experiment.append(gr_learner.collected_rewards)
    plt.figure(0)
    plt.xlabel('t')
    plt.ylabel('regret')
    plt.plot(np.cumsum(np.mean(opt - ts_rewards_per_experiment, axis=0)), 'r')
    plt.plot(np.cumsum(np.mean(opt - gr_rewards_per_experiment, axis=0)), 'g')
    plt.legend(['ts', 'greedy'])
    plt.show()
def non_stationary ():
    """Compare plain Thompson Sampling vs. sliding-window TS on a 4-phase
    non-stationary Bernoulli bandit; plot rewards and cumulative regret."""
    # non stationary example: one row of p per phase
    n_arms = 4
    p = np.array([[0.15, 0.1, 0.2, 0.25],
                  [0.35, 0.21, 0.2, 0.25],
                  [0.5, 0.1, 0.1, 0.15],
                  [0.8, 0.21, 0.1, 0.15]
                  ])
    T = 400
    n_experiments = 100
    ts_rewards_per_experiment = []
    swts_rewards_per_experiment = []
    window_size = int(np.sqrt(T))
    for e in range(n_experiments):
        print(e)
        ts_env = Non_Stationary_Environment(n_arms=n_arms, probabilities=p, horizon=T)
        ts_learner = TS_Learner(n_arms)
        swts_env = Non_Stationary_Environment(n_arms=n_arms, probabilities=p, horizon=T)
        swts_learner = SWTS_Learner(n_arms, window_size)
        for t in range(T):
            # ts
            pulled_arm = ts_learner.pull_arm()
            ts_reward = ts_env.round(pulled_arm)
            ts_learner.update(pulled_arm, ts_reward)
            # sliding-window ts
            pulled_arm = swts_learner.pull_arm()
            swts_reward = swts_env.round(pulled_arm)
            swts_learner.update(pulled_arm, swts_reward)
        ts_rewards_per_experiment.append(ts_learner.collected_rewards)
        swts_rewards_per_experiment.append(swts_learner.collected_rewards)
    # Per-round regret against each phase's best arm.
    ts_instantaneous_regret = np.zeros(T)
    swts_instantaneous_regret = np.zeros(T)
    n_phases = len(p)
    phases_len = int(T / n_phases)
    opt_per_phases = p.max(axis=1)
    optimum_per_round = np.zeros(T)
    for i in range(n_phases):
        optimum_per_round[i * phases_len: (i + 1) * phases_len] = opt_per_phases[i]
        ts_instantaneous_regret[i * phases_len: (i + 1) * phases_len] = opt_per_phases[i] - np.mean(
            ts_rewards_per_experiment, axis=0)[i * phases_len: (i + 1) * phases_len]
        swts_instantaneous_regret[i * phases_len: (i + 1) * phases_len] = opt_per_phases[i] - np.mean(
            swts_rewards_per_experiment, axis=0)[i * phases_len: (i + 1) * phases_len]
    plt.figure(0)
    plt.xlabel('t')
    plt.ylabel('reward')
    plt.plot(np.mean(ts_rewards_per_experiment, axis=0), 'r')
    plt.plot(np.mean(swts_rewards_per_experiment, axis=0), 'b')
    plt.plot(optimum_per_round, '--g')
    plt.legend(['ts', 'swts'])
    plt.show()
    plt.figure(1)
    plt.xlabel('t')
    plt.ylabel('regret')
    plt.plot(np.cumsum(ts_instantaneous_regret, axis=0), 'r')
    plt.plot(np.cumsum(swts_instantaneous_regret, axis=0), 'b')
    plt.plot(optimum_per_round, 'g')
    plt.legend(['ts', 'swts'])
    plt.show()
# Script entry point: runs only the stationary demo by default.
if __name__ == '__main__':
    stationary()
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,164 | eliabonazza/DIA_Lab | refs/heads/master | /Advertising/example.py | import numpy as np
import matplotlib.pyplot as plt
from BiddingEnvironment import BiddingEnvironment
from GP_TS_Learner import GP_TS_Learner
from GTS_Learner import GTS_Learner
if __name__ == '__main__':
    # Demo: Gaussian TS vs. GP-based TS on a noisy bidding environment;
    # plots the average cumulative regret over n_exp runs.
    n_arms = 20
    min_bid = 0.05
    max_bid = 1.0
    bids = np.linspace(min_bid, max_bid, n_arms)
    sigma = 10  # observation-noise std of the environment
    T = 60
    n_exp = 100
    gts_reward_per_exp = []
    gp_ts_reward_per_exp = []
    for e in range(n_exp):
        env = BiddingEnvironment(bids, sigma)
        gts_learner = GTS_Learner(n_arms)
        gp_ts_learner = GP_TS_Learner(n_arms, arms=bids)
        for t in range(T):
            pulled_arm = gts_learner.pull_arm()
            reward = env.round(pulled_arm)
            gts_learner.update(pulled_arm, reward)
            pulled_arm = gp_ts_learner.pull_arm()
            reward = env.round(pulled_arm)
            gp_ts_learner.update(pulled_arm, reward)
        gp_ts_reward_per_exp.append(gp_ts_learner.collected_rewards)
        gts_reward_per_exp.append(gts_learner.collected_rewards)
    opt = np.max(env.means)
    plt.figure(0)
    plt.ylabel('Regret')
    plt.xlabel('t')
    plt.plot(np.cumsum(
        np.mean(opt - gts_reward_per_exp, axis=0)
    ), 'r')
    plt.plot(np.cumsum(
        np.mean(opt - gp_ts_reward_per_exp, axis=0)
    ), 'g')
    plt.legend(['GTS','GP_TS'])
plt.show() | {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,165 | eliabonazza/DIA_Lab | refs/heads/master | /Advertising/GaussianProcess.py | import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
"""
Given the bid returns the expected number of clicks --> to be estimated
"""
def n(x):
    """Expected number of clicks as a function of the bid x (to be estimated)."""
    return (1.0 - np.exp(-5.0 * x)) * 100


def generate_observation(x, noise_std):
    """Noisy observation of n(x): the true click curve plus Gaussian noise."""
    clicks = n(x)
    noise = np.random.normal(0, noise_std, size=clicks.shape)
    return clicks + noise
if __name__ == '__main__':
    # Demo: fit a Gaussian Process to noisy click observations and plot the
    # true curve, the samples, the prediction, and the 95% confidence band.
    n_obs = 50
    bids = np.linspace(0.0, 1.0, 20)
    x_obs = np.array([])
    y_obs = np.array([])
    noise_std = 5.0
    for i in range(n_obs):
        new_x_obs = np.random.choice(bids, 1)
        new_y_obs = generate_observation(new_x_obs, noise_std)
        x_obs = np.append(x_obs, new_x_obs)
        y_obs = np.append(y_obs, new_y_obs)
    # Reshape observations for the GP (column vector of inputs).
    x = np.atleast_2d(x_obs).T
    Y = y_obs.ravel()
    theta = 1.0
    l = 1.0
    # Constant * RBF kernel with broad hyperparameter bounds.
    kernel = C(theta, (1e-3, 1e3)) * RBF(l, (1e-3, 1e3))
    gp = GaussianProcessRegressor(kernel=kernel,
                                  alpha=noise_std**2,
                                  normalize_y=True,
                                  n_restarts_optimizer=9)
    gp.fit(x, Y)
    x_pred = np.atleast_2d(bids).T
    y_pred, sigma = gp.predict(x_pred, return_std=True)
    # NOTE(review): plt.figure(i) reuses the loop variable left over from the
    # sampling loop above (i == n_obs - 1) — presumably unintentional; confirm.
    plt.figure(i)
    plt.plot(x_pred, n(x_pred), 'r:', label= r'$n(x)$')
    plt.plot(x.ravel(), Y, 'ro', label= u'Observed Clicks')
    plt.plot(x_pred, y_pred, 'b-', label=u'Predicted Clicks')
    # uncertainty band at 95% (1.96 sigma)
    plt.fill(np.concatenate([x_pred, x_pred[::-1]]),
             np.concatenate([y_pred - 1.96*sigma, (y_pred + 1.96*sigma)[::-1]]),
             alpha = .5, fc='b', ec='None', label='95% confidence interval')
    plt.xlabel('$x$')
    plt.ylabel('$n(x)$')
    plt.legend(loc='lower right')
    plt.show()
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,166 | eliabonazza/DIA_Lab | refs/heads/master | /SocialInfluence/learning_probabilities.py | import numpy as np
from copy import copy
def simulate_episode(init_prob_matrix, n_steps_max):
    """Simulate one influence cascade over the activation-probability matrix.

    Each node starts active with probability 0.1; at every step, active nodes
    try to activate their neighbours according to the (remaining) edge
    probabilities. Returns a history array whose first row is the initial
    activation vector and each following row holds the nodes newly activated
    at that step.
    """
    prob_matrix = init_prob_matrix.copy()
    n_nodes = prob_matrix.shape[0]
    active = np.random.binomial(1, 0.1, size=(n_nodes))
    history = np.array([active])
    fresh = active
    step = 0
    while step < n_steps_max and np.sum(fresh) > 0:
        # Edge probabilities emanating from currently active nodes only.
        p = (prob_matrix.T * active).T
        fired = p > np.random.rand(p.shape[0], p.shape[1])
        # Drop the probabilities of edges that already had their chance.
        prob_matrix = prob_matrix * ((p != 0) == fired)
        # Nodes activated this step that were not active before.
        fresh = (np.sum(fired, axis=0) > 0) * (1 - active)
        active = np.array(active + fresh)
        history = np.concatenate((history, [fresh]), axis=0)
        step += 1
    return history
"""
we will estimate the probability using a credit assignment approach.
p_v,w = sum(credit_u,v) / A_v
credit_u,v = 1 / ( sum{w in S} I(t_w = t_v - 1) )
at each episode in which the node has been active, we can assign credit to its neighbors
depending whether these nodes have been active at the previous episode (partitioning the credit
among them)
"""
def estimate_probabilities(dataset, node_index, n_nodes):
    """Estimate activation probabilities toward `node_index` by credit assignment.

    p_v,w = sum(credit_v,w) / (occurrences of v active before w): each episode
    in which the target activated splits one unit of credit uniformly among the
    nodes active at the previous step.

    NOTE(review): the comparisons `idx_w_active > 0` and
    `idx_v_active < idx_w_active` operate on numpy ARRAYS inside a boolean
    `and`; this only behaves (and only avoids a ValueError) when those arrays
    have a single element — i.e. a node activates at most once per episode,
    which the simulator guarantees. Confirm before reusing with other data.
    """
    estimated_probabilities = np.ones(n_nodes)*1.0 / (n_nodes -1 )
    credits = np.zeros(n_nodes)
    occur_v_active = np.zeros(n_nodes)
    n_episodes = len(dataset)
    for episode in dataset:
        # Step at which the target node activated (empty if it never did).
        idx_w_active = np.argwhere(episode[:, node_index] == 1).reshape(-1)
        if len(idx_w_active) > 0 and idx_w_active > 0:
            # Split one unit of credit among nodes active at the previous step.
            active_nodes_in_prev_step = episode[idx_w_active-1,:].reshape(-1)
            credits += active_nodes_in_prev_step / np.sum(active_nodes_in_prev_step)
        # Count, per candidate node, the episodes where it was active before
        # the target (or the target never activated at all).
        for v in range(n_nodes):
            if v!=node_index:
                idx_v_active = np.argwhere(episode[:, v] == 1).reshape(-1)
                if len(idx_v_active)>0 and (idx_v_active<idx_w_active or len(idx_w_active)==0):
                    occur_v_active[v] += 1
    estimated_probabilities = credits/occur_v_active
    # Nodes never observed active yield 0/0 -> NaN; map those to 0.
    estimated_probabilities = np.nan_to_num(estimated_probabilities)
    return estimated_probabilities
# Example: simulate many cascades on a random graph and compare the true
# incoming probabilities of one node against the credit-based estimate.
if __name__ == '__main__':
    n_nodes = 5
    n_episodes = 1000
    prob_matrix = np.random.uniform(0.0, 0.1, (n_nodes, n_nodes))
    node_index = 4
    dataset = []
    for e in range(n_episodes):
        dataset.append(simulate_episode(prob_matrix, n_steps_max=10))
    estimated_probabilities = estimate_probabilities(dataset=dataset,
                                                     node_index=node_index,
                                                     n_nodes=n_nodes)
    print(" True matrix : {}".format(prob_matrix[:,4]))
    print(" Estimated matrix : {}".format(estimated_probabilities))
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,167 | eliabonazza/DIA_Lab | refs/heads/master | /Pricing/SWTS_Learner.py | import numpy as np
from Pricing.TS_Learner import TS_Learner
# sliding window
# sliding window
class SWTS_Learner(TS_Learner):
    """Thompson Sampling restricted to a sliding window of recent rewards.

    Beta parameters are rebuilt each round from only the last `window_size`
    rewards of the pulled arm, so old phases are forgotten.
    """

    def __init__(self, n_arms, window_size):
        super().__init__(n_arms)
        self.window_size = window_size

    def update(self, pulled_arm, reward):
        """Record the reward and refit the arm's Beta prior on the window."""
        self.t += 1
        self.update_observations(pulled_arm, reward)
        recent = self.rewards_per_arm[pulled_arm][-self.window_size:]
        successes = np.sum(recent)
        self.beta_parameters[pulled_arm, 0] = successes + 1.0
        self.beta_parameters[pulled_arm, 1] = len(recent) - successes + 1.0
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,168 | eliabonazza/DIA_Lab | refs/heads/master | /Advertising/GTS_Learner.py | from Pricing.Learner import Learner
import numpy as np
class GTS_Learner(Learner):
    """Gaussian Thompson Sampling: each arm's reward is modelled as a Gaussian
    whose mean/std are updated from the observed rewards; arms are selected by
    sampling from those posteriors."""

    def __init__(self, n_arms):
        super(GTS_Learner, self).__init__(n_arms)
        self.means = np.zeros(n_arms)
        self.sigma = np.ones(n_arms) * 1e3 # no prior information -> high probability to be pulled
    def pull_arm(self):
        # Sample one value per arm from its Gaussian and take the argmax.
        idx = np.argmax(np.random.normal(self.means, self.sigma))
        return idx
    def update(self, pulled_arm, reward):
        self.t += 1
        self.update_observations(pulled_arm, reward)
        self.means[pulled_arm] = np.mean(self.rewards_per_arm[pulled_arm])
        n_samples = len(self.rewards_per_arm[pulled_arm])
        # sigma shrinks with more samples; kept > 0 by requiring n_samples > 1
        # NOTE(review): std/n is used here; the standard error of the mean
        # would be std/sqrt(n) — confirm which was intended.
        if n_samples > 1:
            self.sigma[pulled_arm] = np.std(self.rewards_per_arm[pulled_arm]) / n_samples
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,169 | eliabonazza/DIA_Lab | refs/heads/master | /Advertising/BiddingEnvironment.py | import numpy as np
"""
maps the bid to the number of bids
"""
def fun(x):
    """Expected number of clicks obtained for bid x."""
    return 100 * (1.0 - np.exp(3 * x ** 3 - 4 * x))


class BiddingEnvironment():
    """Stochastic bidding environment: each arm's reward is Gaussian around
    the true click curve fun(bid) with a fixed noise std."""

    def __init__(self, bids, sigma):
        self.bids = bids
        self.means = fun(bids)
        self.sigma = sigma * np.ones(len(bids))

    def round(self, pulled_arm):
        """Sample a noisy number of clicks for the chosen bid."""
        mu = self.means[pulled_arm]
        return np.random.normal(mu, self.sigma[pulled_arm])
| {"/Pricing/example.py": ["/Pricing/SWTS_Learner.py"]} |
57,177 | SubZer0811/pacman | refs/heads/main | /ghost.py | #!/usr/bin/env python3
import pygame
import random
from maze_map import *
class ghost (maze_map):
    """A ghost enemy driven in its own process and synchronised with the main
    loop through two shared dicts: `rcv` (inputs: maze, player position,
    per-ghost status flags) and `ret` (outputs: draw/erase commands).

    Colors map to behaviours: red chases via BFS, blue greedily minimises the
    squared distance to the player, yellow keeps its direction when possible
    and otherwise turns randomly.
    """

    def __init__(self,coord,surface,tid,color):
        self.coord=coord            # [x, y] position in maze cells (may be half-cells)
        self.surface=surface
        self.dir=-1                 # current direction: 0=right, 1=left, 2=up, 3=down
        if(color == "red"):
            self.ghost_colour = (255,0,0)
        elif(color=="blue"):
            self.ghost_colour=(34,234,233)
        elif(color == "yellow"):
            self.dir=2
            self.ghost_colour=(255,195,11)
        self.ghost_radius = 10
        self.mode="chase"
        self.color=color
        self.speedcnt=0             # alternates 0/1: new move vs. finishing a half-step
        self.tid = tid              # index into the shared t_stat status list

    def move_ghost(self, rcv, ret):
        """Process loop: wait for the main loop's go signal (t_stat == 0),
        advance half a cell per tick, and publish draw commands via `ret`.
        A t_stat of 3 is the shutdown signal."""
        prev_speed_cnt = 1
        while True:
            pygame.time.wait(50)
            t_stat=rcv['t_stat']
            if(t_stat[self.tid] == 3):
                return
            if(t_stat[self.tid] == 0):
                maze=rcv["maze"]
                if(self.speedcnt==0):
                    # First half-step: pick a direction via the colour strategy.
                    tmp_x = int(self.coord[0])
                    tmp_y = int(self.coord[1])
                    ret['clear_draw']=tuple(self.coord)
                    if(self.color=="red"):
                        key_press = self.move_red(rcv)
                    elif(self.color == 'blue'):
                        key_press = self.move_blue(rcv)
                    elif(self.color == 'yellow'):
                        key_press = self.move_yellow(rcv)
                    if(key_press==-1):
                        # No move available (e.g. already on the player).
                        t_stat[self.tid] = 1
                        continue
                    if(maze[tmp_y][tmp_x] == 9):
                        # Redraw the coin the ghost is leaving behind.
                        ret["draw_coin1"]=(tmp_x, tmp_y)
                    if(key_press == 0):
                        if(maze[tmp_y][tmp_x+1] != 1):
                            self.coord[0]+=0.5
                            self.dir=0
                    elif(key_press == 1):
                        if(maze[tmp_y][tmp_x-1]!=1):
                            self.coord[0]-=0.5
                            self.dir=1
                    elif(key_press == 2):
                        if(maze[tmp_y-1][tmp_x]!=1):
                            self.coord[1]-=0.5
                            self.dir=2
                    elif(key_press == 3):
                        if(maze[tmp_y+1][tmp_x]!=1):
                            self.coord[1]+=0.5
                            self.dir=3
                    ret["draw"]=(self.coord,self.ghost_colour)
                    self.speedcnt=1
                    t_stat[self.tid] = 1
                elif(self.speedcnt==1):
                    # Second half-step: complete the move in the stored direction.
                    ret['clear_draw']=tuple(self.coord)
                    tf_x = self.coord[0]
                    tf_y = self.coord[1]
                    tmp_x = int(self.coord[0])
                    tmp_y = int(self.coord[1])
                    self.coord[0]=tmp_x
                    self.coord[1]=tmp_y
                    if(maze[tmp_y][tmp_x] == 9):
                        ret["draw_coin1"]=(tmp_x, tmp_y)
                    if(self.dir==0):
                        tf_x+=0.5
                        tmp_x+=1
                    elif(self.dir==1):
                        tf_x-=0.5
                        tmp_x+=1
                    elif(self.dir==2):
                        tf_y-=0.5
                        tmp_y+=1
                    elif(self.dir==3):
                        tf_y+=0.5
                        tmp_y+=1
                    self.coord[0]=tmp_x
                    self.coord[1]=tmp_y
                    if(maze[tmp_y][tmp_x] == 9):
                        ret["draw_coin2"]=(tmp_x, tmp_y)
                    self.coord[0]=tf_x
                    self.coord[1]=tf_y
                    ret["draw"]=(self.coord,self.ghost_colour)
                    self.speedcnt=0
                    t_stat[self.tid] = 1
                # Write the updated status list back into the shared dict.
                rcv["t_stat"]=t_stat

    def BFS(self, start, target):
        """Breadth-first search over walkable maze cells.

        Returns the NEXT cell on a shortest path from `start` to `target`,
        or -1 when start already equals target (path of length 1).
        """
        start=[int(i) for i in start]
        queue = [start]
        path = []     # list of {"Current", "Next"} edges discovered
        visited = []
        while queue:
            current = queue[0]
            queue.remove(queue[0])
            visited.append(current)
            if current == target:
                break
            else:
                neighbours = [[0, -1], [1, 0], [0, 1], [-1, 0]]
                for neighbour in neighbours:
                    if neighbour[0]+current[0] >= 0 and neighbour[0] + current[0] < len(maze[0]):
                        if neighbour[1]+current[1] >= 0 and neighbour[1] + current[1] < len(maze):
                            next_cell = [neighbour[0] + current[0], neighbour[1] + current[1]]
                            if next_cell not in visited:
                                if maze[next_cell[1]][next_cell[0]] != 1:
                                    queue.append(next_cell)
                                    path.append({"Current": current, "Next": next_cell})
        # Walk the discovered edges backwards to reconstruct the path.
        shortest = [target]
        while target != start:
            for step in path:
                if step["Next"] == target:
                    target = step["Current"]
                    shortest.insert(0, step["Current"])
        if(len(shortest)>1):
            return(shortest[1])
        return -1

    def move_red(self, rcv):
        """Chase strategy: step toward the player along a BFS shortest path."""
        s = self.BFS(self.coord,rcv["player_coord"])
        if(s == -1):
            return -1
        if(self.coord[0]<s[0]):
            return 0
        elif(self.coord[0]>s[0]):
            return 1
        elif(self.coord[1]<s[1]):
            return 3
        elif(self.coord[1]>s[1]):
            return 2

    def move_blue(self, rcv):
        """Greedy strategy: among open neighbour cells, pick the one with the
        smallest squared distance to the player; -1 if already on the player."""
        tmp_x=int(self.coord[0])
        tmp_y=int(self.coord[1])
        play_x=rcv["player_coord"][0]
        play_y=rcv["player_coord"][1]
        test = []   # squared distances of candidate cells
        w=[]        # matching direction codes
        if(maze[tmp_y][tmp_x+1] != 1):
            test.append((play_x-tmp_x-1)**2+(play_y-tmp_y)**2)
            w.append(0)
        if(maze[tmp_y][tmp_x-1] != 1):
            test.append((play_x-tmp_x+1)**2+(play_y-tmp_y)**2)
            w.append(1)
        if(maze[tmp_y-1][tmp_x] != 1):
            test.append((play_x-tmp_x)**2+(play_y-tmp_y+1)**2)
            w.append(2)
        if(maze[tmp_y+1][tmp_x] != 1):
            test.append((play_x-tmp_x)**2+(play_y-tmp_y-1)**2)
            w.append(3)
        if(play_x==tmp_x and tmp_y==play_y):
            return -1
        return w[test.index(min(test))]

    def move_yellow(self, rcv):
        """Wanderer strategy: keep the current direction while it stays open."""
        tmp_x=int(self.coord[0])
        tmp_y=int(self.coord[1])
        test = [] # right, left, up, down
        if(maze[tmp_y][tmp_x+1] != 1):
            test.append(0)
        if(maze[tmp_y][tmp_x-1] != 1):
            test.append(1)
        if(maze[tmp_y-1][tmp_x] != 1):
            test.append(2)
        if(maze[tmp_y+1][tmp_x] != 1):
            test.append(3)
        if(self.dir in test):
            return self.dir
return random.choice(test) | {"/main.py": ["/player.py", "/draw.py", "/ghost.py"]} |
57,178 | SubZer0811/pacman | refs/heads/main | /player.py | #!/usr/bin/env python3
from maze_map import *
import pygame
class player(maze_map):
    """Pac-Man player: holds the grid position and renders itself on the surface.

    Grid geometry (pixel_size, pixel_center) comes from the maze_map base class.
    """

    SIZE = (10, 10)
    color = (255, 0, 0)
    lives = 3
    player_colour = (255, 255, 0)
    player_radius = 8

    def __init__(self, player_coord, surface):
        self.player_coord = player_coord
        self.surface = surface
        self.coin_score = 0

    def cleardraw(self):
        """Erase the player's current cell by painting it black."""
        top_left = [self.pixel_size * c for c in self.player_coord]
        cell = pygame.Rect(top_left, (self.pixel_size, self.pixel_size))
        pygame.draw.rect(self.surface, (0, 0, 0), cell)

    def draw(self):
        """Draw the player as a yellow circle centred in its cell."""
        cx = self.player_coord[0] * self.pixel_size + (self.pixel_center)
        cy = self.player_coord[1] * self.pixel_size + (self.pixel_center)
        pygame.draw.circle(self.surface, self.player_colour, (cx, cy), self.player_radius)
57,179 | SubZer0811/pacman | refs/heads/main | /setup.py | import sys
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
additional_modules = []

# Options for cx_Freeze's build_exe command: which packages to bundle and
# which asset files to ship next to the executable.
build_exe_options = {
    "includes": additional_modules,
    "packages": ["pygame", "random", "sys", "threading", "math", "time", "multiprocessing"],
    "excludes": ['tkinter'],
    "include_files": ['icon.png', 'start_screen.png', 'game_over.jpg'],
}

# On Windows, the Win32GUI base suppresses the console window.
base = "Win32GUI" if sys.platform == "win32" else None

setup(
    name="Pacman",
    version="1.0",
    description="Catch me if u can",
    options={"build_exe": build_exe_options},
    executables=[Executable(script="main.py", base=base)],
)
57,180 | SubZer0811/pacman | refs/heads/main | /main.py | #!/usr/bin/env python3
import pygame
import sys
import random
import threading
import math
import time
import multiprocessing
from maze_map import *
from player import *
from draw import *
import config
from ghost import *
# --- Pac-Man entry point: window setup, start screen, then the game loop. ---
# Status flags shared with the ghost worker processes via snd['t_stat'];
# index 3 appears unused — presumably a spare slot (TODO confirm).
thread_status = [0, 0, 0, 0]

pygame.init()
pygame.display.set_caption('PACMAN')
pygame.font.init()
config.WIDTH = 560
config.HEIGHT = 700
surface = pygame.display.set_mode((config.WIDTH, config.HEIGHT))
config.surface = surface

# Splash screen with the window icon; block until SPACE is pressed.
image = pygame.image.load(r'start_screen.png')
gameIcon = pygame.image.load('icon.png')
pygame.display.set_icon(gameIcon)
surface.blit(image, (1, 1))
start = "Press SPACE to start the game!"
draw_text(start, config.surface, [
    config.WIDTH//2, 660], 50, (255, 255, 255), 'Comic Sans MS', centered=True)
pygame.display.update()
flag = 0
while True:
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
            flag = 1
            break
    if flag:
        break
    pygame.display.update()

# Build the maze, the player and the three ghosts, then draw the first frame.
mapobj = maze_map(surface)
mapobj.show()
player_1 = player([13, 29], surface)
ghost1 = ghost([11, 13], surface, 0, color="red")
ghost2 = ghost([15, 13], surface, 1, color="blue")
ghost3 = ghost([14, 13], surface, 2, color="yellow")
draw((ghost1.coord), ghost1.ghost_colour)
draw((ghost2.coord), ghost2.ghost_colour)
draw((ghost3.coord), ghost3.ghost_colour)
pygame.display.flip()
clock = pygame.time.Clock()
GAME_FONT = pygame.font.SysFont('Comic Sans MS', 24)

with multiprocessing.Manager() as manager:
    # snd is shared game state read by all ghost processes; snd1..snd3 are
    # the per-ghost channels through which each process reports its moves.
    snd = manager.dict()
    snd1 = manager.dict()
    snd2 = manager.dict()
    snd3 = manager.dict()
    snd['t_stat'] = thread_status
    snd['player_coord'] = player_1.player_coord
    snd['maze'] = maze
    # Sentinel (-1) values mean "nothing to draw/clear this frame".
    snd1['draw'] = ([-1, -1], (255, 255, 255))
    snd1['clear_draw'] = (-1, -1)
    snd1['draw_coin1'] = (-1, -1)
    snd1['draw_coin2'] = (-1, -1)
    snd2['draw'] = ([-1, -1], (255, 255, 255))
    snd2['clear_draw'] = (-1, -1)
    snd2['draw_coin1'] = (-1, -1)
    snd2['draw_coin2'] = (-1, -1)
    snd3['draw'] = ([-1, -1], (255, 255, 255))
    snd3['clear_draw'] = (-1, -1)
    snd3['draw_coin1'] = (-1, -1)
    snd3['draw_coin2'] = (-1, -1)
    # One worker process per ghost.
    p1 = multiprocessing.Process(target=ghost1.move_ghost, args=(snd, snd1))
    p2 = multiprocessing.Process(target=ghost2.move_ghost, args=(snd, snd2))
    p3 = multiprocessing.Process(target=ghost3.move_ghost, args=(snd, snd3))
    p1.start()
    p2.start()
    p3.start()
    while True:
        # Maze cell codes (from usage below): 0 empty, 9 coin, 7 player,
        # 1 presumably wall — TODO confirm against maze_map.
        keys = pygame.key.get_pressed()
        tmp_x = player_1.player_coord[0]
        tmp_y = player_1.player_coord[1]
        if keys[pygame.K_RIGHT]:
            if(maze[tmp_y][tmp_x+1] == 9 or maze[tmp_y][tmp_x+1] == 0):
                maze[tmp_y][tmp_x] = 0
                if(maze[tmp_y][tmp_x+1] == 9): # check coin
                    player_1.coin_score += 1 # increment score
                    maze[tmp_y][tmp_x+1] = 0 # update map with no coin
                player_1.player_coord[0] += 1
                maze[player_1.player_coord[1]][player_1.player_coord[0]] = 7
        elif keys[pygame.K_LEFT]:
            if(maze[tmp_y][tmp_x-1] == 9 or maze[tmp_y][tmp_x-1] == 0):
                maze[tmp_y][tmp_x] = 0
                if(maze[tmp_y][tmp_x-1] == 9):
                    player_1.coin_score += 1
                    maze[tmp_y][tmp_x-1] = 0
                player_1.player_coord[0] -= 1
                maze[player_1.player_coord[1]][player_1.player_coord[0]] = 7
        elif keys[pygame.K_UP]:
            if(maze[tmp_y-1][tmp_x] == 9 or maze[tmp_y-1][tmp_x] == 0):
                maze[tmp_y][tmp_x] = 0
                if(maze[tmp_y-1][tmp_x] == 9):
                    player_1.coin_score += 1
                    maze[tmp_y-1][tmp_x] = 0
                player_1.player_coord[1] -= 1
                maze[player_1.player_coord[1]][player_1.player_coord[0]] = 7
        elif keys[pygame.K_DOWN]:
            if(maze[tmp_y+1][tmp_x] == 9 or maze[tmp_y+1][tmp_x] == 0):
                maze[tmp_y][tmp_x] = 0
                if(maze[tmp_y+1][tmp_x] == 9):
                    player_1.coin_score += 1
                    maze[tmp_y+1][tmp_x] = 0
                player_1.player_coord[1] += 1
                maze[player_1.player_coord[1]][player_1.player_coord[0]] = 7
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
        # HUD strip below the maze: lives and score.
        pygame.draw.rect(surface, (0, 0, 0), pygame.Rect((0, 621), (560, 80)))
        text = GAME_FONT.render("LIVES: " + str(player_1.lives) + 50 *
            ' '+"SCORE: " + str(player_1.coin_score), True, (255, 0, 255))
        surface.blit(text, (100, 650))
        # Publish the latest player position and maze to the ghost processes.
        snd["player_coord"] = player_1.player_coord
        snd["maze"] = maze
        mapobj.show()
        # Busy-wait until all three ghost processes have posted their moves.
        while True:
            if(sum(snd['t_stat']) >= 3):
                if(snd1['draw'][0][0] != -1):
                    draw(snd1['draw'][0], snd1['draw'][1])
                if(snd2['draw'][0][0] != -1):
                    draw(snd2['draw'][0], snd2['draw'][1])
                if(snd3['draw'][0][0] != -1):
                    draw(snd3['draw'][0], snd3['draw'][1])
                break
            # pygame.time.wait(100)
        # Collision: any ghost drawn on the player's cell costs a life.
        if(snd1['draw'][0] == player_1.player_coord
                or snd2['draw'][0] == player_1.player_coord
                or snd3['draw'][0] == player_1.player_coord ):
            if(player_1.lives - 1 > 0):
                # Respawn the player at the start cell after a short pause.
                player_1.lives -= 1
                maze[player_1.player_coord[1]][player_1.player_coord[0]] = 0
                player_1.player_coord[0] = 13
                player_1.player_coord[1] = 29
                maze[29][13] = 7
                pygame.time.wait(1000)
                continue
            else:
                # Out of lives: signal the ghost processes to stop (t_stat=3),
                # join them, then show the game-over screen (never returns).
                snd["t_stat"] = [3 for i in range(4)]
                p1.join()
                p2.join()
                p3.join()
                del ghost1
                del ghost2
                del ghost3
                replay=end_window(player_1.coin_score)
        elif(player_1.coin_score == 286):
            # All 286 coins collected: shut down workers and show the win screen.
            snd["t_stat"] = [3 for i in range(4)]
            p1.join()
            p2.join()
            p3.join()
            del ghost1
            del ghost2
            del ghost3
            replay=end_window(player_1.coin_score)
        # Reset the per-ghost channels to sentinels for the next frame.
        snd1['draw'] = ([-1, -1], (255, 255, 255))
        snd1['clear_draw'] = (-1, -1)
        snd1['draw_coin1'] = (-1, -1)
        snd1['draw_coin2'] = (-1, -1)
        snd2['draw'] = ([-1, -1], (255, 255, 255))
        snd2['clear_draw'] = (-1, -1)
        snd2['draw_coin1'] = (-1, -1)
        snd2['draw_coin2'] = (-1, -1)
        snd3['draw'] = ([-1, -1], (255, 255, 255))
        snd3['clear_draw'] = (-1, -1)
        snd3['draw_coin1'] = (-1, -1)
        snd3['draw_coin2'] = (-1, -1)
        pygame.display.flip()
        clock.tick(10)
        snd["t_stat"] = [0 for i in range(4)]
| {"/main.py": ["/player.py", "/draw.py", "/ghost.py"]} |
57,181 | SubZer0811/pacman | refs/heads/main | /draw.py | #!/usr/bin/env python3
import pygame
import config
import os
# Shared drawing geometry: every maze cell is pixel_size x pixel_size pixels.
ghost_radius = 10
pixel_size = 20
pixel_center = pixel_size/2    # offset from a cell's origin to its centre
coin_colour = (255, 182, 193)  # pink
coin_radius = 4
def draw(coord, ghost_colour):
    """Draw a ghost as a filled circle centred in grid cell *coord* ([col, row])."""
    pygame.draw.circle(config.surface, ghost_colour, (coord[0]*pixel_size+(pixel_center), coord[1]*pixel_size+(pixel_center)), ghost_radius)
def cleardraw(coord):
    """Erase grid cell *coord* ([col, row]) by painting it black."""
    pygame.draw.rect(config.surface,(0,0,0), pygame.Rect(([pixel_size*i for i in coord]), (pixel_size, pixel_size)))
def draw_coin(coord):
    """Draw a coin as a small pink circle centred in grid cell *coord*."""
    pygame.draw.circle(config.surface, coin_colour, (coord[0]*pixel_size+(pixel_center), coord[1]*pixel_size+(pixel_center)), coin_radius)
def draw_text(words, screen, pos, size, colour, font_name, centered=False):
    """Render *words* onto *screen* at *pos* ((x, y) pair).

    When *centered* is True, *pos* is treated as the centre of the text
    instead of its top-left corner.  Fix: the original wrote the adjusted
    coordinates back into the caller's *pos* list (an in-place mutation);
    the adjustment is now done on local copies.
    """
    font = pygame.font.SysFont(font_name, size)
    text = font.render(words, False, colour)
    x, y = pos
    if centered:
        text_size = text.get_size()
        x = x - text_size[0]//2
        y = y - text_size[1]//2
    screen.blit(text, (x, y))
def end_window(score):
    """Show the game-over screen and wait for the player to quit or restart.

    Never returns normally: SPACE re-launches main.py in a fresh interpreter,
    ESCAPE or closing the window exits the process.

    Bug fix: the win message for a perfect score (286) was unconditionally
    overwritten by the plain score line; the plain line now only applies to
    non-winning scores.
    """
    config.surface.fill((0, 0, 0))
    image = pygame.image.load(r'game_over.jpg')
    config.surface.blit(image, (100, 0))
    pygame.display.update()
    if(int(score) == 286):
        score="YOU'VE WON!!!! Score : " + str(score)
    else:
        score="Score : " + str(score)
    quit_text = "Press the ESCAPE to quit"
    again_text = "Press SPACE to play with again"
    draw_text(score, config.surface, [
        config.WIDTH//2, config.HEIGHT//2+100], 36, (190, 190, 190), 'Comic Sans MS', centered=True)
    draw_text(again_text, config.surface, [
        config.WIDTH//2, config.HEIGHT//2+200], 36, (190, 190, 190), 'Comic Sans MS', centered=True)
    draw_text(quit_text, config.surface, [
        config.WIDTH//2, config.HEIGHT//2 + 270], 36, (190, 190, 190), 'Comic Sans MS', centered=True)
    pygame.display.update()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                # Restart by spawning a fresh interpreter on main.py.
                pygame.quit()
                os.system('python3 main.py')
                exit(0)
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                exit(0)
            if event.type == pygame.QUIT:
                exit(0)
        pygame.display.update()
| {"/main.py": ["/player.py", "/draw.py", "/ghost.py"]} |
57,183 | tubial/Race | refs/heads/master | /race_test.py | import unittest
import race
import pygame
class MyTest(unittest.TestCase):
    """Unit tests for the Race game's Player and MrWolf classes."""

    def testvalidkeypressplayerone(self):
        """Player one moves forward by alternating the A and S keys."""
        player = race.Player("one")
        player.last_button_pressed = pygame.K_a
        self.assertEqual(player.validForwardKeypress(pygame.K_s), True)
        self.assertEqual(player.validForwardKeypress(pygame.K_a), True)

    def testvalidkeypressplayertwo(self):
        """Player two moves forward by alternating the quote and semicolon keys."""
        player = race.Player("two")
        player.last_button_pressed = pygame.K_QUOTE
        self.assertEqual(player.validForwardKeypress(pygame.K_SEMICOLON), True)
        self.assertEqual(player.validForwardKeypress(pygame.K_QUOTE), True)

    def testmove(self):
        """Each valid alternating key press advances the position by one.

        Bug fix: Player.move() mutates state and returns None, so the old
        assertions on its return value could never pass; assert on the
        ``position`` property instead.  Also removed a stray no-op
        ``player.move`` attribute access.
        """
        player = race.Player("one")
        player.last_button_pressed = pygame.K_a
        player.move(pygame.K_s)
        self.assertEqual(player.position, 1)
        player.move(pygame.K_a)
        self.assertEqual(player.position, 2)

    def testturnaround(self):
        """TODO: MrWolf.update() is still a stub; nothing to assert yet."""
        mrwolf = race.MrWolf()
        # self.assertEqual()
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
57,184 | tubial/Race | refs/heads/master | /player.py | # player.py
import pygame
import random
import constants
class Player(pygame.sprite.Sprite):
    """A racer that advances one step each time its two keys are pressed in alternation.

    Player "one" alternates S/A; player "two" alternates QUOTE/SEMICOLON.
    """

    def __init__(self, playernum):
        """playernum is "one" or "two"; it selects the player's key pair."""
        super().__init__()
        self.last_button_pressed = pygame.K_ESCAPE
        self._playernum = playernum
        self._position = 0
        self.image = pygame.Surface([200, 200])
        self.rect = self.image.get_rect()
        # Seed the alternation so the first expected key is the pair's other key.
        if self._playernum == "one":
            self.last_button_pressed = pygame.K_s
        elif self._playernum == "two":
            self.last_button_pressed = pygame.K_QUOTE

    def move(self, key):
        """Advance one step if *key* is a valid alternating press; returns None."""
        if self.validForwardKeypress(key):
            self._position += 1

    @property
    def position(self):
        return self._position

    @property
    def playernum(self):
        return self._playernum

    def validForwardKeypress(self, key):
        """Return True (and record *key*) iff *key* alternates with the last press.

        Fixes: removed a leftover debug print and collapsed the duplicated
        per-player toggle logic into one check; behaviour is otherwise
        unchanged (the recorded key always equals the accepted *key*).
        """
        if self._playernum == "one":
            pair = (pygame.K_s, pygame.K_a)
        elif self._playernum == "two":
            pair = (pygame.K_QUOTE, pygame.K_SEMICOLON)
        else:
            return False
        # Both keys must belong to this player's pair and differ (alternation).
        if key in pair and self.last_button_pressed in pair and key != self.last_button_pressed:
            self.last_button_pressed = key
            return True
        return False
class MrWolf(pygame.sprite.Sprite):
    """The 'wolf' obstacle: a red bar spanning the screen width.

    Turn-around behaviour is not implemented yet; update() is a stub.
    """

    def __init__(self):
        super().__init__()
        self.difficulty = 0
        self.image = pygame.Surface((constants.WIDTH, 10))
        self.image.fill(constants.RED)
        self.rect = self.image.get_rect()
        self._facingforward = False

    @property
    def facingforward(self):
        # Read-only view of whether the wolf currently faces the players.
        return self._facingforward

    def update(self):
        """Will update the wolf to be facing forward after a random amount of time."""
        if self._facingforward:
            pass
        else:
            pass
57,185 | tubial/Race | refs/heads/master | /race.py | """race.py
"""
import random
import pygame
import constants
import player
class Game(object):
    """Race game state: two players, Mr Wolf, and the distance to the goal."""

    def __init__(self, end):
        """Game state

        Arguments:
        end - steps needed to win
        """
        self._playerone = player.Player("one")
        self._playertwo = player.Player("two")
        # Each entry is [player, fraction-of-goal-completed].
        self._players = [
            [self._playerone, 0],
            [self._playertwo, 0]
        ]
        self._wolf = player.MrWolf()
        self._goal = end
        self._sprites = pygame.sprite.Group()
        self._sprites.add(self._playerone)
        self._sprites.add(self._playertwo)
        self._sprites.add(self._wolf)

    @property
    def get_playerone(self):
        return self._playerone

    @property
    def get_playertwo(self):
        return self._playertwo

    @property
    def get_mrwolf(self):
        return self._wolf

    @property
    def get_players(self):
        return self._players

    def update(self, event):
        """Route a KEYDOWN event to whichever player owns the pressed key."""
        if event.key == pygame.K_s or event.key == pygame.K_a:
            self._playerone.move(event.key)
        if event.key == pygame.K_QUOTE or event.key == pygame.K_SEMICOLON:
            self._playertwo.move(event.key)

    def player_status(self):
        """Return the players list with each progress fraction refreshed.

        returns:
        list of [player, position/goal] pairs
        """
        # update player's progress
        for pl in self._players:
            pl[1] = pl[0].position / self._goal
        return self._players

    def gamefinished(self):
        """returns the winning player if the game is ended
        if game is not finished, returns None"""
        if self._playerone.position >= self._goal:
            return self._playerone
        elif self._playertwo.position >= self._goal:
            return self._playertwo
        else:
            return None

    def draw(self, screen):
        """Draw all sprites (both players and the wolf) onto *screen*."""
        self._sprites.draw(screen)
def main():
    """Run the Race game loop until the window is closed or a player wins.

    Improvement: gamefinished() was called twice per frame; its result is
    now stored once per frame in a local.
    """
    game = Game(100)
    pygame.init()
    screen = pygame.display.set_mode([constants.WIDTH, constants.HEIGHT])
    pygame.display.set_caption("Race!")
    font = pygame.font.SysFont("monotype", 20)
    done = False
    clock = pygame.time.Clock()
    # -------- Main Program Loop -----------
    while not done:
        # --- Main event loop
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.KEYDOWN:
                game.update(event)
        screen.fill(constants.WHITE)
        game.draw(screen)
        # Render each player's progress fraction in the top-left corner.
        for n, pl in enumerate(game.player_status()):
            screen.blit(
                font.render(str(pl[1]), 1, constants.BLACK, constants.WHITE),
                (5, 5 * n * 10)
            )
        # Check to see if game finished
        winner = game.gamefinished()
        if winner:
            print(f"Player {winner.playernum} wins!")
            done = True
        pygame.display.flip()
        clock.tick(60)
    pygame.quit()
if __name__ == "__main__":
main()
| {"/race_test.py": ["/race.py"], "/player.py": ["/constants.py"], "/race.py": ["/constants.py", "/player.py"]} |
57,186 | tubial/Race | refs/heads/master | /constants.py | # Colours
# Colours (RGB tuples)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
# Screen Dimensions (pixels)
WIDTH = 600
HEIGHT = 800
57,189 | WesleyvanDooren/ma-communicative-robots | refs/heads/master | /representation/annotation.py | # Define Annotation Class
from __future__ import annotations
import enum
import uuid
from rdflib import URIRef, Namespace
from typing import Iterable, Tuple, Generic, TypeVar
from representation.container import Sequence, AtomicContainer, Ruler, Index
from representation.entity import Person, Emotion
from representation.scenario import Mention, Annotation
from representation.util import Identifier
# RDF namespaces for known friends, raw signal data and predicates.
friends_namespace = Namespace("http://cltl.nl/leolani/friends/")
data_namespace = Namespace("http://cltl.nl/combot/signal/")
predicate_namespace = Namespace("http://cltl.nl/combot/predicate/")
class ImageLabel(enum.Enum):
    """Labels that image annotations can carry (currently only FACE)."""
    FACE = 0
class EntityType(enum.Enum):
    """Coarse ontology type of a referenced entity."""
    PERSON = 0
    FRIEND = 1
    OBJECT = 2
class Entity:
    """A typed reference to an individual, identified by an RDF URI."""
    def __init__(self, id: URIRef, type: EntityType) -> None:
        self.id = id
        self.type = type
class Triple:
    """An RDF-style (subject, predicate, object) statement between entities."""
    def __init__(self, subject: Entity, predicate: URIRef, object_: Entity) -> None:
        self.subject = subject
        self.predicate = predicate
        self.object = object_

    # TODO make this more generic
    @classmethod
    def from_friends(cls, subject_id, predicate_id, object_id) -> Triple:
        """Build a triple whose subject and object live in the friends namespace."""
        return cls(Entity(friends_namespace.term(subject_id), EntityType.FRIEND),
                   predicate_namespace.term(predicate_id),
                   Entity(friends_namespace.term(object_id), EntityType.FRIEND))
class Token(AtomicContainer):
    """A single token (word) stored as an atomic container value."""
    def __init__(self, value: str) -> None:
        AtomicContainer.__init__(self, value)
class Utterance(Sequence):
    """One chat utterance: the raw text plus its token sequence.

    Bug fix: the *id_* parameter was accepted but silently dropped; it is
    now forwarded to Sequence so a caller-supplied identifier is preserved
    (Sequence still generates a uuid when id_ is None).
    """
    def __init__(self, chat_id: Identifier, utterance: str, tokens: Iterable[Token], id_: Identifier = None) -> None:
        # Fall back to a fresh uuid when no chat id is given.
        self.chat_id = chat_id if chat_id else uuid.uuid4()
        self.utterance = utterance
        Sequence.__init__(self, tuple(tokens), id_=id_)
| {"/representation/annotation.py": ["/representation/container.py", "/representation/entity.py", "/representation/scenario.py", "/representation/util.py"], "/representation/scenario.py": ["/representation/container.py", "/representation/entity.py", "/representation/util.py"], "/representation/container.py": ["/representation/util.py"], "/representation/entity.py": ["/representation/util.py"]} |
57,190 | WesleyvanDooren/ma-communicative-robots | refs/heads/master | /representation/scenario.py | # Define Annotation Class
import enum
import os
import json
import uuid
from typing import Iterable, Union, Dict, Optional, Tuple, TypeVar
from representation.container import Container, TemporalContainer, Ruler, TemporalRuler, Sequence, ArrayContainer
from representation.entity import Person, Object
from representation.util import Identifier, serializer
class Modality(enum.Enum):
    """The medium a signal was recorded in."""
    IMAGE = 0
    TEXT = 1
    AUDIO = 2
    VIDEO = 3
class Annotation:
    """A typed value attached to a signal segment by some source at some time."""
    def __init__(self, value: object, source: Identifier, timestamp: int):
        # The value's class name is stored so serialized annotations are self-describing.
        self.type = value.__class__.__name__
        self.value = value
        self.source = source
        self.timestamp = timestamp
class Mention:
    """Links one or more signal segments (rulers) to their annotations."""
    def __init__(self, segment: Union[Ruler, Iterable[Ruler]], annotations: Iterable[Annotation]):
        self.segment = segment
        self.annotations = annotations
# R: the ruler type a signal is addressed by; T: the element type it yields.
R = TypeVar('R', bound=Ruler)
T = TypeVar('T')
class Signal(Container[R, T]):
    """Base class for a recorded signal: modality, temporal extent, backing
    files and any annotation mentions (defaults to an empty list)."""
    def __init__(self, modality: Modality, time: TemporalRuler, files: Iterable[str],
                 mentions: Iterable[Mention]=None) -> None:
        self.modality = modality
        self.time = time
        self.files = files
        self.mentions = mentions if mentions is not None else []
class TextSignal(Signal[Sequence, str], Sequence[str]):
    """A text signal addressed by Index rulers over [0, length)."""
    def __init__(self, id_: Identifier, time: TemporalRuler, files: Iterable[str],
                 length, mentions: Iterable[Mention]=None):
        id_ = id_ if id_ else uuid.uuid4()
        Signal.__init__(self, Modality.TEXT, time, files, mentions)
        Sequence.__init__(self, id_=id_, stop=length)
class ImageSignal(Signal[ArrayContainer, float], ArrayContainer[float]):
    """An image signal addressed by MultiIndex bounding boxes within *bounds*."""
    def __init__(self, id_: Identifier, time: TemporalRuler, files: Iterable[str],
                 bounds: Tuple[Tuple[int,int], ...], mentions: Iterable[Mention]=None) -> None:
        id_ = id_ if id_ else uuid.uuid4()
        Signal.__init__(self, Modality.IMAGE, time, files, mentions)
        ArrayContainer.__init__(self, id_=id_, bounds=bounds)
class AudioSignal(Signal[ArrayContainer, float], ArrayContainer[float]):
    """An audio signal stored as an array container.

    NOTE(review): bounds=None with no array makes ArrayContainer derive
    bounds from array.shape, which raises AttributeError — confirm intended.
    """
    def __init__(self, id_: Identifier, time: TemporalRuler, files: Iterable[str],
                 mentions: Iterable[Mention]=None) -> None:
        id_ = id_ if id_ else uuid.uuid4()
        Signal.__init__(self, Modality.AUDIO, time, files, mentions)
        ArrayContainer.__init__(self, id_=id_, bounds=None)
class VideoSignal(Signal[ArrayContainer, float], ArrayContainer[float]):
    """A video signal stored as an array container.

    NOTE(review): same bounds=None caveat as AudioSignal — confirm intended.
    """
    def __init__(self, id_: Identifier, time: TemporalRuler, files: Iterable[str],
                 mentions: Iterable[Mention]=None) -> None:
        id_ = id_ if id_ else uuid.uuid4()
        Signal.__init__(self, Modality.VIDEO, time, files, mentions)
        ArrayContainer.__init__(self, id_=id_, bounds=None)
class ScenarioContext:
    """Who is present in a scenario: the agent, the speaker, and the known
    people and objects."""
    def __init__(self, agent: Identifier, speaker: Person, persons: Iterable[Person], objects: Iterable[Object]) -> None:
        self.agent = agent
        self.speaker = speaker
        self.persons = persons
        self.objects = objects
class Scenario(TemporalContainer):
    """A full interaction episode: its context plus one signal file per modality."""
    def __init__(self, id_: Identifier, start: int, end: int, context: ScenarioContext, signals: Dict[Modality, str]) -> None:
        super().__init__(start, end, id_=id_)
        self.context = context
        # Maps each modality to the path of its signal file.
        self.signals = signals
        # TODO Just a list or with some structure, e.g. relate the ruler in the file (dict: time -> event)
def append_signal(path: str, signal: object, terminate: bool=False, indent=4):
    """Append *signal* as one element of the JSON array stored at *path*.

    Creates the file (and parent directories) with an opening "[" on first
    use; pass terminate=True to close the array.

    Bug fix: the old version wrote a comma AFTER every element, leaving a
    trailing comma before "]" so the finished file was not valid JSON.  The
    separator is now written BEFORE each element after the first.
    """
    os.makedirs(os.path.dirname(path), exist_ok=True)
    initialize = not os.path.isfile(path)
    with open(path, "a") as signal_file:
        if initialize:
            signal_file.write("[\n")
        if signal:
            if not initialize:
                # The file already holds at least one element; separate them.
                signal_file.write(",\n")
            json.dump(signal, signal_file, default=serializer, indent=indent)
        if terminate:
            signal_file.write("\n]")
| {"/representation/annotation.py": ["/representation/container.py", "/representation/entity.py", "/representation/scenario.py", "/representation/util.py"], "/representation/scenario.py": ["/representation/container.py", "/representation/entity.py", "/representation/util.py"], "/representation/container.py": ["/representation/util.py"], "/representation/entity.py": ["/representation/util.py"]} |
57,191 | WesleyvanDooren/ma-communicative-robots | refs/heads/master | /representation/util.py | import enum
import numpy as np
import uuid
from rdflib import URIRef
from typing import Union
Identifier = Union[URIRef, uuid.UUID, str, None]
def serializer(object):
    """``default`` hook for json.dump: convert non-JSON-native values.

    Enums become their name, URIRefs/UUIDs their string form, numpy arrays
    nested lists; anything else falls back to its attribute dict (vars).
    """
    if isinstance(object, enum.Enum):
        return object.name
    if isinstance(object, (URIRef, uuid.UUID)):
        return str(object)
    if isinstance(object, np.ndarray):
        return object.tolist()
    return vars(object)
57,192 | WesleyvanDooren/ma-communicative-robots | refs/heads/master | /integration/convert.py | import numpy as np
from PIL import Image
from leolani.datarepresentation.interaction import Context as LeolaniContext
from leolani.datarepresentation.language import Chat as LeolaniChat
from leolani.datarepresentation.language import Utterance as LeolaniUtterance
from leolani.datarepresentation.language import UtteranceHypothesis, UtteranceType
from leolani.datarepresentation.rdf_builder import RdfBuilder
from leolani.datarepresentation.representation import Triple as LeolaniTriple
from leolani.datarepresentation.vision import Bounds as LeolaniBounds
from leolani.datarepresentation.vision import Face as LeolaniFace
from leolani.datarepresentation.vision import Object as LeolaniObject, AbstractImage
from rdflib import URIRef
from typing import Tuple
from signal_meta_data import Object, BoundingBoxSegment, Person, FaceAnnotation, TimeSegment, \
ImageSignal, TextSignal, UtteranceAnnotation, Triple, Entity, Scenario
# Event-bus topic names used by the Leolani/pepper framework.
TOPIC_ON_CHAT_ENTER = "pepper.framework.context.topic.chat_enter"
TOPIC_ON_CHAT_TURN = "pepper.framework.context.topic.chat_turn"
TOPIC_ON_CHAT_EXIT = "pepper.framework.context.topic.chat_exit"
TOPIC_FACE = "pepper.framework.sensor.api.face_detector.topic"
TOPIC_FACE_NEW = "pepper.framework.sensor.api.face_detector.topic.new"
TOPIC_FACE_KNOWN = "pepper.framework.sensor.api.face_detector.topic.known"
class Event:
    """Minimal stand-in for a framework event: a payload plus metadata."""
    def __init__(self, payload, metadata):
        self.payload = payload
        self.metadata = metadata
def convert_context(scenario: Scenario) -> LeolaniContext:
    """Build a Leolani Context from a scenario's people and objects."""
    leolani_context = LeolaniContext(scenario.id, convert(scenario.context.persons))
    leolani_context.add_objects(convert(scenario.context.objects))
    return leolani_context
def convert_person(person: Person) -> str:
    """Reduce a Person to its name string (Leolani's current representation)."""
    # TODO Person in Leolani is currently just a name, we should give it an ID
    return person.name
def convert_speaker(speaker: FaceAnnotation) -> str:
    """Reduce a face annotation to the annotated person's name."""
    return speaker.person.name
def convert_speaker_from_image(speaker: FaceAnnotation, image_path: str, time: TimeSegment) -> LeolaniFace:
    """Turn a face annotation plus its source image into a Leolani Face (confidence fixed at 1.0)."""
    # TODO Representation of the face (image)
    representation = None
    # TODO Emotion?
    return LeolaniFace(speaker.person.name, 1.0, representation, convert(speaker.segment), convert_image(image_path, speaker.segment, time))
def read_image(path):
    """Load the image at *path* and return it as a numpy array."""
    image = Image.open(path)
    return np.asarray(image)
def convert_image(path: str, bounding_box: BoundingBoxSegment, time: TimeSegment) -> AbstractImage:
    """Wrap the image file at *path* as a Leolani AbstractImage with bounds and start time."""
    image_array = read_image(path)
    # NOTE(review): the empty array is passed where AbstractImage expects
    # additional (presumably depth) data — confirm against the Leolani API.
    return AbstractImage(image_array, convert(bounding_box), np.array(()), time.start)
# TODO Object class is just a dummy
def convert_object(obj: Object) -> LeolaniObject:
    """Convert a project Object into a Leolani Object (confidence fixed at 1.0).

    Bug fix: the id was read from the builtin ``object`` instead of the
    ``obj`` parameter, which raised AttributeError at runtime.
    """
    return LeolaniObject(obj.id, 1.0, convert(obj.bounding_box))
def convert_bounding_box(bounding_box: BoundingBoxSegment):
    """Unpack the segment's 4-value bounding box into Leolani Bounds."""
    return LeolaniBounds(*bounding_box.bounding_box)
def convert_entity(entity: Entity) -> dict:
    """Serialize an Entity to the dict shape Leolani's RdfBuilder expects."""
    return {"label": str(entity.id), "type": entity.type.name.lower()}
def convert_predicate(predicate: URIRef) -> dict:
    """Serialize a predicate URI to the dict shape Leolani's RdfBuilder expects."""
    return {"type": str(predicate)}
def convert_triple(triple: Triple) -> LeolaniTriple:
    """Convert a project Triple into a Leolani triple via RdfBuilder."""
    builder = RdfBuilder()
    subject = convert(triple.subject)
    predicate = convert_predicate(triple.predicate)
    obj = convert(triple.object)
    return builder.fill_triple(subject, predicate, obj)
# TODO we could directly add to the context here, but that would not make it available to intentions
def integrate_image_signal(signal: ImageSignal) -> Tuple[Event]:
    """Convert an image signal's face annotation into a face-detector event."""
    # Convert annotation from Face recognition to event payload
    speaker = signal.speaker
    time = signal.time
    img_path = signal.files[0]
    # TODO Convert annotation from Object recognition to event payload
    return ((TOPIC_FACE, Event(convert_speaker_from_image(speaker, img_path, time), None)),)
def integrate_text_signal(signal: TextSignal, context: LeolaniContext) -> Tuple[Tuple[Tuple[(str, Event)]], Tuple[LeolaniTriple]]:
    """Turn a text signal into chat events (one batch per utterance) plus its converted triples."""
    events = tuple(ev for utt in signal.utterances for ev in integrate_utterance(utt, context))
    triples = convert(signal.triples)
    return events, triples
def integrate_utterance(utterance: UtteranceAnnotation, context: LeolaniContext) -> Tuple[Tuple[(str, Event)]]:
    """Feed one utterance into the chat context, emitting (topic, Event) pairs.

    Emits chat-exit when a different chat is open, chat-enter when no chat
    is running, and always a chat-turn for the utterance itself.
    """
    events: Tuple[Tuple[(str, Event)]] = ()
    speaker = utterance.speaker
    chat_id = utterance.chat_id
    # TODO Leolani will do its own NLP processing
    # utterance.tokens
    # utterance.metions
    utterance_hypothesis = UtteranceHypothesis(utterance.utterance, 1.0)
    # Close a running chat that belongs to a different conversation.
    if context.chatting and context.chat.id != chat_id:
        context.stop_chat()
        events += ((TOPIC_ON_CHAT_EXIT, Event(None, None)),)
    if not context.chatting:
        # TODO Leolani doesn't support setting chat id and time
        context.start_chat(speaker.person.name)
        events += ((TOPIC_ON_CHAT_ENTER, Event(speaker.person.name, None)),)
    context.chat.add_utterance([utterance_hypothesis], False)
    events += ((TOPIC_ON_CHAT_TURN, Event(utterance_hypothesis, None)),)
    # TODO emotion
    # utterance.emotion
    # On chat turn event payload
    return events
def convert_utterance(utterance: UtteranceAnnotation, context: LeolaniContext) -> LeolaniUtterance:
    """Build a Leolani utterance (fixed 0.99 confidence, STATEMENT type)."""
    chat = LeolaniChat(utterance.speaker.person.name, context)
    hyp = UtteranceHypothesis(utterance.utterance, 0.99)
    leolani_utterance = LeolaniUtterance(chat, [hyp], False, -1)
    leolani_utterance._type = UtteranceType.STATEMENT
    # Neutral default perspective; replace once perspective extraction exists.
    leolani_utterance.pack_perspective({"certainty": 0.5, "polarity": 1, "sentiment": 0})
    return leolani_utterance
def convert(obj: object):
    """Dispatch *obj* to its convert_* function, recursing into lists/tuples.

    Primitives pass through unchanged.  NOTE(review): unhandled types fall
    off the end and implicitly return None — confirm that is intended.
    """
    if isinstance(obj, (str, int, float, bool)):
        return obj
    if isinstance(obj, (list, tuple)):
        return tuple(convert(o) for o in obj)
    if isinstance(obj, Scenario):
        return convert_context(obj)
    if isinstance(obj, Object):
        return convert_object(obj)
    if isinstance(obj, BoundingBoxSegment):
        return convert_bounding_box(obj)
    if isinstance(obj, FaceAnnotation):
        return convert_speaker(obj)
    if isinstance(obj, Person):
        return convert_person(obj)
    if isinstance(obj, Entity):
        return convert_entity(obj)
    if isinstance(obj, Triple):
        return convert_triple(obj)
if __name__ == "__main__":
print(convert_bounding_box(BoundingBoxSegment(0,1,2,3))) | {"/representation/annotation.py": ["/representation/container.py", "/representation/entity.py", "/representation/scenario.py", "/representation/util.py"], "/representation/scenario.py": ["/representation/container.py", "/representation/entity.py", "/representation/util.py"], "/representation/container.py": ["/representation/util.py"], "/representation/entity.py": ["/representation/util.py"]} |
57,193 | WesleyvanDooren/ma-communicative-robots | refs/heads/master | /representation/container.py | # Define Annotation Class
from __future__ import annotations
import json
import numpy as np
import uuid
from typing import Union, TypeVar, Generic, Iterable, Tuple
from representation.util import serializer, Identifier
class Ruler:
    """Base type of Rulers that allow to identify a segment relative to a ruler in a signal"""
    def __init__(self, container_id: Identifier):
        # The concrete class name is stored so serialized rulers are self-describing.
        self.type = self.__class__.__name__
        self.container_id = container_id
# R: the ruler type a container is addressed by; T: the element type it yields.
R = TypeVar('R', bound=Ruler)
T = TypeVar('T')
class Container(Generic[R, T]):
    """Abstract container: look up the content selected by a ruler segment."""
    def __getitem__(self, segment: R) -> T:
        raise NotImplementedError()
class BaseContainer(Container[R, T]):
    """A container with an identifier and a full-extent ruler over itself."""
    def __init__(self, id_: Identifier, ruler: R) -> None:
        self.id = id_
        self.ruler = ruler
class Index(Ruler):
    """Ruler over a sequence: a [start, stop) slice of the container."""
    def __init__(self, container_id: Identifier, start: int, stop: int):
        super().__init__(container_id)
        self.start = start
        self.stop = stop

    def get_offset(self, start: int, end: int) -> Index:
        """Return a sub-slice ruler; raises ValueError when outside this ruler."""
        if start < self.start or end > self.stop:
            raise ValueError("start and end must be within [{}, {}), was [{}, {})".format(self.start, self.stop, start, end))
        return Index(self.container_id, start, end)
class Sequence(BaseContainer[Index, T]):
    """A finite sequence addressed by Index rulers (slicing semantics)."""
    def __init__(self, seq: Iterable[T] = None, id_: Identifier = None, start=0, stop=None) -> None:
        self.seq = tuple(seq) if seq is not None else None
        id_ = id_ if id_ else uuid.uuid4()
        # The full-extent ruler defaults to [start, len(seq)) when stop is omitted.
        super().__init__(id_, Index(id_, start, stop if stop is not None else len(self.seq)))

    def __getitem__(self, offset: Index) -> T:
        return self.seq[offset.start:offset.stop]
class MultiIndex(Ruler):
    """Ruler over an n-dimensional array: a (lower, upper) bound per dimension."""
    def __init__(self, container_id: Identifier, bounds: Tuple[Tuple[int,int], ...]) -> None:
        super().__init__(container_id)
        if len(bounds) < 2:
            raise ValueError("MultiIndex must have at least two dimensions, was " + str(len(bounds)))
        self.bounds = bounds

    def get_area_bounding_box(self, x_min: int, y_min: int, x_max: int, y_max: int) -> MultiIndex:
        """Return a sub-area ruler over the first two dimensions.

        Bug fix: the error message contained %s placeholders that were never
        filled in; it now reports the actual bounds.
        """
        if x_min < self.bounds[0][0] or x_max >= self.bounds[0][1] \
                or y_min < self.bounds[1][0] or y_max >= self.bounds[1][1]:
            raise ValueError("bounding box must be within {}, was {}".format(
                self.bounds, ((x_min, x_max), (y_min, y_max))))
        return MultiIndex(self.container_id, ((x_min, x_max), (y_min, y_max)) + self.bounds[2:])
class ArrayContainer(BaseContainer[MultiIndex, T]):
    """An n-dimensional array addressed by MultiIndex bounding boxes."""
    def __init__(self, array: Union[tuple, list, np.ndarray] = None, id_: Identifier = None,
                 bounds: Tuple[Tuple[int,int], ...] = None) -> None:
        self.array = np.array(array) if array is not None else None
        id_ = id_ if id_ else uuid.uuid4()
        # NOTE(review): when array is None, bounds must be supplied — the
        # fallback below reads array.shape and would raise AttributeError.
        bounds = bounds if bounds else tuple((0, upper) for upper in array.shape)
        super().__init__(id_, MultiIndex(id_, bounds))

    def __getitem__(self, bounding_box: MultiIndex) -> T:
        # Slice every dimension by its (lower, upper) bound with step 1.
        return self.array[tuple(slice(b[0], b[1], 1) for b in bounding_box.bounds)]
class TemporalRuler(Ruler):
    """Ruler over a time axis: a [start, end) interval within a container."""
    def __init__(self, container_id: Identifier, start: int, end: int) -> None:
        super().__init__(container_id)
        self.start = start
        self.end = end

    def get_time_segment(self, start: int, end: int) -> TemporalRuler:
        """Return a sub-interval ruler; raises ValueError when outside this ruler.

        Bug fix: the message used %s placeholders with str.format, so it was
        never interpolated; {} placeholders are used now.
        NOTE(review): ``end >= self.end`` rejects end == self.end, unlike
        Index.get_offset which allows it — confirm which is intended.
        """
        if start < self.start or end >= self.end:
            raise ValueError("start and end must be within [{}, {}), was [{}, {})".format(self.start, self.end, start, end))
        return TemporalRuler(self.container_id, start, end)
class TemporalContainer(BaseContainer[TemporalRuler, TemporalRuler]):
    """A time span; lookups simply echo the requested temporal segment."""
    def __init__(self, start: int, end: int, id_: Identifier = None) -> None:
        id_ = id_ if id_ else uuid.uuid4()
        self.start_time = start
        self.end_time = end
        super().__init__(id_, TemporalRuler(id_, start, end))

    def __getitem__(self, segment: TemporalRuler) -> TemporalRuler:
        return segment
class AtomicRuler(Ruler):
    """Ruler for indivisible values: identifies the whole container."""
    pass
class AtomicContainer(BaseContainer[AtomicRuler, T]):
    """Holds a single indivisible value."""
    def __init__(self, value: T, id_: Identifier = None) -> None:
        self.value = value
        id_ = id_ if id_ else uuid.uuid4()
        super().__init__(id_, AtomicRuler(id_))

    def __getitem__(self, segment: AtomicRuler) -> T:
        # Only the container's own ruler may address the value.
        if not segment.container_id == self.id:
            raise ValueError("Invalid segment")
        return self.value
if __name__ == "__main__":
from pprint import pprint
tokens = Sequence(["I", "am", "in", "Amsterdam"])
token_offset = tokens.ruler.get_offset(0, 1)
token_segment = tokens[token_offset]
pprint(token_segment)
print(json.dumps(tokens, default=serializer, indent=2))
array = ArrayContainer(np.zeros((5,5,3), dtype=int))
bbox = array.ruler.get_area_bounding_box(0,0,2,2)
area = array[bbox]
pprint(area)
print(json.dumps(array, default=serializer, indent=2))
period = TemporalContainer(0, 1000)
time_segment = period.ruler.get_time_segment(10,100)
sub_period = period[time_segment]
print(sub_period)
print(json.dumps(period, default=serializer, indent=2)) | {"/representation/annotation.py": ["/representation/container.py", "/representation/entity.py", "/representation/scenario.py", "/representation/util.py"], "/representation/scenario.py": ["/representation/container.py", "/representation/entity.py", "/representation/util.py"], "/representation/container.py": ["/representation/util.py"], "/representation/entity.py": ["/representation/util.py"]} |
57,194 | WesleyvanDooren/ma-communicative-robots | refs/heads/master | /representation/entity.py | # Define Annotation Class
import enum
from datetime import date
import uuid
from representation.util import Identifier
class Emotion(enum.Enum):
    """Emotion labels attachable to annotations."""
    NEUTRAL = 0
    ANGER = 1
    DISGUST = 2
    FEAR = 3
    HAPPINESS = 4
    JOY = 5
    SADNESS = 6
    SURPRISE = 7
class Gender(enum.Enum):
    """Gender of a person; UNDEFINED when unknown."""
    UNDEFINED = 0
    FEMALE = 1
    MALE = 2
    OTHER = 3
class Instance:
    """Base class for anything with a stable identifier in the world model."""
    def __init__(self, id: Identifier):
        # Fall back to a fresh uuid when no identifier is given.
        self.id = id if id else uuid.uuid4()

### in GRaSP instances have a list of mentions which are all the segments that have been annotated with "reference" to this mention, these all get the grasp:denotedBy relation
### Do we want to do the same thing here? This would be a reverse index of (a subset of) the annotations.
class Object(Instance):
    """An identified object with a human-readable label."""

    def __init__(self, id: Identifier, label: str):
        super().__init__(id)
        self.label = label
class Person(Instance):
    """An identified person with name, age and gender."""

    def __init__(self, id: Identifier, name: str, age: int, gender: Gender):
        super().__init__(id)
        self.name = name  # this could be a list of names
        #self.pronoun = pronoun # this could be a list of pronouns
        self.age = age  # this should be changed to day of birth
        self.gender = gender
class Friend(Person):
    """A Person the agent knows; no extra state beyond Person yet."""
    pass
| {"/representation/annotation.py": ["/representation/container.py", "/representation/entity.py", "/representation/scenario.py", "/representation/util.py"], "/representation/scenario.py": ["/representation/container.py", "/representation/entity.py", "/representation/util.py"], "/representation/container.py": ["/representation/util.py"], "/representation/entity.py": ["/representation/util.py"]} |
57,205 | AlpsSnow/PhotoMap | refs/heads/master | /createGeojson.py | from getExifData import getExifData
def writeGeojson(file, latLons):
    """Write *latLons* photo records to *file* as GeoJSON Feature entries.

    :param file: any object with a ``writelines`` method (e.g. an open file)
    :param latLons: list of ``[photo_name, date_string, lon, lat]`` records;
        sorted in place by capture date (oldest first), as before.
    """
    # Sort by capture date; replaces the original hand-rolled O(n^2)
    # selection sort (which also duplicated the sort done in getExifData).
    latLons.sort(key=lambda photo: photo[1])
    index = 1
    for photo in latLons:
        print(photo)
        file.writelines('{"type": "Feature","properties": {"cartodb_id":"' + str(index) + '"')
        file.writelines(',"photo date":"' + str(photo[1]) + '","image":"' + 'https://github.com/mutou8bit/PhotoMap/tree/master/photo/' + photo[0] + '"')
        # The last feature must not carry a trailing comma.
        if index == len(latLons):
            file.writelines('},"geometry": {"type": "Point","coordinates": [' + str(photo[2]) + ',' + str(photo[3]) + ']}}\n')
        else:
            file.writelines('},"geometry": {"type": "Point","coordinates": [' + str(photo[2]) + ',' + str(photo[3]) + ']}},\n')
        index += 1
if __name__ == "__main__":
    # Scan the default photo directory and emit a FeatureCollection wrapper
    # around the per-photo features written by writeGeojson.
    wpt = './photo'  # photo directory
    geojsonFile = open("photo.geojson", "w")
    geojsonFile.writelines('{\n"type": "FeatureCollection","features": [\n')
    latLons = getExifData(wpt)
    writeGeojson(geojsonFile, latLons)
    geojsonFile.writelines(']}\n')
    geojsonFile.close()
| {"/createGeojson.py": ["/getExifData.py"]} |
57,206 | AlpsSnow/PhotoMap | refs/heads/master | /getExifData.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
getExifData.py,批量取一个文件夹下照片的坐标,日期,照片名字
"""
import exifread
import os
import sys
#提取照片坐标和拍摄时间
def extractExif(fpath):
try:
with open(fpath,'rb') as rf:
exif=exifread.process_file(rf)
eDate=exif['EXIF DateTimeOriginal'].printable
eLon=exif['GPS GPSLongitude'].printable
eLat=exif['GPS GPSLatitude'].printable
#'[116, 29, 10533/500]' to [116,29,10533,500] type==(list)
lon=eLon[1:-1].replace(' ','').replace('/',',').split(',')
#经度
lon=float(lon[0])+float(lon[1])/60+float(lon[2])/float(lon[3])/3600
lat=eLat[1:-1].replace(' ','').replace('/',',').split(',')
lat=float(lat[0])+float(lat[1])/60+float(lat[2])/float(lat[3])/3600
p = fpath.rfind('/',0,len(fpath))
f = fpath[p+1:len(fpath)]
return [f,eDate,lon,lat] #照片的名字,拍摄时间,经度,纬度
except Exception as e:
print(e,fpath)
return None
# Collect name/date/coordinates for every photo under a directory.
def getExifData(dirpath):
    """Collect ``[name, date, lon, lat]`` for every photo under *dirpath*.

    Photos without usable EXIF are skipped. The result is sorted by
    capture date, oldest first.
    """
    latLons = []
    for root, dirs, files in os.walk(dirpath):
        print('根目录:{0},文件夹:{1},文件数:{2}'.format(root, dirs, len(files)))
        files.sort()
        for f in files:
            # Join against *root*, not *dirpath*: the original built
            # '{dirpath}/{f}' and therefore failed to open every photo
            # located inside a subdirectory.
            exif = extractExif(os.path.join(root, f))
            if exif:
                latLons.append(exif)
            else:
                print(f, 'exif is None')
    # Sort by capture date; replaces the original O(n^2) selection sort.
    latLons.sort(key=lambda photo: photo[1])
    return latLons
if __name__ == "__main__":
    # argv[1] is the photo directory, argv[2] an optional output path.
    # The original tests were off by one (argv[1] exists when len > 1,
    # argv[2] when len > 2), so command-line arguments were ignored.
    wpt = sys.argv[1] if len(sys.argv) > 1 else './photo'       # photo directory
    outputpath = sys.argv[2] if len(sys.argv) > 2 else ''       # GPS info output path
    latLons = getExifData(wpt)
    if latLons:
        if outputpath != '':
            # write() needs a string: the original passed the raw list
            # (TypeError) and never actually closed the file (missing
            # parentheses on fo.close). `with` handles both.
            with open(outputpath, "w") as fo:
                for record in latLons:
                    fo.write('{0}\n'.format(record))
        else:
            print(latLons)
    else:
        print('latLons is None')
57,207 | AlpsSnow/PhotoMap | refs/heads/master | /resize.py | #!/usr/bin/env python
import os
from PIL import Image
# Maximum thumbnail dimensions (width, height).
size = 512, 512


def resize(photo_path):
    """Create 512x512-bounded JPEG thumbnails for every file under *photo_path*.

    Thumbnails are written to ./resize/ with a ``.thumbnail.jpg`` suffix.
    """
    for root, dirs, files in os.walk(photo_path):
        print('根目录:{0},文件夹:{1},文件数:{2}'.format(root, dirs, len(files)))
        for f in files:
            outfile = f.replace(".jpg", ".thumbnail.jpg")
            try:
                # Open relative to *root*: the original joined against
                # photo_path and failed for files in subdirectories.
                im = Image.open(os.path.join(root, f))
                # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
                # long-standing equivalent name for the same filter.
                im.thumbnail(size, Image.LANCZOS)
                im.save('./resize/{0}'.format(outfile), "JPEG")
            except IOError:
                # Fixed missing space in the original message ("for{0}").
                print('cannot create thumbnail for {0}'.format(outfile))
if __name__ == "__main__":
    # Batch-thumbnail the default photo directory when run as a script.
    resize('./photo')
57,214 | Sorcy32/UNIrequester | refs/heads/master | /save_to_xlsx.py | from openpyxl import Workbook
from datetime import datetime
# Shared workbook state: one worksheet named 'Ouptut' (sic) at index 0,
# saved to a timestamp-named .xlsx file chosen at import time.
wb = Workbook()
ws = wb.create_sheet('Ouptut', 0)
filename = (str(datetime.strftime(datetime.now(), "%d.%m.%y %H.%M.%S")) + '.xlsx')
def add_header(items):
    """Append *items* to the output sheet as one row of string cells."""
    row = [str(cell) for cell in items]
    ws.append(row)
    print(ws.max_row)
def save():
    """Persist the workbook to the timestamped output file."""
    wb.save(filename)
| {"/sert.py": ["/config.py"], "/requester.py": ["/config.py", "/sert.py", "/save_to_xlsx.py"], "/gui.py": ["/config.py", "/requester.py"]} |
57,215 | Sorcy32/UNIrequester | refs/heads/master | /save_to_sql.py | import sqlite3
import os
# Module-level connection shared by all helpers.
conn = sqlite3.connect('my.db')
cursor = conn.cursor()


def create():
    """Create the export table if it does not exist.

    The original statement ("CREATE TABLE export ") had no column list,
    which is a SQLite syntax error; a table needs at least one column.
    """
    cursor.execute("CREATE TABLE IF NOT EXISTS export (id INTEGER PRIMARY KEY)")


def add_line(items, link=''):
    """Add one text column per entry of *items* to the export table.

    Column names cannot be bound as ``?`` parameters (the original raised
    sqlite3.OperationalError), so the identifier is quoted inline with
    embedded double quotes escaped.
    """
    for name in items:
        safe = str(name).replace('"', '""')
        cursor.execute('ALTER TABLE export ADD COLUMN "{0}" text'.format(safe))
| {"/sert.py": ["/config.py"], "/requester.py": ["/config.py", "/sert.py", "/save_to_xlsx.py"], "/gui.py": ["/config.py", "/requester.py"]} |
57,216 | Sorcy32/UNIrequester | refs/heads/master | /sert.py | import contextlib
import OpenSSL.crypto
import os
import config
""" Использование:
with pfx_to_pem('foo.pem', 'bar') as cert:
requests.post(url, cert=cert, data=payload)
"""
@contextlib.contextmanager
def pfx_to_pem(pfx_path, pfx_password):
    """Yield the path of a temporary PEM file built from a PKCS#12 (.pfx) bundle.

    The PEM path is taken from config ('Paths'/'PEM_certificate'). On first
    run that file does not exist yet: it is created empty and the program
    exits, asking the user to restart.
    """
    if os.path.isfile(config.get_setting('Paths', 'PEM_certificate')):
        with open(config.get_setting('Paths', 'PEM_certificate')) as t_pem:
            # NOTE(review): the file is opened read-only only to obtain its
            # name, then reopened for binary writing — confirm intent.
            f_pem = open(t_pem.name, 'wb')
            pfx = open(pfx_path, 'rb').read()
            p12 = OpenSSL.crypto.load_pkcs12(pfx, pfx_password)
            # Private key first, then the leaf certificate.
            f_pem.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, p12.get_privatekey()))
            f_pem.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, p12.get_certificate()))
            # Append the CA chain, if the bundle carries one.
            ca = p12.get_ca_certificates()
            if ca is not None:
                cs = 1  # counter is written but never read
                for cert in ca:
                    f_pem.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
                    cs += 1
                cs = 1
            f_pem.close()
            yield t_pem.name
    else:
        # First run: create an empty PEM placeholder and ask for a restart.
        f = open(config.get_setting('Paths', 'PEM_certificate'), 'tw', encoding='utf-8')
        f.close()
        print("Файл сертификата был создан. Необходимо перезапустить программу")
        exitt = input()  # wait for a keypress before exiting
        raise SystemExit(1)
if __name__ == "__main__":
    # Manual smoke test with a local certificate bundle.
    pfx_to_pem("cert.pfx", "123456")
    print('its ok')
| {"/sert.py": ["/config.py"], "/requester.py": ["/config.py", "/sert.py", "/save_to_xlsx.py"], "/gui.py": ["/config.py", "/requester.py"]} |
57,217 | Sorcy32/UNIrequester | refs/heads/master | /requester.py | import requests
import json
import config
import sert
from threading import Thread, current_thread
from queue import Queue
import save_to_xlsx as stx
# Credentials and endpoint pulled from settings.ini at import time.
login = config.get_setting('Account', 'login')
password = config.get_setting('Account', 'password')
link = config.get_setting("Network", "link")
access_key = config.get_setting('Network', 'AccessKey')
queue = Queue()  # queue of links awaiting processing
result_queue = Queue()  # links already processed
items_to_open = []  # ids read from the user's input file
user_items = []  # key paths the user wants extracted from each response
theard_count = 1  # worker thread count (sic: "theard" kept, name is referenced elsewhere)
ssssert = ''  # PEM certificate path, set by get() before requests start
radio = 0  # 0 = one row per link, 1 = one row per list element
finishedlist = []  # parsed rows waiting to be written to xlsx
splitter = 10  # rows per save batch
def single_resp(resp, ans):
    """Extract one value from JSON *resp* following the key path *ans*.

    *ans* holds one or two keys. With one key the raw value is returned
    ('' when it is None). With two keys the nested value — or the list of
    values when the first key holds a list of dicts — is returned as a
    string. A missing key yields the Russian "wrong item" message; any
    other path length returns None.
    """
    def _clean(value):
        # The service uses None for "no data"; present it as ''.
        return '' if value is None else value

    try:
        if len(ans) == 1:
            return _clean(resp[ans[0]])
        elif len(ans) == 2:
            try:
                return str(_clean(resp[ans[0]][ans[1]]))
            except Exception:
                # resp[ans[0]] may be a list of dicts: collect ans[1] from
                # each element. (Was a bare except; narrowed to Exception.)
                try:
                    collected = [entry[ans[1]] for entry in resp[ans[0]]]
                    return str(_clean(collected))
                except TypeError:
                    # `is None` replaces the original type(x) == type(None).
                    if resp[ans[0]] is None:
                        return 'None'
                    else:
                        return 'Проверить на сайте, сообщить об этой ошибке программы'
    except KeyError:
        print('Не верно указан итем запроса')
        return 'Не верно указан итем запроса'
def multi_resp(resp, ans, li, out=None):
    """Append one row per element of ``resp[ans[0]]`` to the result list.

    :param resp: parsed JSON response
    :param ans: key path (one or two keys) to extract
    :param li: the URL the response came from (kept for traceability)
    :param out: destination list; defaults to the module-level
        ``finishedlist`` (new optional parameter — backward compatible)
    """
    if out is None:
        out = finishedlist
    if len(ans) == 2:
        for entry in resp[ans[0]]:
            if entry[ans[1]] is not None:
                out.append(([entry[ans[1]]], li))
            else:
                print('NULL in ', li)
                # The original called str(list, str) here, which raises
                # TypeError at runtime; record a readable placeholder.
                out.append((['NULL in ' + li], li))
    elif len(ans) == 1:
        for entry in resp[ans[0]]:
            if entry is not None:
                out.append(str(entry))
            else:
                # Same two-argument str() bug as above, fixed.
                out.append('NULL in ' + li)
def get_request(link_to_open, login=login, password=password, locationN="XxXxX"):
    """Fetch one link and dispatch the JSON response to the configured parser.

    Appends parsed rows to the module-level ``finishedlist`` — radio == 0
    produces one row per link, radio == 1 one row per list element.
    Note: the default login/password are bound once at import time.
    """
    r = requests.get(link_to_open, cert=ssssert, auth=(login, password), timeout=None)
    print('Открываю ссылку: ', link_to_open)
    if r.status_code == 200:
        # r.json() is already parsed; the original dumps/loads round-trip
        # was a no-op and has been removed.
        resp = r.json()
        line = []
        if radio == 0:
            for item in user_items:
                line.append(single_resp(resp, item))
            finishedlist.append(line)
        elif radio == 1:
            for item in user_items:
                multi_resp(resp, item, link_to_open)
    else:
        print('Ошибка sc != 200')
        # The original passed two arguments to str(), which raises
        # TypeError; record a formatted message instead.
        finishedlist.append('Ошибка сервера при обработке ссылки {0}'.format(link_to_open))
def run(queue, result_queue):
    """Worker loop: drain *queue*, fetching each link via get_request.

    Runs inside a daemon thread; several workers share the same queues.
    """
    # Loop until the shared task queue is empty.
    while not queue.empty():
        host = queue.get()
        get_request(host)
        queue.task_done()
        result_queue.put_nowait(host)
    # NOTE(review): clearing shared queues from a worker while sibling
    # workers may still be running looks suspicious — confirm intent.
    result_queue.queue.clear()
    queue.queue.clear()
def silent_saver():
    """Background drainer of finishedlist into the xlsx writer.

    Currently unused: the thread that would run it is commented out in
    get(). Loops forever, printing progress every 100 saved rows.
    """
    coun = 0
    dest = 100
    while True:
        if len(finishedlist) != 0:
            stx.add_header(finishedlist.pop())
            coun += 1
            if coun == dest:
                print('Перехвачено и сохранено: ', coun)
                dest += 100
                print(len(finishedlist))
    # for x in finishedlist:
    #     stx.add_header(x)
def get():
    """Run the whole scrape: queue links, fan out worker threads, save results."""
    # print('get started')
    with sert.pfx_to_pem(
            config.get_setting("Network", "Certificate_name"),
            config.get_setting("Network", "Certificate_pass")) as cert:
        global ssssert
        ssssert = cert
        # Build the full request URL for every id and queue it for workers.
        for id in items_to_open:
            full_link = str(str(link) + str(id) + str(access_key))
            queue.put(full_link.strip())
        for i in range(int(theard_count)):
            thread = Thread(target=run, args=(queue, result_queue))
            # thread = Thread(target=get_location, args=(link, login, Password, id))
            # para = (link, login, Password, id)
            thread.daemon = True
            thread.start()
        # thread2 = Thread(target=silent_saver)
        # thread2.daemon = True
        # thread2.start()
        queue.join()
        # Write the header row (one cell per requested key path).
        header = []
        for xz in user_items:
            header.append(xz)
        stx.add_header(header)
        # Drain finishedlist into the workbook, flushing every `splitter` rows.
        print('Сохраняю данные')
        save_count = 0
        save_total = len(finishedlist)
        save_round = 0
        while finishedlist:
            save_round += 1
            print('Cохраняю {0} строку из {1}.'.format(save_round, save_total))
            if save_count <= int(splitter):
                stx.add_header(finishedlist.pop())
                save_count += 1
            else:
                # NOTE(review): this branch flushes without popping a row,
                # so the progress counter overshoots once per flush.
                stx.save()
                save_count = 0
        stx.save()
        print('program exit')
        print('')
def importer(packet):
    """Entry point called by the GUI: apply *packet* settings, then run get().

    Expected packet keys: link, accessKey, items, threads, path, radio,
    splitter. `path` names a text file with one id per line.
    """
    items_to_open.clear()
    finishedlist.clear()
    global link, access_key, user_items, theard_count, radio, splitter
    link = packet['link']
    access_key = packet['accessKey']
    user_items = packet['items']
    theard_count = packet['threads']
    file = packet['path']
    radio = packet['radio']
    splitter = packet['splitter']
    # One id per line in the input file.
    with open(file, 'r') as f:
        for z in f:
            z = z.strip()
            items_to_open.append(z)
    get()
| {"/sert.py": ["/config.py"], "/requester.py": ["/config.py", "/sert.py", "/save_to_xlsx.py"], "/gui.py": ["/config.py", "/requester.py"]} |
57,218 | Sorcy32/UNIrequester | refs/heads/master | /config.py | """
Использование: сonfig.get_setting('секция','данные')
"""
import configparser
import os
# Raw string: the original plain literal only worked because Python leaves
# the unknown escape "\s" intact (and has warned about it since 3.6).
path = r"data\settings.ini"
def createConfig(path):
    """Write the default settings file to *path*."""
    config = configparser.ConfigParser()
    config.read_dict({
        "Account": {
            "Login": "Login",
            "Password": "Password",
        },
        "Network": {
            "AccessKey": "?accessKey=access_key",
            "link": "https://support.russianpost.ru/sd/services/rest/get/",
            "Certificate_name": "data/cert.pfx",
            "Certificate_pass": "xxxxxxx",
        },
        "Paths": {
            "InputFile": "links.txt",
            "PEM_certificate": "data/tmp_cert.pem",
        },
    })
    with open(path, "w") as config_file:
        config.write(config_file)
# Load the configuration, creating the default file on first run.
def get_config(path):
    """Return a ConfigParser loaded from *path*, creating defaults if absent."""
    if not os.path.exists(path):
        createConfig(path)
    parser = configparser.ConfigParser()
    parser.read(path)
    return parser
# Fetch a single value from the config file.
def get_setting(section, data):
    """Return option *data* from *section* of the module-level config path."""
    config = get_config(path)
    val = config.get(section, data)
    return val
if __name__ == "__main__":
    # Regenerate the default settings file when run directly.
    createConfig(path)
| {"/sert.py": ["/config.py"], "/requester.py": ["/config.py", "/sert.py", "/save_to_xlsx.py"], "/gui.py": ["/config.py", "/requester.py"]} |
57,219 | Sorcy32/UNIrequester | refs/heads/master | /gui.py | from tkinter import *
from tkinter import filedialog as fd
from tkinter import messagebox as mb
import config
import requester
# TODO: add the ability to save user-defined item presets
# Pull defaults from the program settings.
link = config.get_setting('Network', 'link')
key = config.get_setting('Network', 'accesskey')
itemList = []  # Item rows currently shown in the answer group
filePath = None  # path of the user-selected input file
def open_file():
    """Ask the user for the input .txt file and preview its first link."""
    try:
        ePath.delete(0, END)
        filename = fd.askopenfilename(filetypes=[('Текстовые файлы', '*.txt')])
        global filePath
        filePath = filename
        ePath.insert(0, filename)  # show the chosen path in the entry
        # Put the file's first line into the middle entry so the user sees
        # what a typical request URL will look like.
        with open(filename, 'r') as file:
            x = file.readline()
            eLinkMiddle.delete(0, END)
            eLinkMiddle.insert(0, x)
    except FileNotFoundError:
        # Raised by open() when the dialog was cancelled (empty filename).
        pass
def get_count_threads():
    """Return the thread count currently typed into the settings entry."""
    count_text = eThreads.get()
    return count_text
def start():
    """Validate the collected GUI inputs and hand them to the requester."""
    packet = {'radio': var_list.get(),
              'items': get_list(),
              'threads': get_count_threads(),
              'link': eLinkOne.get(),
              'path': filePath,
              'accessKey': eLinkTwo.get(),
              'splitter': eSaveRows.get()}
    check = len(packet)
    # Any empty field (or an empty itemList) decrements the counter once,
    # shows an error box, and aborts — only the first problem is reported.
    for x in packet:
        if packet[x] == "" or packet[x] is None or len(itemList) == 0:
            check -= 1
            mb.showerror(title="Ошибка", message="Не все данные заполнены")
            break
    if len(packet) == check:
        print('Запускаю запросы')
        requester.importer(packet)
def add_item():
    """Append a new Item row to the answer group."""
    itemList.append(Item())


def del_item():
    """Remove and destroy the most recently added Item row, if any."""
    try:
        current = itemList.pop()
        Item.delete_item(current)
    except Exception:
        # pop() on an empty list / a widget already destroyed — the
        # original used a bare except, which also swallowed exit signals.
        pass
def get_list():
    """Collect the user-entered key paths from every Item row.

    :return: list of lists of entry values, e.g. [['1', '2'], ['3', '4']]
    """
    return [[entry.get() for entry in row.texts] for row in itemList]
class Item:
    """One row of key-path entry widgets inside the answer group."""

    def __init__(self):
        self.texts = []  # Entry widgets of this row, left to right
        self.item_group = LabelFrame(answer_group, text=('Item ' + str(len(itemList) + 1)))
        # TODO: fix the grid-placement conditions below.
        # Rows are laid out in three columns of 15 items each; past 45 the
        # user gets an error box.
        if (len(itemList) + 1) <= 15:
            self.item_group.grid(row=len(itemList) + 1, column=0)
        elif 15 < len(itemList) + 1 <= 30:
            self.item_group.grid(row=(int(len(itemList) + 1) - 15), column=1)
        elif 30 < len(itemList) + 1 <= 45:
            self.item_group.grid(row=(int(len(itemList) + 1) - 30), column=2)
        else:
            mb.showerror(title="Ошибка", message="Не больше 45")
        self.content = Entry(self.item_group, width=20)
        self.texts.append(self.content)
        self.content.grid(row=0, column=1)
        self.L = Radiobutton(self.item_group, text='IS LIST?', variable=var_list, value=len(itemList) + 1)
        self.L.grid(row=0, column=0)
        self.bAddEntry = Button(self.item_group, text="+", command=self.add_Entry, height=1)
        self.bAddEntry.grid(row=0, column=len(self.texts) + 1)

    def delete_item(self):
        """Destroy this row's frame and every widget inside it."""
        self.item_group.destroy()

    def add_Entry(self):
        """Append one more Entry to the row and shift the '+' button right."""
        self.entry = Entry(self.item_group, width=15)
        self.entry.grid(row=0, column=len(self.texts) + 1)
        self.texts.append(self.entry)
        self.bAddEntry.grid(row=0, column=len(self.texts) + 2)
def open_pre_set():
    """Load a preset .cfg file: one comma-separated key path per line."""
    # Clear all existing item rows first ("while range(len(...))" replaced
    # by the direct emptiness test).
    while itemList:
        del_item()
    var_list.set(0)
    try:
        preset_file = fd.askopenfilename(filetypes=[('Файлы конфигурации *.cfg', '*.cfg')])
        with open(preset_file, 'r') as file:
            lines = file.read().splitlines()
        for count, usr_item in enumerate(lines):
            add_item()
            parts = usr_item.split(',')
            if len(parts) == 1:
                # Insert the key itself: the original inserted the whole
                # list object, showing "['key']" in the entry widget.
                itemList[count].content.insert(0, parts[0])
            elif len(parts) == 2:
                itemList[count].content.insert(0, parts[0])
                itemList[count].add_Entry()
                itemList[count].entry.insert(0, parts[1])
            else:
                mb.showwarning('Warning', 'Something Wrong with opened file')
    except FileNotFoundError:
        # Dialog cancelled (empty filename) — nothing to load.
        pass
# --- Interface assembly -----------------------------------------------------
root = Tk()
root.geometry('800x580')
var_list = IntVar()  # radiobutton variable: which Item row is "a list" (0 = none)
var_list.set(0)
# File-import group: path entry plus a "load" button.
open_file_group = LabelFrame(root, text='Импортировать список')
ePath = Entry(open_file_group, width=100)
ePath.grid(row=0, column=0, padx=10)  # path of the import file
bOpen = Button(open_file_group, text='Загрузить', command=open_file, width=20).grid(row=0, column=1)  # "open" button
open_file_group.grid(row=0, column=0, columnspan=3, pady=5, padx=5, sticky=W + E)
# Quick-settings group: thread count, save batch size, start/preset buttons.
settings_group = LabelFrame(root, text='Настройки')
ssThr = LabelFrame(settings_group, text='Потоки')
eThreads = Entry(ssThr, text='20', width=10)  # worker thread count
eThreads.insert(0, '20')
eThreads.grid(row=1, column=0, padx=10)
ssThr.grid(row=1, column=0, padx=10)
ssSav = LabelFrame(settings_group, text='Дробить строк')
eSaveRows = Entry(ssSav, text='30000', width=10)  # rows per save batch
eSaveRows.insert(0, "30000")
eSaveRows.grid(row=1, column=1, padx=10)
ssSav.grid(row=1, column=1, padx=10)
bStart = Button(settings_group, text='START', command=start, width=20).grid(row=1, column=3)  # START button
settings_group.grid(row=1, column=0, columnspan=3, pady=5, padx=5, sticky=W + E)
bOpenPreSet = Button(settings_group, text='PreSet', command=open_pre_set, width=20).grid(row=1, column=4)
# Link-preview group: shows what a typical request URL will look like.
link_group = LabelFrame(root, text="Адрес запроса будет вида:")
eLinkOne = Entry(link_group, width=60, justify=RIGHT)  # first half of the URL
eLinkOne.grid(row=0, column=0)
eLinkOne.insert(0, link)
eLinkMiddle = Entry(link_group, justify=CENTER)
eLinkMiddle.grid(row=0, column=1)
eLinkTwo = Entry(link_group, width=48, justify=LEFT)
eLinkTwo.grid(row=0, column=2)
eLinkTwo.insert(0, key)
link_group.grid(row=2, column=0, columnspan=3, pady=5, padx=5, sticky=W + E)
# Answer group: where the user picks which response fields to extract.
answer_group = LabelFrame(root, text="Необходимые данные:", height=200)
answer_group.grid(row=3, column=0, columnspan=3, pady=5, padx=5, sticky=W + E)
controls = LabelFrame(answer_group)
controls.grid(row=0)
buttonAddSingle = Button(controls, text="+ Строка", command=add_item, width=7).grid(row=0, column=0)
buttonAddMultiply = Button(controls, text="- Строка", command=del_item, width=7).grid(row=0, column=1,
                                                                                     sticky=W + E)
buttonRadioDelete = Radiobutton(controls, text='Without List', variable=var_list, value=0)
buttonRadioDelete.grid(row=0, column=2)
info = Label(answer_group)
info.grid(row=0, column=2)
answer_group.grid(row=3, column=0, columnspan=3, pady=5, padx=5, sticky=W + E)
# --- End of interface assembly ----------------------------------------------
root.mainloop()  # start the Tk event loop
| {"/sert.py": ["/config.py"], "/requester.py": ["/config.py", "/sert.py", "/save_to_xlsx.py"], "/gui.py": ["/config.py", "/requester.py"]} |
57,248 | RushiChavan-dev/fastapi-aws-lear | refs/heads/main | /venv/Lib/site-packages/a2wsgi/asgi.py | import asyncio
import collections
import threading
from http import HTTPStatus
from itertools import chain
from typing import Any, Deque, Iterable
from .types import ASGIApp, Environ, Message, Scope, StartResponse
__all__ = ("ASGIMiddleware",)
class AsyncEvent:
    """Awaitable mailbox living on the event loop, settable from other threads."""

    def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
        self.loop = loop
        self.__waiters: Deque[asyncio.Future] = collections.deque()
        self.__nowait = False  # once set, wait() returns immediately with None

    def _set(self, message: Any) -> None:
        # Must run on the loop thread; resolves every still-pending waiter.
        for future in filter(lambda f: not f.done(), self.__waiters):
            future.set_result(message)

    def set(self, message: Any) -> None:
        """Thread-safe: schedule delivery of *message* on the loop."""
        self.loop.call_soon_threadsafe(self._set, message)

    async def wait(self) -> Any:
        """Await the next message (or None after set_nowait())."""
        if self.__nowait:
            return None
        future = self.loop.create_future()
        self.__waiters.append(future)
        try:
            result = await future
            return result
        finally:
            self.__waiters.remove(future)

    def set_nowait(self) -> None:
        """Stop blocking: every future wait() call returns None immediately."""
        self.__nowait = True
class SyncEvent:
    """Blocking single-message mailbox: set() stores a message, wait() takes it."""

    def __init__(self) -> None:
        self.__ready = threading.Event()
        self.__payload: Any = None

    def set(self, message: Any) -> None:
        """Store *message* and wake the thread blocked in wait()."""
        self.__payload = message
        self.__ready.set()

    def wait(self) -> Any:
        """Block until a message arrives, then clear the slot and return it."""
        self.__ready.wait()
        self.__ready.clear()
        taken, self.__payload = self.__payload, None
        return taken
def build_scope(environ: Environ) -> Scope:
    """Translate a WSGI environ dict into an ASGI HTTP connection scope."""
    # HTTP_* variables become ASGI (name, value) byte pairs first, then the
    # two bare CGI content headers, preserving environ order within each pass.
    header_pairs = []
    for key, value in environ.items():
        if key.startswith("HTTP_"):
            header_pairs.append(
                (key[5:].lower().replace("_", "-").encode("latin-1"),
                 value.encode("latin-1"))
            )
    for key, value in environ.items():
        if key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
            header_pairs.append(
                (key.lower().replace("_", "-").encode("latin-1"),
                 value.encode("latin-1"))
            )
    remote_addr = environ.get("REMOTE_ADDR")
    remote_port = environ.get("REMOTE_PORT")
    client = (environ["REMOTE_ADDR"], int(environ["REMOTE_PORT"])) if remote_addr and remote_port else None
    return {
        "wsgi_environ": environ,
        "type": "http",
        "asgi": {"version": "3.0", "spec_version": "3.0"},
        "http_version": environ.get("SERVER_PROTOCOL", "http/1.0").split("/")[1],
        "method": environ["REQUEST_METHOD"],
        "scheme": environ.get("wsgi.url_scheme", "http"),
        "path": environ["PATH_INFO"].encode("latin1").decode("utf8"),
        "query_string": environ["QUERY_STRING"].encode("ascii"),
        "root_path": environ.get("SCRIPT_NAME", "").encode("latin1").decode("utf8"),
        "client": client,
        "server": (environ["SERVER_NAME"], int(environ["SERVER_PORT"])),
        "headers": header_pairs,
    }
class ASGIMiddleware:
    """
    Convert ASGIApp to WSGIApp.

    wait_time: After the http response ends, the maximum time to wait for the ASGI app to run.
    """

    def __init__(
        self,
        app: ASGIApp,
        wait_time: float = None,
        loop: asyncio.AbstractEventLoop = None,
    ) -> None:
        self.app = app
        # Without an explicit loop, spin up a private one in a daemon thread
        # so ASGI tasks can run while WSGI threads block.
        if loop is None:
            loop = asyncio.new_event_loop()
            loop_threading = threading.Thread(target=loop.run_forever, daemon=True)
            loop_threading.start()
        self.loop = loop
        self.wait_time = wait_time

    def __call__(
        self, environ: Environ, start_response: StartResponse
    ) -> Iterable[bytes]:
        # One ASGIResponder per request keeps per-request state isolated.
        return ASGIResponder(self.loop, self.app, wait_time=self.wait_time)(
            environ, start_response
        )
class ASGIResponder:
    """Drives one ASGI request/response cycle from inside a WSGI call.

    The WSGI thread blocks on sync_event while the ASGI app (running on the
    shared event loop) blocks on async_event; messages ping-pong between the
    two until the HTTP response is complete.
    """

    def __init__(
        self, loop: asyncio.AbstractEventLoop, app: ASGIApp, wait_time: float = None
    ) -> None:
        self.loop = loop
        self.app = app
        self.wait_time = wait_time
        self.sync_event = SyncEvent()
        self.async_event = AsyncEvent(loop)
        # The asyncio.Lock must be created on the loop thread.
        loop.call_soon_threadsafe(self._init_asgi_lock)

    def _init_asgi_lock(self) -> None:
        self.async_lock = asyncio.Lock()

    def __call__(
        self, environ: Environ, start_response: StartResponse
    ) -> Iterable[bytes]:
        asgi_done = threading.Event()
        wsgi_should_stop = False

        def _done_callback(future: asyncio.Future) -> None:
            # Forward an ASGI-side crash to the WSGI thread as an "error"
            # message, then mark the app as finished either way.
            if future.exception() is not None:
                e: BaseException = future.exception()  # type: ignore
                self.sync_event.set(
                    {"type": "error", "exception": (type(e), e, e.__traceback__)}
                )
            asgi_done.set()

        run_asgi: asyncio.Task = self.loop.create_task(
            self.app(build_scope(environ), self.asgi_receive, self.asgi_send)
        )
        run_asgi.add_done_callback(_done_callback)
        read_count, body = 0, environ["wsgi.input"]
        content_length = int(environ.get("CONTENT_LENGTH", None) or 0)
        # No-op callback just wakes the loop so the task starts promptly.
        self.loop.call_soon_threadsafe(lambda: None)
        while not wsgi_should_stop:
            message = self.sync_event.wait()
            message_type = message["type"]
            if message_type == "http.response.start":
                status = message["status"]
                headers = [
                    (
                        name.strip().decode("latin1"),
                        value.strip().decode("latin1"),
                    )
                    for name, value in message["headers"]
                ]
                start_response(f"{status} {HTTPStatus(status).phrase}", headers, None)
            elif message_type == "http.response.body":
                yield message.get("body", b"")
                # The response ends when more_body is absent or falsy.
                wsgi_should_stop = not message.get("more_body", False)
            elif message_type == "http.response.disconnect":
                wsgi_should_stop = True
            elif message_type == "error":
                # Surface the app's exception as a plain-text 500 response.
                start_response(
                    f"{500} {HTTPStatus(500).phrase}",
                    [
                        ("Content-Type", "text/plain; charset=utf-8"),
                        ("Content-Length", str(len(HTTPStatus(500).description))),
                    ],
                    message["exception"],
                )
                yield str(HTTPStatus(500).description).encode("utf-8")
                wsgi_should_stop = True
            if message_type == "receive":
                # Feed up to 4096 bytes of the request body per receive call.
                data = body.read(min(4096, content_length - read_count))
                read_count += len(data)
                self.async_event.set(
                    {
                        "type": "http.request",
                        "body": data,
                        "more_body": read_count < content_length,
                    }
                )
            else:
                # Acknowledge the send so the ASGI side can continue.
                self.async_event.set(None)
            if wsgi_should_stop:
                self.async_event.set_nowait()
            if run_asgi.done():
                break
        # HTTP response ends, wait for run_asgi's background tasks
        asgi_done.wait(self.wait_time)
        run_asgi.cancel()
        yield b""

    async def asgi_receive(self) -> Message:
        async with self.async_lock:
            self.sync_event.set({"type": "receive"})
            return await self.async_event.wait()

    async def asgi_send(self, message: Message) -> None:
        async with self.async_lock:
            self.sync_event.set(message)
            await self.async_event.wait()
| {"/venv/Lib/site-packages/a2wsgi/__init__.py": ["/venv/Lib/site-packages/a2wsgi/asgi.py"]} |
57,249 | RushiChavan-dev/fastapi-aws-lear | refs/heads/main | /venv/Lib/site-packages/a2wsgi/__init__.py | from .asgi import ASGIMiddleware
from .wsgi import WSGIMiddleware
VERSION = (1, 4, 0)
__version__: str = ".".join(map(str, VERSION))
__all__ = ("WSGIMiddleware", "ASGIMiddleware")
| {"/venv/Lib/site-packages/a2wsgi/__init__.py": ["/venv/Lib/site-packages/a2wsgi/asgi.py"]} |
57,250 | RushiChavan-dev/fastapi-aws-lear | refs/heads/main | /api/main.py | from fastapi import FastAPI, status, HTTPException
from fastapi.params import Depends
from fastapi.security import OAuth2PasswordBearer
from sqlalchemy.orm import Session
from a2wsgi import ASGIMiddleware
from api import models
from api import schemas
from api import database
from api.database import SessionLocal, engine
from mangum import Mangum
app = FastAPI()
# Create all tables declared on the shared declarative Base at import time.
schemas.Base.metadata.create_all(engine)
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")


def get_db():
    """FastAPI dependency: yield a DB session and always close it afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
# @app.post('/create_user', status_code=status.HTTP_201_CREATED)
# def create_user(user: models.UserDetails, db: Session = Depends(get_db)):
# # name variable is from schemas file
# # user_name variable is from model file
# # we are assigning name = user_name
# new_user = schemas.User(name=user.user_name, surname=user.user_surname)
# db.add(new_user)
# db.commit()
# db.refresh(new_user)
# return new_user
#
#
# @app.get("/userlist")
# async def getall(token: str = Depends(oauth2_scheme), db: Session = Depends(get_db)):
# userList = db.query(schemas.User).all()
# return userList
#
#
# @app.get('/userlist/{id}')
# def show(id, db: Session = Depends(get_db)):
# userList = db.query(schemas.User).filter(schemas.User.id == id).first()
# return userList
@app.get("/")
async def root():
    """Health-check endpoint."""
    # Fixed the "Worlwd" typo in the greeting payload.
    return {"message": "Hello World"}
@app.post('/createid', status_code=status.HTTP_201_CREATED)
def create_id(user: models.IDGenerate, db: Session = Depends(get_db)):
    """Create a room record; responds 409 when the room_id is already taken.

    Maps the request-model field names onto the ORM column names
    (id_generate -> room_id, person_name -> name).
    """
    new_user = schemas.IdGeneratorSql(room_id=user.id_generate, name=user.person_name, mobile_number=user.mobile_number)
    try:
        db.add(new_user)
        db.commit()
        db.refresh(new_user)
        return new_user
    except Exception as e:
        # Roll back the failed transaction so the session stays usable;
        # the original left the session in a broken state. (Also fixed
        # the "Issur" typo in the log line.)
        db.rollback()
        print("\nError Issue ", e)
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Room ID already exist")
@app.get('/generator_id/{room_id}', status_code=status.HTTP_200_OK)
def generator_id(room_id: str, db: Session = Depends(get_db)):
    """Look up a room by id; returns {"details": "Not found"} when absent.

    NOTE(review): on a database error the exception text is returned with
    a 200 status — consider raising HTTPException instead.
    """
    try:
        check = db.query(schemas.IdGeneratorSql).filter(schemas.IdGeneratorSql.room_id == room_id).first()
        if check is None:
            return {"details": "Not found"}
        return check
    except Exception as e:
        return e.__str__()
@app.post('/login_with_id', status_code=status.HTTP_201_CREATED)
def login_with_id(user: models.PeopleWithID, db: Session = Depends(get_db)):
    """Join a room: create the membership row unless it already exists.

    Returns the existing membership when the same (room, name, number)
    triple was seen before; {"details": "Not found"} when the room id is
    unknown. NOTE(review): if the insert fails, the except branch only
    prints and the endpoint implicitly returns None — confirm intent.
    """
    isExist = db.query(schemas.IdGeneratorSql).filter(schemas.IdGeneratorSql.room_id == user.id_generate).first()
    if isExist is None:
        print("\n Check ===== Data is None")
        return {"details": "Not found"}
    else:
        # Look for an identical membership row before inserting.
        userPresentWithData = db.query(schemas.PeopleWithId).filter(
            schemas.PeopleWithId.room_id_new == user.id_generate,
            schemas.PeopleWithId.mobile_number == user.mobile_number,
            schemas.PeopleWithId.name == user.person_name).first()
        print(f"\n {userPresentWithData}")
        if userPresentWithData is None:
            user_withId = schemas.PeopleWithId(room_id_new=user.id_generate, name=user.person_name,
                                               mobile_number=user.mobile_number)
            try:
                db.add(user_withId)
                db.commit()
                db.refresh(user_withId)
                return user_withId
            except Exception as e:
                print("\n Error = ", e)
        else:
            return userPresentWithData
@app.get('/allmember/{room_id}', status_code=status.HTTP_200_OK)
def member_with_id(room_id: str, db: Session = Depends(get_db)):
    """List everyone who has joined the given room."""
    try:
        members = db.query(schemas.PeopleWithId).filter(schemas.PeopleWithId.room_id_new == room_id).all()
        # .all() returns a list, never None, so the original None check
        # could never fire; test emptiness instead (matches get_post_list).
        if not members:
            return {"details": "Not found"}
        return members
    except Exception as e:
        return e.__str__()
@app.post('/createpost', status_code=status.HTTP_201_CREATED)
def create_post(user: models.CreatePost, db: Session = Depends(get_db)):
    """Create a task post inside a room.

    NOTE(review): any insert failure is reported as 409 "Room ID already
    exist", which does not describe most errors here — confirm intent.
    """
    # Copy every request field onto the ORM row one-to-one.
    new_user = schemas.CreatePostWithId(room_id=user.room_id,
                                        mobile_number=user.mobile_number,
                                        created_by_name=user.created_by_name,
                                        task_assign_to=user.task_assign_to,
                                        time_to_finish=user.time_to_finish,
                                        date_time=user.date_time,
                                        before_after=user.before_after,
                                        possible_if=user.possible_if,
                                        task_des=user.task_des,
                                        is_active=user.is_active,
                                        img_url=user.img_url,
                                        title=user.title,
                                        )
    try:
        db.add(new_user)
        db.commit()
        db.refresh(new_user)
        return new_user
    except Exception as e:
        print("\nError Issur ", e)
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Room ID already exist")
@app.get('/getpost/{room_id}', status_code=status.HTTP_200_OK)
def get_post_list(room_id: str, db: Session = Depends(get_db)):
    """Return every post for the given room, or a not-found marker.

    NOTE(review): on a database error the exception text is returned with
    a 200 status — consider raising HTTPException instead.
    """
    try:
        check = db.query(schemas.CreatePostWithId).filter(schemas.CreatePostWithId.room_id == room_id).all()
        print(f"Error coming {check}")
        if not check:
            return {"details": "Not found"}
        return check
    except Exception as e:
        return e.__str__()
@app.delete('/deleted/{post_id}')
def delete_post(post_id: int, db: Session = Depends(get_db)):
    """Delete a post by its post_id.

    Returns the number of deleted rows (0 maps to the not-found marker).
    """
    try:
        check = db.query(schemas.CreatePostWithId).filter(schemas.CreatePostWithId.post_id == post_id).delete(
            synchronize_session=False)
        db.commit()
        if not check:
            return {"details": "Not found"}
        return check
    except Exception as e:
        return e.__str__()
@app.put('/update/{post_id}')
def update_post(post_id, user: models.CreatePost, db: Session = Depends(get_db)):
    """Overwrite every field of the post identified by post_id; 404 if absent."""
    check = db.query(schemas.CreatePostWithId).filter(schemas.CreatePostWithId.post_id == post_id)
    isExist = check.first()
    print(f"Error check again {isExist}")
    if not isExist:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No data present")
    # Bulk-update straight from the request model's field dict.
    check.update(user.dict())
    db.commit()
    return "update data"
# AWS Lambda entry point: Mangum adapts the ASGI app to Lambda events.
handler = Mangum(app=app)
57,251 | RushiChavan-dev/fastapi-aws-lear | refs/heads/main | /api/schemas.py | from datetime import datetime
from sqlalchemy import Boolean, Column, String, Integer, DateTime, text, TIMESTAMP, func
from api.database import Base
# class User(Base):
# __tablename__ = "userslist"
#
# id = Column(Integer, primary_key=True, index=True)
# name = Column(String, index=True)
# surname = Column(String)
# is_active = Column(Boolean, default=True)
class IdGeneratorSql(Base):
    # ORM model for generated room identifiers.
    __tablename__ = "room_ids"
    # Surrogate primary key.
    sr_no = Column(Integer, primary_key=True, autoincrement=True)
    # Externally visible room identifier; uniqueness enforced at the DB level.
    room_id = Column(String, unique=True)
    name = Column(String, index=True)
    mobile_number = Column(String, index=True)
    # Activation flag; new rows default to active.
    is_active = Column(Boolean, default=True)
class PeopleWithId(Base):
    # ORM model linking people to a room via `room_id_new`.
    # NOTE(review): the table name looks like a typo of "peoplewithid";
    # renaming it would require a DB migration, so it is left as-is.
    __tablename__ = "peoplewitgid"
    # Surrogate primary key.
    sr_no = Column(Integer, primary_key=True, autoincrement=True)
    # Room this person belongs to — presumably matches IdGeneratorSql.room_id;
    # TODO confirm against the writers of this table.
    room_id_new = Column(String, index=True)
    name = Column(String, index=True)
    mobile_number = Column(String, index=True)
class CreatePostWithId(Base):
    """ORM model for user posts ("userpost" table)."""
    __tablename__ = "userpost"
    # Surrogate primary key.
    sr_no = Column(Integer, primary_key=True, autoincrement=True)
    room_id = Column(String, index=True)
    mobile_number = Column(String, index=True)
    created_by_name = Column(String, index=True)
    title = Column(String, index=True)
    task_assign_to = Column(String, index=True)
    time_to_finish = Column(String, index=True)
    date_time = Column(String, index=True)
    before_after = Column(String, index=True)
    possible_if = Column(String, index=True)
    task_des = Column(String, index=True)
    # Application-level post identifier, distinct from the surrogate key.
    post_id = Column(Integer, autoincrement=True, index=True, unique=True)
    is_active = Column(String, index=True)
    img_url = Column(String, index=True)
    # Fix: pass the callable `datetime.now` (not `datetime.now()`), so the
    # timestamp is taken per-row at INSERT time rather than once at import.
    created_at = Column(DateTime, index=True, default=datetime.now)
    # Fix: the original used `onupdate=date_time.onupdate`, which resolved to
    # the unrelated `date_time` Column's onupdate (None) and therefore never
    # fired; `onupdate=datetime.now` refreshes updated_at on every UPDATE.
    updated_at = Column(DateTime, index=True, default=datetime.now, onupdate=datetime.now)
| {"/venv/Lib/site-packages/a2wsgi/__init__.py": ["/venv/Lib/site-packages/a2wsgi/asgi.py"]} |
57,252 | RushiChavan-dev/fastapi-aws-lear | refs/heads/main | /api/models.py | from pydantic import BaseModel
# class UserDetails(BaseModel):
# user_name: str
# user_surname: str
class IDGenerate(BaseModel):
    # Request schema for creating a generated room ID.
    person_name: str
    # The generated identifier itself — presumably stored as room_id
    # on the server side; verify against the route handler.
    id_generate: str
    mobile_number: str
class PeopleWithID(BaseModel):
    # Request schema for attaching a person to an existing room ID.
    # NOTE(review): field-for-field identical to IDGenerate; kept separate,
    # presumably so the two endpoints can diverge independently.
    person_name: str
    id_generate: str
    mobile_number: str
class CreatePost(BaseModel):
    # Request schema for creating/updating a post; all fields are plain
    # strings and map one-to-one onto the "userpost" table columns.
    room_id: str
    mobile_number: str
    created_by_name: str
    task_assign_to: str
    time_to_finish: str
    # Stored as text, not a datetime — format is whatever the client sends;
    # TODO confirm the expected format with the client code.
    date_time: str
    before_after: str
    possible_if: str
    task_des: str
    # String flag rather than bool — mirrors the String column in the DB.
    is_active: str
    img_url: str
    title: str
| {"/venv/Lib/site-packages/a2wsgi/__init__.py": ["/venv/Lib/site-packages/a2wsgi/asgi.py"]} |
57,253 | tatsunoshirou/mysite | refs/heads/master | /polls/migrations/0004_auto_20190110_1501.py | # Generated by Django 2.1.4 on 2019-01-10 06:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters Post.message.
    dependencies = [
        ('polls', '0003_post_title'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='message',
            # NOTE(review): max_length on a TextField is enforced by Django
            # forms/validation, not as a DB constraint — confirm that is the
            # intended behavior here.
            field=models.TextField(max_length=145, verbose_name='投稿内容'),
        ),
    ]
| {"/polls/views.py": ["/polls/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.