# NOTE: dataset-export artifact removed here (markdown table header, not Python).
"""
closed-loop MILP solved to determine optimal ordering defined by ADG
"""
import sys
import yaml
import time
import matplotlib.colors as mcolors
import matplotlib
import matplotlib.pyplot as plt
import random
import logging
import time
import networkx as nx
import csv
import statistics as stat
import os
import sys
from mip import Model, ProgressLog, xsum, maximize, minimize, BINARY, CONTINUOUS, Constr, ConstrList
sys.path.insert(1, "functions/")
from planners import *
from visualizers import *
from milp_formulation import *
from robot import *
from adg import *
from adg_node import *
from process_results import *
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(name)s - %(levelname)s :: %(message)s', level=logging.INFO)
def main():
    """Run a closed-loop multi-robot simulation driven by a MILP over the ADG.

    Plans paths with (E)CBS, builds the Action Dependency Graph (ADG), then at
    every time step re-solves a MILP to re-order inter-robot dependencies,
    randomly delays some robots, advances the rest, and records an MP4 of the
    ADG plus timing statistics (optionally saved to YAML).

    Optional command-line overrides:
        argv[1] = robot count, argv[2] = map seed,
        argv[3] = control horizon, argv[4] = robustness parameter.
    """
    # --------------------------- INPUTS ---------------------------------
    show_visual = False
    show_ADG = True  # not show_visual
    run_MILP = True
    save_file = False
    sim_timeout = 500  # hard cap on simulated time steps
    # define prediction and control horizons: H_prediction >= H_control
    H_prediction = np.NaN  # integer value for forward node lookup
    H_control = 5
    random_seed = 0
    mu = 0.5
    robust_param = 0.0
    delay_amount = 5        # re-sample the delayed robot set every this many steps
    delayed_robot_cnt = 2   # how many robots are held back at a time
    w = 1.4  # sub-optimality bound: w = 1.0 -> CBS, else ECBS!
    fldr = "nuernberg_small"  # auto_gen_01_nuernberg | auto_gen_00_large | auto_gen_02_simple | manual_03_maxplus
    random.seed(random_seed)
    np.random.seed(random_seed)
    # --------------------------------------------------------------------
    pwd = os.path.dirname(os.path.abspath(__file__))
    logger.info(pwd)
    map_file = pwd + "/data/" + fldr + "/csv_map_yaml.yaml"
    robot_file = pwd + "/data/" + fldr + "/csv_robots_yaml.yaml"
    robot_file_tmp = pwd + "/data/tmp/robots.yaml"
    start_time = time.time()
    plans = run_CBS(map_file, robot_file, w=w)  # if w > 1.0, run_CBS uses ECBS!
    logger.info(" with sub-optimality w={}".format(w))
    logger.info(" plan statistics: {} \n".format(plans["statistics"]))
    logger.debug(plans["schedule"])
    # CLI overrides (fall back to defaults when absent or malformed)
    map_gen_robot_count = 10
    map_gen_seedval = "NaN"
    try:
        map_gen_robot_count = int(sys.argv[1])
        map_gen_seedval = int(sys.argv[2])
        H_control = int(sys.argv[3])
        robust_param = int(sys.argv[4])
        random.seed(map_gen_seedval)
        np.random.seed(map_gen_seedval)
    except (IndexError, ValueError):
        # narrowed from a bare `except:` — only missing/non-integer argv is expected
        print(" no valid inputs given, ignoring ...")
    # determine ADG, reverse ADG and dependency groups
    ADG, robot_plan, goal_positions = determine_ADG(plans, show_graph=False)
    nodes_all, edges_type_1, dependency_groups = analyze_ADG(ADG, plans, show_graph=False)
    ADG_reverse = ADG.reverse(copy=False)
    # initialize simulation state (one entry per robot)
    robots = []
    solve_time = []
    robots_done = []
    time_to_goal = {}
    colors = plt.cm.rainbow(np.arange(len(robot_plan)) / len(robot_plan))
    for robot_id in robot_plan:
        plan = robot_plan[robot_id]
        logger.debug("Robot {} - plan: {} \t \t positions: {}".format(robot_id, plan["nodes"], plan["positions"]))
        new_robot = Robot(robot_id, plan, colors[robot_id], goal_positions[robot_id])
        robots.append(new_robot)
        robots_done.append(False)
        time_to_goal[robot_id] = 0
    if show_visual:
        visualizer = Visualizer(map_file, robots)
    # initialize optimization MIP object m_opt
    m_opt = Model('MILP_sequence', solver='CBC')
    pl_opt = ProgressLog()
    ADG_fig = plt.figure(figsize=(12, 8))
    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
    metadata = dict(title='Movie Test', artist='Matplotlib',
                    comment='Movie support!')
    writer = FFMpegWriter(fps=2, metadata=metadata)
    with writer.saving(ADG_fig, "ADG_video.mp4", 500):
        # run a simulation in time
        k = 0
        robot_IDs_to_delay = []
        while (not all(robots_done)) and (k < sim_timeout):
            print("pl_opt.log: {}".format(pl_opt.log))
            m_opt.clear()
            # show current robot status
            logger.info("-------------------- @ time step k = {} --------------------".format(k))
            for robot in robots:
                node_info = ADG.node[robot.current_node]["data"]
                logger.debug(" - Robot {} # {} @ {} => status: {}".format(robot.robot_ID, node_info.ID, node_info.s_loc, robot.status))
            # solve MILP for the advanced ADG to potentially adjust ordering
            res, solve_t = solve_MILP(robots, dependency_groups, ADG, ADG_reverse, H_control, H_prediction, m_opt, pl_opt, run=run_MILP, uncertainty_bound=robust_param)
            solve_time.append(solve_t)
            # BUG FIX: the exception object was previously created but never raised
            if not (res is None or res == "OptimizationStatus.OPTIMAL"):
                raise ValueError("Optimization NOT optimal")
            # ADG after MILP
            if show_ADG:
                draw_ADG(ADG, robots, "ADG after MILP ADG | k = {}".format(k), writer=writer)
            # check for cycles: a cycle in the ADG means deadlock
            try:
                nx.find_cycle(ADG, orientation="original")
                logger.warning("Cycle detected!!")
                raise Exception("ADG has a cycle => deadlock! something is wrong with optimization")
            except nx.NetworkXNoCycle:
                logger.debug("no cycle detected in ADG => no deadlock. good!")
            # periodically re-sample which robots are artificially delayed
            if (k % delay_amount) == 0:
                robot_IDs_to_delay = np.random.choice(map_gen_robot_count, size=delayed_robot_cnt, replace=False)
                logger.info("delaying robots (ID): {}".format(robot_IDs_to_delay))
            # Advance robots if possible (dependencies have been met)
            for robot in robots:
                # check if all dependencies have been met, to advance to next node
                node_dependencies_list = list(ADG_reverse.neighbors(robot.current_node))
                all_dependencies_completed = True
                for dependency in node_dependencies_list:
                    if (ADG.node[dependency]["data"].status != Status.FINISHED):
                        all_dependencies_completed = False
                # if all dependencies are completed, the robot can advance!
                if all_dependencies_completed and k > 0:
                    if (not (robot.robot_ID in robot_IDs_to_delay)):
                        ADG.node[robot.current_node]["data"].status = Status.FINISHED
                        robot.advance()
                # NOTE(review): indentation was reconstructed; per-step mission-time
                # accounting is assumed to run every step regardless of advancing —
                # confirm against the original formatting.
                if not robot.is_done():
                    time_to_goal[robot.robot_ID] += 1
                else:
                    robots_done[robot.robot_ID] = True
            if show_visual:
                visualizer.redraw(robots, pause_length=0.1)
            k += 1
    # end of while loop: aggregate and report results
    total_time = 0
    for idx, t in time_to_goal.items():
        total_time += t
    logger.info("Total time to complete missions: {}".format(total_time))
    logger.info("horizon = {}".format(H_control))
    logger.info("")
    logger.info("Computation time:")
    logger.info(" - max: {}".format(max(solve_time)))
    logger.info(" - avg: {}".format(stat.mean(solve_time)))
    # create data to save to YAML file
    simulation_results = {}
    simulation_results["parameters"] = {}
    simulation_results["parameters"]["H_control"] = H_control
    simulation_results["parameters"]["random seed"] = random_seed
    simulation_results["parameters"]["ECBS w"] = w
    simulation_results["parameters"]["mu"] = mu
    simulation_results["parameters"]["robust param"] = robust_param
    simulation_results["parameters"]["delay amount"] = delay_amount
    simulation_results["map details"] = {}
    simulation_results["map details"]["robot_count"] = map_gen_robot_count
    simulation_results["map details"]["seed val"] = map_gen_seedval
    simulation_results["results"] = {}
    simulation_results["results"]["comp time"] = {}
    simulation_results["results"]["comp time"]["solve_time"] = [solve_time]
    simulation_results["results"]["comp time"]["max"] = max(solve_time)
    simulation_results["results"]["comp time"]["avg"] = stat.mean(solve_time)
    simulation_results["results"]["total time"] = total_time
    logger.info(simulation_results)
    file_name = (pwd + "/results/robust_" + str(delayed_robot_cnt) + "x" + str(delay_amount)
                 + "/res_robots_" + str(map_gen_robot_count) + "_horizon_" + str(H_control)
                 + "_mapseed_" + str(map_gen_seedval) + "_robustparam_" + str(robust_param) + ".yaml")
    if save_file:
        save_to_yaml(simulation_results, file_name)


if __name__ == "__main__":
    main()
# --- second script: IBM Watson ML wind-energy forecasting Flask app ---
from watson_machine_learning_client import WatsonMachineLearningAPIClient
from flask import request
from flask import jsonify
import os
from flask import Flask,render_template,request,jsonify
import io
import xarray as xr
import datetime
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from math import sqrt
from numpy import concatenate
import urllib3, requests, json
#
# 1. Fill in wml_credentials.
#
# NOTE(review): credentials/instance ids are hard-coded placeholders here;
# load them from the environment or a secrets store before deploying.
wrl = {
    "apikey": "<KEY>",  # fixed: the opening quote was missing (syntax error)
    "instance_id": "f1e3dce4-3cb6-430a-bb2a-81969e0c8d48",
    "url": "https://eu-gb.ml.cloud.ibm.com"
}
client = WatsonMachineLearningAPIClient(wrl)
#
# 2. Fill in one or both of these:
#    - model_deployment_endpoint_url
#    - function_deployment_endpoint_url
#
model_deployment_endpoint_url = 'https://eu-gb.ml.cloud.ibm.com/v3/wml_instances/f1e3dce4-3cb6-430a-bb2a-81969e0c8d48/deployments/ea6c1a0c-a30c-433d-b99f-3a136f6bcef8/online'
function_deployment_endpoint_url = ""
STATIC_FOLDER = 'templates/assets'
app = Flask(__name__, static_folder=STATIC_FOLDER)
# On IBM Cloud Cloud Foundry, get the port number from the environment variable PORT.
# When running this app on the local machine, default the port to 8080.
port = int(os.getenv('PORT', 8080))
def data_gathering(latitude, longitude):
    """Extract weather variables for one grid point and derive wind features.

    Opens ``../app/download.nc`` (ERA5-style netCDF, assumed — confirm), finds
    the indices matching *latitude*/*longitude* exactly, and returns a
    time-indexed DataFrame with two derived columns:
    ``Air_Density`` (ideal gas: p / (R_specific * T)) and ``Wind_Speed``
    (magnitude of the 100 m wind components).

    Raises:
        ValueError: if the requested coordinate is not on the file's grid
            (previously this fell through to a NameError).
    """
    nc = xr.open_dataset('../app/download.nc')
    # locate the grid indices; exact equality match, as in the original
    lat_index = None
    lon_index = None
    for i in range(len(nc.latitude)):
        if nc.latitude[i] == latitude:
            lat_index = i
    for j in range(len(nc.longitude)):
        if nc.longitude[j] == longitude:
            lon_index = j
    if lat_index is None or lon_index is None:
        raise ValueError(
            "coordinate ({}, {}) not found in dataset grid".format(latitude, longitude))
    time_axis = nc.time
    # renamed from `list`, which shadowed the builtin
    var_names = ['u100', 'v100', 't2m', 'i10fg', 'sp']
    df = pd.DataFrame()
    for name in var_names:
        samples = []
        for p in range(len(time_axis)):
            # pull one scalar per time step at the selected grid point
            samples.append(np.array(nc[name][p][lat_index][lon_index]).item())
        df[name] = pd.Series(samples)
    df['Time'] = nc.time
    df.set_index('Time', inplace=True)
    # fill gaps with the per-column mean
    df = df.fillna(df.mean())
    df1 = pd.DataFrame()
    df1['Air_Density'] = df.sp / (287.058 * df.t2m)  # rho = p / (R_specific * T)
    df1['Wind_Speed'] = np.sqrt((df.u100 ** 2) + (df.v100 ** 2))
    return df1
def data_preperation(df1):
    """Return only the most recent observation of *df1*.

    Positionally slices off everything before the final row, so the result is
    a one-row DataFrame (or empty, if *df1* is empty).
    """
    last_pos = len(df1) - 1
    return df1.iloc[last_pos:]
def Test_data_preperation(df, df1):
    """Recursively score the deployed Watson ML model 120 steps ahead and
    derive wind-energy production columns from the forecast.

    df  : latest observation(s) of Air_Density / Wind_Speed — seed input for
          the recursive forecast (assumed one row; TODO confirm with caller).
    df1 : full history, used only to fit the MinMax scaler.

    Returns a 120-row DataFrame with forecast 'Air Density' / 'Wind Speed'
    plus Energy_{50,70,80,100}m(MW) and No_of_Turbines{10,15,20,30}T...
    farm-output columns.

    NOTE(review): relies on the module-level `client` and
    `model_deployment_endpoint_url`; every loop iteration makes a network
    call to the scoring endpoint.
    """
    result = []
    actual = []
    values = df1.values
    # ensure all data is float
    values = values.astype('float32')
    # normalize features (scaler is fitted on the full history df1)
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled = scaler.fit_transform(values)
    test_values = df.values
    # ensure all data is float
    test_values = test_values.astype('float32')
    # normalize features with the same scaler
    test_scaled = scaler.transform(test_values)
    test_X = test_scaled
    # reshape to (samples, timesteps=1, features) — assumed to match the
    # deployed model's expected input shape; TODO confirm
    test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
    scoring_endpoint=model_deployment_endpoint_url
    payload={"fields":["Air_Density","Wind_Speed"], "values": [[[str(test_X[0][0][0]),str(test_X[0][0][1])]]]}
    # closed-loop forecast: each prediction is inverse-transformed for output,
    # then re-scaled and fed back in as the next step's input
    for i in range(120):
        y_prediction = client.deployments.score(scoring_endpoint,payload)
        result.append([y_prediction['values'][0][0]])
        result[i] = scaler.inverse_transform(result[i])
        actual.append(result[i])
        scaled = scaler.transform(result[i])
        result_next = scaled.reshape((scaled.shape[0], 1, scaled.shape[1]))
        payload={"fields":["Air_Density","Wind_Speed"], "values": [[[str(result_next[0][0][0]),str(result_next[0][0][1])]]]}
    data = pd.DataFrame(np.concatenate(result), columns=['Air Density', 'Wind Speed'])
    # rotor radii in metres; cp is the assumed turbine power coefficient
    lists = [50, 70, 80, 100]
    cp = 0.59
    for i in range(len(lists)):
        A = 3.14 * (lists[i] ** 2)  # swept area pi*r^2, with pi ~= 3.14
        # P = 0.5 * rho * A * v^3 * cp, converted from W to MW
        data['Energy_' + str(lists[i]) + 'm(MW)'] = (0.5 * (data['Air Density']) * A * (
            data['Wind Speed'] ** 3) * cp) / 1000000
    Turbines=[10,15,20,30]
    Energy=['Energy_50m(MW)','Energy_70m(MW)','Energy_80m(MW)','Energy_100m(MW)']
    # farm-level output for every turbine-count / rotor-radius combination
    for j in range(len(Turbines)):
        for i in range(len(Energy)):
            data['No_of_Turbines'+str(Turbines[j])+'T'+str(Energy[i])]=Turbines[j]*data[Energy[i]]
    return data
@app.route('/Home.html')
def home():
    """Render and return the Home page."""
    rendered = render_template('Home.html')
    return rendered
@app.route('/Predict.html',methods=['POST','GET'])
def Future():
    """Render and return the Predict page (form for forecast requests)."""
    rendered = render_template('Predict.html')
    return rendered
@app.route('/Team.html')
def Team():
    """Render and return the Team page."""
    rendered = render_template('Team.html')
    return rendered
@app.route('/Contact Us.html')
def Contact():
    """Render and return the Contact Us page."""
    rendered = render_template('Contact Us.html')
    return rendered
@app.route('/Land.html')
def Land():
    """Render and return the Land page."""
    rendered = render_template('Land.html')
    return rendered
@app.route('/Login.html')
def Login():
    """Render and return the Login page."""
    rendered = render_template('Login.html')
    return rendered
@app.route('/Registration.html')
def Registration():
    """Render and return the Registration page."""
    rendered = render_template('Registration.html')
    return rendered
@app.route('/predict', methods=['POST', 'GET'])
def predict():
int_features=[float(x) for x in request.form.values()]
final=[np.array(int_features)]
print(final)
latitude,longitude,time,radius,turbines=final[0][0],final[0][1],final[0][2],final[0][3],final[0][4]
print(latitude,longitude)
df1 = data_gathering(round(latitude), round(longitude))
df = data_preperation(df1)
print(df)
data = Test_data_preperation(df, df1)
rf48=pd.DataFrame()
Variables=data.columns
for i in range(2,len(Variables)):
rf48[Variables[i]]=[data[Variables[i]][0]+data[Variables[i]][1]]
rf72=pd.DataFrame()
Variables=data.columns
for i in range(2,len(Variables)):
rf72[Variables[i]]=[rf48[Variables[i]][0]+data[Variables[i]][2]]
rf1m=pd.DataFrame()
Variables=data.columns
for i in range(2,len(Variables)):
a=[]
for j in range(29):
a.append(data[Variables[i]][j]+data[Variables[i]][j+1])
b=np.sum(a)
rf1m[Variables[i]]=[b]
rf4m=pd.DataFrame()
Variables=data.columns
for i in range(2,len(Variables)):
a=[]
for j in range(119):
a.append(data[Variables[i]][j]+data[Variables[i]][j+1])
b=np.sum(a)
rf4m[Variables[i]]=[b]
if (time==24.0 and radius==50.0 and turbines==10.0):
rf24ft=pd.DataFrame()
rf24ft['Air Density']=[data['Air Density'][0]]
rf24ft['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24ft['Energy_50m(MW)']=[data['Energy_50m(MW)'][0]]
rf24ft['10Turbines(MW)']=[data['No_of_Turbines10TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf24ft.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==50.0 and turbines==15.0):
rf24ff=pd.DataFrame()
rf24ff['Air Density']=[data['Air Density'][0]]
rf24ff['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24ff['Energy_50m(MW)']=[data['Energy_50m(MW)'][0]]
rf24ff['15Turbines(MW)']=[data['No_of_Turbines15TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf24ff.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==50.0 and turbines==20.0):
rf24ftw=pd.DataFrame()
rf24ftw['Air Density']=[data['Air Density'][0]]
rf24ftw['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24ftw['Energy_50m(MW)']=[data['Energy_50m(MW)'][0]]
rf24ftw['20Turbines(MW)']=[data['No_of_Turbines20TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf24ftw.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==50.0 and turbines==30.0):
rf24fth=pd.DataFrame()
rf24fth['Air Density']=[data['Air Density'][0]]
rf24fth['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24fth['Energy_50m(MW)']=[data['Energy_50m(MW)'][0]]
rf24fth['30Turbines(MW)']=[data['No_of_Turbines30TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf24fth.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==70.0 and turbines==10.0):
rf24st=pd.DataFrame()
rf24st['Air Density']=[data['Air Density'][0]]
rf24st['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24st['Energy_70m(MW)']=[data['Energy_70m(MW)'][0]]
rf24st['10Turbines(MW)']=[data['No_of_Turbines10TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf24st.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==70.0 and turbines==15.0):
rf24sf=pd.DataFrame()
rf24sf['Air Density']=[data['Air Density'][0]]
rf24sf['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24sf['Energy_70m(MW)']=[data['Energy_70m(MW)'][0]]
rf24sf['15Turbines(MW)']=[data['No_of_Turbines15TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf24sf.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==70.0 and turbines==20.0):
rf24stw=pd.DataFrame()
rf24stw['Air Density']=[data['Air Density'][0]]
rf24stw['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24stw['Energy_70m(MW)']=[data['Energy_70m(MW)'][0]]
rf24stw['20Turbines(MW)']=[data['No_of_Turbines20TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf24stw.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==70.0 and turbines==30.0):
rf24sth=pd.DataFrame()
rf24sth['Air Density']=[data['Air Density'][0]]
rf24sth['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24sth['Energy_70m(MW)']=[data['Energy_70m(MW)'][0]]
rf24sth['30Turbines(MW)']=[data['No_of_Turbines30TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf24sth.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==80.0 and turbines==10.0):
rf24et=pd.DataFrame()
rf24et['Air Density']=[data['Air Density'][0]]
rf24et['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24et['Energy_80m(MW)']=[data['Energy_80m(MW)'][0]]
rf24et['10Turbines(MW)']=[data['No_of_Turbines10TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf24et.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==80.0 and turbines==15.0):
rf24ef=pd.DataFrame()
rf24ef['Air Density']=[data['Air Density'][0]]
rf24ef['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24ef['Energy_80m(MW)']=[data['Energy_80m(MW)'][0]]
rf24ef['15Turbines(MW)']=[data['No_of_Turbines15TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf24ef.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==80.0 and turbines==20.0):
rf24etw=pd.DataFrame()
rf24etw['Air Density']=[data['Air Density'][0]]
rf24etw['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24etw['Energy_80m(MW)']=[data['Energy_80m(MW)'][0]]
rf24etw['20Turbines(MW)']=[data['No_of_Turbines20TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf24etw.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==80.0 and turbines==30.0):
rf24eth=pd.DataFrame()
rf24eth['Air Density']=[data['Air Density'][0]]
rf24eth['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24eth['Energy_80m(MW)']=[data['Energy_80m(MW)'][0]]
rf24eth['30Turbines(MW)']=[data['No_of_Turbines30TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf24eth.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==100.0 and turbines==10.0):
rf24ht=pd.DataFrame()
rf24ht['Air Density']=[data['Air Density'][0]]
rf24ht['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24ht['Energy_100m(MW)']=[data['Energy_100m(MW)'][0]]
rf24ht['10Turbines(MW)']=[data['No_of_Turbines10TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf24ht.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==100.0 and turbines==15.0):
rf24hf=pd.DataFrame()
rf24hf['Air Density']=[data['Air Density'][0]]
rf24hf['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24hf['Energy_100m(MW)']=[data['Energy_100m(MW)'][0]]
rf24hf['15Turbines(MW)']=[data['No_of_Turbines15TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf24hf.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==100.0 and turbines==20.0):
rf24htw=pd.DataFrame()
rf24htw['Air Density']=[data['Air Density'][0]]
rf24htw['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24htw['Energy_100m(MW)']=[data['Energy_100m(MW)'][0]]
rf24htw['20Turbines(MW)']=[data['No_of_Turbines20TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf24htw.to_html(classes='data')], titles=df.columns.values)
elif (time==24.0 and radius==100.0 and turbines==30.0):
rf24hth=pd.DataFrame()
rf24hth['Air Density']=[data['Air Density'][0]]
rf24hth['Wind Speed(24hr)']=[data['Wind Speed'][0]]
rf24hth['Energy_100m(MW)']=[data['Energy_100m(MW)'][0]]
rf24hth['30Turbines(MW)']=[data['No_of_Turbines30TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf24eth.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==50.0 and turbines==10.0):
rf48ft=pd.DataFrame()
rf48ft['Air Density']=[data['Air Density'][1]]
rf48ft['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48ft['Energy_50m(MW)']=[rf48['Energy_50m(MW)'][0]]
rf48ft['10Turbines(MW)']=[rf48['No_of_Turbines10TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf48ft.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==50.0 and turbines==15.0):
rf48ff=pd.DataFrame()
rf48ff['Air Density']=[data['Air Density'][1]]
rf48ff['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48ff['Energy_50m(MW)']=[rf48['Energy_50m(MW)'][0]]
rf48ff['15Turbines(MW)']=[rf48['No_of_Turbines15TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf48ff.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==50.0 and turbines==20.0):
rf48ftw=pd.DataFrame()
rf48ftw['Air Density']=[data['Air Density'][1]]
rf48ftw['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48ftw['Energy_50m(MW)']=[rf48['Energy_50m(MW)'][0]]
rf48ftw['20Turbines(MW)']=[rf48['No_of_Turbines20TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf48ftw.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==50.0 and turbines==30.0):
rf48fth=pd.DataFrame()
rf48fth['Air Density']=[data['Air Density'][1]]
rf48fth['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48fth['Energy_50m(MW)']=[rf48['Energy_50m(MW)'][0]]
rf48fth['30Turbines(MW)']=[rf48['No_of_Turbines30TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf24fth.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==70.0 and turbines==10.0):
rf48st=pd.DataFrame()
rf48st['Air Density']=[data['Air Density'][1]]
rf48st['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48st['Energy_70m(MW)']=[rf48['Energy_70m(MW)'][0]]
rf48st['10Turbines(MW)']=[rf48['No_of_Turbines10TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf48st.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==70.0 and turbines==15.0):
rf48sf=pd.DataFrame()
rf48sf['Air Density']=[data['Air Density'][1]]
rf48sf['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48sf['Energy_70m(MW)']=[rf48['Energy_70m(MW)'][0]]
rf48sf['15Turbines(MW)']=[rf48['No_of_Turbines15TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf48sf.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==70.0 and turbines==20.0):
rf48stw=pd.DataFrame()
rf48stw['Air Density']=[data['Air Density'][1]]
rf48stw['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48stw['Energy_70m(MW)']=[rf48['Energy_70m(MW)'][0]]
rf48stw['20Turbines(MW)']=[rf48['No_of_Turbines20TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf48stw.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==70.0 and turbines==30.0):
rf48sth=pd.DataFrame()
rf48sth['Air Density']=[data['Air Density'][1]]
rf48sth['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48sth['Energy_70m(MW)']=[rf48['Energy_70m(MW)'][0]]
rf48sth['30Turbines(MW)']=[rf48['No_of_Turbines30TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf24sth.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==80.0 and turbines==10.0):
rf48et=pd.DataFrame()
rf48et['Air Density(48hr)']=[data['Air Density'][1]]
rf48et['Wind Speed']=[data['Wind Speed'][1]]
rf48et['Energy_80m(MW)']=[rf48['Energy_80m(MW)'][0]]
rf48et['10Turbines(MW)']=[rf48['No_of_Turbines10TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf48et.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==80.0 and turbines==15.0):
rf48ef=pd.DataFrame()
rf48ef['Air Density']=[data['Air Density'][1]]
rf48ef['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48ef['Energy_80m(MW)']=[rf48['Energy_80m(MW)'][0]]
rf48ef['15Turbines(MW)']=[rf48['No_of_Turbines15TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf48ef.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==80.0 and turbines==20.0):
rf48etw=pd.DataFrame()
rf48etw['Air Density']=[data['Air Density'][1]]
rf48etw['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48etw['Energy_80m(MW)']=[rf48['Energy_80m(MW)'][0]]
rf48etw['20Turbines(MW)']=[rf48['No_of_Turbines20TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf48etw.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==80.0 and turbines==30.0):
rf48eth=pd.DataFrame()
rf48eth['Air Density']=[data['Air Density'][1]]
rf48eth['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48eth['Energy_80m(MW)']=[rf48['Energy_80m(MW)'][0]]
rf48eth['30Turbines(MW)']=[rf48['No_of_Turbines30TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf48eth.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==100.0 and turbines==10.0):
rf48ht=pd.DataFrame()
rf48ht['Air Density']=[data['Air Density'][1]]
rf48ht['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48ht['Energy_100m(MW)']=[rf48['Energy_100m(MW)'][0]]
rf48ht['10Turbines(MW)']=[rf48['No_of_Turbines10TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf48ht.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==100.0 and turbines==15.0):
rf48hf=pd.DataFrame()
rf48hf['Air Density']=[data['Air Density'][1]]
rf48hf['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48hf['Energy_100m(MW)']=[rf48['Energy_100m(MW)'][0]]
rf48hf['15Turbines(MW)']=[rf48['No_of_Turbines15TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf48hf.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==100.0 and turbines==20.0):
rf48htw=pd.DataFrame()
rf48htw['Air Density']=[data['Air Density'][1]]
rf48htw['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48htw['Energy_100m(MW)']=[rf48['Energy_100m(MW)'][0]]
rf48htw['20Turbines(MW)']=[rf48['No_of_Turbines20TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf48htw.to_html(classes='data')], titles=df.columns.values)
elif (time==48.0 and radius==100.0 and turbines==30.0):
rf48hth=pd.DataFrame()
rf48hth['Air Density']=[data['Air Density'][1]]
rf48hth['Wind Speed(48hr)']=[data['Wind Speed'][1]]
rf48hth['Energy_100m(MW)']=[rf48['Energy_100m(MW)'][0]]
rf48hth['30Turbines(MW)']=[rf48['No_of_Turbines30TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf48hth.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==50.0 and turbines==10.0):
rf72ft=pd.DataFrame()
rf72ft['Air Density']=[data['Air Density'][2]]
rf72ft['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72ft['Energy_50m(MW)']=[rf72['Energy_50m(MW)'][0]]
rf72ft['10Turbines(MW)']=[rf72['No_of_Turbines10TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf72ft.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==50.0 and turbines==15.0):
rf72ff=pd.DataFrame()
rf72ff['Air Density']=[data['Air Density'][2]]
rf72ff['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72ff['Energy_50m(MW)']=[rf72['Energy_50m(MW)'][0]]
rf72ff['15Turbines(MW)']=[rf72['No_of_Turbines15TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf72ff.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==50.0 and turbines==20.0):
rf72ftw=pd.DataFrame()
rf72ftw['Air Density']=[data['Air Density'][2]]
rf72ftw['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72ftw['Energy_50m(MW)']=[rf72['Energy_50m(MW)'][0]]
rf72ftw['20Turbines(MW)']=[rf72['No_of_Turbines20TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf72ftw.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==50.0 and turbines==30.0):
rf72fth=pd.DataFrame()
rf72fth['Air Density']=[data['Air Density'][2]]
rf72fth['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72fth['Energy_50m(MW)']=[rf72['Energy_50m(MW)'][0]]
rf72fth['30Turbines(MW)']=[rf72['No_of_Turbines30TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf72fth.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==70.0 and turbines==10.0):
rf72st=pd.DataFrame()
rf72st['Air Density']=[data['Air Density'][2]]
rf72st['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72st['Energy_70m(MW)']=[rf72['Energy_70m(MW)'][0]]
rf72st['10Turbines(MW)']=[rf72['No_of_Turbines10TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf72st.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==70.0 and turbines==15.0):
rf72sf=pd.DataFrame()
rf72sf['Air Density']=[data['Air Density'][2]]
rf72sf['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72sf['Energy_70m(MW)']=[rf72['Energy_70m(MW)'][0]]
rf72sf['15Turbines(MW)']=[rf72['No_of_Turbines15TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf72sf.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==70.0 and turbines==20.0):
rf72stw=pd.DataFrame()
rf72stw['Air Density']=[data['Air Density'][2]]
rf72stw['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72stw['Energy_70m(MW)']=[rf72['Energy_70m(MW)'][0]]
rf72stw['20Turbines(MW)']=[rf72['No_of_Turbines20TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf72stw.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==70.0 and turbines==30.0):
rf72sth=pd.DataFrame()
rf72sth['Air Density']=[data['Air Density'][2]]
rf72sth['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72sth['Energy_70m(MW)']=[rf72['Energy_70m(MW)'][0]]
rf72sth['30Turbines(MW)']=[rf72['No_of_Turbines30TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf72sth.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==80.0 and turbines==10.0):
rf72et=pd.DataFrame()
rf72et['Air Density']=[data['Air Density'][2]]
rf72et['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72et['Energy_80m(MW)']=[rf72['Energy_80m(MW)'][0]]
rf72et['10Turbines(MW)']=[rf72['No_of_Turbines10TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf72et.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==80.0 and turbines==15.0):
rf72ef=pd.DataFrame()
rf72ef['Air Density']=[data['Air Density'][2]]
rf72ef['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72ef['Energy_80m(MW)']=[rf72['Energy_80m(MW)'][0]]
rf72ef['15Turbines(MW)']=[rf72['No_of_Turbines15TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf72ef.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==80.0 and turbines==20.0):
rf72etw=pd.DataFrame()
rf72etw['Air Density']=[data['Air Density'][2]]
rf72etw['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72etw['Energy_80m(MW)']=[rf72['Energy_80m(MW)'][0]]
rf72etw['20Turbines(MW)']=[rf72['No_of_Turbines20TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf72etw.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==80.0 and turbines==30.0):
rf72eth=pd.DataFrame()
rf72eth['Air Density']=[data['Air Density'][2]]
rf72eth['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72eth['Energy_80m(MW)']=[rf72['Energy_80m(MW)'][0]]
rf72eth['30Turbines(MW)']=[rf72['No_of_Turbines30TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf72eth.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==100.0 and turbines==10.0):
rf72ht=pd.DataFrame()
rf72ht['Air Density']=[data['Air Density'][2]]
rf72ht['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72ht['Energy_100m(MW)']=[rf72['Energy_100m(MW)'][0]]
rf72ht['10Turbines(MW)']=[rf72['No_of_Turbines10TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf72ht.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==100.0 and turbines==15.0):
rf72hf=pd.DataFrame()
rf72hf['Air Density']=[data['Air Density'][2]]
rf72hf['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72hf['Energy_100m(MW)']=[rf72['Energy_100m(MW)'][0]]
rf72hf['15Turbines(MW)']=[rf72['No_of_Turbines15TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf72hf.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==100.0 and turbines==20.0):
rf72htw=pd.DataFrame()
rf72htw['Air Density']=[data['Air Density'][2]]
rf72htw['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72htw['Energy_100m(MW)']=[rf72['Energy_100m(MW)'][0]]
rf72htw['20Turbines(MW)']=[rf72['No_of_Turbines20TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf72htw.to_html(classes='data')], titles=df.columns.values)
elif (time==72.0 and radius==100.0 and turbines==30.0):
rf72hth=pd.DataFrame()
rf72hth['Air Density']=[data['Air Density'][2]]
rf72hth['Wind Speed(72hr)']=[data['Wind Speed'][2]]
rf72hth['Energy_100m(MW)']=[rf72['Energy_100m(MW)'][0]]
rf72hth['30Turbines(MW)']=[rf72['No_of_Turbines30TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf72hth.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==50.0 and turbines==10.0):
rf1mft=pd.DataFrame()
rf1mft['Air Density']=[data['Air Density'][29]]
rf1mft['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mft['Energy_50m(MW)']=[rf1m['Energy_50m(MW)'][0]]
rf1mft['10Turbines(MW)']=[rf1m['No_of_Turbines10TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mft.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==50.0 and turbines==15.0):
rf1mff=pd.DataFrame()
rf1mff['Air Density']=[data['Air Density'][29]]
rf1mff['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mff['Energy_50m(MW)']=[rf1m['Energy_50m(MW)'][0]]
rf1mff['15Turbines(MW)']=[rf1m['No_of_Turbines15TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mff.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==50.0 and turbines==20.0):
rf1mftw=pd.DataFrame()
rf1mftw['Air Density']=[data['Air Density'][29]]
rf1mftw['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mftw['Energy_50m(MW)']=[rf1m['Energy_50m(MW)'][0]]
rf1mftw['20Turbines(MW)']=[rf1m['No_of_Turbines20TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mftw.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==50.0 and turbines==30.0):
rf1mfth=pd.DataFrame()
rf1mfth['Air Density']=[data['Air Density'][29]]
rf1mfth['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mfth['Energy_50m(MW)']=[rf1m['Energy_50m(MW)'][0]]
rf1mfth['30Turbines(MW)']=[rf1m['No_of_Turbines30TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mfth.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==70.0 and turbines==10.0):
rf1mst=pd.DataFrame()
rf1mst['Air Density']=[data['Air Density'][29]]
rf1mst['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mst['Energy_70m(MW)']=[rf1m['Energy_70m(MW)'][0]]
rf1mst['10Turbines(MW)']=[rf1m['No_of_Turbines10TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mst.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==70.0 and turbines==15.0):
rf1msf=pd.DataFrame()
rf1msf['Air Density']=[data['Air Density'][29]]
rf1msf['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1msf['Energy_70m(MW)']=[rf1m['Energy_70m(MW)'][0]]
rf1msf['15Turbines(MW)']=[rf1m['No_of_Turbines15TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf1msf.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==70.0 and turbines==20.0):
rf1mstw=pd.DataFrame()
rf1mstw['Air Density']=[data['Air Density'][29]]
rf1mstw['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mstw['Energy_70m(MW)']=[rf1m['Energy_70m(MW)'][0]]
rf1mstw['20Turbines(MW)']=[rf1m['No_of_Turbines20TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mstw.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==70.0 and turbines==30.0):
rf1msth=pd.DataFrame()
rf1msth['Air Density']=[data['Air Density'][29]]
rf1msth['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1msth['Energy_70m(MW)']=[rf1m['Energy_70m(MW)'][0]]
rf1msth['30Turbines(MW)']=[rf1m['No_of_Turbines30TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf1msth.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==80.0 and turbines==10.0):
rf1met=pd.DataFrame()
rf1met['Air Density']=[data['Air Density'][29]]
rf1met['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1met['Energy_80m(MW)']=[rf1m['Energy_80m(MW)'][0]]
rf1met['10Turbines(MW)']=[rf1m['No_of_Turbines10TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf1met.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==80.0 and turbines==15.0):
rf1mef=pd.DataFrame()
rf1mef['Air Density']=[data['Air Density'][29]]
rf1mef['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mef['Energy_80m(MW)']=[rf1m['Energy_80m(MW)'][0]]
rf1mef['15Turbines(MW)']=[rf1m['No_of_Turbines15TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mef.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==80.0 and turbines==20.0):
rf1metw=pd.DataFrame()
rf1metw['Air Density']=[data['Air Density'][29]]
rf1metw['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1metw['Energy_80m(MW)']=[rf1m['Energy_80m(MW)'][0]]
rf1metw['20Turbines(MW)']=[rf1m['No_of_Turbines20TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf1metw.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==80.0 and turbines==30.0):
rf1meth=pd.DataFrame()
rf1meth['Air Density']=[data['Air Density'][29]]
rf1meth['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1meth['Energy_80m(MW)']=[rf1m['Energy_80m(MW)'][0]]
rf1meth['30Turbines(MW)']=[rf1m['No_of_Turbines30TEnergy_80m(MW)'][0]]
return render_template('Predict.html',tables=[rf1meth.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==100.0 and turbines==10.0):
rf1mht=pd.DataFrame()
rf1mht['Air Density']=[data['Air Density'][29]]
rf1mht['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mht['Energy_100m(MW)']=[rf1m['Energy_100m(MW)'][0]]
rf1mht['10Turbines(MW)']=[rf1m['No_of_Turbines10TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mht.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==100.0 and turbines==15.0):
rf1mhf=pd.DataFrame()
rf1mhf['Air Density']=[data['Air Density'][29]]
rf1mhf['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mhf['Energy_100m(MW)']=[rf1m['Energy_100m(MW)'][0]]
rf1mhf['15Turbines(MW)']=[rf1m['No_of_Turbines15TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mhf.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==100.0 and turbines==20.0):
rf1mhtw=pd.DataFrame()
rf1mhtw['Air Density']=[data['Air Density'][29]]
rf1mhtw['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mhtw['Energy_100m(MW)']=[rf1m['Energy_100m(MW)'][0]]
rf1mhtw['20Turbines(MW)']=[rf1m['No_of_Turbines20TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mhtw.to_html(classes='data')], titles=df.columns.values)
elif (time==1.0 and radius==100.0 and turbines==30.0):
rf1mhth=pd.DataFrame()
rf1mhth['Air Density']=[data['Air Density'][29]]
rf1mhth['Wind Speed(1M)']=[data['Wind Speed'][29]]
rf1mhth['Energy_100m(MW)']=[rf1m['Energy_100m(MW)'][0]]
rf1mhth['30Turbines(MW)']=[rf1m['No_of_Turbines30TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf1mhth.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==50.0 and turbines==10.0):
rf4mft=pd.DataFrame()
rf4mft['Air Density']=[data['Air Density'][119]]
rf4mft['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mft['Energy_50m(MW)']=[rf4m['Energy_50m(MW)'][0]]
rf4mft['10Turbines(MW)']=[rf4m['No_of_Turbines10TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mft.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==50.0 and turbines==15.0):
rf4mff=pd.DataFrame()
rf4mff['Air Density']=[data['Air Density'][119]]
rf4mff['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mff['Energy_50m(MW)']=[rf4m['Energy_50m(MW)'][0]]
rf4mff['15Turbines(MW)']=[rf4m['No_of_Turbines15TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mff.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==50.0 and turbines==20.0):
rf4mftw=pd.DataFrame()
rf4mftw['Air Density']=[data['Air Density'][119]]
rf4mftw['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mftw['Energy_50m(MW)']=[rf4m['Energy_50m(MW)'][0]]
rf4mftw['20Turbines(MW)']=[rf4m['No_of_Turbines20TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mftw.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==50.0 and turbines==30.0):
rf4mfth=pd.DataFrame()
rf4mfth['Air Density']=[data['Air Density'][119]]
rf4mfth['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mfth['Energy_50m(MW)']=[rf4m['Energy_50m(MW)'][0]]
rf4mfth['30Turbines(MW)']=[rf4m['No_of_Turbines30TEnergy_50m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mfth.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==70.0 and turbines==10.0):
rf4mst=pd.DataFrame()
rf4mst['Air Density']=[data['Air Density'][119]]
rf4mst['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mst['Energy_70m(MW)']=[rf4m['Energy_70m(MW)'][0]]
rf4mst['10Turbines(MW)']=[rf4m['No_of_Turbines10TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mst.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==70.0 and turbines==15.0):
rf4msf=pd.DataFrame()
rf4msf['Air Density']=[data['Air Density'][119]]
rf4msf['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4msf['Energy_70m(MW)']=[rf4m['Energy_70m(MW)'][0]]
rf4msf['15Turbines(MW)']=[rf4m['No_of_Turbines15TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf4msf.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==70.0 and turbines==20.0):
rf4mstw=pd.DataFrame()
rf4mstw['Air Density']=[data['Air Density'][119]]
rf4mstw['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mstw['Energy_70m(MW)']=[rf4m['Energy_70m(MW)'][0]]
rf4mstw['20Turbines(MW)']=[rf4m['No_of_Turbines20TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mstw.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==70.0 and turbines==30.0):
rf4msth=pd.DataFrame()
rf4msth['Air Density']=[data['Air Density'][119]]
rf4msth['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4msth['Energy_70m(MW)']=[rf4m['Energy_70m(MW)'][0]]
rf4msth['30Turbines(MW)']=[rf4m['No_of_Turbines30TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf4msth.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==80.0 and turbines==10.0):
rf4met=pd.DataFrame()
rf4met['Air Density']=[data['Air Density'][119]]
rf4met['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4met['Energy_70m(MW)']=[rf4m['Energy_70m(MW)'][0]]
rf4met['10Turbines(MW)']=[rf4m['No_of_Turbines10TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf4met.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==80.0 and turbines==15.0):
rf4mef=pd.DataFrame()
rf4mef['Air Density']=[data['Air Density'][119]]
rf4mef['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mef['Energy_70m(MW)']=[rf4m['Energy_70m(MW)'][0]]
rf4mef['15Turbines(MW)']=[rf4m['No_of_Turbines15TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mef.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==80.0 and turbines==20.0):
rf4metw=pd.DataFrame()
rf4metw['Air Density']=[data['Air Density'][119]]
rf4metw['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4metw['Energy_70m(MW)']=[rf4m['Energy_70m(MW)'][0]]
rf4metw['20Turbines(MW)']=[rf4m['No_of_Turbines20TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf4metw.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==80.0 and turbines==30.0):
rf4meth=pd.DataFrame()
rf4meth['Air Density']=[data['Air Density'][119]]
rf4meth['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4meth['Energy_70m(MW)']=[rf4m['Energy_70m(MW)'][0]]
rf4meth['30Turbines(MW)']=[rf4m['No_of_Turbines30TEnergy_70m(MW)'][0]]
return render_template('Predict.html',tables=[rf4meth.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==100.0 and turbines==10.0):
rf4mht=pd.DataFrame()
rf4mht['Air Density']=[data['Air Density'][119]]
rf4mht['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mht['Energy_100m(MW)']=[rf4m['Energy_100m(MW)'][0]]
rf4mht['10Turbines(MW)']=[rf4m['No_of_Turbines10TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mht.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==100.0 and turbines==15.0):
rf4mhf=pd.DataFrame()
rf4mhf['Air Density']=[data['Air Density'][119]]
rf4mhf['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mhf['Energy_100m(MW)']=[rf4m['Energy_100m(MW)'][0]]
rf4mhf['15Turbines(MW)']=[rf4m['No_of_Turbines15TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mhf.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==100.0 and turbines==20.0):
rf4mhtw=pd.DataFrame()
rf4mhtw['Air Density']=[data['Air Density'][119]]
rf4mhtw['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mhtw['Energy_100m(MW)']=[rf4m['Energy_100m(MW)'][0]]
rf4mhtw['20Turbines(MW)']=[rf4m['No_of_Turbines20TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mhtw.to_html(classes='data')], titles=df.columns.values)
elif (time==4.0 and radius==100.0 and turbines==30.0):
rf4mhth=pd.DataFrame()
rf4mhth['Air Density']=[data['Air Density'][119]]
rf4mhth['Wind Speed(4M)']=[data['Wind Speed'][119]]
rf4mhth['Energy_100m(MW)']=[rf4m['Energy_100m(MW)'][0]]
rf4mhth['30Turbines(MW)']=[rf4m['No_of_Turbines30TEnergy_100m(MW)'][0]]
return render_template('Predict.html',tables=[rf4mhth.to_html(classes='data')], titles=df.columns.values)
# Script entry point: serve on all interfaces. ``port`` and ``app`` are defined
# earlier in the file (not visible here); debug is disabled for deployment.
if __name__ == '__main__':
    app.run(host='0.0.0.0',port=port,debug=False)
|
<filename>awacs/mobiletargeting.py
# Copyright (c) 2012-2013, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
# Human-readable AWS service name and the IAM action/ARN prefix for Pinpoint.
service_name = 'Amazon Pinpoint'
prefix = 'mobiletargeting'
class Action(BaseAction):
    """IAM action in the ``mobiletargeting`` (Amazon Pinpoint) namespace."""

    def __init__(self, action=None):
        # Prepend the service prefix to the action name.
        super(Action, self).__init__(prefix, action)
class ARN(BaseARN):
    """ARN builder for ``mobiletargeting`` (Amazon Pinpoint) resources."""

    def __init__(self, resource='', region='', account=''):
        # The service segment is fixed to the module-level prefix.
        super(ARN, self).__init__(service=prefix, resource=resource,
                                  region=region, account=account)
# One module-level ``Action`` constant per Pinpoint IAM action, grouped by verb.

# --- Create ---
CreateApp = Action('CreateApp')
CreateCampaign = Action('CreateCampaign')
CreateEmailTemplate = Action('CreateEmailTemplate')
CreateExportJob = Action('CreateExportJob')
CreateImportJob = Action('CreateImportJob')
CreateJourney = Action('CreateJourney')
CreatePushTemplate = Action('CreatePushTemplate')
CreateSegment = Action('CreateSegment')
CreateSmsTemplate = Action('CreateSmsTemplate')
CreateVoiceTemplate = Action('CreateVoiceTemplate')
# --- Delete ---
DeleteAdmChannel = Action('DeleteAdmChannel')
DeleteApnsChannel = Action('DeleteApnsChannel')
DeleteApnsSandboxChannel = Action('DeleteApnsSandboxChannel')
DeleteApnsVoipChannel = Action('DeleteApnsVoipChannel')
DeleteApnsVoipSandboxChannel = Action('DeleteApnsVoipSandboxChannel')
DeleteApp = Action('DeleteApp')
DeleteBaiduChannel = Action('DeleteBaiduChannel')
DeleteCampaign = Action('DeleteCampaign')
DeleteEmailChannel = Action('DeleteEmailChannel')
DeleteEmailTemplate = Action('DeleteEmailTemplate')
DeleteEndpoint = Action('DeleteEndpoint')
DeleteEventStream = Action('DeleteEventStream')
DeleteGcmChannel = Action('DeleteGcmChannel')
DeleteJourney = Action('DeleteJourney')
DeletePushTemplate = Action('DeletePushTemplate')
DeleteSegment = Action('DeleteSegment')
DeleteSmsChannel = Action('DeleteSmsChannel')
DeleteSmsTemplate = Action('DeleteSmsTemplate')
DeleteUserEndpoints = Action('DeleteUserEndpoints')
DeleteVoiceChannel = Action('DeleteVoiceChannel')
DeleteVoiceTemplate = Action('DeleteVoiceTemplate')
# --- Get ---
GetAdmChannel = Action('GetAdmChannel')
GetApnsChannel = Action('GetApnsChannel')
GetApnsSandboxChannel = Action('GetApnsSandboxChannel')
GetApnsVoipChannel = Action('GetApnsVoipChannel')
GetApnsVoipSandboxChannel = Action('GetApnsVoipSandboxChannel')
GetApp = Action('GetApp')
GetApplicationSettings = Action('GetApplicationSettings')
GetApps = Action('GetApps')
GetBaiduChannel = Action('GetBaiduChannel')
GetCampaign = Action('GetCampaign')
GetCampaignActivities = Action('GetCampaignActivities')
GetCampaignVersion = Action('GetCampaignVersion')
GetCampaignVersions = Action('GetCampaignVersions')
GetCampaigns = Action('GetCampaigns')
GetChannels = Action('GetChannels')
GetEmailChannel = Action('GetEmailChannel')
GetEmailTemplate = Action('GetEmailTemplate')
GetEndpoint = Action('GetEndpoint')
GetEventStream = Action('GetEventStream')
GetExportJob = Action('GetExportJob')
GetExportJobs = Action('GetExportJobs')
GetGcmChannel = Action('GetGcmChannel')
GetImportJob = Action('GetImportJob')
GetImportJobs = Action('GetImportJobs')
GetJourney = Action('GetJourney')
GetPushTemplate = Action('GetPushTemplate')
GetReports = Action('GetReports')
GetSegment = Action('GetSegment')
GetSegmentExportJobs = Action('GetSegmentExportJobs')
GetSegmentImportJobs = Action('GetSegmentImportJobs')
GetSegmentVersion = Action('GetSegmentVersion')
GetSegmentVersions = Action('GetSegmentVersions')
GetSegments = Action('GetSegments')
GetSmsChannel = Action('GetSmsChannel')
GetSmsTemplate = Action('GetSmsTemplate')
GetUserEndpoints = Action('GetUserEndpoints')
GetVoiceChannel = Action('GetVoiceChannel')
GetVoiceTemplate = Action('GetVoiceTemplate')
# --- List ---
ListJourneys = Action('ListJourneys')
ListTagsForResource = Action('ListTagsForResource')
ListTemplateVersions = Action('ListTemplateVersions')
ListTemplates = Action('ListTemplates')
# --- Misc ---
PhoneNumberValidate = Action('PhoneNumberValidate')
PutEventStream = Action('PutEventStream')
PutEvents = Action('PutEvents')
RemoveAttributes = Action('RemoveAttributes')
SendMessages = Action('SendMessages')
SendUsersMessages = Action('SendUsersMessages')
TagResource = Action('TagResource')
UntagResource = Action('UntagResource')
# --- Update ---
UpdateAdmChannel = Action('UpdateAdmChannel')
UpdateApnsChannel = Action('UpdateApnsChannel')
UpdateApnsSandboxChannel = Action('UpdateApnsSandboxChannel')
UpdateApnsVoipChannel = Action('UpdateApnsVoipChannel')
UpdateApnsVoipSandboxChannel = Action('UpdateApnsVoipSandboxChannel')
UpdateApplicationSettings = Action('UpdateApplicationSettings')
UpdateBaiduChannel = Action('UpdateBaiduChannel')
UpdateCampaign = Action('UpdateCampaign')
UpdateEmailChannel = Action('UpdateEmailChannel')
UpdateEmailTemplate = Action('UpdateEmailTemplate')
UpdateEndpoint = Action('UpdateEndpoint')
UpdateEndpointsBatch = Action('UpdateEndpointsBatch')
UpdateGcmChannel = Action('UpdateGcmChannel')
UpdateJourney = Action('UpdateJourney')
UpdateJourneyState = Action('UpdateJourneyState')
UpdatePushTemplate = Action('UpdatePushTemplate')
UpdateSegment = Action('UpdateSegment')
UpdateSmsChannel = Action('UpdateSmsChannel')
UpdateSmsTemplate = Action('UpdateSmsTemplate')
UpdateTemplateActiveVersion = Action('UpdateTemplateActiveVersion')
UpdateVoiceChannel = Action('UpdateVoiceChannel')
UpdateVoiceTemplate = Action('UpdateVoiceTemplate')
|
from __future__ import annotations
import logging
import random
from typing import TYPE_CHECKING
from scripts.scenes.combat.elements.commander import Commander
from scripts.scenes.combat.elements.unit import Unit
if TYPE_CHECKING:
from typing import Dict, List, Optional, Tuple
from scripts.core.game import Game
__all__ = ["Troupe"]
class Troupe:
    """
    Management of a group of units belonging to one team.

    Units are kept in ``_units`` (keyed by unit id) with ``_unit_ids``
    recording insertion order so that ``units`` can be rebuilt in order.
    """

    def __init__(self, game: Game, team: str, allies: List[str]):
        self.game: Game = game
        self._unit_ids: List[int] = []  # preserves insertion order of units
        self._units: Dict[int, Unit] = {}
        self.team: str = team
        self.allies: List[str] = allies

    @property
    def units(self) -> Dict[int, Unit]:
        """Return the troupe's units as a dict that respects insertion order."""
        return {id_: self._units[id_] for id_ in self._unit_ids}

    def debug_init_units(self) -> List[int]:
        """
        Initialise all units for Troupe faction. Returns list of created ids.
        """
        return [
            self._add_unit_from_type(unit_type)
            for unit_type in self.game.data.get_units_by_category(self.allies)
        ]

    def add_unit(self, unit: Unit):
        """
        Add a unit instance to the troupe. Used when buying an existing unit, e.g. from Inn.
        """
        self._units[unit.id] = unit
        self._unit_ids.append(unit.id)
        logging.debug(f"Unit {unit.type}({unit.id}) added to {self.team}'s troupe.")

    def _add_unit_from_type(self, unit_type: str) -> int:
        """
        Create a unit based on the unit type and add the unit to the troupe. Return id.
        """
        id_ = self.game.memory.generate_id()
        unit = Unit(self.game, id_, unit_type, self.team)
        self._units[id_] = unit
        self._unit_ids.append(id_)
        logging.debug(f"Unit {unit.type}({unit.id}) created and added to {self.team}'s troupe.")
        return id_

    def remove_unit(self, id_: int):
        """Remove the unit with the given id; log a warning if it is unknown."""
        try:
            unit = self._units.pop(id_)
            self._unit_ids.remove(id_)
            logging.debug(f"Unit {unit.type}({unit.id}) removed from {unit.team}'s troupe.")
        except KeyError:
            logging.warning(f"remove_unit: {id_} not found in {self.units}. No unit removed.")

    def remove_all_units(self):
        """
        Remove all units from the Troupe
        """
        self._units = {}
        self._unit_ids = []
        logging.debug(f"All units removed from {self.team}'s troupe.")

    def generate_units(
        self,
        number_of_units: int,
        tiers_allowed: Optional[List[int]] = None,  # FIX: was List[int] = None
        duplicates: bool = False,
    ) -> List[int]:
        """
        Generate units for the Troupe, based on parameters given. If no unit types are given then any unit type can
        be chosen from any ally. Returns list of created ids.

        unit_types is expressed as [unit.type, ...]
        """
        # gather candidate unit types and their occurrence-rate weights
        candidate_types: List[str] = []
        occur_rates: List[float] = []
        for unit_type in self.game.data.get_units_by_category(self.allies, tiers_allowed):
            candidate_types.append(unit_type)
            occur_rates.append(self.game.data.get_unit_occur_rate(unit_type))

        if duplicates:
            # weighted sampling with replacement
            chosen_types = self.game.rng.choices(candidate_types, occur_rates, k=number_of_units)
        else:
            # weighted sampling without replacement: remove each pick (and its
            # weight) from the option pool before the next draw
            chosen_types = []
            for _ in range(number_of_units):
                unit_type = self.game.rng.choices(candidate_types, occur_rates)[0]
                chosen_types.append(unit_type)
                idx = candidate_types.index(unit_type)
                candidate_types.pop(idx)
                occur_rates.pop(idx)

        return [self._add_unit_from_type(unit_type) for unit_type in chosen_types]

    def generate_specific_units(self, unit_types: List[str]) -> List[int]:
        """
        Generate units for the Troupe, based on parameters given. Returns list of created ids.

        unit_types is expressed as [unit.type, ...]
        """
        return [self._add_unit_from_type(unit_type) for unit_type in unit_types]

    def upgrade_unit(self, id_: int, upgrade_type: str):
        """
        Upgrade a unit with a given upgrade.
        """
        unit = self.units[id_]
        try:
            data = self.game.data.upgrades[upgrade_type]
            unit.add_modifier(data["stat"], data["mod_amount"])
        except KeyError:
            logging.warning(f"Tried to upgrade {unit.id} with {upgrade_type} but upgrade not found. No action taken.")

    def get_random_unit(self) -> Unit:
        """
        Return a random unit from the Troupe.
        """
        # FIX: choose from the id list. The previous code passed the ``units``
        # dict to rng.choice, which indexes the "sequence" by position and
        # raises KeyError whenever the unit ids are not 0..len-1.
        id_ = self.game.rng.choice(self._unit_ids)
        return self.units[id_]
|
<reponame>joakimzhang/qtest
from django.contrib import admin
# Register your models here.
from .models import TestlinkCase, TestlinkDB, TestlinkReport, TestlinkBuild,BlogComment,ReportPicture
#admin.site.register(TestlinkCase)
from django.contrib.admin.options import *
from django.utils.translation import (
override as translation_override, string_concat, ugettext as _, ungettext,
)
class TestlinkAdmin(admin.ModelAdmin):
exclude = ('case_sum','internalid','suite_id')
"""aaa"""
def get_id(self, request):
return request.GET['id']
def formfield_for_dbfield(self, db_field,**kwargs):
field = super(TestlinkAdmin, self).formfield_for_dbfield(db_field, **kwargs)
if db_field.name == 'test_case':
# if kwargs:
try:
field.initial = kwargs['request'].GET['id']
except:
pass
if db_field.name == 'case_suite':
# if kwargs:
try:
field.initial = kwargs['request'].GET['id']
except:
pass
if db_field.name == 'parent_suite_name':
# if kwargs:
try:
field.initial = kwargs['request'].GET['id']
except:
pass
if db_field.name == 'test_report':
# if kwargs:
try:
field.initial = kwargs['request'].GET['id']
except:
pass
#print "aaaaaaaaaaaaaaaaaaaaaa",field,"aaaaaaaaaaaaa",db_field,"aaaaaaaaaaaa",kwargs
return field
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
obj_url = reverse(
'admin:%s_%s_change' % (opts.app_label, opts.model_name),
args=(quote(pk_value),),
current_app=self.admin_site.name,
)
# Add a link to the object's change form if the user can edit the obj.
if self.has_change_permission(request, obj):
obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
else:
obj_repr = force_text(obj)
msg_dict = {
'name': force_text(opts.verbose_name),
'obj': obj_repr,
}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps({
'value': six.text_type(value),
'obj': six.text_type(obj),
})
return SimpleTemplateResponse('admin/popup_response.html', {
'popup_response_data': popup_response_data,
})
elif "_continue" in request.POST or (
# Redirecting after "Save as new".
"_saveasnew" in request.POST and self.save_as_continue and
self.has_change_permission(request, obj)
):
msg = format_html(
_('The {name} "{obj}" was added successfully. You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
if post_url_continue is None:
post_url_continue = obj_url
post_url_continue = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts},
post_url_continue
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = format_html(
_('The {name} "{obj}" was added successfully. You may add another {name} below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_('The {name} "{obj}" was added successfully.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
#return self.response_post_save_add(request, obj)
#return HttpResponseRedirect("/testlink")
if opts.model_name == "testlinkdb":
#return HttpResponseRedirect("/testcase/%s"%(pk_value))
return HttpResponseRedirect("/testsuite/%s"%(request.GET['id']))
elif opts.model_name == "testlinkreport":
return HttpResponseRedirect("/testreport/%s"%(request.GET['id']))
else:
return HttpResponseRedirect("/testcase/%s"%(pk_value))
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
print "response change"
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else obj._meta.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = request.resolver_match.args[0]
new_value = obj.serializable_value(attr)
popup_response_data = json.dumps({
'action': 'change',
'value': six.text_type(value),
'obj': six.text_type(obj),
'new_value': six.text_type(new_value),
})
return SimpleTemplateResponse('admin/popup_response.html', {
'popup_response_data': popup_response_data,
})
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {
'name': force_text(opts.verbose_name),
'obj': format_html('<a href="{}">{}</a>', urlquote(request.path), obj),
}
if "_continue" in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully. You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = format_html(
_('The {name} "{obj}" was added successfully. You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully. You may add another {name} below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_('The {name} "{obj}" was changed successfully.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
#return self.response_post_save_change(request, obj)
print "up!!!!!",self.admin_site.name,opts.model_name,opts.app_label,type(opts)
if opts.model_name == "testlinkdb":
#return HttpResponseRedirect("/testcase/%s"%(pk_value))
return HttpResponseRedirect("/testlink")
elif opts.model_name == "testlinkreport":
return HttpResponseRedirect("/testreport/%s"%(request.GET['id']))
else:
return HttpResponseRedirect("/testcase/%s"%(pk_value))
def response_delete(self, request, obj_display, obj_id):
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
popup_response_data = json.dumps({
'action': 'delete',
'value': str(obj_id),
})
return SimpleTemplateResponse('admin/popup_response.html', {
'popup_response_data': popup_response_data,
})
self.message_user(
request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': force_text(opts.verbose_name),
'obj': force_text(obj_display),
},
messages.SUCCESS,
)
if self.has_change_permission(request, None):
post_url = reverse(
'admin:%s_%s_changelist' % (opts.app_label, opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
post_url = "/testlink"
#if opts.model_name == "testlinkdb":
#return HttpResponseRedirect("/testcase/%s"%(pk_value))
# post_url = "/testlink"
#elif opts.model_name == "testlinkreport":
# post_url = ("/testreport/%s"%(request.GET['id']))
#else:
# post_url = ("/testcase/%s"%(pk_value))
else:
post_url = reverse('admin:index', current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
# Register every Testlink model with the shared TestlinkAdmin options class.
admin.site.register(TestlinkDB, TestlinkAdmin)
admin.site.register(TestlinkCase, TestlinkAdmin)
admin.site.register(TestlinkReport, TestlinkAdmin)
admin.site.register(ReportPicture, TestlinkAdmin)
admin.site.register(TestlinkBuild, TestlinkAdmin)
admin.site.register(BlogComment, TestlinkAdmin)
|
import os
import sys
import time
import argparse
import unicodedata
import librosa
import numpy as np
import pandas as pd
from tqdm import tqdm
from hparams import hparams
def run_prepare(args, hparams):
    """Prepare a Mandarin TTS dataset for training.

    Depending on ``args.dataset`` ('BIAOBEI', 'THCHS-30' or 'AISHELL-2'),
    reads the dataset's transcript file(s), normalizes the pinyin text,
    resamples every wave file to ``hparams.sample_rate`` in place, and
    writes a pipe-separated ``metadata.csv`` (basename|raw_text|pinyin)
    into the dataset folder.

    Args:
        args: parsed CLI namespace; only ``args.dataset`` is read.
        hparams: hyper-parameter object; only ``hparams.sample_rate`` is read.

    Side effects: rewrites wave files on disk, creates ``metadata.csv``,
    and terminates the process (non-zero) when inputs are missing.
    """
    def normalize_wave(wave, sample_rate):
        """Resample *wave* from *sample_rate* to hparams.sample_rate."""
        wave = librosa.resample(wave, sample_rate, hparams.sample_rate)
        return wave

    def normalize_text(text):
        """Strip combining marks (via NFD decomposition) and whitespace."""
        text = ''.join(char for char in unicodedata.normalize('NFD', text)
                       if unicodedata.category(char) != 'Mn')
        return text.strip()

    if args.dataset == 'BIAOBEI':
        dataset_name = 'BZNSYP'
        dataset_path = os.path.join('./', dataset_name)
        if not os.path.isdir(dataset_path):
            print("BIAOBEI dataset folder doesn't exist")
            sys.exit(1)  # error exit: previously exit(0) masked the failure
        total_duration = 0
        text_file_path = os.path.join(dataset_path, 'ProsodyLabeling', '000001-010000.txt')
        try:
            text_file = open(text_file_path, 'r', encoding='utf8')
        except FileNotFoundError:
            print('text file no exist')
            sys.exit(1)  # error exit: previously exit(0) masked the failure
        # Row 0 is a dummy placeholder; stripped before writing metadata.
        data_array = np.zeros(shape=(1, 3), dtype=str)
        # Transcript alternates: even lines carry "basename raw_text",
        # odd lines carry the pinyin transcription.
        for index, each in tqdm(enumerate(text_file.readlines())):
            if index % 2 == 0:
                entry = []  # renamed from `list` (shadowed the builtin)
                basename = each.strip().split()[0]
                raw_text = each.strip().split()[1]
                entry.append(basename)
                entry.append(raw_text)
            else:
                pinyin_text = normalize_text(each)
                entry.append(pinyin_text)
                # NOTE: the metadata row is appended even when the wave file
                # below is missing/corrupt — preserved from the original.
                data_array = np.append(data_array, np.array(entry).reshape(1, 3), axis=0)
                wave_file_path = os.path.join(dataset_path, 'Wave', '{}.wav'.format(basename))
                if not os.path.exists(wave_file_path):
                    # print('wave file no exist')
                    continue
                try:
                    wave, sr = librosa.load(wave_file_path, sr=None)
                except EOFError:
                    print('wave format error at {}'.format(basename + '.wav'))
                    continue
                if sr != hparams.sample_rate:
                    wave = normalize_wave(wave, sr)
                duration = librosa.get_duration(wave)
                total_duration += duration
                # Rewrite the wave in place at the target sample rate.
                librosa.output.write_wav(wave_file_path, wave, hparams.sample_rate)
        data_frame = pd.DataFrame(data_array[1:])
        data_frame.to_csv(os.path.join(dataset_path, 'metadata.csv'), sep='|', header=False, index=False, encoding='utf8')
        text_file.close()
        print("total audio duration: %ss" % (time.strftime('%H:%M:%S', time.gmtime(total_duration))))
    elif args.dataset == 'THCHS-30':
        dataset_name = 'data_thchs30'
        dataset_path = os.path.join('./', dataset_name)
        if not os.path.isdir(dataset_path):
            print("{} dataset folder doesn't exist".format(args.dataset))
            sys.exit(1)  # error exit: previously exit(0) masked the failure
        total_duration = 0
        raw_dataset_path = os.path.join(dataset_path, 'wavs')
        # Row 0 is a dummy placeholder; stripped before writing metadata.
        data_array = np.zeros(shape=(1, 3), dtype=str)
        for root, dirs, files in os.walk(raw_dataset_path):
            for file in tqdm(files):
                # Each utterance has a "<basename>.wav.trn" transcript file.
                if not file.endswith('.wav.trn'):
                    continue
                entry = []  # renamed from `list` (shadowed the builtin)
                basename = file[:-8]
                entry.append(basename)
                text_file = os.path.join(raw_dataset_path, file)
                if not os.path.exists(text_file):
                    print('text file {} no exist'.format(file))
                    continue
                with open(text_file, 'r', encoding='utf8') as f:
                    lines = f.readlines()
                    # Line 0: raw hanzi text; line 1: pinyin transcription.
                    raw_text = lines[0].rstrip('\n')
                    pinyin_text = lines[1].rstrip('\n')
                    pinyin_text = normalize_text(pinyin_text)
                    entry.append(raw_text)
                    entry.append(pinyin_text)
                wave_file = os.path.join(raw_dataset_path, '{}.wav'.format(basename))
                if not os.path.exists(wave_file):
                    print('wave file {}.wav no exist'.format(basename))
                    continue
                try:
                    wave, sr = librosa.load(wave_file, sr=None)
                except EOFError:
                    print('wave file {}.wav format error'.format(basename))
                    continue
                if sr != hparams.sample_rate:
                    print('sample rate of wave file {}.wav no match'.format(basename))
                    wave = librosa.resample(wave, sr, hparams.sample_rate)
                duration = librosa.get_duration(wave)
                # Deliberate filter: only clips shorter than 10 s are kept.
                if duration < 10:
                    total_duration += duration
                    librosa.output.write_wav(wave_file, wave, hparams.sample_rate)
                    data_array = np.append(data_array, np.array(entry).reshape(1, 3), axis=0)
        data_frame = pd.DataFrame(data_array[1:])
        data_frame.to_csv(os.path.join(dataset_path, 'metadata.csv'), sep='|', header=False, index=False, encoding='utf8')
        print("total audio duration: %ss" % (time.strftime('%H:%M:%S', time.gmtime(total_duration))))
    elif args.dataset == 'AISHELL-2':
        # NOTE(review): this branch is incomplete — it only creates the
        # output folders and opens the transcript; the processing loop was
        # never written (see the commented-out stub at the bottom).
        dataset_name = 'aishell2_16000'
        dataset_path = os.path.join(os.getcwd(), args.dataset)
        if os.path.isdir(dataset_path):
            print("{} dataset folder already exists".format(args.dataset))
            sys.exit(1)  # error exit: previously exit(0) masked the failure
        os.mkdir(dataset_path)
        dataset_path = os.path.join(dataset_path, dataset_name)
        os.mkdir(dataset_path)
        sample_rate = 16000  # original sample rate
        total_duration = 0
        raw_dataset_path = os.path.join(os.getcwd(), 'aishell2', 'dataAishell2')
        wave_dir_path = os.path.join(raw_dataset_path, 'wav')
        text_file_path = os.path.join(raw_dataset_path, 'transcript', 'aishell2_transcript.txt')
        try:
            text_file = open(text_file_path, 'r', encoding='utf8')
        except FileNotFoundError:
            print('text file no exist')
            sys.exit(1)  # error exit: previously exit(0) masked the failure

        def normalize_text(text):
            """Strip combining marks (via NFD decomposition) and whitespace."""
            text = ''.join(char for char in unicodedata.normalize('NFD', text)
                           if unicodedata.category(char) != 'Mn')
            return text.strip()

        def normalize_wave(wave):
            """Resample *wave* from the fixed 16 kHz rate to hparams.sample_rate."""
            wave = librosa.resample(wave, sample_rate, hparams.sample_rate)
            return wave
        # for index, each in tqdm(enumerate(text_file.readlines())):
        #
if __name__ == '__main__':
    # Entry point: parse the dataset choice and hand off to run_prepare.
    print('preparing dataset..')
    cli = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    cli.add_argument(
        "--dataset",
        choices=['AISHELL-2', 'THCHS-30', 'BIAOBEI'],
        default='THCHS-30',
        help='dataset name',
    )
    run_prepare(cli.parse_args(), hparams)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import pygame
import Axon
from OpenGL.GL import *
from OpenGL.GLU import *
from math import *
from Util3D import *
from Axon.ThreadedComponent import threadedcomponent
import time
from Kamaelia.UI.PygameDisplay import PygameDisplay
_cat = Axon.CoordinatingAssistantTracker  # module-local alias for the service-tracker module
class Bunch: pass  # minimal attribute bag used for ad-hoc event/callback payloads
class Display3D(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
    """OpenGL-backed display service for Kamaelia (Python 2 code).

    Acts as a singleton "3ddisplay" service: other components request
    pygame surfaces, 3D objects, overlays and event feeds via the
    "notify" inbox. Pygame surfaces are rendered as textured quads on
    the far clipping plane; pygame events are remapped and fanned out
    to interested listeners.
    """
    Inboxes = { "inbox" : "Default inbox, not currently used",
                "control" : "NOT USED",
                "notify" : "Receive requests for surfaces, overlays and events",
                "events" : "Receive pygame events",
              }
    Outboxes = { "outbox" : "NOT USED",
                 "signal" : "NOT USED",
               }
    def setDisplayService(pygamedisplay, tracker = None):
        """\
        Sets the given pygamedisplay as the service for the selected tracker or
        the default one.

        (static method)
        """
        if not tracker:
            tracker = _cat.coordinatingassistanttracker.getcat()
        tracker.registerService("3ddisplay", pygamedisplay, "notify")
    setDisplayService = staticmethod(setDisplayService)
    def getDisplayService(tracker=None): # STATIC METHOD
        """\
        Returns any live pygamedisplay registered with the specified (or default)
        tracker, or creates one for the system to use.

        (static method)
        """
        if tracker is None:
            tracker = _cat.coordinatingassistanttracker.getcat()
        try:
            # Reuse the already-registered display if one exists.
            service = tracker.retrieveService("3ddisplay")
            return service
        except KeyError:
            # No display yet: create, activate and register one lazily.
            display = Display3D()
            display.activate()
            Display3D.setDisplayService(display, tracker)
            service=(display,"notify")
            return service
    getDisplayService = staticmethod(getDisplayService)
    def overridePygameDisplay():
        # Make the 2D PygameDisplay service route through this 3D display.
        PygameDisplay.setDisplayService(Display3D.getDisplayService()[0])
    overridePygameDisplay = staticmethod(overridePygameDisplay)
    def __init__(self, **argd):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(Display3D,self).__init__()
        self.caption = argd.get("title", "http://kamaelia.sourceforge.net")
        self.width = argd.get("width",800)
        self.height = argd.get("height",600)
        self.background_colour = argd.get("background_colour", (255,255,255))
        # pygame.FULLSCREEN * 0 == 0, so fullscreen defaults to off.
        self.fullscreen = pygame.FULLSCREEN * argd.get("fullscreen", 0)
        self.next_position = (0,0)
        # 3D component handling
        self.objects = []           # list of (object, eventcomms) pairs
        self.movementMode = False   # toggled by holding left-CTRL
        # Pygame component handling — all keyed by str(id(surface))
        self.surfaces = []
        self.overlays = []
        self.visibility = {}
        self.events_wanted = {}
        self.surface_to_eventcomms = {}
        self.surface_to_eventservice = {}
        self.surface_to_texnames = {}
        self.surface_to_pow2surface = {}
        self.surface_to_eventrequestcomms = {}
        self.wrappedsurfaces = []
        # determine projection parameters
        self.nearPlaneDist = argd.get("near", 1.0)
        self.farPlaneDist = argd.get("far", 100.0)
        self.perspectiveAngle = argd.get("perspective", 45.0)
        self.aspectRatio = float(self.width)/float(self.height)
        global pi
        # Size of the viewing frustum at the far plane; used to map pixel
        # coordinates of pygame surfaces onto far-plane quads.
        self.farPlaneHeight = self.farPlaneDist*2.0/tan(pi/2.0-self.perspectiveAngle*pi/360.0)
        self.farPlaneWidth = self.farPlaneHeight*self.aspectRatio
        self.farPlaneScaling = self.farPlaneWidth/self.width
        # initialize the display
        pygame.init()
        display = pygame.display.set_mode((self.width, self.height), self.fullscreen| pygame.DOUBLEBUF | pygame.OPENGL)
        pygame.display.set_caption(self.caption)
        pygame.mixer.quit()
        glClearColor(1.0,1.0,1.0,0.0)
        glClearDepth(1.0)
        glEnable(GL_DEPTH_TEST)
        glDepthFunc(GL_LEQUAL)
        glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
        # enable translucency
        glEnable (GL_BLEND);
        glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        # projection matrix
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(self.perspectiveAngle, self.aspectRatio, self.nearPlaneDist, self.farPlaneDist)
        self.texnum = -1
    def surfacePosition(self,surface):
        """Returns a suggested position for a surface. No guarantees its any good!"""
        # Simple cascade: each new surface is offset 50px down-right.
        position = self.next_position
        self.next_position = position[0]+50, position[1]+50
        return position
    def handleDisplayRequest(self):
        """\
        Check "notify" inbox for requests for surfaces, events and overlays and
        process them.
        """
        # changed from if to while
        while self.dataReady("notify"):
            message = self.recv("notify")
            if isinstance(message, Axon.Ipc.producerFinished): ### VOMIT : mixed data types
                # A component shut down: drop its surface and event plumbing.
                # print "SURFACE", message
                surface = message.message
                # print "SURFACE", surface
                message.message = None
                message = None
                # print "BEFORE", [id(x[0]) for x in self.surfaces]
                self.surfaces = [ x for x in self.surfaces if x[0] is not surface ]
                # print "AFTER", self.surfaces
                # print "Hmm...", self.surface_to_eventcomms.keys()
                try:
                    eventcomms = self.surface_to_eventcomms[str(id(surface))]
                except KeyError:
                    # This simply means the component wasn't listening for events!
                    pass
                else:
                    # print "EVENT OUTBOX:", eventcomms
                    # NOTE(review): this replaces the whole visibility dict
                    # with None rather than deleting one entry — looks like a
                    # bug (later lookups index into self.visibility); confirm.
                    self.visibility = None
                    try:
                        self.removeOutbox(eventcomms)
                    except:
                        "This sucks"
                        pass
                    # print "REMOVED OUTBOX"
            elif message.get("3DDISPLAYREQUEST", False):
                # A 3D object registers itself, optionally with an event feed.
                eventservice = message.get("events", None)
                eventcomms = None
                if eventservice is not None:
                    eventcomms = self.addOutbox("eventsfeedback")
                    self.link((self,eventcomms), eventservice)
                self.objects.append( (message.get("object"), eventcomms) )
            elif message.get("WRAPPERREQUEST", False):
                # A wrapper component takes over rendering of a pygame surface;
                # reply with its texture name, texel size and event service.
                surface = message.get("surface")
                self.wrappedsurfaces.append(str(id(surface)));
                callbackservice = message["wrapcallback"]
                callbackcomms = self.addOutbox("wrapfeedback")
                pow2surface = self.surface_to_pow2surface[str(id(surface))]
                #determine texture coordinates
                tex_w = float(surface.get_width())/float(pow2surface.get_width())
                tex_h = float(surface.get_height())/float(pow2surface.get_height())
                # send display data
                self.link((self,callbackcomms), callbackservice)
                b = Bunch()
                b.texname = self.surface_to_texnames[str(id(surface))]
                b.tex_w = tex_w
                b.tex_h = tex_h
                b.width = surface.get_width()
                b.height = surface.get_height()
                try:
                    b.eventservice = self.surface_to_eventservice[str(id(surface))]
                except KeyError:
                    b.eventservice = None
                self.send(b, callbackcomms)
                #handle only events if wrapped components can receive them
                try :
                    eventcomms = self.surface_to_eventcomms[str(id(surface))]
                    # save callback for event requests
                    eventrequestservice = message["eventrequests"]
                    eventrequestcomms = self.addOutbox("eventrequests")
                    self.link((self,eventrequestcomms), eventrequestservice)
                    self.surface_to_eventrequestcomms[str(id(surface))] = eventrequestcomms
                    # transmit already requested eventtypes
                    for (etype, wanted) in self.events_wanted[eventcomms].items():
                        if wanted == True:
                            self.send( {"ADDLISTENEVENT":etype}, eventrequestcomms)
                except KeyError: pass
            elif message.get("DISPLAYREQUEST", False):
                # A pygame component requests a surface to draw on.
                self.needsRedrawing = True
                callbackservice = message["callback"]
                eventservice = message.get("events", None)
                size = message["size"]
                surface = pygame.Surface(size)
                #create another surface, with dimensions a power of two
                # this is needed because otherwise texturing is REALLY slow
                pow2size = (int(2**(ceil(log(size[0], 2)))), int(2**(ceil(log(size[1], 2)))))
                pow2surface = pygame.Surface(pow2size)
                alpha = message.get("alpha", 255)
                surface.set_alpha(alpha)
                if message.get("transparency", None):
                    surface.set_colorkey(message["transparency"])
                position = message.get("position", self.surfacePosition(surface))
                callbackcomms = self.addOutbox("displayerfeedback")
                eventcomms = None
                if eventservice is not None:
                    eventcomms = self.addOutbox("eventsfeedback")
                    self.events_wanted[eventcomms] = {}
                    self.link((self,eventcomms), eventservice)
                    self.visibility[eventcomms] = (surface,size,position)
                    self.surface_to_eventcomms[str(id(surface))] = eventcomms
                    self.surface_to_eventservice[str(id(surface))] = eventservice
                # Hand the new surface back to the requesting component.
                self.link((self, callbackcomms), callbackservice)
                self.send(surface, callbackcomms)
                # generate texture name
                texname = glGenTextures(1)
                self.surface_to_texnames[str(id(surface))] = texname
                self.surface_to_pow2surface[str(id(surface))] = pow2surface
                self.surfaces.append( (surface, position, size, callbackcomms, eventcomms, pow2surface, texname) )
            elif message.get("ADDLISTENEVENT", None) is not None:
                # test if surface is beeing wrapped
                # NOTE(review): `surface` here is whatever a previous branch
                # left behind, not message["surface"] — the wrapped-surface
                # test looks like it checks a stale value; confirm upstream.
                if str(id(surface)) in self.wrappedsurfaces:
                    self.send(message, self.surface_to_eventrequestcomms[str(id(surface))])
                else:
                    eventcomms = self.surface_to_eventcomms[str(id(message["surface"]))]
                    self.events_wanted[eventcomms][message["ADDLISTENEVENT"]] = True
            elif message.get("REMOVELISTENEVENT", None) is not None:
                # test if surface is beeing wrapped
                # NOTE(review): same stale-`surface` concern as ADDLISTENEVENT.
                if str(id(surface)) in self.wrappedsurfaces:
                    self.send(message, self.surface_to_eventrequestcomms[str(id(surface))])
                else:
                    eventcomms = self.surface_to_eventcomms[str(id(message["surface"]))]
                    self.events_wanted[eventcomms][message["REMOVELISTENEVENT"]] = False
            elif message.get("CHANGEDISPLAYGEO", False):
                # Move an existing surface and refresh its texture.
                try:
                    surface = message.get("surface", None)
                    if surface is not None:
                        self.needsRedrawing = True
                        c = 0
                        found = False
                        while c < len(self.surfaces) and not found:
                            if self.surfaces[c][0] == surface:
                                found = True
                                break
                            c += 1
                        if found:
                            (surface, position, size, callbackcomms, eventcomms, pow2surface, texname) = self.surfaces[c]
                            new_position = message.get("position", position)
                            # update texture
                            # NOTE(review): updatePygameTexture takes
                            # (surface, pow2surface, texname) — this 2-arg call
                            # would raise TypeError; confirm.
                            self.updatePygameTexture(surface, texname)
                            # NOTE(review): rebuilds a 5-tuple where the rest of
                            # the class expects 7 elements (size and pow2surface
                            # are dropped) — looks like a latent bug; confirm.
                            self.surfaces[c] = (surface, new_position, callbackcomms, eventcomms, texname)
                except Exception, e:
                    print "It all went horribly wrong", e
            elif message.get("OVERLAYREQUEST", False):
                # A component requests a YUV video overlay.
                self.needsRedrawing = True
                size = message["size"]
                pixformat = message["pixformat"]
                position = message.get("position", (0,0))
                overlay = pygame.Overlay(pixformat, size)
                yuvdata = message.get("yuv", ("","",""))
                # transform (y,u,v) to (y,v,u) because pygame seems to want that(!)
                if len(yuvdata) == 3:
                    yuvdata = (yuvdata[0], yuvdata[2], yuvdata[1])
                yuvservice = message.get("yuvservice",False)
                if yuvservice:
                    yuvinbox = self.addInbox("overlay_yuv")
                    self.link( yuvservice, (self, yuvinbox) )
                    yuvservice = (yuvinbox, yuvservice)
                posservice = message.get("positionservice",False)
                if posservice:
                    posinbox = self.addInbox("overlay_position")
                    self.link (posservice, (self, posinbox) )
                    posservice = (posinbox, posservice)
                self.overlays.append( {"overlay":overlay,
                                       "yuv":yuvdata,
                                       "position":position,
                                       "size":size,
                                       "yuvservice":yuvservice,
                                       "posservice":posservice}
                                    )
            elif message.get("REDRAW", False):
                # A component drew on its surface; re-upload its texture.
                self.needsRedrawing=True
                surface = message["surface"]
                self.updatePygameTexture(surface, self.surface_to_pow2surface[str(id(surface))], self.surface_to_texnames[str(id(surface))])
    def handleEvents(self):
        """Fetch pending pygame events and dispatch them to listeners."""
        # pre-fetch all waiting events in one go
        events = [ event for event in pygame.event.get() ]
        # Handle Pygame events
        for surface, position, size, callbackcomms, eventcomms, pow2surface, texname in self.surfaces:
            # see if this component is interested in events
            # skip surfaces which get wrapped
            if eventcomms is not None:
                listener = eventcomms
                # go through events, for each, check if the listener is interested in that time of event
                bundle = []
                for event in events:
                    wanted = False
                    try:   wanted = self.events_wanted[listener][event.type]
                    except KeyError: pass
                    if wanted:
                        # if event contains positional information, remap it
                        # for the surface's coordiate origin
                        if event.type in [ pygame.MOUSEMOTION, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN ]:
                            # wrapped surfaces get positional events elsewhere
                            if str(id(surface)) in self.wrappedsurfaces: continue
                            e = Bunch()
                            e.type = event.type
                            pos = event.pos[0],event.pos[1]
                            try:
                                # translate screen coords into surface-local coords
                                e.pos = ( pos[0]-self.visibility[listener][2][0], pos[1]-self.visibility[listener][2][1] )
                                if event.type == pygame.MOUSEMOTION:
                                    e.rel = event.rel
                                if event.type == pygame.MOUSEMOTION:
                                    e.buttons = event.buttons
                                else:
                                    e.button = event.button
                                event = e
                            except TypeError:
                                "XXXX GRRR"
                                pass
                        bundle.append(event)
                # only send events to listener if we've actually got some
                if bundle != []:
                    self.send(bundle, listener)
        directions = {}
        for event in events:
            # Determine input mode
            if event.type == pygame.KEYDOWN and event.key == pygame.K_LCTRL:
                # print "Movement mode on"
                self.movementMode = True
            if event.type == pygame.KEYUP and event.key == pygame.K_LCTRL:
                # print "Movement mode off"
                self.movementMode = False
            # Determine direction vectors
            if event.type in [ pygame.MOUSEMOTION, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN ]:
                # determine intersection ray
                xclick = float(event.pos[0]-self.width/2)*self.farPlaneWidth/float(self.width)
                yclick = float(-event.pos[1]+self.height/2)*self.farPlaneHeight/float(self.height)
                directions[id(event)] = Vector(xclick, yclick, -self.farPlaneDist).norm()
        # Handle 3D object events
        for obj, eventcomms in self.objects:
            # see if this component is interested in events
            if eventcomms is not None:
                # go through events, for each, check if the listener is interested in that time of event
                bundle = []
                for event in events:
                    if event.type in [ pygame.MOUSEMOTION, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN ]:
                        e = Bunch()
                        e.type = event.type
                        e.dir = directions[id(event)]
                        e.movementMode = self.movementMode
                        if event.type in [pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN]:
                            e.button = event.button
                        if event.type == pygame.MOUSEMOTION:
                            e.rel = event.rel
                            e.buttons = event.buttons
                        bundle.append(e)
                # only send events to listener if we've actually got some
                if bundle != []:
                    self.send(bundle, eventcomms)
    def updatePygameTexture(self, surface, pow2surface, texname):
        """Blit *surface* onto its power-of-two buffer and upload as texture."""
        # print "UPDATE", texname
        # blit component surface to power of 2 sized surface
        pow2surface.blit(surface, (0,0))
        # set surface as texture
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, texname)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        textureData = pygame.image.tostring(pow2surface, "RGBX", 1)
        glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, pow2surface.get_width(), pow2surface.get_height(), 0,
                      GL_RGBA, GL_UNSIGNED_BYTE, textureData );
        glDisable(GL_TEXTURE_2D)
    def drawPygameSurfaces(self):
        """Render every (non-wrapped) pygame surface as a quad on the far plane."""
        # disable depth testing temporarely to ensure that pygame components
        # are on top of everything
        glDisable(GL_DEPTH_TEST)
        glEnable(GL_TEXTURE_2D)
        for surface, position, size, callbackcomms, eventcomms, pow2surface, texname in self.surfaces:
            # create texture if not already done
            if not glIsTexture(texname):
                self.updatePygameTexture(surface, pow2surface, texname)
            # skip surfaces which get wrapped
            if str(id(surface)) in self.wrappedsurfaces: continue
            glBindTexture(GL_TEXTURE_2D, texname)
            # determine surface positions on far Plane
            l = position[0]*self.farPlaneScaling-self.farPlaneWidth/2
            t = -position[1]*self.farPlaneScaling+self.farPlaneHeight/2
            r = l + size[0]*self.farPlaneScaling
            b = t - size[1]*self.farPlaneScaling
            #determine texture coordinates
            tex_w = float(size[0])/float(pow2surface.get_width())
            tex_h = float(size[1])/float(pow2surface.get_height())
            # draw just the texture, no background
            glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
            # draw faces
            glBegin(GL_QUADS)
            glColor3f(1, 0, 0)
            glTexCoord2f(0.0, 1.0-tex_h);         glVertex3f( l, b,  -self.farPlaneDist)
            glTexCoord2f(tex_w, 1.0-tex_h);     glVertex3f( r, b,  -self.farPlaneDist)
            glTexCoord2f(tex_w, 1.0); glVertex3f( r, t,  -self.farPlaneDist)
            glTexCoord2f(0.0, 1.0);     glVertex3f( l, t,  -self.farPlaneDist)
            glEnd()
        glDisable(GL_TEXTURE_2D)
        glEnable(GL_DEPTH_TEST)
    def drawBackground(self):
        """Draw the two-tone (sky/ground) background quads on the far plane."""
        glBegin(GL_QUADS)
        glColor4f(0.85, 0.85, 1.0, 1.0)
        glVertex3f(-self.farPlaneWidth/2.0, self.farPlaneHeight/2.0, -self.farPlaneDist)
        glVertex3f(self.farPlaneWidth/2.0, self.farPlaneHeight/2.0, -self.farPlaneDist)
        glVertex3f(self.farPlaneWidth/2.0, 0.0, -self.farPlaneDist)
        glVertex3f(-self.farPlaneWidth/2.0, 0.0, -self.farPlaneDist)
        glColor4f(0.75, 1.0, 0.75, 1.0)
        glVertex3f(-self.farPlaneWidth/2.0, 0.0, -self.farPlaneDist)
        glVertex3f(self.farPlaneWidth/2.0, 0.0, -self.farPlaneDist)
        glVertex3f(self.farPlaneWidth/2.0, -self.farPlaneHeight/2.0, -self.farPlaneDist)
        glVertex3f(-self.farPlaneWidth/2.0, -self.farPlaneHeight/2.0, -self.farPlaneDist)
        glEnd()
    def updateDisplay(self):
        """Flip the frame, then prepare the next frame's buffer.

        NOTE(review): the order (draw surfaces -> flip -> clear -> background)
        means each frame shows surfaces drawn this call over the background
        drawn on the previous call — presumably intentional; confirm.
        """
        #display pygame components
        self.drawPygameSurfaces()
        # show frame
        glFlush()
        pygame.display.flip()
        # clear drawing buffer
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
        # draw background
        self.drawBackground()
    def main(self):
        """Main loop."""
        while 1:
            self.handleDisplayRequest()
            self.handleEvents()
            self.updateDisplay()
            yield 1
if __name__=='__main__':
    # Demo: spin/translate/pulse a cube and texture plane, plus pygame widgets,
    # all rendered through the Display3D service. (Python 2 code.)
    class Bunch: pass  # duplicates the module-level Bunch; harmless here
    class CubeRotator(Axon.Component.component):
        # Emits a small relative rotation every scheduler tick.
        def main(self):
            while 1:
                yield 1
                self.send( Control3D(Control3D.REL_ROTATION, Vector(0.1, 0.1, 0.1)), "outbox")
    class CubeMover(Axon.Component.component):
        # Bounces a cube around a box centred at (0, 0, -20).
        def main(self):
            x,y,z = 3.0, 3.0, -20.0
            dx = -0.03
            dy = -0.03
            dz = -0.03
            while 1:
                yield 1
                self.send( Control3D(Control3D.POSITION, Vector(x, y, z)), "outbox")
                x +=dx
                y +=dy
                z +=dz
                # reverse direction at the box walls
                if abs(x)>5: dx = -dx
                if abs(y)>5: dy = -dy
                if abs(z+20)>10: dz = -dz
#                print x, y, abs(x), abs(y)
    import random
    class CubeBuzzer(Axon.Component.component):
        # Oscillates a cube's scale around 1.0.
        def main(self):
            r = 1.00
            f = 0.01
            while 1:
                yield 1
                if  r>1.0: f -= 0.001
                else: f += 0.001
                r += f
                self.send( Control3D(Control3D.SCALING, Vector(r, r, r)), "outbox")
    # Sample text fed word-by-word into the Ticker widget.
    text = """\
    The lights begin to twinkle from the rocks;
    The long day wanes; the slow moon climbs; the deep
    Moans round with many voices. Come, my friends.
    'T is not too late to seek a newer world.Push off, and sitting well in order smite
    The sounding furrows; for my purpose holds
    To sail beyond the sunset, and the baths
    Of all the western stars, until I die.
    It may be that the gulfs will wash us down;
    It may be we shall touch the Happy Isles,
    And see the great Achilles, whom we knew.
    Tho' much is taken, much abides; and tho'
    We are not now that strength which in old days
    Moved earth and heaven, that which we are, we are,--
    One equal temper of heroic hearts,
    Made weak by time and fate, but strong in will
    To strive, to seek, to find, and not to yield.
    """
    class datasource(Axon.Component.component):
        # Streams the words of `text`, one per tick.
        def main(self):
            for x in text.split():
                self.send(x,"outbox")
                yield 1
    from Kamaelia.Util.ConsoleEcho import consoleEchoer
    from Kamaelia.Util.Graphline import Graphline
    from Kamaelia.UI.Pygame.Button import Button
    from Kamaelia.UI.Pygame.Ticker import Ticker
    from SimpleCube import SimpleCube
    from TexPlane import TexPlane
    from pygame.locals import *
    # Route all pygame UI components through the 3D display service.
    Display3D.getDisplayService()[0].overridePygameDisplay()
    Graphline(
        CUBE = SimpleCube(pos = Vector(3,3,-15)),
        PLANE = TexPlane(pos=Vector(-3, 0,-10), tex="Kamaelia.png", name="1st Tex Plane"),
        BUTTON1 = Button(caption="Press SPACE or click", key=K_SPACE),
        BUTTON2 = Button(caption="Reverse colours",fgcolour=(255,255,255),bgcolour=(0,0,0)),
        BUTTON3 = Button(caption="Mary...",msg="Mary had a little lamb", position=(200,100)),
        ROTATOR = CubeRotator(),
        MOVER = CubeMover(),
        BUZZER = CubeBuzzer(),
        ECHO = consoleEchoer(),
        TICKER = Ticker(position = (400, 300), render_left = 0, render_right=350, render_top=0, render_bottom=257),
        TEXT = datasource(),
        linkages = {
            ("PLANE", "outbox") : ("ECHO", "inbox"),
            ("ROTATOR", "outbox") : ("PLANE", "control3d"),
            ("BUTTON1", "outbox") : ("ECHO", "inbox"),
            ("BUTTON2", "outbox") : ("ECHO", "inbox"),
            ("BUTTON3", "outbox") : ("TICKER", "inbox"),
            ("TEXT", "outbox") : ("TICKER", "inbox"),
            ("MOVER", "outbox") : ("CUBE", "control3d"),
        } ).run()
    Axon.Scheduler.scheduler.run.runThreads()
|
<gh_stars>1-10
#!/usr/bin/python
from NanoTCAD_ViDES import *
from pylab import *
# --- Run 1: transmission of an armchair graphene nanoribbon, N=5, no field ---
# Builds a tight-binding Hamiltonian slice by slice, sweeps transverse
# wavevector k, accumulates the transmission T(E,k), then compares the
# k-integrated conductance against reference data in g0.dat. (Python 2 code.)
# atoms per slice (or cell)
atoms=1;
#N=46;
N=5;
slices=4*N;
thop=-2.7;          # nearest-neighbour hopping energy (eV)
#4*pi/(3*sqrt(3)*0.144);
delta=0.144*sqrt(3);    # lattice period along the transport direction (nm)
#kmin=-pi/delta;
kmin=8;
k=kmin;
Np=30;              # number of k-points in the sweep
#kmax=pi/delta
kmax=9.0
#kmax=10
dk=(kmax-kmin)/Np;
GNR=nanoribbon(1,N*2.5*0.144+(N-1)*0.144*0.5+2*0.144)
zzz=GNR.z[0:-2];    # keep only the ribbon coordinates; drop the last two sites
del GNR;
Efield=0;           # no external field in this run
Emax=0.2;
Emin=-0.2;
dE=1e-3;
kvect=linspace(kmin,kmax,Np)
Ne=int(floor((Emax-Emin)/dE));
# NOTE(review): NEmax is not defined anywhere in this file — presumably it
# comes from `from NanoTCAD_ViDES import *`; otherwise this line raises
# NameError and the computed Ne above is dead. Confirm.
Ne=NEmax
E=linspace(Emin,Emax,Ne);
X,Y=meshgrid(E,kvect);
Z=zeros((Ne,Np));   # transmission accumulator, one column per k-point
# h rows: [0] flag, rows 1..slices on-site terms, rows slices+1.. couplings
h=zeros((2*slices,3),dtype=complex);
h[0][0]=1;
for i in range(1,slices+1):
    h[i][0]=i
    h[i][1]=i
# k-independent intra-cell hoppings (odd rows)
kk=1;
for ii in range(slices+1,2*slices):
    if ((ii%2)==1):
        h[ii][0]=kk;
        h[ii][1]=kk+1;
        h[ii][2]=thop;
        kk=kk+1;
i=0;
while (k<(kmax*0.99)):
#while (k==4):
    # k-dependent inter-cell hoppings (even rows), Bloch phase alternating sign
    flaggo=0;
    kk=1;
    for ii in range(slices+1,2*slices):
        if ((ii%2)==0):
            h[ii][0]=kk;
            h[ii][1]=kk+1;
            if ((flaggo%2)==0):
                h[ii][2]=thop+thop*exp(k*delta*1j);
            else:
                h[ii][2]=thop+thop*exp(-k*delta*1j);
            flaggo=flaggo+1;
            kk=kk+1;
    H = Hamiltonian(atoms, slices)
    H.z=zzz;
    H.Eupper = Emax
    H.Elower = Emin
    H.H = h
    H.mu1=H.mu2=0.3
    H.dE=dE;
    H.Phi=Efield/H.z[-1]*H.z;   # linear potential profile (zero here)
    H.charge_T()                # compute transmission spectrum H.T
    Z[:,i]=H.T;
    print i
    i=i+1;
    k=k+dk
#    plot(H.E,H.T)
#    show()
    # free H each iteration EXCEPT the last, so H.E survives for plotting below
    if (k<(kmax*0.99)):
        del H
#plt.imshow(Z, interpolation='bilinear', cmap=cm.gray,
#                origin='lower', extent=[kmin,kmax,Emin,Emax])
#show()
# integrate over k (factor 2 for spin/valley, converted to mS units)
T=sum(Z,1)*2*dk/(2*pi)*1e3;
plot(H.E,T);
G0=loadtxt("g0.dat");
plot(G0[:,0],G0[:,1]);
show()
# --- Run 2: same sweep for a wider ribbon (N=47) with a linear potential ---
# Near-duplicate of run 1 above; differs in N, Efield, and reference file.
# atoms per slice (or cell)
atoms=1;
N=47;
#N=5;
slices=4*N;
thop=-2.7;          # nearest-neighbour hopping energy (eV)
#4*pi/(3*sqrt(3)*0.144);
delta=0.144*sqrt(3);    # lattice period along the transport direction (nm)
#kmin=-pi/delta;
kmin=8;
k=kmin;             # reset the sweep variable from run 1
Np=30;
#kmax=pi/delta
kmax=9.0
#kmax=10
dk=(kmax-kmin)/Np;
GNR=nanoribbon(1,N*2.5*0.144+(N-1)*0.144*0.5+2*0.144)
zzz=GNR.z[0:-2];
del GNR;
Efield=-0.005*zzz[-1];  # uniform field scaled by the ribbon length
Emax=0.2;
Emin=-0.2;
dE=1e-3;
kvect=linspace(kmin,kmax,Np)
Ne=int(floor((Emax-Emin)/dE));
# NOTE(review): same NEmax concern as in run 1 — name not defined in this file.
Ne=NEmax
E=linspace(Emin,Emax,Ne);
X,Y=meshgrid(E,kvect);
Z=zeros((Ne,Np));
h=zeros((2*slices,3),dtype=complex);
h[0][0]=1;
for i in range(1,slices+1):
    h[i][0]=i
    h[i][1]=i
kk=1;
for ii in range(slices+1,2*slices):
    if ((ii%2)==1):
        h[ii][0]=kk;
        h[ii][1]=kk+1;
        h[ii][2]=thop;
        kk=kk+1;
i=0;
while (k<(kmax*0.99)):
#while (k==4):
    flaggo=0;
    kk=1;
    for ii in range(slices+1,2*slices):
        if ((ii%2)==0):
            h[ii][0]=kk;
            h[ii][1]=kk+1;
            if ((flaggo%2)==0):
                h[ii][2]=thop+thop*exp(k*delta*1j);
            else:
                h[ii][2]=thop+thop*exp(-k*delta*1j);
            flaggo=flaggo+1;
            kk=kk+1;
    H = Hamiltonian(atoms, slices)
    H.z=zzz;
    H.Eupper = Emax
    H.Elower = Emin
    H.H = h
    H.mu1=H.mu2=0.3
    H.dE=dE;
    H.Phi=Efield/H.z[-1]*H.z;   # linear potential ramp along the ribbon
    H.charge_T()
    Z[:,i]=H.T;
    print i
    i=i+1;
    k=k+dk
#    plot(H.E,H.T)
#    show()
    # free H each iteration EXCEPT the last, so H.E survives for plotting below
    if (k<(kmax*0.99)):
        del H
#plt.imshow(Z, interpolation='bilinear', cmap=cm.gray,
#                origin='lower', extent=[kmin,kmax,Emin,Emax])
#show()
# NOTE(review): this prints the T left over from run 1, since T is only
# reassigned on the next line — possibly a leftover debug statement; confirm.
print T
T=sum(Z,1)*2*dk/(2*pi)*1e3;
plot(H.E,T);
G=loadtxt("g.dat");
plot(G[:,0],G[:,1]);
show()
|
<filename>uitester/ui/case_manager/tag_editor.py
# -*- encoding: UTF-8 -*-
import os
from PyQt5 import uic
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import QWidget, QMessageBox
from uitester.case_manager.database import DBCommandLineHelper
from uitester.config import Config
class TagEditorWidget(QWidget):
    """Dialog widget for creating or editing a test-case tag.

    Loads its layout from ``tag_editor.ui``. When constructed with an
    existing ``tag_name``, the fields are pre-filled and saving updates
    that tag; otherwise saving inserts a new tag. On close, emits
    ``refresh_signal`` so the parent view can reload its tag list.
    """
    def __init__(self, refresh_signal, tag_name=None, *args, **kwargs):
        """Build the editor.

        Args:
            refresh_signal: Qt signal emitted when the widget closes.
            tag_name: optional name of an existing tag to edit.
        """
        super().__init__(*args, **kwargs)
        self.refresh_signal = refresh_signal
        self.db_helper = DBCommandLineHelper()
        ui_dir_path = os.path.dirname(__file__)
        ui_file_path = os.path.join(ui_dir_path, 'tag_editor.ui')
        uic.loadUi(ui_file_path, self)
        # set icon
        save_icon = QIcon()
        config = Config()
        save_icon.addPixmap(QPixmap(config.images + '/save.png'), QIcon.Normal, QIcon.Off)
        self.tag_save_btn.setIcon(save_icon)
        self.tag_save_btn.clicked.connect(self.tag_save)
        self.tag_id_line_edit.hide()  # hidden field: holds the tag id as editor state
        self.tag_name_line_edit.setPlaceholderText("Tag Name")  # placeholder text
        self.tag_description_text_edit.setPlaceholderText('Tag Description')
        if tag_name:
            # Edit mode: pre-fill all fields from the existing tag.
            self.tag = self.db_helper.query_tag_by_name(tag_name)
            self.tag_id_line_edit.setText(str(self.tag.id))
            self.tag_name_line_edit.setText(self.tag.name)
            self.tag_description_text_edit.setPlainText(self.tag.description)
        # self.tag_description_text_edit.setDocument(QTextDocument("Tag description"))  # set placeholder text
    def closeEvent(self, close_even):
        """Confirm before discarding unsaved changes, then signal a refresh.

        Note: the parameter name `close_even` (sic) is the QCloseEvent.
        """
        if self.tag_id_line_edit.text() != '':
            # Edit mode: dirty if name or description differ from the loaded tag.
            if self.tag.name != self.tag_name_line_edit.text() or self.tag.description != self.tag_description_text_edit.toPlainText():
                reply = QMessageBox.information(self, 'close window', 'Changes not saved, confirm close?',
                                                QMessageBox.Yes, QMessageBox.No)
                if reply != QMessageBox.Yes:
                    close_even.ignore()
                    return
        else:
            # Create mode: dirty if anything was typed into the fields.
            # NOTE(review): checks tag_id_line_edit (always '' here) rather than
            # tag_name_line_edit — presumably a typo; confirm intended field.
            if self.tag_id_line_edit.text() != '' or self.tag_description_text_edit.toPlainText() != '':
                reply = QMessageBox.information(self, 'close window', 'Changes not saved, confirm close?',
                                                QMessageBox.Yes, QMessageBox.No)
                if reply != QMessageBox.Yes:
                    close_even.ignore()
                    return
        self.refresh_signal.emit()
    def tag_save(self):
        """Validate the fields and insert or update the tag in the database."""
        tag_id = self.tag_id_line_edit.text()
        tag_name = self.tag_name_line_edit.text()
        tag_description = self.tag_description_text_edit.toPlainText()
        if tag_name == '' or tag_description == '':
            QMessageBox.warning(self, 'tag editor', 'tag name and description can\'t be empty')
        else:
            if len(tag_name) > 8:
                QMessageBox.warning(self, 'tag editor', 'tag name is not greater than 8 characters')
            else:
                if tag_id:
                    # Edit mode: push the new values onto the loaded tag.
                    self.tag.name = tag_name
                    self.tag.description = tag_description
                    self.db_helper.update_tag()
                    QMessageBox.information(self, 'tag editor', 'tag update success')  # TODO: decide whether to trigger a list refresh here
                else:
                    # Create mode: reject duplicates by name, otherwise insert.
                    tag = self.db_helper.query_tag_by_name(tag_name)
                    if tag is None:
                        tag = self.db_helper.insert_tag(tag_name, tag_description)
                        # Switch the editor into edit mode for the new tag.
                        self.tag_id_line_edit.setText(str(tag.id))
                        self.tag = tag
                        QMessageBox.information(self, 'tag editor', 'tag insert success')  # TODO: decide whether to trigger a list refresh here
                    else:
                        QMessageBox.warning(self, 'tag editor', 'tag has existed')
|
from all_imports import *
from numpy import array
from numpy import argmax
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
def get_data(spatial = True):
    """Download and preprocess the VQA-v2 / MS-COCO train2014 data.

    Builds tf.data pipelines of (image features, question sequence, answer
    label) triples, extracting and caching VGG16 image features as .npy files
    next to each image on first run.

    Args:
        spatial: if True, cache spatial conv features ("<img>.npy"); if False,
            cache dense fc features ("<img>_dense.npy").

    Returns:
        (dataset, test_dataset, ques_vocab, ans_vocab, max_q, label_encoder,
        tokenizer) — train/val tf.data.Dataset objects plus the question
        vocabulary, answer vocabulary, max question length, the fitted
        sklearn LabelEncoder and the fitted Keras Tokenizer.

    NOTE(review): expects 'v2_mscoco_train2014_annotations.json' and
    'v2_OpenEnded_mscoco_train2014_questions.json' in the working directory —
    these VQA files are NOT downloaded below; confirm they exist beforehand.
    """
    # Download the COCO caption annotations zip (used only to locate the
    # directory — NOTE(review): annotation_file is overwritten further down
    # with the VQA annotations file, so this download may be unnecessary).
    annotation_zip = tf.keras.utils.get_file('captions.zip',
                                          cache_subdir=os.path.abspath('.'),
                                          origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
                                          extract = True)
    annotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'
    # Download the ~13GB train2014 image set only if it is not already local.
    name_of_zip = 'train2014.zip'
    if not os.path.exists(os.path.abspath('.') + '/' + name_of_zip):
        image_zip = tf.keras.utils.get_file(name_of_zip,
                                        cache_subdir=os.path.abspath('.'),
                                        origin = 'http://images.cocodataset.org/zips/train2014.zip',
                                        extract = True)
        PATH = os.path.dirname(image_zip)+'/train2014/'
    else:
        PATH = os.path.abspath('.')+'/train2014/'
    print(PATH)
    """### Processing VQA Dataset"""
    import collections
    import operator
    # read the json file
    print("Reading annotation file...")
    annotation_file = 'v2_mscoco_train2014_annotations.json'
    with open(annotation_file, 'r') as f:
        annotations = json.load(f)
    # storing the captions and the image name in vectors
    all_answers = []
    all_answers_qids = []
    all_img_name_vector = []
    for annot in annotations['annotations']:
        #print(annot)
        # Vote over the 10 annotator answers, weighting by stated confidence:
        # yes=4, maybe=2, no=1. NOTE(review): since ans_dic is a
        # defaultdict(int), the `in` check is redundant — the else branch's
        # plain assignment is equivalent to += on a fresh key.
        ans_dic = collections.defaultdict(int)
        for each in annot['answers']:
            diffans = each['answer']
            if diffans in ans_dic:
                #print(each['answer_confidence'])
                if each['answer_confidence']=='yes':
                    ans_dic[diffans]+=4
                if each['answer_confidence']=='maybe':
                    ans_dic[diffans]+= 2
                if each['answer_confidence']=='no':
                    ans_dic[diffans]+= 1
            else:
                if each['answer_confidence']=='yes':
                    ans_dic[diffans]= 4
                if each['answer_confidence']=='maybe':
                    ans_dic[diffans]= 2
                if each['answer_confidence']=='no':
                    ans_dic[diffans]= 1
        #print(ans_dic)
        # Keep the answer with the highest weighted vote.
        most_fav = max(ans_dic.items(), key=operator.itemgetter(1))[0]
        #print(most_fav)
        caption = '<start> ' + most_fav + ' <end>' #each['answer']
        image_id = annot['image_id']
        question_id = annot['question_id']
        # COCO filenames zero-pad the image id to 12 digits.
        full_coco_image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (image_id)
        all_img_name_vector.append(full_coco_image_path)
        all_answers.append(caption)
        all_answers_qids.append(question_id)
    print("Done reading annotation file.")
    print("Reading Question file...")
    # read the json file
    question_file = 'v2_OpenEnded_mscoco_train2014_questions.json'
    with open(question_file, 'r') as f:
        questions = json.load(f)
    # storing the captions and the image name in vectors
    question_ids =[]
    all_questions = []
    all_img_name_vector_2 = []
    for annot in questions['questions']:
        caption = '<start> ' + annot['question'] + ' <end>'
        image_id = annot['image_id']
        full_coco_image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (image_id)
        all_img_name_vector_2.append(full_coco_image_path)
        all_questions.append(caption)
        question_ids.append(annot['question_id'])
    print(len(all_img_name_vector),len(all_answers), len(all_answers_qids))
    print(all_img_name_vector[10:15],all_answers[10:15], all_answers_qids[10:15])
    print(len(all_img_name_vector), len(all_questions) , len(question_ids))
    print(all_img_name_vector_2[10:15],all_questions[10:15], question_ids[10:15])
    # shuffling the captions and image_names together
    # setting a random state
    # NOTE(review): this relies on answers and questions appearing in the same
    # (question_id) order in both JSON files — confirm for the VQA v2 release.
    train_answers, train_questions, img_name_vector = shuffle(all_answers,all_questions,
                                          all_img_name_vector,
                                          random_state=1)
    print("Done pre processing Questions answers and images")
    print("Now preparing Image vectors...")
    # selecting the first 30000 captions from the shuffled set
    #num_examples = 3000
    #train_answers = train_answers[:num_examples]
    #train_questions = train_questions[:num_examples]
    #img_name_vector = img_name_vector[:num_examples]
    #print(img_name_vector[0],train_questions[0],train_answers[0])
    print("Length of image name vector ",len(img_name_vector),"Length of training questions ",len(train_questions)," Length of train answers ",len(train_answers))
    """### Getting Image Feature vector using VGG"""
    # Probe whether feature files already exist; if any are found the costly
    # feature-extraction pass below is skipped entirely.
    flag = False
    for path in img_name_vector:
        path_of_feature = path
        if spatial==False:
            if os.path.isfile(path_of_feature+"_dense.npy"):
                flag = True
        else:
            if os.path.isfile(path_of_feature+".npy"):
                flag = True
                break
    if flag == False:
        print("Using VGG Convolution base...")
        def load_image(image_path):
            # Decode and resize to VGG's 224x224 input.
            # NOTE(review): inception_v3.preprocess_input is used with VGG16;
            # vgg16.preprocess_input would be the conventional pairing —
            # confirm this mismatch is intentional.
            img = tf.io.read_file(image_path)
            img = tf.image.decode_jpeg(img, channels=3)
            # 224 x 224 for VGG 299x299 for Inception
            img = tf.image.resize(img, (224, 224))
            img = tf.keras.applications.inception_v3.preprocess_input(img)
            return img, image_path
        # Dense features come from the last fully-connected layer (include_top);
        # spatial features from the final conv block (no top).
        if(spatial == False):
            image_model = tf.keras.applications.VGG16(include_top=True,
                                            weights='imagenet',input_shape = (224,224,3))
            new_input = image_model.input
            hidden_layer = image_model.layers[-2].output
        else:
            image_model = tf.keras.applications.VGG16(include_top=False,
                                            weights='imagenet',input_shape = (224,224,3))
            new_input = image_model.input
            hidden_layer = image_model.layers[-1].output
        image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
        # getting the unique images
        encode_train = sorted(set(img_name_vector))
        # feel free to change the batch_size according to your system configuration
        image_dataset = tf.data.Dataset.from_tensor_slices(encode_train)
        image_dataset = image_dataset.map(
          load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(16)
        print("Converting..")
        # Extract features batch-by-batch and cache one .npy file per image.
        for img, path in image_dataset:
            batch_features = image_features_extract_model(img)
            batch_features = tf.reshape(batch_features,
                                  (batch_features.shape[0], -1, batch_features.shape[1]))
            #print(batch_features.shape)
            for bf, p in zip(batch_features, path):
                path_of_feature = p.numpy().decode("utf-8")
                if spatial:
                    sv_p = path_of_feature+".npy"
                else:
                    sv_p = path_of_feature+"_dense.npy"
                np.save(sv_p, bf.numpy())
    print("Done getting image feature vectors")
    """### Creating Question Vectors"""
    print("Getting question vectors")
    # This will find the maximum length of any question in our dataset
    def calc_max_length(tensor):
        return max(len(t) for t in tensor)
    # choosing the top 10000 words from the vocabulary
    top_k = 10000
    tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=top_k,
                                                  oov_token="<unk>",
                                                  filters='!"#$%&()*+.,-/:;=?@[\]^_`{|}~ ')
    tokenizer.fit_on_texts(train_questions)
    train_question_seqs = tokenizer.texts_to_sequences(train_questions)
    #new edit
    #print(tokenizer.word_index)
    ques_vocab = tokenizer.word_index
    #print(train_question_seqs)
    # Reserve index 0 as the padding token.
    tokenizer.word_index['<pad>'] = 0
    tokenizer.index_word[0] = '<pad>'
    # creating the tokenized vectors
    train_question_seqs = tokenizer.texts_to_sequences(train_questions)
    # padding each vector to the max_length of the captions
    # if the max_length parameter is not provided, pad_sequences calculates that automatically
    question_vector = tf.keras.preprocessing.sequence.pad_sequences(train_question_seqs, padding='post')
    #cap_vector
    # calculating the max_length
    # used to store the attention weights
    max_length = calc_max_length(train_question_seqs)
    print(max_length)
    #new edit
    max_q = max_length
    print("Done getting question feature vectors")
    """### Creating answer one hot vectors"""
    print("One hot encoding answer vectors...")
    # considering all answers to be part of ans vocab
    # define example
    data = train_answers
    values = array(data)
    print(values[:10])
    # integer encode
    # Answers become integer class labels via LabelEncoder (one-hot encoding
    # below was abandoned in favour of integer labels).
    label_encoder = LabelEncoder()
    integer_encoded = label_encoder.fit_transform(values)
    #print(integer_encoded)
    #new edit
    ans_vocab = {l: i for i, l in enumerate(label_encoder.classes_)}
    print("Length of answer vocab",len(ans_vocab))
    # binary encode
    #onehot_encoder = OneHotEncoder(sparse=False)
    #integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
    #onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
    #print(onehot_encoded[0],len(onehot_encoded))
    #answer_vector = onehot_encoded
    #new edit
    #len_ans_vocab = len(onehot_encoded[0])
    #print(answer_vector)
    #print(len(question_vector[0]), len(answer_vector[0]))
    """### TRAIN - TEST SPLIT"""
    # 90/10 train/validation split, fixed seed for reproducibility.
    img_name_train, img_name_val, question_train, question_val,answer_train, answer_val = train_test_split(img_name_vector,
                                                                    question_vector,
                                                                    integer_encoded,
                                                                    test_size=0.1,
                                                                    random_state=0)
    print(len(img_name_train), len(img_name_val), len(question_train), len(question_val),len(answer_train), len(answer_val))
    """### Almost done with data processing!!!"""
    # feel free to change these parameters according to your system's configuration
    BATCH_SIZE = 64 #2 #64
    BUFFER_SIZE = 1000 #1000
    # embedding_dim = 256
    # units = 512
    # vocab_size = len(tokenizer.word_index) + 1
    num_steps = len(img_name_train) // BATCH_SIZE
    # shape of the vector extracted from InceptionV3 is (64, 2048)
    # these two variables represent that
    features_shape = 512
    attention_features_shape = 49
    # loading the numpy files
    # map_func runs inside tf.numpy_function, so img_name arrives as bytes.
    # NOTE(review): it always loads "<img>.npy" — the "_dense.npy" variant for
    # spatial=False is never read here; confirm intended.
    def map_func(img_name, cap,ans):
        img_tensor = np.load(img_name.decode('utf-8')+'.npy')
        return img_tensor, cap,ans
    dataset = tf.data.Dataset.from_tensor_slices((img_name_train, question_train.astype(np.float32), answer_train.astype(np.float32)))
    # using map to load the numpy files in parallel
    dataset = dataset.map(lambda item1, item2, item3: tf.numpy_function(map_func, [item1, item2, item3], [tf.float32, tf.float32, tf.float32]),
              num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # shuffling and batching
    dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE,drop_remainder = True)
    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    test_dataset = tf.data.Dataset.from_tensor_slices((img_name_val, question_val.astype(np.float32), answer_val.astype(np.float32)))
    # using map to load the numpy files in parallel
    test_dataset = test_dataset.map(lambda item1, item2, item3: tf.numpy_function(
          map_func, [item1, item2, item3], [tf.float32, tf.float32, tf.float32]),
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # shuffling and batching
    test_dataset = test_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
    test_dataset = test_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return dataset,test_dataset,ques_vocab,ans_vocab,max_q,label_encoder,tokenizer
# ---- file boundary (concatenation artifact) ----
# <gh_stars>1-10  (dataset artifact; not valid Python as bare text)
import cv2
def draw_landmarks(image, landmark_point):
    """Draw a hand skeleton on ``image`` by connecting landmark points.

    Each bone segment is drawn twice: a thick black line (width 6) first,
    then a thinner white line (width 2) on top, producing an outlined look.

    Args:
        image: image array that ``cv2.line`` can draw on; modified in place.
        landmark_point: sequence of at least 21 (x, y) points — presumably in
            MediaPipe hand-landmark order (TODO confirm). If empty, the image
            is returned unchanged.

    Returns:
        The same ``image`` object, for call chaining.
    """
    if len(landmark_point) > 0:
        # (start, end) landmark-index pairs, listed in the exact drawing
        # order of the original long hand-written sequence of cv2.line calls.
        bones = (
            # Thumb
            (2, 3), (3, 4),
            # Index finger
            (5, 6), (6, 7), (7, 8),
            # Middle finger
            (9, 10), (10, 11), (11, 12),
            # Ring finger
            (13, 14), (14, 15), (15, 16),
            # Little finger
            (17, 18), (18, 19), (19, 20),
            # Palm outline
            (0, 1), (1, 2), (2, 5), (5, 9), (9, 13), (13, 17), (17, 0),
        )
        for start, end in bones:
            pt1 = tuple(landmark_point[start])
            pt2 = tuple(landmark_point[end])
            cv2.line(image, pt1, pt2, (0, 0, 0), 6)        # black outline
            cv2.line(image, pt1, pt2, (255, 255, 255), 2)  # white core
    return image
# ---- file boundary (concatenation artifact) ----
from urllib import request
from xml.dom.minidom import parseString
def subdata(node):
    """Recursively collect and concatenate all text beneath *node*."""
    parts = []
    for child in node.childNodes:
        if child.nodeType == child.TEXT_NODE:
            parts.append(child.nodeValue)
        else:
            # Non-text child: descend and gather its text too.
            parts.append(subdata(child))
    return ''.join(parts)
def textnode(node):
    """Concatenate only the direct (non-recursive) text children of *node*."""
    return ''.join(
        child.nodeValue
        for child in node.childNodes
        if child.nodeType == child.TEXT_NODE
    )
def process():
    """Generate OpenGL 3.3 loader code from the Khronos gl.xml registry.

    Downloads gl.xml, collects the enums and commands required by GL
    versions before 4.0 (honouring <remove> entries from later core
    profiles), and writes declarations to the module-level ``h_file`` and
    loader code to ``c_file``.

    NOTE(review): relies on the globals ``h_file``/``c_file`` being open
    writable files before this is called (done at module scope below).
    """
    enumset= set()
    cmdset = set()
    url = "https://cvs.khronos.org/svn/repos/ogl/trunk/doc/registry/public/api/gl.xml"
    # Fetch and parse the full registry over the network.
    text = request.urlopen(url).read().decode('utf-8')
    doc = parseString(text)
    for feature in doc.getElementsByTagName('feature'):
        version = feature.getAttribute('name')
        # We want everything before version 4
        if version == "GL_VERSION_4_0":
            break
        requires = feature.getElementsByTagName('require')
        removes = feature.getElementsByTagName('remove')
        for require in requires:
            enums = require.getElementsByTagName('enum')
            for enum in enums:
                name = enum.getAttribute('name')
                enumset.add(name)
            cmds = require.getElementsByTagName('command')
            for cmd in cmds:
                name = cmd.getAttribute('name')
                cmdset.add(name)
        # Drop names removed by this version's core profile.
        # NOTE(review): set.remove raises KeyError if a <remove> entry was
        # never required — holds for current gl.xml, but fragile.
        for require in removes:
            enums = require.getElementsByTagName('enum')
            for enum in enums:
                name = enum.getAttribute('name')
                enumset.remove(name)
            cmds = require.getElementsByTagName('command')
            for cmd in cmds:
                name = cmd.getAttribute('name')
                cmdset.remove(name)
    cmdtag = doc.getElementsByTagName('commands')
    # c_top: global function-pointer definitions; c_bot: wglGetProcAddress
    # assignments emitted inside init_openg33().
    c_top = []
    c_bot = []
    for cmd in cmdtag[0].getElementsByTagName('command'):
        name = cmd.getElementsByTagName('name')[0]
        proto = cmd.getElementsByTagName('proto')[0]
        params = cmd.getElementsByTagName('param')
        cmdname= subdata(name)
        # textnode keeps only the return type text, excluding the <name> child.
        protoname= textnode(proto)
        pname = []
        for param in params:
            pname.append(subdata(param))
        if cmdname in cmdset:
            # Header: typedef + extern pointer declaration per command.
            print ("typedef %s (WINAPI * %s)(%s);" % (protoname, cmdname.upper() + 'PROC', ','.join(pname)), file=h_file)
            print ("extern %s %s;" % (cmdname.upper() + "PROC", cmdname), file=h_file);
            print(file=h_file)
            c_top.append("%s %s;" % (cmdname.upper() + "PROC", cmdname));
            c_bot.append(" %s=(%s)wglGetProcAddress(\"%s\");" % (cmdname, cmdname.upper() + 'PROC', cmdname));
    # Emit #define lines for every selected enum.
    enumtag = doc.getElementsByTagName('enums')
    for enumgroup in enumtag:
        for enum in enumgroup.getElementsByTagName('enum'):
            name = enum.getAttribute('name')
            value = enum.getAttribute('value')
            if name in enumset:
                print('#define %s %s' % (name, value), file=h_file)
    print('\n'.join(c_top), file=c_file);
    print('''
void init_openg33()
{
''', file=c_file)
    print('\n'.join(c_bot), file=c_file);
    print ("}", file = c_file)
# Module-level driver: open the output files, write the fixed C header
# preamble (GL typedefs), then generate the loader via process().
# NOTE(review): h_file and c_file are never explicitly closed here — contents
# are flushed only at interpreter exit; consider close()/with in a follow-up.
h_file = open("opengl33.h", "wt")
c_file = open("opengl33.cpp", "wt")
print (''' #include <windows.h>
#include <stdint.h>
typedef unsigned int GLenum;
typedef unsigned char GLboolean;
typedef unsigned int GLbitfield;
typedef void GLvoid;
typedef signed char GLbyte;
typedef short GLshort;
typedef int GLint;
typedef int GLclampx;
typedef unsigned char GLubyte;
typedef unsigned short GLushort;
typedef unsigned int GLuint;
typedef int GLsizei;
typedef float GLfloat;
typedef float GLclampf;
typedef double GLdouble;
typedef double GLclampd;
typedef void *GLeglImageOES;
typedef char GLchar;
typedef char GLcharARB;
typedef unsigned short GLhalfARB;
typedef unsigned short GLhalf;
typedef GLint GLfixed;
typedef ptrdiff_t GLintptr;
typedef ptrdiff_t GLsizeiptr;
typedef int64_t GLint64;
typedef uint64_t GLuint64;
typedef ptrdiff_t GLintptrARB;
typedef ptrdiff_t GLsizeiptrARB;
typedef int64_t GLint64EXT;
typedef uint64_t GLuint64EXT;
typedef struct __GLsync *GLsync;
struct _cl_context;
struct _cl_event;
typedef void ( *GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
typedef void ( *GLDEBUGPROCARB)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
typedef void ( *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
''', file = h_file);
# The generated .cpp only needs the generated header.
print ('#include "opengl33.h"', file=c_file)
process()
# ---- file boundary (concatenation artifact) ----
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragment for AWS modules.

    Ansible merges the ``DOCUMENTATION`` YAML below into any module that
    declares this fragment in ``extends_documentation_fragment``, supplying
    the shared AWS connection/credential option documentation. The YAML text
    is consumed verbatim by Ansible's doc tooling, so it must not be edited
    for style.
    """
    # AWS only documentation fragment
    DOCUMENTATION = r'''
options:
  debug_botocore_endpoint_logs:
    description:
      - Use a botocore.endpoint logger to parse the unique (rather than total) "resource:action" API calls made during a task, outputing
        the set to the resource_actions key in the task results. Use the aws_resource_action callback to output to total list made during
        a playbook. The ANSIBLE_DEBUG_BOTOCORE_LOGS environment variable may also be used.
    type: bool
    default: 'no'
  ec2_url:
    description:
      - URL to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints).
        Ignored for modules where region is required. Must be specified for all other modules if region is not used.
        If not set then the value of the EC2_URL environment variable, if any, is used.
    type: str
    aliases: [ aws_endpoint_url, endpoint_url ]
  aws_secret_key:
    description:
      - C(AWS secret key). If not set then the value of the C(AWS_SECRET_ACCESS_KEY), C(AWS_SECRET_KEY), or C(EC2_SECRET_KEY) environment variable is used.
      - If I(profile) is set this parameter is ignored.
      - Passing the I(aws_secret_key) and I(profile) options at the same time has been deprecated
        and the options will be made mutually exclusive after 2022-06-01.
    type: str
    aliases: [ ec2_secret_key, secret_key ]
  aws_access_key:
    description:
      - C(AWS access key). If not set then the value of the C(AWS_ACCESS_KEY_ID), C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY) environment variable is used.
      - If I(profile) is set this parameter is ignored.
      - Passing the I(aws_access_key) and I(profile) options at the same time has been deprecated
        and the options will be made mutually exclusive after 2022-06-01.
    type: str
    aliases: [ ec2_access_key, access_key ]
  security_token:
    description:
      - C(AWS STS security token). If not set then the value of the C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN) environment variable is used.
      - If I(profile) is set this parameter is ignored.
      - Passing the I(security_token) and I(profile) options at the same time has been deprecated
        and the options will be made mutually exclusive after 2022-06-01.
    type: str
    aliases: [ aws_security_token, access_token ]
  aws_ca_bundle:
    description:
      - "The location of a CA Bundle to use when validating SSL certificates."
      - "Not used by boto 2 based modules."
      - "Note: The CA Bundle is read 'module' side and may need to be explicitly copied from the controller if not run locally."
    type: path
  validate_certs:
    description:
      - When set to "no", SSL certificates will not be validated for
        communication with the AWS APIs.
    type: bool
    default: yes
  profile:
    description:
      - Using I(profile) will override I(aws_access_key), I(aws_secret_key) and I(security_token)
        and support for passing them at the same time as I(profile) has been deprecated.
      - I(aws_access_key), I(aws_secret_key) and I(security_token) will be made mutually exclusive with I(profile) after 2022-06-01.
    type: str
    aliases: [ aws_profile ]
  aws_config:
    description:
      - A dictionary to modify the botocore configuration.
      - Parameters can be found at U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
      - Only the 'user_agent' key is used for boto modules. See U(http://boto.cloudhackers.com/en/latest/boto_config_tut.html#boto) for more boto configuration.
    type: dict
requirements:
  - python >= 3.6
  - boto3 >= 1.15.0
  - botocore >= 1.18.0
notes:
  - If parameters are not set within the module, the following
    environment variables can be used in decreasing order of precedence
    C(AWS_URL) or C(EC2_URL),
    C(AWS_PROFILE) or C(AWS_DEFAULT_PROFILE),
    C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
    C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
    C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
    C(AWS_REGION) or C(EC2_REGION),
    C(AWS_CA_BUNDLE)
  - When no credentials are explicitly provided the AWS SDK (boto3) that
    Ansible uses will fall back to its configuration files (typically
    C(~/.aws/credentials)).
    See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html)
    for more information.
  - Modules based on the original AWS SDK (boto) may read their default
    configuration from different files.
    See U(https://boto.readthedocs.io/en/latest/boto_config_tut.html) for more
    information.
  - C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the
    AWS region, when required, but this can also be defined in the
    configuration files.
'''
# ---- file boundary (concatenation artifact) ----
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from . import base_sde
from . import misc
from ..settings import NOISE_TYPES, SDE_TYPES
from ..types import Sequence, TensorOrTensors
class AdjointSDE(base_sde.BaseSDE):
    def __init__(self,
                 forward_sde: base_sde.ForwardSDE,
                 params: TensorOrTensors,
                 shapes: Sequence[torch.Size]):
        """Build the SDE satisfied by the adjoint (backward) process.

        Args:
            forward_sde: the SDE integrated forwards in time.
            params: tensors to compute gradients with respect to.
            shapes: shapes of the tensors making up the flattened augmented
                state (state, adjoint state, then any extra adjoint states).
        """
        # There's a mapping from the noise type of the forward SDE to the noise type of the adjoint.
        # Usually, these two aren't the same, e.g. when the forward SDE has additive noise, the adjoint SDE's diffusion
        # is a linear function of the adjoint variable, so it is not of additive noise.
        sde_type = forward_sde.sde_type
        noise_type = {
            NOISE_TYPES.general: NOISE_TYPES.general,
            NOISE_TYPES.additive: NOISE_TYPES.general,
            NOISE_TYPES.scalar: NOISE_TYPES.scalar,
            NOISE_TYPES.diagonal: NOISE_TYPES.diagonal,
        }.get(forward_sde.noise_type)
        super(AdjointSDE, self).__init__(sde_type=sde_type, noise_type=noise_type)
        self.forward_sde = forward_sde
        self.params = params
        self._shapes = shapes
        # Register the core functions. This avoids polluting the codebase with if-statements and achieves speed-ups
        # by making sure it's a one-time cost. The `sde_type` and `noise_type` of the forward SDE determines the
        # registered functions.
        self.f = {
            SDE_TYPES.ito: {
                NOISE_TYPES.diagonal: self.f_corrected_diagonal,
                NOISE_TYPES.additive: self.f_uncorrected,
                NOISE_TYPES.scalar: self.f_corrected_default,
                NOISE_TYPES.general: self.f_corrected_default
            }.get(forward_sde.noise_type),
            SDE_TYPES.stratonovich: self.f_uncorrected
        }.get(forward_sde.sde_type)
        self.f_and_g_prod = {
            SDE_TYPES.ito: {
                NOISE_TYPES.diagonal: self.f_and_g_prod_corrected_diagonal,
                NOISE_TYPES.additive: self.f_and_g_prod_uncorrected,
                NOISE_TYPES.scalar: self.f_and_g_prod_corrected_default,
                NOISE_TYPES.general: self.f_and_g_prod_corrected_default
            }.get(forward_sde.noise_type),
            SDE_TYPES.stratonovich: self.f_and_g_prod_uncorrected
        }.get(forward_sde.sde_type)
        # Default covers every noise type except diagonal.
        self.g_prod_and_gdg_prod = {
            NOISE_TYPES.diagonal: self.g_prod_and_gdg_prod_diagonal,
        }.get(forward_sde.noise_type, self.g_prod_and_gdg_prod_default)
########################################
# Helper functions #
########################################
    def get_state(self, t, y_aug, v=None, extra_states=False):
        """Unpacks y_aug, whilst enforcing the necessary checks so that we can calculate derivatives wrt state.

        Returns (y, adj_y, extra_states, requires_grad), where y is detached
        from any prior history and marked requires_grad for later vjp/jvp calls.
        """
        # These leaf checks are very important.
        # get_state is used where we want to compute:
        # ```
        # with torch.enable_grad():
        #     s = some_function(y)
        # torch.autograd.grad(s, [y] + params, ...)
        # ```
        # where `some_function` implicitly depends on `params`.
        # However if y has history of its own then in principle it could _also_ depend upon `params`, and this call to
        # `grad` will go all the way back to that. To avoid this, we require that every input tensor be a leaf tensor.
        #
        # This is also the reason for the `y0.detach()` in adjoint.py::_SdeintAdjointMethod.forward. If we don't detach,
        # then y0 may have a history and these checks will fail. This is a spurious failure as
        # `torch.autograd.Function.forward` has an implicit `torch.no_grad()` guard, i.e. we definitely don't want to
        # use its history there.
        assert t.is_leaf, "Internal error: please report a bug to torchsde"
        assert y_aug.is_leaf, "Internal error: please report a bug to torchsde"
        if v is not None:
            assert v.is_leaf, "Internal error: please report a bug to torchsde"
        # Snapshot grad mode now; callers re-enter enable_grad later, so this
        # records whether the *surrounding* context needs a graph.
        requires_grad = torch.is_grad_enabled()
        if extra_states:
            shapes = self._shapes
        else:
            # Only (state, adjoint-state) are needed; ignore the rest.
            shapes = self._shapes[:2]
        numel = sum(shape.numel() for shape in shapes)
        y, adj_y, *extra_states = misc.flat_to_shape(y_aug.squeeze(0)[:numel], shapes)
        # To support the later differentiation wrt y, we set it to require_grad if it doesn't already.
        if not y.requires_grad:
            y = y.detach().requires_grad_()
        return y, adj_y, extra_states, requires_grad
    def _f_uncorrected(self, f, y, adj_y, requires_grad):
        """Assemble the adjoint drift from the forward drift `f`, without any
        Itô/Stratonovich correction terms.

        Returns the flattened augmented drift (-f, vjp wrt y and params) with
        a leading batch dimension of size 1.
        """
        vjp_y_and_params = misc.vjp(
            outputs=f,
            inputs=[y] + self.params,
            grad_outputs=adj_y,
            allow_unused=True,
            retain_graph=True,
            create_graph=requires_grad
        )
        if not requires_grad:
            # We had to build a computational graph to be able to compute the above vjp.
            # However, if we don't require_grad then we don't need to backprop through this function, so we should
            # delete the computational graph to avoid a memory leak. (Which for example would keep the local
            # variable `y` in memory: f->grad_fn->...->AccumulatedGrad->y.)
            # Note: `requires_grad` might not be equal to what `torch.is_grad_enabled` returns here.
            f = f.detach()
        return misc.flatten((-f, *vjp_y_and_params)).unsqueeze(0)
    def _f_corrected_default(self, f, g, y, adj_y, requires_grad):
        """Assemble the adjoint drift for general/scalar noise, applying the
        Stratonovich correction column-by-column of the diffusion `g` and then
        converting the adjoint SDE back to Itô form.
        """
        # Split g into its noise columns; each column is treated like a
        # separate diagonal-noise diffusion below.
        g_columns = [g_column.squeeze(dim=-1) for g_column in g.split(1, dim=-1)]
        dg_g_jvp = sum([
            misc.jvp(
                outputs=g_column,
                inputs=y,
                grad_inputs=g_column,
                allow_unused=True,
                create_graph=True
            )[0] for g_column in g_columns
        ])
        # Double Stratonovich correction.
        f = f - dg_g_jvp
        vjp_y_and_params = misc.vjp(
            outputs=f,
            inputs=[y] + self.params,
            grad_outputs=adj_y,
            allow_unused=True,
            retain_graph=True,
            create_graph=requires_grad
        )
        # Convert the adjoint Stratonovich SDE to Itô form.
        extra_vjp_y_and_params = []
        for g_column in g_columns:
            a_dg_vjp, = misc.vjp(
                outputs=g_column,
                inputs=y,
                grad_outputs=adj_y,
                allow_unused=True,
                retain_graph=True,
                create_graph=requires_grad
            )
            extra_vjp_y_and_params_column = misc.vjp(
                outputs=g_column,
                inputs=[y] + self.params,
                grad_outputs=a_dg_vjp,
                allow_unused=True,
                retain_graph=True,
                create_graph=requires_grad
            )
            extra_vjp_y_and_params.append(extra_vjp_y_and_params_column)
        # Sum the per-column corrections into the main vjp terms.
        vjp_y_and_params = misc.seq_add(vjp_y_and_params, *extra_vjp_y_and_params)
        if not requires_grad:
            # See corresponding note in _f_uncorrected.
            f = f.detach()
        return misc.flatten((-f, *vjp_y_and_params)).unsqueeze(0)
    def _f_corrected_diagonal(self, f, g, y, adj_y, requires_grad):
        """Assemble the adjoint drift for diagonal noise: apply the
        Stratonovich correction using `g` directly (no column splitting
        needed), then convert the adjoint SDE back to Itô form.
        """
        g_dg_vjp, = misc.vjp(
            outputs=g,
            inputs=y,
            grad_outputs=g,
            allow_unused=True,
            create_graph=True
        )
        # Double Stratonovich correction.
        f = f - g_dg_vjp
        vjp_y_and_params = misc.vjp(
            outputs=f,
            inputs=[y] + self.params,
            grad_outputs=adj_y,
            allow_unused=True,
            retain_graph=True,
            create_graph=requires_grad
        )
        # Convert the adjoint Stratonovich SDE to Itô form.
        a_dg_vjp, = misc.vjp(
            outputs=g,
            inputs=y,
            grad_outputs=adj_y,
            allow_unused=True,
            retain_graph=True,
            create_graph=requires_grad
        )
        extra_vjp_y_and_params = misc.vjp(
            outputs=g,
            inputs=[y] + self.params,
            grad_outputs=a_dg_vjp,
            allow_unused=True,
            retain_graph=True,
            create_graph=requires_grad
        )
        vjp_y_and_params = misc.seq_add(vjp_y_and_params, extra_vjp_y_and_params)
        if not requires_grad:
            # See corresponding note in _f_uncorrected.
            f = f.detach()
        return misc.flatten((-f, *vjp_y_and_params)).unsqueeze(0)
    def _g_prod(self, g_prod, y, adj_y, requires_grad):
        """Build the augmented backward diffusion-vector product.

        ``g_prod`` is the forward diffusion already multiplied by the noise
        vector. Returns the flattened ``(-g_prod, vjps w.r.t. y and params)``
        weighted by the adjoint state ``adj_y``.
        """
        vjp_y_and_params = misc.vjp(
            outputs=g_prod,
            inputs=[y] + self.params,
            grad_outputs=adj_y,
            allow_unused=True,
            retain_graph=True,
            create_graph=requires_grad
        )
        if not requires_grad:
            # See corresponding note in _f_uncorrected.
            g_prod = g_prod.detach()
        return misc.flatten((-g_prod, *vjp_y_and_params)).unsqueeze(0)
########################################
# f #
########################################
    def f_uncorrected(self, t, y_aug):  # For Ito additive and Stratonovich.
        """Backward drift without a noise-correction term.

        Note the forward SDE is evaluated at ``-t``: the adjoint integrates
        in reversed time.
        """
        y, adj_y, _, requires_grad = self.get_state(t, y_aug)
        with torch.enable_grad():
            f = self.forward_sde.f(-t, y)
            return self._f_uncorrected(f, y, adj_y, requires_grad)
    def f_corrected_default(self, t, y_aug):  # For Ito general/scalar.
        """Backward drift with the default (general/scalar noise) correction.

        Evaluates the forward drift and diffusion at ``-t`` (reversed time)
        and delegates to `_f_corrected_default`.
        """
        y, adj_y, _, requires_grad = self.get_state(t, y_aug)
        with torch.enable_grad():
            f, g = self.forward_sde.f_and_g(-t, y)
            return self._f_corrected_default(f, g, y, adj_y, requires_grad)
    def f_corrected_diagonal(self, t, y_aug):  # For Ito diagonal.
        """Backward drift with the diagonal-noise correction.

        Evaluates the forward drift and diffusion at ``-t`` (reversed time)
        and delegates to `_f_corrected_diagonal`.
        """
        y, adj_y, _, requires_grad = self.get_state(t, y_aug)
        with torch.enable_grad():
            f, g = self.forward_sde.f_and_g(-t, y)
            return self._f_corrected_diagonal(f, g, y, adj_y, requires_grad)
########################################
# g #
########################################
    def g(self, t, y):
        """Intentionally undefined for the adjoint SDE; always raises."""
        # We don't want to define it, it's super inefficient to compute.
        # In theory every part of the code which _could_ call it either does something else, or has some more
        # informative error message to tell the user what went wrong.
        # This is here as a fallback option.
        raise RuntimeError("Adjoint `g` not defined. Please report a bug to torchsde.")
########################################
# f_and_g #
########################################
    def f_and_g(self, t, y):
        """Intentionally undefined for the adjoint SDE; always raises."""
        # Like g above, this is inefficient to compute.
        raise RuntimeError("Adjoint `f_and_g` not defined. Please report a bug to torchsde.")
########################################
# prod #
########################################
    def prod(self, g, v):
        """Intentionally undefined for the adjoint SDE; always raises."""
        # We could define this just fine, but we don't expect to ever be able to compute the input `g`, so we should
        # never get here.
        raise RuntimeError("Adjoint `prod` not defined. Please report a bug to torchsde.")
########################################
# g_prod #
########################################
    def g_prod(self, t, y_aug, v):
        """Backward diffusion-vector product.

        Evaluates the forward ``g_prod`` at ``-t`` (reversed time) and
        delegates to `_g_prod`.
        """
        y, adj_y, _, requires_grad = self.get_state(t, y_aug, v)
        with torch.enable_grad():
            g_prod = self.forward_sde.g_prod(-t, y, v)
            return self._g_prod(g_prod, y, adj_y, requires_grad)
########################################
# f_and_g_prod #
########################################
    def f_and_g_prod_uncorrected(self, t, y_aug, v):  # For Ito additive and Stratonovich.
        """Uncorrected backward drift and diffusion-vector product in one call."""
        # NOTE(review): unlike g_prod above, get_state is called without v
        # here — presumably v is optional in get_state; confirm.
        y, adj_y, _, requires_grad = self.get_state(t, y_aug)
        with torch.enable_grad():
            f, g_prod = self.forward_sde.f_and_g_prod(-t, y, v)
            f_out = self._f_uncorrected(f, y, adj_y, requires_grad)
            g_prod_out = self._g_prod(g_prod, y, adj_y, requires_grad)
        return f_out, g_prod_out
    def f_and_g_prod_corrected_default(self, t, y_aug, v):  # For Ito general/scalar.
        """Corrected (general/scalar noise) backward drift and diffusion-vector
        product in one call; forward SDE evaluated at ``-t`` (reversed time).
        """
        y, adj_y, _, requires_grad = self.get_state(t, y_aug)
        with torch.enable_grad():
            f, g = self.forward_sde.f_and_g(-t, y)
            g_prod = self.forward_sde.prod(g, v)
            f_out = self._f_corrected_default(f, g, y, adj_y, requires_grad)
            g_prod_out = self._g_prod(g_prod, y, adj_y, requires_grad)
        return f_out, g_prod_out
    def f_and_g_prod_corrected_diagonal(self, t, y_aug, v):  # For Ito diagonal.
        """Corrected (diagonal noise) backward drift and diffusion-vector
        product in one call; forward SDE evaluated at ``-t`` (reversed time).
        """
        y, adj_y, _, requires_grad = self.get_state(t, y_aug)
        with torch.enable_grad():
            f, g = self.forward_sde.f_and_g(-t, y)
            g_prod = self.forward_sde.prod(g, v)
            f_out = self._f_corrected_diagonal(f, g, y, adj_y, requires_grad)
            g_prod_out = self._g_prod(g_prod, y, adj_y, requires_grad)
        return f_out, g_prod_out
########################################
# gdg_prod #
########################################
    def g_prod_and_gdg_prod_default(self, t, y, v1, v2):  # For Ito/Stratonovich general/additive/scalar.
        # Only the diagonal variant below is implemented.
        raise NotImplementedError
    def g_prod_and_gdg_prod_diagonal(self, t, y_aug, v1, v2):  # For Ito/Stratonovich diagonal.
        """Backward diffusion-vector product plus the v2-weighted g*dg/dy term
        for diagonal noise.

        Returns a pair: the augmented ``g_prod`` (via `_g_prod`) and the
        flattened ``(vg_dg_vjp, vjps w.r.t. y and params)``.
        """
        y, adj_y, _, requires_grad = self.get_state(t, y_aug, v2)
        with torch.enable_grad():
            g = self.forward_sde.g(-t, y)
            g_prod = self.forward_sde.prod(g, v1)
            # v2-weighted g * dg/dy; first component of the second return
            # value.
            vg_dg_vjp, = misc.vjp(
                outputs=g,
                inputs=y,
                grad_outputs=v2 * g,
                allow_unused=True,
                retain_graph=True,
                create_graph=requires_grad
            )
            # Elementwise dg/dy via the VJP of sum(g) w.r.t. y.
            dgdy, = misc.vjp(
                outputs=g.sum(),
                inputs=y,
                allow_unused=True,
                retain_graph=True,
                create_graph=requires_grad
            )
            # First-order ("product partial") gradients of g weighted by
            # adj_y * v2 * dg/dy, w.r.t. y and the parameters.
            prod_partials_adj_y_and_params = misc.vjp(
                outputs=g,
                inputs=[y] + self.params,
                grad_outputs=adj_y * v2 * dgdy,
                allow_unused=True,
                retain_graph=True,
                create_graph=requires_grad
            )
            # Mixed second-order partials: differentiate the (detached-weight)
            # adjoint-weighted dg/dy term once more w.r.t. y and params.
            avg_dg_vjp, = misc.vjp(
                outputs=g,
                inputs=y,
                grad_outputs=(adj_y * v2 * g).detach(),
                allow_unused=True,
                create_graph=True
            )
            mixed_partials_adj_y_and_params = misc.vjp(
                outputs=avg_dg_vjp.sum(),
                inputs=[y] + self.params,
                allow_unused=True,
                retain_graph=True,
                create_graph=requires_grad
            )
            # Difference of the two gradient sequences forms the final vjps.
            vjp_y_and_params = misc.seq_sub(prod_partials_adj_y_and_params, mixed_partials_adj_y_and_params)
            return self._g_prod(g_prod, y, adj_y, requires_grad), misc.flatten((vg_dg_vjp,
                                                                                *vjp_y_and_params)).unsqueeze(0)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\ruiri\Desktop\alarm-clock\Tests\listtest.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Generated main window: a list widget driven by Add/Remove/Clear buttons."""

    # Class-level defaults kept for backward compatibility; setupUi()
    # re-initialises them as instance attributes so that separate windows
    # no longer share (and mutate) the same list object.
    inteiro = 0  # running counter used to label newly added items
    array = []   # backing store mirrored into the QListWidget

    def setupUi(self, MainWindow):
        """Build all widgets and wire up the button signals."""
        # Per-instance state (see class-attribute note above).
        self.inteiro = 0
        self.array = []
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(522, 469)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(360, 60, 75, 23))
        self.pushButton.clicked.connect(self.addItemToList)
        self.pushButton.setObjectName("pushButton")
        self.pushButton2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton2.setGeometry(QtCore.QRect(360, 120, 75, 23))
        self.pushButton2.clicked.connect(self.removeItemToList)
        self.pushButton2.setObjectName("pushButton2")
        self.pushButton3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton3.setGeometry(QtCore.QRect(360, 180, 75, 23))
        self.pushButton3.clicked.connect(self.clearItemToList)
        self.pushButton3.setObjectName("pushButton3")
        self.listWidget = QtWidgets.QListWidget(self.centralwidget)
        self.listWidget.setGeometry(QtCore.QRect(10, 10, 256, 192))
        self.listWidget.setObjectName("listWidget")
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(10, 220, 256, 192))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 522, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        self.listaDoCaralho()

    def retranslateUi(self, MainWindow):
        """Set all user-visible (translatable) strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "Add"))
        self.pushButton2.setText(_translate("MainWindow", "Remove"))
        self.pushButton3.setText(_translate("MainWindow", "Clear"))

    def listaDoCaralho(self):
        """Populate the list widget from the backing array."""
        self.listWidget.addItems(self.array)

    def addItemToList(self):
        """Append the next counter value and refresh the list widget."""
        self.inteiro += 1
        self.array.append(str(self.inteiro))
        self.listWidget.clear()
        self.listWidget.addItems(self.array)

    def removeItemToList(self):
        """Remove the most recently added item, if any, and refresh.

        Fixes two defects of the original ``array.remove(array[-1])``:
        it raised IndexError on an empty list, and ``remove`` deletes the
        *first* matching element rather than the last one.
        """
        if self.array:
            self.array.pop()
        self.listWidget.clear()
        self.listWidget.addItems(self.array)

    def clearItemToList(self):
        """Empty the backing array and the list widget."""
        self.array.clear()
        self.listWidget.clear()
        self.listWidget.addItems(self.array)
if __name__ == "__main__":
    # Manual test entry point: build the window and run the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
<filename>src/layer_activation_with_guided_backprop.py
"""
Created on Thu Oct 26 11:23:47 2017
@author: <NAME> - github.com/utkuozbulak
"""
import torch
from torch.nn import ReLU
from src.misc_functions import (get_example_params,
convert_to_grayscale,
save_gradient_images,
get_positive_negative_saliency)
class GuidedBackprop():
    """
    Produces gradients generated with guided back propagation from the given image
    """
    def __init__(self, model):
        self.model = model  # register the model
        self.gradients = None  # filled in by the first-layer backward hook
        self.forward_relu_outputs = []  # stack of ReLU forward outputs
        # Put model in evaluation mode
        self.model.eval()  # switch to evaluation mode
        self.update_relus()  # register the ReLU forward/backward hooks
        self.hook_layers()
    def hook_layers(self):
        """Register a backward hook on the first feature layer to capture
        the gradient w.r.t. the input image."""
        def hook_function(module, grad_in, grad_out):
            # grad_in[0] is the gradient flowing into the first layer,
            # i.e. the gradient w.r.t. the input image.
            self.gradients = grad_in[0]
        # Register hook to the first layer
        first_layer = list(self.model.features._modules.items())[0][1]
        first_layer.register_backward_hook(hook_function)
    def update_relus(self):
        """
        Updates relu activation functions so that
            1- stores output in forward pass
            2- imputes zero for gradient values that are less than zero
        """
        def relu_backward_hook_function(module, grad_in, grad_out):
            """
            If there is a negative gradient, change it to zero
            """
            # Get last forward output
            corresponding_forward_output = self.forward_relu_outputs[-1]
            # NOTE: this mutates the stored tensor in place, turning it into
            # a 0/1 mask of positive activations; it is discarded right after.
            corresponding_forward_output[corresponding_forward_output > 0] = 1
            # Guided backprop: zero gradients where either the forward
            # activation or the incoming gradient was non-positive.
            modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
            del self.forward_relu_outputs[-1]  # Remove last forward output
            return (modified_grad_out,)
        def relu_forward_hook_function(module, ten_in, ten_out):
            """
            Store results of forward pass
            """
            self.forward_relu_outputs.append(ten_out)
        # Loop through layers, hook up ReLUs
        for pos, module in self.model.features._modules.items():
            if isinstance(module, ReLU):
                module.register_backward_hook(relu_backward_hook_function)
                module.register_forward_hook(relu_forward_hook_function)
    def generate_gradients(self, input_image, target_class, cnn_layer, filter_pos):
        """Backprop from one filter of the selected conv layer and return the
        input-space gradients as a numpy array."""
        self.model.zero_grad()
        # Forward pass
        x = input_image  # already preprocessed
        for index, layer in enumerate(self.model.features):  # iterate layers one at a time
            # Forward pass layer by layer
            # x is not used after this point because it is only needed to trigger
            # the forward hook function
            x = layer(x)
            # Only need to forward until the selected layer is reached
            if index == cnn_layer:
                # (forward hook function triggered)
                break  # stop once the target cnn_layer is reached
        # Scalar objective: summed absolute activation of the chosen filter.
        conv_output = torch.sum(torch.abs( x[0, filter_pos] ))
        # Backward pass
        conv_output.backward()
        # Convert Pytorch variable to numpy array
        # [0] to get rid of the first channel (1,3,224,224)
        gradients_as_arr = self.gradients.data.numpy()[0]
        return gradients_as_arr
if __name__ == '__main__':
    cnn_layer = 10
    filter_pos = 5
    target_example = 2  # Spider
    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
        get_example_params(target_example)
    # File export name
    file_name_to_export = file_name_to_export + '_layer' + str(cnn_layer) + '_filter' + str(filter_pos)
    # Guided backprop
    GBP = GuidedBackprop(pretrained_model)
    # Get gradients (prep_img is the preprocessed image).
    guided_grads = GBP.generate_gradients(prep_img,  # preprocessed image
                                          target_class,  # target class index
                                          cnn_layer,  # index of the conv layer to stop at
                                          filter_pos)  # filter position within that layer
    print(guided_grads.shape)  # (3, 224, 224)
    # Save colored gradients
    save_gradient_images(guided_grads, file_name_to_export + '_Guided_BP_color')
    # Convert to grayscale
    grayscale_guided_grads = convert_to_grayscale(guided_grads)
    # Save grayscale gradients
    save_gradient_images(grayscale_guided_grads, file_name_to_export + '_Guided_BP_gray')
    # Positive and negative saliency maps.
    # Visual saliency models predict which parts of an image are visually
    # important and how important they are: either where human gaze is most
    # likely to rest, or which objects/regions a person would deem important.
    pos_sal, neg_sal = get_positive_negative_saliency(guided_grads)
    save_gradient_images(pos_sal, file_name_to_export + '_pos_sal')
    save_gradient_images(neg_sal, file_name_to_export + '_neg_sal')
    print('Layer Guided backprop completed')
|
<reponame>danwent/python-quantumclient
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import sys
from quantumclient.common import exceptions
from quantumclient.tests.unit.test_cli20 import CLITestV20Base
from quantumclient.tests.unit.test_cli20 import MyApp
from quantumclient.quantum.v2_0.network import CreateNetwork
from quantumclient.quantum.v2_0.network import ListNetwork
from quantumclient.quantum.v2_0.network import UpdateNetwork
from quantumclient.quantum.v2_0.network import ShowNetwork
from quantumclient.quantum.v2_0.network import DeleteNetwork
class CLITestV20Network(CLITestV20Base):
    """CLI tests for the v2.0 network commands (create/list/update/show/delete).

    Each test delegates to a CLITestV20Base helper which performs the actual
    request/response assertions; the helpers' return values were previously
    bound to unused ``_str`` locals, which have been removed.
    """

    def test_create_network(self):
        """Create net: myname."""
        resource = 'network'
        cmd = CreateNetwork(MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_network_tenant(self):
        """Create net: --tenant_id tenantid myname."""
        resource = 'network'
        cmd = CreateNetwork(MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--tenant_id', 'tenantid', name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tenant_id='tenantid')

    def test_create_network_tags(self):
        """Create net: myname --tags a b."""
        resource = 'network'
        cmd = CreateNetwork(MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--tags', 'a', 'b']
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tags=['a', 'b'])

    def test_create_network_state(self):
        """Create net: --admin_state_down myname."""
        resource = 'network'
        cmd = CreateNetwork(MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--admin_state_down', name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   admin_state_up=False)

    def test_list_nets_detail(self):
        """list nets: -D."""
        resources = "networks"
        cmd = ListNetwork(MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, True)

    def test_list_nets_tags(self):
        """List nets: -- --tags a b."""
        resources = "networks"
        cmd = ListNetwork(MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, tags=['a', 'b'])

    def test_list_nets_detail_tags(self):
        """List nets: -D -- --tags a b."""
        resources = "networks"
        cmd = ListNetwork(MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])

    def test_list_nets_fields(self):
        """List nets: --fields a --fields b -- --fields c d."""
        resources = "networks"
        cmd = ListNetwork(MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd,
                                  fields_1=['a', 'b'], fields_2=['c', 'd'])

    def test_update_network_exception(self):
        """Update net: myid (no fields given must raise CommandError)."""
        resource = 'network'
        cmd = UpdateNetwork(MyApp(sys.stdout), None)
        self.assertRaises(exceptions.CommandError, self._test_update_resource,
                          resource, cmd, 'myid', ['myid'], {})

    def test_update_network(self):
        """Update net: myid --name myname --tags a b."""
        resource = 'network'
        cmd = UpdateNetwork(MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--tags', 'a', 'b'],
                                   {'name': 'myname', 'tags': ['a', 'b'], }
                                   )

    def test_show_network(self):
        """Show net: --fields id --fields name myid."""
        resource = 'network'
        cmd = ShowNetwork(MyApp(sys.stdout), None)
        args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id, args,
                                 ['id', 'name'])

    def test_show_network_by_name(self):
        """Show net: --fields id --fields name myname."""
        resource = 'network'
        cmd = ShowNetwork(MyApp(sys.stdout), None)
        myname = 'myname'
        args = ['--fields', 'id', '--fields', 'name', myname]
        self._test_show_resource_by_name(resource, cmd, myname,
                                         args, ['id', 'name'])

    def test_delete_network(self):
        """Delete net: myid."""
        resource = 'network'
        cmd = DeleteNetwork(MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid]
        self._test_delete_resource(resource, cmd, myid, args)
|
<filename>sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/dsc_configuration.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
class DscConfiguration(TrackedResource):
    """Definition of the configuration type.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource Id for the resource
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param location: The Azure Region where the resource lives
    :type location: str
    :param provisioning_state: Gets or sets the provisioning state of the
     configuration. Possible values include: 'Succeeded'
    :type provisioning_state: str or
     ~azure.mgmt.automation.models.DscConfigurationProvisioningState
    :param job_count: Gets or sets the job count of the configuration.
    :type job_count: int
    :param parameters: Gets or sets the configuration parameters.
    :type parameters: dict[str,
     ~azure.mgmt.automation.models.DscConfigurationParameter]
    :param source: Gets or sets the source.
    :type source: ~azure.mgmt.automation.models.ContentSource
    :param state: Gets or sets the state of the configuration. Possible values
     include: 'New', 'Edit', 'Published'
    :type state: str or ~azure.mgmt.automation.models.DscConfigurationState
    :param log_verbose: Gets or sets verbose log option.
    :type log_verbose: bool
    :param creation_time: Gets or sets the creation time.
    :type creation_time: datetime
    :param last_modified_time: Gets or sets the last modified time.
    :type last_modified_time: datetime
    :param node_configuration_count: Gets the number of compiled node
     configurations.
    :type node_configuration_count: int
    :param description: Gets or sets the description.
    :type description: str
    :param etag: Gets or sets the etag of the resource.
    :type etag: str
    """

    # Server-populated fields that must not be sent in requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Wire-format mapping used by the msrest serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'DscConfigurationProvisioningState'},
        'job_count': {'key': 'properties.jobCount', 'type': 'int'},
        'parameters': {'key': 'properties.parameters', 'type': '{DscConfigurationParameter}'},
        'source': {'key': 'properties.source', 'type': 'ContentSource'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'log_verbose': {'key': 'properties.logVerbose', 'type': 'bool'},
        'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
        'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'iso-8601'},
        'node_configuration_count': {'key': 'properties.nodeConfigurationCount', 'type': 'int'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    # Optional properties copied verbatim from **kwargs (default None).
    _OPTIONAL_PROPS = (
        'provisioning_state', 'job_count', 'parameters', 'source', 'state',
        'log_verbose', 'creation_time', 'last_modified_time',
        'node_configuration_count', 'description', 'etag',
    )

    def __init__(self, **kwargs):
        super(DscConfiguration, self).__init__(**kwargs)
        # Equivalent to the generated per-attribute
        # `self.x = kwargs.get('x', None)` assignments, in the same order.
        for prop in self._OPTIONAL_PROPS:
            setattr(self, prop, kwargs.get(prop, None))
|
"""The functions for converting to and from the Hyperbolic simplex basis"""
from copy import deepcopy
import logging
import numpy as np
#from psych_metric.distrib.simplex.euclidean import EuclideanSimplexTransform
def cart2polar(vectors):
"""Convert from 2d Cartesian coordinates to polar coordinates.
Parameters
----------
vectors : np.ndarray
2-dimensional array where the first dimension is the samples and the
second is the 2-dimensional Cartesian coordinates.
Returns
-------
2-dimensional array where the first dimension is the samples and the
second dimension contains 2 elements consisting of the polar
coordinates where the first element is the radius, and then followed by
the angle.
"""
return np.concatenate(
(
np.linalg.norm(vectors, axis=1, keepdims=True),
np.arctan2(vectors[:, 1], vectors[:, 0]).reshape([-1, 1]),
),
axis=1,
)
def polar2cart(vectors):
    """Convert polar coordinates to 2d Cartesian coordinates.

    Parameters
    ----------
    vectors : np.ndarray
        Array of shape (n_samples, 2) whose rows are (radius, angle).

    Returns
    -------
    np.ndarray
        Array of shape (n_samples, 2) holding (x, y) Cartesian pairs.
    """
    radii = vectors[:, [0]]
    angles = vectors[:, [1]]
    unit_xy = np.concatenate((np.cos(angles), np.sin(angles)), axis=1)
    return radii * unit_xy
def cartesian_to_hypersphere(vectors):
    """Convert n-dimensional Cartesian coordinates to hyperspherical ones.

    Parameters
    ----------
    vectors : np.ndarray
        2-dimensional array of samples in the rows and n-dimensional
        Cartesian coordinates in the columns (a single 1-d sample is also
        accepted and promoted to one row).

    Returns
    -------
    np.ndarray
        Per row: the radius followed by the n-1 hyperspherical angles.
    """
    if vectors.ndim == 1:
        # Promote a single sample to a one-row batch.
        vectors = vectors.reshape([1, -1])
    n_dims = vectors.shape[1]
    if n_dims == 2:
        # The 2-d case is plain polar coordinates.
        return cart2polar(vectors)
    if n_dims < 2:
        raise ValueError(' '.join([
            'Expected the number of coordinate dimensions to be >= 2, but',
            f'recieved vectors with shape {vectors.shape}. and axis being',
            f'1.',
        ]))
    # Work on the reversed coordinates; partial_norms[:, j] is the norm of
    # the last j+1 original coordinates.
    reversed_coords = np.fliplr(vectors)
    partial_norms = np.sqrt(np.cumsum(reversed_coords ** 2, axis=1))
    angles = np.arccos(reversed_coords / partial_norms)
    # The final angle uses the half-angle arctan form instead of arccos.
    final_angle = np.pi - 2 * np.arctan(
        (reversed_coords[:, 1] + partial_norms[:, 1]) / reversed_coords[:, 0]
    )
    # Radius followed by the n-1 angles, in ascending order per row.
    return np.concatenate(
        (
            partial_norms[:, [-1]],
            np.fliplr(angles[:, 2:]),
            final_angle.reshape([-1, 1]),
        ),
        axis=1,
    )
def hypersphere_to_cartesian(vectors):
    """Convert hyperspherical coordinates to n-dimensional Cartesian ones.

    Parameters
    ----------
    vectors : np.ndarray
        2-dimensional array where each row is the radius followed by the
        n-1 hyperspherical angles (a single 1-d sample is also accepted and
        promoted to one row).

    Returns
    -------
    np.ndarray
        Rows of n-dimensional Cartesian coordinates.
    """
    if vectors.ndim == 1:
        # Promote a single sample to a one-row batch.
        vectors = vectors.reshape([1, -1])
    n_coords = vectors.shape[1]
    if n_coords == 2:
        # The 2-d case is plain polar coordinates.
        return polar2cart(vectors)
    if n_coords < 2:
        raise ValueError(' '.join([
            'Expected the number of coordinate dimensions to be >= 2, but',
            f'recieved vectors with shape {vectors.shape}. and axis being',
            f'1.',
        ]))
    # x1   = r * cos(rho_1)
    # xn-1 = r * sin(rho_1) * ... * sin(rho_n-2) * cos(rho_n-1)
    # xn   = r * sin(rho_1) * ... * sin(rho_n-1)
    ones_col = np.ones([vectors.shape[0], 1])
    angles = vectors[:, 1:]
    sin_prods = np.concatenate(
        (ones_col, np.cumprod(np.sin(angles), axis=1)),
        axis=1,
    )
    cos_terms = np.concatenate((np.cos(angles), ones_col), axis=1)
    radii = vectors[:, 0].reshape(-1, 1)
    return radii * sin_prods * cos_terms
def givens_rotation(dim, x, y, angle):
    """Create a transposed Givens rotation matrix.

    Returns a ``dim`` x ``dim`` identity matrix with the (x, y) plane
    replaced by a rotation through ``angle``.
    """
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    rotate = np.eye(dim)
    rotate[x, x] = cos_a
    rotate[x, y] = sin_a
    rotate[y, x] = -sin_a
    rotate[y, y] = cos_a
    return rotate
def rotate_around(rotation_simplex, angle):
    """General n-dimensional rotation about the space spanned by a simplex.

    Parameters
    ----------
    rotation_simplex : np.ndarray
        Points defining the rotation space, one point per row and one
        coordinate per column (transposed automatically if given the other
        way around).
    angle : float
        Rotation angle applied in the (n-2, n-1) plane of the aligned frame.

    Returns
    -------
    tuple
        The translation vector (first simplex point) and the combined
        n x n rotation matrix.
    """
    # Expect points in rows. A matrix with more rows than columns has the
    # points in columns, so transpose it.
    # BUGFIX: this condition used to compare shape[0] with itself
    # (`shape[0] > shape[0]`), which is always False and made the transpose
    # branch unreachable.
    if rotation_simplex.shape[0] > rotation_simplex.shape[1]:
        rotation_simplex = rotation_simplex.T
    # Translate the simplex so that its first point sits at the origin.
    translation_vector = rotation_simplex[0].copy()
    v = rotation_simplex - translation_vector
    n = rotation_simplex.shape[1]
    mat = np.eye(n)
    # Accumulate Givens rotations that align the simplex with the coordinate
    # axes, zeroing one coordinate of one point at a time.
    for r in range(1, n - 1):
        for c in list(range(r, n))[::-1]:
            rot = givens_rotation(
                n,
                c,
                c - 1,
                np.arctan2(v[r, c], v[r, c - 1]),
            )
            v = v @ rot
            mat = mat @ rot
    # Rotate by `angle` in the aligned frame, then map back.
    return (
        translation_vector,
        mat @ givens_rotation(n, n - 2, n - 1, angle) @ np.linalg.inv(mat),
    )
def get_simplex_boundary_pts(prob_vectors, copy=True):
    """Project Barycentric points onto the boundary of the regular simplex.

    Each given point defines the direction of a ray from the simplex center;
    the returned row is where that ray meets the simplex boundary, obtained
    by zeroing the minimal coordinate(s) and redistributing their mass over
    the remaining coordinates.

    Parameters
    ----------
    prob_vectors : np.ndarray
        Array of probability vectors (Barycentric coordinates of a regular
        simplex), one per row.
    copy : bool
        If True, operate on a copy of ``prob_vectors``; otherwise the given
        array is modified in place.
    """
    if copy:
        prob_vectors = prob_vectors.copy()
    # A point is on the boundary exactly when its minimal coordinate is 0.
    dim = prob_vectors.shape[1] - 1
    for i, minimum in enumerate(prob_vectors.min(axis=1)):
        is_min = prob_vectors[i] == minimum
        # Spread the removed mass evenly over the non-minimal coordinates.
        prob_vectors[i, ~is_min] += minimum / dim * is_min.sum()
        prob_vectors[i, is_min] = 0
    return prob_vectors
class Rotator(object):
    """Rotates points about the (n-2)-dimensional space spanned by a simplex.

    Wraps `rotate_around`: stores the translation vector and the combined
    rotation matrix so the same transform (and its inverse) can be reused.
    """
    def __init__(self, rotation_simplex, angle):
        # translate: first simplex point (origin of the rotation frame);
        # rotate_drop_dim: the full n x n rotation matrix.
        self.translate, self.rotate_drop_dim = rotate_around(
            rotation_simplex,
            angle,
        )
    def rotate(self, vectors, drop_dim=False):
        """Rotates the vectors of the n-1 simplex from n dimensions to n-1
        dimensions.
        """
        # TODO expects shape of 2, add check on vectors
        result = (vectors - self.translate) @ self.rotate_drop_dim \
            + self.translate
        if drop_dim:
            # Drop the first coordinate (the dimension zeroed by rotation).
            return result[:, 1:]
        return result
    def inverse(self, vectors):
        """Rotates the vectors of the n-1 simplex from n-1 dimensions to n
        dimensions.
        """
        # TODO expects shape of 2, add check on vectors
        if vectors.shape[1] == len(self.translate):
            # Full-dimensional input: invert the rotation directly.
            return (vectors - self.translate) \
                @ np.linalg.inv(self.rotate_drop_dim) + self.translate
        # Reduced input: restore the dropped first coordinate with zeros
        # before inverting.
        return (
            (
                np.hstack((np.zeros([len(vectors), 1]), vectors))
                - self.translate
            )
            @ np.linalg.inv(self.rotate_drop_dim)
            + self.translate
        )
class ProbabilitySimplexTransform(object):
    """Creates and contains the objects needed to convert to and from the
    Probability Simplex basis.
    Attributes
    ----------
    cart_simplex : np.ndarray
        Transposed vertices of the rotated, centered regular simplex.
    centroid : np.ndarray
        Centroid subtracted to center the rotated simplex at the origin.
    rotator : Rotator
        Used to find cart_simplex and for reverse transforming from the
        cartesian simplex to the original probability simplex.
    Properties
    ----------
    input_dim : int
        The number of dimensions of the input samples before being transformed.
    output_dim : int
    Note
    ----
    This ProbabilitySimplexTransform takes more time and more memory than the
    original that used QR or SVD to find the rotation matrix. However, this
    version preserves the simplex dimensions, keeping the simplex regular,
    while the QR and SVD found rotation matrices do not.
    """
    def __init__(self, dim):
        prob_simplex_verts = np.eye(dim)
        # Get the angle to rotate about the n-2 space to zero out first dim
        angle_to_rotate = -np.arctan2(
            1.0,
            np.linalg.norm([1 / (dim - 1)] * (dim - 1)),
        )
        # Rotate to zero out one arbitrary dimension, drop that zeroed dim.
        self.rotator = Rotator(prob_simplex_verts[1:], angle_to_rotate)
        self.cart_simplex = self.rotator.rotate(prob_simplex_verts)
        # Center Simplex in (N-1)-dim (find centroid and adjust via that)
        self.centroid = np.mean(self.cart_simplex, axis=0)
        self.cart_simplex -= self.centroid
        # Save the vertices of the rotated simplex, transposed for ease of comp
        self.cart_simplex = self.cart_simplex.T
        # TODO Decide if keeping the cart_simplex for going from prob simplex
        # to cart simplex with only one matrix multiplication is worth keeping
        # the (n,n) matrix.
    def __copy__(self):
        cls = self.__class__
        new = cls.__new__(cls)
        new.__dict__.update(self.__dict__)
        return new
    def __deepcopy__(self, memo):
        cls = self.__class__
        new = cls.__new__(cls)
        memo[id(self)] = new
        for k, v in self.__dict__.items():
            setattr(new, k, deepcopy(v, memo))
        return new
    @property
    def input_dim(self):
        """Dimensionality of the input probability vectors."""
        return self.cart_simplex.shape[0]
    @property
    def output_dim(self):
        """Dimensionality of the Cartesian simplex coordinates."""
        return self.cart_simplex.shape[1]
    def to(self, vectors, drop_dim=True):
        """Transform probability vectors into Cartesian simplex coordinates.

        The rotation zeroes the first Cartesian coordinate for points on the
        simplex, so with ``drop_dim=True`` that coordinate is omitted.
        """
        if drop_dim:
            return (vectors @ self.cart_simplex)[:, 1:]
        return vectors @ self.cart_simplex
    def back(self, vectors):
        """Transform Cartesian simplex coordinates back into probability
        vectors (Barycentric coordinates).

        Accepts either full-dimensional coordinates or the reduced form
        produced by ``to(..., drop_dim=True)``.
        """
        # TODO expects shape of 2, add check on vectors
        if vectors.shape[1] == self.input_dim:
            return self.rotator.inverse(vectors + self.centroid)
        # BUGFIX: the reduced-dimension branch previously repeated the
        # full-dimension expression, which cannot broadcast the short
        # vectors against the full-length centroid. Restore the dropped
        # (zero) first coordinate first, mirroring Rotator.inverse's own
        # zero-padding.
        padded = np.hstack((np.zeros([len(vectors), 1]), vectors))
        return self.rotator.inverse(padded + self.centroid)
class HyperbolicSimplexTransform(object):
    """Transform between the probability simplex and a (hyper)spherical
    representation of it.

    Wraps a ``ProbabilitySimplexTransform`` and radially stretches the
    centered Cartesian simplex into its circumscribed hypersphere.

    Attributes
    ----------
    pst : ProbabilitySimplexTransform
        Underlying simplex <-> Cartesian coordinate transform.
    circumscribed_radius : float
        Radius of the simplex's circumscribed hypersphere, taken as the
        norm of the first vertex column of ``pst.cart_simplex``.
    """
    def __init__(self, dim):
        self.pst = ProbabilitySimplexTransform(dim)
        # Save the radius of the simplex's circumscribed hypersphere
        self.circumscribed_radius = np.linalg.norm(self.pst.cart_simplex[:, 0])

    @property
    def input_dim(self):
        """Dimensionality of accepted probability vectors."""
        # TODO wrap all code for obtaining cart_simplex in ProbabilityTransform
        return self.pst.input_dim

    @property
    def output_dim(self):
        """Dimensionality of the transformed output."""
        return self.pst.output_dim

    def to(self, vectors):
        """Transform given vectors into hyperbolic probability simplex space."""
        # Convert from probability simplex to the Cartesian coordinates of the
        # centered, regular simplex
        #aligned = vectors @ self.aligned_simplex.T
        aligned = self.pst.to(vectors, drop_dim=True)
        # Stretch simplex into hypersphere, no longer conserving the angles
        hyperspherical = cartesian_to_hypersphere(aligned)
        # Edge cases:
        # Do not stretch points along line of vertex (all zeros but 1 element)
        # Do not stretch centered points (all zeros).
        non_edge_case = (aligned == 0).sum(axis=1) < aligned.shape[1] - 1
        boundaries = get_simplex_boundary_pts(
            vectors[non_edge_case],
            copy=True,
        )
        # get boundary points radius
        boundary_radii = np.linalg.norm(
            self.pst.to(boundaries, drop_dim=True),
            axis=1,
        )
        # scale each point radii by * circum_radius / simplex_boundary_radius
        hyperspherical[non_edge_case, 0] = (
            hyperspherical[non_edge_case, 0]
            * self.circumscribed_radius / boundary_radii
        )
        # TODO Inverse Poincare' Ball method to go into hyperbolic space
        return hyperspherical

    def tf_to(self, vectors):
        """Transform given vectors into n-1 probability simplex space done in
        tensorflow code.

        NOTE(review): unimplemented stub — returns None.
        """
        return

    def back(self, vectors):
        """Transform given vectors out of n-1 probability simplex space.

        NOTE(review): ``hyperspherical = vectors`` below aliases the input,
        so the in-place scaling at the end mutates the caller's array —
        confirm whether a copy was intended.
        """
        # Poinecare's Ball to get hypersphere
        #hyperspherical = poincare_ball(vectors)
        hyperspherical = vectors
        # Circumscribed hypersphere to Cartesian simplex:
        # vectors is the boundaries of the simplex, but in cart simplex.
        simplex = self.pst.back(hypersphere_to_cartesian(hyperspherical))
        non_edge_case = (simplex == 0).sum(axis=1) < simplex.shape[1] - 1
        # Get the boundaries in Barycentric coordinates (probability vectors)
        boundaries = get_simplex_boundary_pts(
            simplex[non_edge_case],
            copy=True,
        )
        # Get boundary points radius
        boundary_radii = np.linalg.norm(
            self.pst.to(boundaries, drop_dim=True),
            axis=1,
        )
        # Scale each point radii by * simplex_boundary_radius / circum_radius
        hyperspherical[non_edge_case, 0] = (
            hyperspherical[non_edge_case, 0]
            * boundary_radii / self.circumscribed_radius
        )
        # Cartesian simplex to probability distribution (Barycentric coord)
        return self.pst.back(hypersphere_to_cartesian(hyperspherical))

    def tf_from(self, vectors):
        """Transform given vectors out of n-1 probability simplex space done in
        tensorflow code.

        NOTE(review): unimplemented stub — returns None.
        """
        return
|
# A simple graph representing a series of cities and the connections between
# them.  Undirected adjacency: each city maps to the set of cities directly
# reachable from it.  (The name ``map`` shadows the builtin — kept as-is for
# compatibility with the functions below that reference it.)
map = {
    "Seattle": {"San Francisco", "Washington D.C."},
    "San Francisco": {"Seattle", "Los Angeles", "Denver"},
    "Los Angeles": {"San Francisco", "Phoenix"},
    "Phoenix": {"Los Angeles", "Denver"},
    "Denver": {"Phoenix", "San Francisco", "Houston", "Kansas City"},
    "Kansas City": {"Denver", "Houston", "Chicago", "Nashville"},
    "Houston": {"Kansas City", "Denver"},
    "Chicago": {"Kansas City", "New York"},
    "Nashville": {"Kansas City", "Houston", "Miami"},
    "New York": {"Chicago", "Washington D.C."},
    "Washington D.C.": {"Chicago", "Nashville", "Miami"},
    "Miami": {"Washington D.C.", "Houston", "Nashville"},
}

# Sentinel status returned once a delivery has reached its destination.
DELIVERED = "Delivered"
# Use BFS to find the shortest path
def find_shortest_path_bfs(start, end):
# Question: Why is a Python list acceptable to use for this queue?
qq = []
qq.append([start])
visited = set()
while len(qq) > 0:
path = qq.pop()
city = path[-1]
if city == end:
return path
else:
if city not in visited:
visited.add(city)
for connection in map[city]:
new_path = list(path)
new_path.append(connection)
qq.insert(0, new_path)
return "Error: Path not found"
# Weighted version of the city graph: each city maps to a set of
# (neighbor, distance) tuples for Dijkstra's algorithm.
# NOTE(review): some edges look asymmetric — e.g. "Washington D.C." lists
# ("Chicago", 701) but "Chicago" has no Washington D.C. entry, and Seattle's
# ("Washington D.C.", 1000000) edge has no reverse — confirm intentional.
map_dij = {
    "Seattle": {("San Francisco", 679), ("Washington D.C.", 1000000)},
    "San Francisco": {("Seattle", 679), ("Los Angeles", 381), ("Denver", 474)},
    "Los Angeles": {("San Francisco", 381), ("Phoenix", 357)},
    "Phoenix": {("Los Angeles", 357), ("Denver", 586)},
    "Denver": {
        ("Phoenix", 586),
        ("San Francisco", 474),
        ("Houston", 878),
        ("Kansas City", 557),
    },
    "Kansas City": {
        ("Denver", 557),
        ("Houston", 815),
        ("Chicago", 412),
        ("Nashville", 554),
    },
    "Houston": {("Kansas City", 815), ("Denver", 878)},
    "Chicago": {("Kansas City", 412), ("New York", 712)},
    "Nashville": {("Kansas City", 554), ("Houston", 665), ("Miami", 817)},
    "New York": {("Chicago", 712), ("Washington D.C.", 203)},
    "Washington D.C.": {("Chicago", 701), ("Nashville", 566), ("Miami", 926)},
    "Miami": {("Washington D.C.", 926), ("Houston", 483), ("Nashville", 817)},
}
def find_shortest_path_dij(map, start, end):
    """Find the minimum-distance path from *start* to *end* with Dijkstra.

    :param map: weighted adjacency mapping ``{city: {(neighbor, dist), ...}}``
        (parameter name kept for caller compatibility, though it shadows
        the builtin)
    :param start: name of the starting city
    :param end: name of the destination city
    :return: list of city names along the cheapest route, or
        ``"Path not viable"`` when *end* is unreachable
    """
    # Cities still to be explored; kept roughly sorted so the cheapest
    # known city is always at the front.
    frontier = [start]
    # Cities whose cheapest distance is final.
    visited = set()
    # Best-known distance and predecessor for every city encountered so far.
    distances = {start: {"distance_from_start": 0, "previous": None}}
    # Keep going while there is work left and the goal is not yet finalized.
    while frontier and end not in visited:
        current = frontier.pop(0)
        print("CURRENTLY EXPLORING:", current)
        current_distance = distances.get(current).get("distance_from_start")
        for city, leg in map[current]:
            if city in visited:
                continue
            candidate = current_distance + leg
            known = distances.get(city)
            # Record the route if it is the first one found, or an improvement.
            if known is None or known.get("distance_from_start") > candidate:
                distances[city] = {
                    "distance_from_start": candidate,
                    "previous": current,
                }
                if city not in frontier:
                    frontier.append(city)
        # All of current's neighbors relaxed: its distance is now final.
        visited.add(current)
        # Move the cheapest known city to the front for the next iteration.
        if frontier:
            closest = min(
                frontier,
                key=lambda c: distances.get(c).get("distance_from_start"),
            )
            frontier.remove(closest)
            frontier.insert(0, closest)
    # Logging our final calculated distances
    print(f"\nFinal distances:\n{distances}\n")
    return trace_back(distances, start, end)
def trace_back(distances, start, end):
    """Reconstruct the route from *start* to *end* out of Dijkstra's table.

    :param distances: mapping ``{city: {"distance_from_start": int,
        "previous": city_or_None}}`` produced by the search
    :param start: first city of the route
    :param end: last city of the route
    :return: list of city names from *start* to *end*, or the string
        ``"Path not viable"`` when *end* was never reached
    """
    if end not in distances:
        return "Path not viable"
    # Walk predecessor links backwards from the goal, then flip the result.
    route = [end]
    node = end
    while node != start:
        node = distances.get(node).get("previous")
        route.append(node)
    route.reverse()
    return route
# Determine the next step via shortest path. Set location to delivered at end.
def advance_delivery(location, destination):
    """Return the next city on the cheapest route toward *destination*,
    or ``DELIVERED`` once the package has arrived.

    :param location: current city name, or ``DELIVERED``
    :param destination: target city name
    """
    print("advancing", location, destination)
    # Already delivered or already at the destination: nothing left to do.
    if location in [DELIVERED, destination]:
        return DELIVERED
    # Fix: the previous version first ran the BFS search and then threw the
    # result away by immediately overwriting it with the Dijkstra path.
    path = find_shortest_path_dij(map_dij, location, destination)
    # NOTE(review): assumes a route exists; if the search returned the
    # "Path not viable" string, path[1] would be a single character —
    # confirm callers only pass connected cities.
    return path[1]
# Testing
# Smoke checks on the full city graph: BFS minimizes hop count, Dijkstra
# minimizes total distance, so their routes can legitimately differ.
print(
    "BFS:", find_shortest_path_bfs("Seattle", "Washington D.C.")
)  # BFS: ['Seattle', 'Washington D.C.']
print(
    "Dij:", find_shortest_path_dij(map_dij, "Seattle", "Washington D.C.")
)  # Dij: ['Seattle', 'San Francisco', 'Denver', 'Kansas City', 'Chicago', 'New York', 'Washington D.C.']

# Simpler example
# Small weighted graph matching the diagram below; used for quick checks.
simple_map_dij = {
    "A": {("B", 30), ("F", 75), ("E", 200)},
    "B": {("A", 30), ("C", 20)},
    "C": {("B", 20), ("D", 30)},
    "D": {("C", 30), ("E", 25)},
    "E": {("A", 200), ("D", 25), ("F", 26)},
    "F": {("E", 26), ("A", 75)},
}
"""
A----(75)---F
| \ |
| \ |
(30) \ |
| \ |
| (200) (26)
B \ |
| \ |
| \ |
(20) \ |
| \|
C E
\ /
(30) (25)
\ /
D
"""
print("Dij:", find_shortest_path_dij(simple_map_dij, "A", "B"))  # Dij: ['A', 'B']
print("Dij:", find_shortest_path_dij(simple_map_dij, "A", "E"))  # Dij: ['A', 'F', 'E']
print(
    "Dij:", find_shortest_path_dij(simple_map_dij, "C", "F")
)  # Dij: ['C', 'D', 'E', 'F']
|
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from main import models
class PostAnalyticAnonTestCase(TestCase):
    """Anonymous visits on a post should be recorded as analytics."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        self.data = {
            "title": "Welcome post",
            "slug": "welcome-post",
            "body": "Content sentence.",
        }
        self.post = models.Post.objects.create(owner=self.user, **self.data)
        # Log out so the request in the test counts as an anonymous visit.
        self.client.logout()

    def test_post_analytic_anon(self):
        # Posts are served on the author's subdomain, hence the explicit host.
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        response = self.client.get(
            reverse("post_detail", args=(self.post.slug,)),
            HTTP_HOST=host,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(models.AnalyticPost.objects.filter(post=self.post).count(), 1)
class PostAnalyticTestCase(TestCase):
    """Visits by the logged-in owner must not be counted as analytics."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        self.data = {
            "title": "Welcome post",
            "slug": "welcome-post",
            "body": "Content sentence.",
        }
        self.post = models.Post.objects.create(owner=self.user, **self.data)

    def test_post_analytic_logged_in(self):
        # Request on the author's subdomain while still logged in.
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        response = self.client.get(
            reverse("post_detail", args=(self.post.slug,)),
            HTTP_HOST=host,
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(models.AnalyticPost.objects.filter(post=self.post).exists())
class PageAnalyticAnonTestCase(TestCase):
    """Anonymous visits on a page should be recorded as analytics."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        self.data = {
            "title": "About",
            "slug": "about",
            "body": "About this blog.",
            "is_hidden": False,
        }
        self.page = models.Page.objects.create(owner=self.user, **self.data)
        # Log out so the request in the test counts as an anonymous visit.
        self.client.logout()

    def test_page_analytic_anon(self):
        # Pages are served on the author's subdomain, hence the explicit host.
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        response = self.client.get(
            reverse("page_detail", args=(self.page.slug,)),
            HTTP_HOST=host,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            models.AnalyticPage.objects.filter(path=self.page.slug).count(), 1
        )
class PageAnalyticTestCase(TestCase):
    """Page visits by the logged-in owner must not be counted."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        self.data = {
            "title": "About",
            "slug": "about",
            "body": "About this blog.",
            "is_hidden": False,
        }
        self.page = models.Page.objects.create(owner=self.user, **self.data)

    def test_page_analytic_logged_in(self):
        # Request on the author's subdomain while still logged in.
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        response = self.client.get(
            reverse("page_detail", args=(self.page.slug,)),
            HTTP_HOST=host,
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(
            models.AnalyticPage.objects.filter(path=self.page.slug).exists()
        )
class PageAnalyticIndexTestCase(TestCase):
    """Visits to the special 'index' page should be recorded."""

    def setUp(self):
        # No login: the visit below is anonymous and should be counted.
        self.user = models.User.objects.create(username="alice")

    def test_index_analytic(self):
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        response = self.client.get(
            reverse("index"),
            HTTP_HOST=host,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(models.AnalyticPage.objects.filter(path="index").count(), 1)
class PageAnalyticRSSTestCase(TestCase):
    """Visits to the special 'rss' feed should be recorded."""

    def setUp(self):
        # No login: the visit below is anonymous and should be counted.
        self.user = models.User.objects.create(username="alice")

    def test_rss_analytic(self):
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        response = self.client.get(
            reverse("rss_feed"),
            HTTP_HOST=host,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(models.AnalyticPage.objects.filter(path="rss").count(), 1)
class AnalyticListTestCase(TestCase):
    """The analytics dashboard should list special pages and the user's posts."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        self.data = {
            "title": "Welcome post",
            "slug": "welcome-post",
            "body": "Content sentence.",
        }
        self.post = models.Post.objects.create(owner=self.user, **self.data)

    def test_analytic_list(self):
        response = self.client.get(
            reverse("analytic_list"),
        )
        self.assertEqual(response.status_code, 200)
        # Special pages section.
        self.assertContains(response, "List of pages:")
        self.assertContains(response, "index")
        self.assertContains(response, "rss")
        # Posts section.
        self.assertContains(response, "List of posts:")
        self.assertContains(response, "Welcome post")
class PostAnalyticDetailTestCase(TestCase):
    """The per-post analytics page should render a chart with the hit count."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        self.data = {
            "title": "Welcome post",
            "slug": "welcome-post",
            "body": "Content sentence.",
        }
        self.post = models.Post.objects.create(owner=self.user, **self.data)
        # Register one sample post analytic: log out first, because only
        # anonymous visits are counted.
        self.client.logout()
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        self.client.get(
            reverse("post_detail", args=(self.post.slug,)),
            HTTP_HOST=host,
        )
        # Log back in to access the analytics dashboard.
        self.client.force_login(self.user)

    def test_post_analytic_detail(self):
        response = self.client.get(
            reverse("analytic_post_detail", args=(self.post.slug,)),
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<div class="analytics-chart">')
        self.assertContains(
            response,
            '<svg version="1.1" viewBox="0 0 500 192" xmlns="http://www.w3.org/2000/svg">',
        )
        self.assertContains(response, "1 hits")
class PageAnalyticDetailTestCase(TestCase):
    """The per-page analytics page should render a chart with the hit count."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        self.data = {
            "title": "About",
            "slug": "about",
            "body": "About this blog.",
            "is_hidden": False,
        }
        self.page = models.Page.objects.create(owner=self.user, **self.data)
        # Register one sample page analytic: log out first, because only
        # anonymous visits are counted.
        self.client.logout()
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        self.client.get(
            reverse("page_detail", args=(self.page.slug,)),
            HTTP_HOST=host,
        )
        # Log back in to access the analytics dashboard.
        self.client.force_login(self.user)

    def test_page_analytic_detail(self):
        response = self.client.get(
            reverse("analytic_page_detail", args=(self.page.slug,)),
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<div class="analytics-chart">')
        self.assertContains(
            response,
            '<svg version="1.1" viewBox="0 0 500 192" xmlns="http://www.w3.org/2000/svg">',
        )
        self.assertContains(response, "1 hits")
class PageAnalyticDetailIndexTestCase(TestCase):
    """Analytics detail for the special 'index' page renders a chart."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        # Register one sample index analytic: log out first, because only
        # anonymous visits are counted.
        self.client.logout()
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        self.client.get(
            reverse("index"),
            HTTP_HOST=host,
        )
        # Log back in to access the analytics dashboard.
        self.client.force_login(self.user)

    def test_page_analytic_detail(self):
        response = self.client.get(
            reverse("analytic_page_detail", args=("index",)),
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<div class="analytics-chart">')
        self.assertContains(
            response,
            '<svg version="1.1" viewBox="0 0 500 192" xmlns="http://www.w3.org/2000/svg">',
        )
        self.assertContains(response, "1 hits")
class PageAnalyticDetailRSSTestCase(TestCase):
    """Analytics detail for the special 'rss' page renders a chart."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice")
        self.client.force_login(self.user)
        # Register one sample rss analytic: log out first, because only
        # anonymous visits are counted.
        self.client.logout()
        host = f"{self.user.username}.{settings.CANONICAL_HOST}"
        self.client.get(
            reverse("rss_feed"),
            HTTP_HOST=host,
        )
        # Log back in to access the analytics dashboard.
        self.client.force_login(self.user)

    def test_page_analytic_detail(self):
        response = self.client.get(
            reverse("analytic_page_detail", args=("rss",)),
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<div class="analytics-chart">')
        self.assertContains(
            response,
            '<svg version="1.1" viewBox="0 0 500 192" xmlns="http://www.w3.org/2000/svg">',
        )
        self.assertContains(response, "1 hits")
|
# nebpyclient/api/etickets.py
#
# Copyright 2020 Nebulon, Inc.
# All Rights Reserved.
#
# DISCLAIMER: THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
# EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
# OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from datetime import datetime
from .graphqlclient import GraphQLParam, NebMixin
from .common import PageInput, NebEnum, ResourceType, read_value
from .sorting import SortDirection
class SupportCaseStatus(NebEnum):
    """Lifecycle status of a support case as reported by nebulon ON."""
    New = "New"
    Pending = "Pending"
    Working = "Working"
    Escalated = "Escalated"
    Closed = "Closed"
class SupportCaseIssueType(NebEnum):
    """Category of a support case, used to route it to the right team."""
    Question = "Question"
    Hardware = "Hardware"
    Software = "Software"
    FeatureRequest = "FeatureRequest"
    # Use when the issue cannot be clearly categorized.
    Unknown = "Unknown"
class SupportCasePriority(NebEnum):
    """Customer-specified urgency for a support case."""
    High = "High"
    Medium = "Medium"
    Low = "Low"
class SupportCaseSort:
    """Sort specification for support case queries.

    Allows sorting support cases on common properties. Only one sort
    direction should be specified per query.
    """

    def __init__(
            self,
            status: SortDirection = None,
            issue_type: SortDirection = None,
            created_date: SortDirection = None,
            updated_date: SortDirection = None
    ):
        """Create a sort specification for support cases.

        :param status: Sort direction for the ``status`` property.
        :type status: SortDirection, optional
        :param issue_type: Sort direction for the ``issue_type`` property.
        :type issue_type: SortDirection, optional
        :param created_date: Sort direction for the ``created_date`` property.
        :type created_date: SortDirection, optional
        :param updated_date: Sort direction for the ``updated_date`` property.
        :type updated_date: SortDirection, optional
        """
        self.__status = status
        self.__issue_type = issue_type
        self.__created_date = created_date
        self.__updated_date = updated_date

    @property
    def status(self) -> SortDirection:
        """Sort direction for the ``status`` property of a support case"""
        return self.__status

    @property
    def issue_type(self) -> SortDirection:
        """Sort direction for the ``issue_type`` property of a support case"""
        return self.__issue_type

    @property
    def created_date(self) -> SortDirection:
        """Sort direction for the ``created_date`` property of a support case"""
        return self.__created_date

    @property
    def updated_date(self) -> SortDirection:
        """Sort direction for the ``updated_date`` property of a support case"""
        return self.__updated_date

    @property
    def as_dict(self):
        # Keys follow the GraphQL schema's camelCase naming.
        return {
            "status": self.status,
            "issueType": self.issue_type,
            "createdDate": self.created_date,
            "updatedDate": self.updated_date,
        }
class SupportCaseFilter:
    """Filter specification for support case queries in nebulon ON.

    Only one filter property may be specified per filter object.
    """

    def __init__(
            self,
            number: str = None,
            status: SupportCaseStatus = None,
            issue_type: SupportCaseIssueType = None,
            contact_uuid: str = None
    ):
        """Create a filter object; supply exactly one parameter.

        :param number: Filter based on support case number
        :type number: str, optional
        :param status: Filter based on support case status
        :type status: SupportCaseStatus, optional
        :param issue_type: Filter based on support case type
        :type issue_type: SupportCaseIssueType, optional
        :param contact_uuid: Filter based on the support case contact
        :type contact_uuid: str, optional
        """
        self.__number = number
        self.__status = status
        self.__issue_type = issue_type
        self.__contact_uuid = contact_uuid

    @property
    def number(self) -> str:
        """Filter based on the support case number"""
        return self.__number

    @property
    def status(self) -> SupportCaseStatus:
        """Filter based on the support case status"""
        return self.__status

    @property
    def issue_type(self) -> SupportCaseIssueType:
        """Filter based on the support case type"""
        return self.__issue_type

    @property
    def contact_uuid(self) -> str:
        """Filter based on the support case contact"""
        return self.__contact_uuid

    @property
    def as_dict(self):
        # Keys follow the GraphQL schema's camelCase naming.
        return {
            "number": self.number,
            "status": self.status,
            "issueType": self.issue_type,
            "contactID": self.contact_uuid,
        }
class CreateSupportCaseInput:
    """Input object for opening a new support case in nebulon ON.

    Support cases let customers resolve issues with nebulon Cloud-Defined
    Storage through their preferred support channel. Issues may include
    infrastructure and hardware problems, software issues, or questions.
    """

    def __init__(
            self,
            subject: str,
            description: str,
            priority: SupportCasePriority,
            issue_type: SupportCaseIssueType,
            spu_serial: str = None,
            resource_type: ResourceType = None,
            resource_id: str = None
    ):
        """Create a new support case input object.

        At minimum supply ``subject`` (high level summary), ``description``
        (details of the problem), ``priority`` (urgency of the request) and
        ``issue_type`` (routing hint for support). When the issue relates
        to a specific services processing unit (SPU) or a resource in
        nebulon ON or the customer's datacenter, also provide
        ``spu_serial``, ``resource_type``, and ``resource_id``.

        :param subject: High level summary of an issue
        :type subject: str
        :param description: Detailed description of the issue that requires
            resolution
        :type description: str
        :param priority: The urgency of the request
        :type priority: SupportCasePriority
        :param issue_type: The type of issue; use
            `SupportCaseIssueType.Unknown` when not clearly identifiable.
        :type issue_type: SupportCaseIssueType
        :param spu_serial: Serial number of an SPU related to the case
        :type spu_serial: str, optional
        :param resource_type: Type of resource related to the case
        :type resource_type: ResourceType, optional
        :param resource_id: Unique identifier of the related resource;
            should be supplied whenever ``resource_type`` is specified.
        :type resource_id: str, optional
        """
        self.__subject = subject
        self.__description = description
        self.__priority = priority
        self.__issue_type = issue_type
        self.__spu_serial = spu_serial
        self.__resource_type = resource_type
        self.__resource_id = resource_id

    @property
    def subject(self) -> str:
        """High level summary of the support case"""
        return self.__subject

    @property
    def description(self) -> str:
        """Detailed description of the issue"""
        return self.__description

    @property
    def priority(self) -> SupportCasePriority:
        """Urgency of the support case"""
        return self.__priority

    @property
    def issue_type(self) -> SupportCaseIssueType:
        """Type of support case / issue"""
        return self.__issue_type

    @property
    def spu_serial(self) -> str:
        """Serial number of the SPU related to the support case / issue"""
        return self.__spu_serial

    @property
    def resource_type(self) -> ResourceType:
        """Type of resource related to the support case / issue"""
        return self.__resource_type

    @property
    def resource_id(self) -> str:
        """Unique identifier for the resource related to the support case"""
        return self.__resource_id

    @property
    def as_dict(self):
        # Keys follow the GraphQL schema's camelCase naming.
        return {
            "subject": self.subject,
            "description": self.description,
            "priority": self.priority,
            "issueType": self.issue_type,
            "spuSerial": self.spu_serial,
            "resourceType": self.resource_type,
            "resourceID": self.resource_id,
        }
class UpdateSupportCaseInput:
    """Input object for updating an existing support case in nebulon ON.

    Support cases let customers resolve issues with nebulon Cloud-Defined
    Storage through their preferred support channel. Issues may include
    infrastructure and hardware problems, software issues, or questions.
    """

    def __init__(
            self,
            subject: str = None,
            description: str = None,
            priority: SupportCasePriority = None,
            status: SupportCaseStatus = None,
            contact_user_uuid: str = None,
            improvement_suggestion: str = None,
            comment: str = None
    ):
        """Create an update object for an existing support case.

        :param subject: High level summary of an issue
        :type subject: str, optional
        :param description: Detailed description of the issue that requires
            resolution
        :type description: str, optional
        :param priority: The urgency of the request
        :type priority: SupportCasePriority, optional
        :param status: The new status of the support case; use
            `SupportCaseStatus.Closed` when the issue is resolved.
        :type status: SupportCaseStatus, optional
        :param contact_user_uuid: Changes the customer-side user that
            support shall contact for this issue.
        :type contact_user_uuid: str, optional
        :param improvement_suggestion: Feedback on how support handled the
            case and suggestions for future requests.
        :type improvement_suggestion: str, optional
        :param comment: A comment to add to the case, e.g. in response to a
            support question or to provide further details.
        :type comment: str, optional
        """
        self.__subject = subject
        self.__description = description
        self.__priority = priority
        self.__status = status
        self.__contact_user_uuid = contact_user_uuid
        self.__improvement_suggestion = improvement_suggestion
        self.__comment = comment

    @property
    def subject(self) -> str:
        """High level summary of an issue"""
        return self.__subject

    @property
    def description(self) -> str:
        """Detailed description of the issue"""
        return self.__description

    @property
    def priority(self) -> SupportCasePriority:
        """The urgency of the request"""
        return self.__priority

    @property
    def status(self) -> SupportCaseStatus:
        """The new status of the support case"""
        return self.__status

    @property
    def contact_user_uuid(self) -> str:
        """The identifier for the user to be contacted for the support case"""
        return self.__contact_user_uuid

    @property
    def improvement_suggestion(self) -> str:
        """Feedback for support for improvement"""
        return self.__improvement_suggestion

    @property
    def comment(self) -> str:
        """A comment to add to the support case history"""
        return self.__comment

    @property
    def as_dict(self):
        # Keys follow the GraphQL schema's camelCase naming.
        return {
            "subject": self.subject,
            "description": self.description,
            "priority": self.priority,
            "status": self.status,
            "contactUserUUID": self.contact_user_uuid,
            "improvementSuggestion": self.improvement_suggestion,
            "comment": self.comment,
        }
class SupportCaseComment:
    """A single entry in a support case's comment history.

    Comments provide bi-directional communication between customers and
    support for clarifying issues or posting status updates.
    """

    def __init__(
            self,
            response: dict
    ):
        """Build a comment object from a nebulon ON API response.

        The provided ``dict`` is validated against the schema currently
        implemented by the SDK.

        :param response: The JSON response from the server
        :type response: dict
        :raises ValueError: An error if illegal data is returned from the server
        """
        self.__time = read_value("time", response, datetime, True)
        self.__name = read_value("name", response, str, True)
        self.__text = read_value("text", response, str, True)

    @property
    def time(self) -> datetime:
        """The date and time when the comment was published"""
        return self.__time

    @property
    def name(self) -> str:
        """The name of the user that published the comment"""
        return self.__name

    @property
    def text(self) -> str:
        """The text contents of the comment"""
        return self.__text

    @staticmethod
    def fields():
        # Field names requested from the GraphQL API for this type.
        return [
            "time",
            "name",
            "text",
        ]
class SupportCaseContact:
    """The customer-side user contact for a support case.

    Support works with this contact to resolve an issue. By default it is
    the user that created the support case, but it can be changed.
    """

    def __init__(
            self,
            response: dict
    ):
        """Build a contact object from a nebulon ON API response.

        The provided ``dict`` is validated against the schema currently
        implemented by the SDK.

        :param response: The JSON response from the server
        :type response: dict
        :raises ValueError: An error if illegal data is returned from the server
        """
        self.__contact_uuid = read_value("contactID", response, str, True)
        self.__name = read_value("name", response, str, True)
        self.__email = read_value("email", response, str, True)
        self.__phone = read_value("phone", response, str, True)
        self.__mobile = read_value("mobile", response, str, True)

    @property
    def contact_uuid(self) -> str:
        """The unique identifier of the contact"""
        return self.__contact_uuid

    @property
    def name(self) -> str:
        """The name of the contact"""
        return self.__name

    @property
    def email(self) -> str:
        """The email address of the contact"""
        return self.__email

    @property
    def phone(self) -> str:
        """The phone number of the contact"""
        return self.__phone

    @property
    def mobile(self) -> str:
        """The mobile phone number of the contact"""
        return self.__mobile

    @staticmethod
    def fields():
        # Field names requested from the GraphQL API for this type.
        return [
            "contactID",
            "name",
            "email",
            "phone",
            "mobile",
        ]
class SupportCaseAttachment:
    """A file attached to a support case.

    Customers can attach arbitrary data to a support case — for example UI
    screenshots, application server log files, or other supporting material.
    """

    def __init__(
            self,
            response: dict
    ):
        """Build an attachment object from a nebulon ON API response.

        The provided ``dict`` is validated against the schema currently
        implemented by the SDK.

        :param response: The JSON response from the server
        :type response: dict
        :raises ValueError: An error if illegal data is returned from the server
        """
        self.__file_name = read_value("fileName", response, str, True)
        self.__file_link = read_value("fileLink", response, str, True)
        self.__upload_time = read_value("uploadTime", response, datetime, True)
        self.__file_size_bytes = read_value("fileSizeBytes", response, int, True)
        self.__unique_id = read_value("uniqueID", response, str, True)

    @property
    def file_name(self) -> str:
        """The name of the uploaded file"""
        return self.__file_name

    @property
    def file_link(self) -> str:
        """A link to the file where it is uploaded"""
        return self.__file_link

    @property
    def upload_time(self) -> datetime:
        """The date and time of upload"""
        return self.__upload_time

    @property
    def file_size_bytes(self) -> int:
        """The size of the file in bytes"""
        return self.__file_size_bytes

    @property
    def unique_id(self) -> str:
        """The unique identifier of the uploaded file"""
        return self.__unique_id

    @staticmethod
    def fields():
        # Field names requested from the GraphQL API for this type.
        return [
            "fileName",
            "fileLink",
            "uploadTime",
            "fileSizeBytes",
            "uniqueID",
        ]
class SupportCase:
    """A support case object in nebulon ON

    A support case is used by customers to have their issues with nebulon
    infrastructure resolved. Issues may include infrastructure and hardware
    issues, software issues, or general questions.
    """

    def __init__(
            self,
            response: dict
    ):
        """Constructs a new support case object

        This constructor expects a dict() object from the nebulon ON API. It
        will check the returned data against the currently implemented schema
        of the SDK.

        :param response: The JSON response from the server
        :type response: dict

        :raises ValueError: An error if illegal data is returned from the server
        """
        self.__number = read_value(
            "number", response, str, True)
        self.__subject = read_value(
            "subject", response, str, True)
        self.__description = read_value(
            "description", response, str, True)
        self.__priority = read_value(
            "priority", response, SupportCasePriority, True)
        self.__issue_type = read_value(
            "issueType", response, SupportCaseIssueType, True)
        self.__status = read_value(
            "status", response, SupportCaseStatus, True)
        self.__created_date = read_value(
            "createdDate", response, datetime, True)
        self.__updated_date = read_value(
            "updatedDate", response, datetime, False)
        self.__closed_date = read_value(
            "closedDate", response, datetime, False)
        self.__contact = read_value(
            "contact", response, SupportCaseContact, False)
        self.__owner_name = read_value(
            "ownerName", response, str, True)
        self.__owner_email = read_value(
            "ownerEmail", response, str, True)
        self.__comments = read_value(
            "comments", response, SupportCaseComment, False)
        self.__attachments = read_value(
            "attachments", response, SupportCaseAttachment, False)
        self.__improvement_suggestion = read_value(
            "improvementSuggestion", response, str, True)
        self.__resource_type = read_value(
            "resourceType", response, str, True)
        self.__resource_id = read_value(
            "resourceID", response, str, True)
        self.__alert_id = read_value(
            "alertID", response, str, True)
        self.__spu_serial = read_value(
            "spuSerial", response, str, True)
        self.__kb_link = read_value(
            "kbLink", response, str, True)
        self.__oem_name = read_value(
            "oemName", response, str, True)
        self.__oem_case_number = read_value(
            "oemCaseNumber", response, str, True)
        self.__oem_created_date = read_value(
            "oemCreatedDate", response, datetime, False)
        self.__oem_updated_date = read_value(
            "oemUpdatedDate", response, datetime, False)

    @property
    def number(self) -> str:
        """Support case number"""
        return self.__number

    @property
    def subject(self) -> str:
        """High level summary of the support case / issue"""
        return self.__subject

    @property
    def description(self) -> str:
        """Detailed description of the support case / issue"""
        return self.__description

    @property
    def priority(self) -> SupportCasePriority:
        """Urgency of the support case"""
        return self.__priority

    @property
    def issue_type(self) -> SupportCaseIssueType:
        """Type of issue"""
        return self.__issue_type

    @property
    def status(self) -> SupportCaseStatus:
        """Status of the support case"""
        return self.__status

    @property
    def created_date(self) -> datetime:
        """Date and time when the support case was created"""
        return self.__created_date

    @property
    def updated_date(self) -> datetime:
        """Date and time when the support case was last updated"""
        return self.__updated_date

    @property
    def closed_date(self) -> datetime:
        """Date and time when the support case / issue was resolved"""
        return self.__closed_date

    @property
    def contact(self) -> SupportCaseContact:
        """The customer contact for the support case"""
        return self.__contact

    @property
    def owner_name(self) -> str:
        """The case owner working the support case in support"""
        return self.__owner_name

    # Fix: ``ownerEmail`` was read in __init__ and requested in fields() but
    # had no accessor; expose it like every other attribute.
    @property
    def owner_email(self) -> str:
        """The email address of the case owner working the support case"""
        return self.__owner_email

    # Fix: annotation was ``-> [SupportCaseComment]`` (a list literal, not a
    # valid type hint); the value is a list of SupportCaseComment objects.
    @property
    def comments(self) -> list:
        """List of ``SupportCaseComment`` objects for the support case"""
        return self.__comments

    @property
    def attachments(self) -> list:
        """List of ``SupportCaseAttachment`` objects for the support case"""
        return self.__attachments

    # Fix: annotation said ``-> list`` but the value is read as ``str``.
    @property
    def improvement_suggestion(self) -> str:
        """Customer feedback for improving future requests"""
        return self.__improvement_suggestion

    @property
    def resource_type(self) -> str:
        """Associated resource type for the support case"""
        return self.__resource_type

    @property
    def resource_id(self) -> str:
        """Unique identifier of the associated resource for the support case"""
        return self.__resource_id

    # Fix: ``alertID`` was read in __init__ and requested in fields() but had
    # no accessor; expose it like every other attribute.
    @property
    def alert_id(self) -> str:
        """Unique identifier of the associated alert for the support case"""
        return self.__alert_id

    @property
    def spu_serial(self) -> str:
        """Serial number of the associated SPU for the support case"""
        return self.__spu_serial

    @property
    def kb_link(self) -> str:
        """Knowledge Base article related to this support case"""
        return self.__kb_link

    @property
    def oem_name(self) -> str:
        """Name of the server vendor associated with the infrastructure"""
        return self.__oem_name

    @property
    def oem_case_number(self) -> str:
        """Support case number with the server vendor"""
        return self.__oem_case_number

    @property
    def oem_created_date(self) -> datetime:
        """Date and time of support case creation with the server vendor"""
        return self.__oem_created_date

    @property
    def oem_updated_date(self) -> datetime:
        """Date and time of last update with the server vendor"""
        return self.__oem_updated_date

    @staticmethod
    def fields():
        return [
            "number",
            "subject",
            "description",
            "priority",
            "issueType",
            "status",
            "createdDate",
            "updatedDate",
            "closedDate",
            "contact{%s}" % (",".join(SupportCaseContact.fields())),
            "ownerName",
            "ownerEmail",
            "comments{%s}" % (",".join(SupportCaseComment.fields())),
            "attachments{%s}" % (",".join(SupportCaseAttachment.fields())),
            "improvementSuggestion",
            "resourceType",
            "resourceID",
            "alertID",
            "spuSerial",
            "kbLink",
            "oemName",
            "oemCaseNumber",
            "oemCreatedDate",
            "oemUpdatedDate",
        ]
class SupportCaseList:
    """Paginated support case list object

    Wraps one page of support case objects together with pagination
    metadata. Unless a different page size is requested in the paginated
    query, a single page holds at most `100` items.

    Consumers should always check the ``more`` property: by default the
    server returns only a single page, not the full list of alerts.
    """

    def __init__(
            self,
            response: dict
    ):
        """Constructs a new support case list object

        Expects a dict() object from the nebulon ON API and validates the
        returned data against the currently implemented schema of the SDK.

        :param response: The JSON response from the server
        :type response: dict

        :raises ValueError: An error if illegal data is returned from the server
        """
        self.__more = read_value("more", response, bool, True)
        self.__total_count = read_value("totalCount", response, int, True)
        self.__filtered_count = read_value("filteredCount", response, int, True)
        self.__items = read_value("items", response, SupportCase, True)

    @property
    def more(self) -> bool:
        """Indicates if there are more items on the server"""
        return self.__more

    @property
    def total_count(self) -> int:
        """The total number of items on the server"""
        return self.__total_count

    @property
    def filtered_count(self) -> int:
        """The number of items on the server matching the provided filter"""
        return self.__filtered_count

    @property
    def items(self) -> list:
        """List of support cases in the pagination list"""
        return self.__items

    @staticmethod
    def fields():
        nested = ",".join(SupportCase.fields())
        return [
            f"items{{{nested}}}",
            "more",
            "totalCount",
            "filteredCount",
        ]
class SupportCaseMixin(NebMixin):
    """Mixin that adds support case related methods to the GraphQL client"""

    def get_support_cases(
            self,
            page: PageInput = None,
            sc_filter: SupportCaseFilter = None,
            sort: SupportCaseSort = None
    ) -> SupportCaseList:
        """Retrieves a list of support cases

        :param page: The requested page from the server. Optional; when
            omitted the server returns the first page with a maximum of
            `100` items.
        :type page: PageInput, optional
        :param sc_filter: Restricts which support cases the server returns.
            When omitted, all objects are returned as a paginated response.
        :type sc_filter: SupportCaseFilter, optional
        :param sort: Defines the ordering of the returned support case
            objects. When omitted, objects are returned in the order they
            were created in.
        :type sort: SupportCaseSort, optional

        :returns SupportCaseList: A paginated list of support cases.

        :raises GraphQLError: An error with the GraphQL endpoint.
        """
        # assemble the GraphQL query parameters
        query_params = {
            "page": GraphQLParam(page, "PageInput", False),
            "filter": GraphQLParam(sc_filter, "SupportCaseFilter", False),
            "sort": GraphQLParam(sort, "SupportCaseSort", False),
        }

        # run the query and wrap the server response
        response = self._query(
            name="getSupportCases",
            params=query_params,
            fields=SupportCaseList.fields()
        )
        return SupportCaseList(response)

    def create_support_case(
            self,
            subject: str,
            description: str,
            priority: SupportCasePriority,
            issue_type: SupportCaseIssueType,
            spu_serial: str = None,
            resource_type: str = None,
            resource_id: str = None
    ) -> SupportCase:
        """Allows creation of a new support case

        The required parameters depend on the kind of support case. At a
        minimum, supply a ``subject`` summarizing the issue, a
        ``description`` detailing the specific problem, a ``priority``
        indicating the urgency, and the ``issue_type`` so the case can be
        routed to the right subject matter expert.

        If the issue concerns a specific services processing unit (SPU) or a
        resource in nebulon ON or in the customer's datacenter, also specify
        ``spu_serial``, ``resource_type``, and ``resource_id``.

        :param subject: High level summary of an issue
        :type subject: str
        :param description: Detailed description of the issue that requires
            resolution
        :type description: str
        :param priority: The urgency of the request
        :type priority: SupportCasePriority,
        :param issue_type: The type of issue. Use
            `SupportCaseIssueType.Unknown` when the issue is not clearly
            identifiable.
        :type issue_type: SupportCaseIssueType
        :param spu_serial: The serial number of an SPU related to the
            support case.
        :type spu_serial: str, optional
        :param resource_type: The type of resource related to the support
            case.
        :type resource_type: ResourceType, optional
        :param resource_id: The unique identifier of the resource related to
            the support case. Should be supplied together with
            ``resource_type``.
        :type resource_id: str, optional

        :returns SupportCase: The created support case.

        :raises GraphQLError: An error with the GraphQL endpoint.
        """
        # assemble the GraphQL mutation parameters
        mutation_params = {
            "input": GraphQLParam(
                CreateSupportCaseInput(
                    subject=subject,
                    description=description,
                    priority=priority,
                    issue_type=issue_type,
                    spu_serial=spu_serial,
                    resource_type=resource_type,
                    resource_id=resource_id
                ),
                "CreateSupportCaseInput",
                True
            ),
        }

        # run the mutation and wrap the server response
        response = self._mutation(
            name="createSupportCase",
            params=mutation_params,
            fields=SupportCase.fields()
        )
        return SupportCase(response)

    def update_support_case(
            self,
            case_number: str,
            subject: str = None,
            description: str = None,
            priority: SupportCasePriority = None,
            status: SupportCaseStatus = None,
            contact_user_uuid: str = None,
            improvement_suggestion: str = None,
            comment: str = None
    ) -> SupportCase:
        """Allows updating an existing support case

        :param case_number: The case number of the support case to update
        :type case_number: str
        :param subject: High level summary of an issue
        :type subject: str
        :param description: Detailed description of the issue that requires
            resolution
        :type description: str
        :param priority: The urgency of the request
        :type priority: SupportCasePriority,
        :param status: The new status of the support case. Use
            `SupportCaseStatus.Closed` when an issue is resolved.
        :type status: SupportCaseStatus
        :param contact_user_uuid: Changes the user contact at the customer
            that support shall reach out to for this issue.
        :type contact_user_uuid: str, optional
        :param improvement_suggestion: Feedback on how support handled the
            case and suggestions for future requests.
        :type improvement_suggestion: str, optional
        :param comment: A comment for the support case, e.g. a response to a
            support question or further details.
        :type comment: str, optional

        :returns SupportCase: The updated support case.

        :raises GraphQLError: An error with the GraphQL endpoint.
        """
        # assemble the GraphQL mutation parameters
        mutation_params = {
            "caseNumber": GraphQLParam(case_number, "String", True),
            "input": GraphQLParam(
                UpdateSupportCaseInput(
                    subject=subject,
                    description=description,
                    priority=priority,
                    status=status,
                    contact_user_uuid=contact_user_uuid,
                    improvement_suggestion=improvement_suggestion,
                    comment=comment
                ),
                "UpdateSupportCaseInput",
                True
            ),
        }

        # run the mutation and wrap the server response
        response = self._mutation(
            name="updateSupportCase",
            params=mutation_params,
            fields=SupportCase.fields()
        )
        return SupportCase(response)

    def upload_support_case_attachment(
            self,
            case_number: str,
            file_path: str
    ) -> SupportCase:
        """Allows uploading and attaching files to a support case

        :param case_number: The case number of the support case to update
        :type case_number: str
        :param file_path: The absolute path to the file to upload
        :type file_path: str

        :returns SupportCase: The updated support case.

        :raises GraphQLError: An error with the GraphQL endpoint.
        """
        # assemble the GraphQL mutation parameters
        mutation_params = {
            "caseNumber": GraphQLParam(case_number, "String", True),
            "attachment": GraphQLParam(file_path, "Upload", True),
        }

        # run the mutation and wrap the server response
        response = self._mutation(
            name="uploadSupportCaseAttachment",
            params=mutation_params,
            fields=SupportCase.fields()
        )
        return SupportCase(response)
# ---- utils.py ----
import torch
from torchvision.transforms import ToTensor
from torch.autograd import Variable
import numpy as np
import torchvision
import importlib
from PyQt5 import QtCore, QtGui, QtWidgets, uic
def GetIndexRangeOfBlk(height, width, blk_row, blk_col, blk_r, blk_c, over_lap=0):
    """Compute the pixel rectangle of block (blk_r, blk_c) when an image is
    split into a blk_row x blk_col grid.

    :param height: image height in pixels
    :param width: image width in pixels
    :param blk_row: number of block rows in the grid
    :param blk_col: number of block columns in the grid
    :param blk_r: 0-based row index of the requested block
    :param blk_c: 0-based column index of the requested block
    :param over_lap: pixels to expand the block on every side, clamped to the image
    :return: two ``(left, top, right, bottom)`` tuples — the exact block
        rectangle and the overlap-expanded rectangle
    :raises Exception: if the block index lies outside the grid
    """
    # Fix: also reject negative indices; previously only the upper bound was
    # checked and negative values silently produced nonsense rectangles.
    if not (0 <= blk_r < blk_row and 0 <= blk_c < blk_col):
        raise Exception("index is out of range...")

    blk_h_size = height // blk_row
    blk_w_size = width // blk_col

    upper_left_r = blk_r * blk_h_size
    upper_left_c = blk_c * blk_w_size
    ol_upper_left_r = max(upper_left_r - over_lap, 0)
    ol_upper_left_c = max(upper_left_c - over_lap, 0)

    # The last row/column absorbs the remainder pixels so the grid always
    # covers the full image.
    if blk_r == (blk_row - 1):
        lower_right_r = height
        ol_lower_right_r = lower_right_r
    else:
        lower_right_r = upper_left_r + blk_h_size
        ol_lower_right_r = min(lower_right_r + over_lap, height)

    if blk_c == (blk_col - 1):
        lower_right_c = width
        ol_lower_right_c = lower_right_c
    else:
        lower_right_c = upper_left_c + blk_w_size
        ol_lower_right_c = min(lower_right_c + over_lap, width)

    return ((upper_left_c, upper_left_r, lower_right_c, lower_right_r),
            (ol_upper_left_c, ol_upper_left_r, ol_lower_right_c, ol_lower_right_r))
"""circularMask_mse_beta : /home/student/Documents/u-net-pytorch-original/lr001_weightdecay00001/"""
"""denoise&airysuperrez_beta : /home/student/Documents/u-net_denoising/dataset_small_mask/"""
"""circularMask_chi10_beta : /home/student/Documents/Atom Segmentation APP/AtomSegGUI/atomseg_bupt_new_10/"""
"""circularMask_chi100_beta : /home/student/Documents/Atom Segmentation APP/AtomSegGUI/atomseg_bupt_new_100/"""
"""gaussianMask+ : /home/student/Documents/Atom Segmentation APP/AtomSegGUI/atom_seg_gaussian_mask/"""
def load_model(model_path, model_num, data, cuda):
    """Run U-Net inference on ``data`` with the weights stored at ``model_path``.

    :param model_path: path to a saved ``state_dict`` for the U-Net
    :param model_num: unused; kept for backward compatibility with callers
    :param data: input image (anything ``torchvision.transforms.ToTensor`` accepts)
    :param cuda: if True, run the model on the GPU
    :return: 2D numpy array holding the network output for the first channel
    """
    from unet_sigmoid import UNet

    unet = UNet()
    if cuda:
        unet = unet.cuda()
    if not cuda:
        # load (possibly GPU-trained) weights onto the CPU
        unet.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
    else:
        unet.load_state_dict(torch.load(model_path))
    # Fix: switch to inference mode so BatchNorm uses its running statistics
    # and dropout is disabled; the original ran the network in training mode.
    unet.eval()

    transform = ToTensor()
    ori_tensor = transform(data)
    if cuda:
        ori_tensor = ori_tensor.cuda()
    ori_tensor = torch.unsqueeze(ori_tensor, 0)

    # Pad both spatial dims up to a multiple of 4 and crop the output back
    # afterwards (presumably matching the U-Net's downsampling factor —
    # TODO confirm against unet_sigmoid).
    use_padding = False
    padding_left = padding_right = padding_top = padding_bottom = 0
    ori_height = ori_tensor.size()[2]
    ori_width = ori_tensor.size()[3]
    if ori_height % 4:
        padding_top = (4 - ori_height % 4) // 2
        padding_bottom = 4 - ori_height % 4 - padding_top
        use_padding = True
    if ori_width % 4:
        padding_left = (4 - ori_width % 4) // 2
        padding_right = 4 - ori_width % 4 - padding_left
        use_padding = True
    if use_padding:
        padding_transform = torch.nn.ConstantPad2d(
            (padding_left, padding_right, padding_top, padding_bottom), 0)
        ori_tensor = padding_transform(ori_tensor)

    # Fix: skip autograd book-keeping during inference.
    with torch.no_grad():
        output = unet(ori_tensor)

    if use_padding:
        output = output[:, :, padding_top:(padding_top + ori_height),
                        padding_left:(padding_left + ori_width)]
    if cuda:
        result = output.data.cpu().numpy()
    else:
        result = output.data.numpy()
    return result[0, 0, :, :]
def PIL2Pixmap(im):
    """Convert a PIL image to a QPixmap.

    Works for any PIL mode because ``convert("RGBA")`` normalizes the image
    first. (The original per-mode branching was dead code: the "RGB" branch
    did nothing and the "L" branch converted to RGBA twice.)

    NOTE(review): ``Format_ARGB32`` stores BGRA on little-endian hosts while
    PIL emits RGBA, so red/blue channels may be swapped — confirm the
    intended display behavior before changing it.
    """
    data = im.convert("RGBA").tobytes("raw", "RGBA")
    qim = QtGui.QImage(data, im.size[0], im.size[1], QtGui.QImage.Format_ARGB32)
    return QtGui.QPixmap.fromImage(qim)
def map01(mat):
    """Linearly rescale ``mat`` to the range [0, 1].

    :param mat: numpy array (or array-like with ``min``/``max``)
    :return: rescaled array; a constant input maps to all zeros instead of
        raising a division-by-zero warning / producing NaNs
    """
    mat_min = mat.min()
    span = mat.max() - mat_min
    if span == 0:
        # Fix: guard against constant input, which previously divided by zero.
        return mat - mat_min
    return (mat - mat_min) / span
# gh_stars: 1-10
# !/usr/bin/python3
"""
The Gene Ontology Categories Suite (GOcats)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides methods for the creation of directed acyclic concept subgraphs of Gene Ontology, along with methods
for evaluating those subgraphs.
"""
import os
import sys
import re
import csv
import jsonpickle
from . import ontologyparser
from . import godag
from . import subdag
from . import tools
from . import _version
# Emit deterministic, human-readable JSON (sorted keys, 2-space indent) so
# encoded graphs can be diffed between GOcats versions.
jsonpickle.set_encoder_options('json', sort_keys=True, indent=2)
# Re-export the package version.
__version__ = _version.__version__
def json_format_graph(graph_object, graph_identifier):
    """Flatten a graph's edges into a JSON-encodable dict, with every key
    prefixed by the graph identifier, so graph objects can be compared
    between versions of GOcats."""
    key_prefix = str(graph_identifier) + '_'
    return {key_prefix + edge.json_edge[0]: edge.json_edge[1]
            for edge in graph_object.edge_list}
def build_graph(args):
    """**Not yet implemented**

    Try build_graph_interpreter to create a GO graph object to explore within a Python interpreter."""
    # FIXME: JsonPickle is reaching max recursion depth because of the fact that objects point to one another.
    if args['--supergraph_namespace']:
        supergraph_namespace = args['--supergraph_namespace']
    else:
        supergraph_namespace = None
    if args['--allowed_relationships']:
        allowed_relationships = args['--allowed_relationships']
    else:
        allowed_relationships = None

    output_directory = args['<output_directory>']
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    database_name = os.path.basename(args['<database_file>'])

    # Fix: use a context manager so the database file is closed even when
    # graph construction or parsing raises (the original leaked the handle).
    with open(args['<database_file>'], 'r') as database:
        graph_class = {'go.obo': godag.GoGraph(supergraph_namespace, allowed_relationships)}
        graph = graph_class[database_name]
        parsing_class = {'go.obo': ontologyparser.GoParser(database, graph)}
        parsing_class[database_name].parse()
def build_graph_interpreter(database_file, supergraph_namespace=None, allowed_relationships=None, relationship_directionality='gocats'):
    """Creates a graph object of GO, which can be traversed and queried within a Python interpreter.

    :param file_handle database_file: Ontology database file.
    :param str supergraph_namespace: Optional - Filter graph to a sub-ontology namespace.
    :param list allowed_relationships: Optional - Filter graph to use only those relationships listed.
    :param relationship_directionality: Optional - Any string other than 'gocats' will retain all original GO relationship directionalities. Defaults to reversing has_part direction.
    :return: A Graph object of the ontology provided.
    :rtype: :py:obj:`class`
    """
    # Fix: context manager guarantees the database handle is closed even when
    # parsing raises (the original leaked the handle on error).
    with open(database_file, 'r') as database:
        graph = godag.GoGraph(supergraph_namespace, allowed_relationships)
        go_parser = ontologyparser.GoParser(database, graph,
                                            relationship_directionality=relationship_directionality)
        go_parser.parse()
    return graph
def create_subgraphs(args):
    """Creates a graph object of an ontology, processed into :class:`gocats.dag.OboGraph` or to an object that
    inherits from :class:`gocats.dag.OboGraph`, and then extracts subgraphs which represent concepts that are defined
    by a list of provided keywords. Each subgraph is processed into :class:`gocats.subdag.SubGraph`.

    :param database_file: Ontology database file.
    :param keyword_file: A CSV file with two columns: column 1 naming categories, and column 2 listing search strings (no quotation marks, separated by semicolons).
    :param output_directory: The directory where results are stored.
    :param --supergraph_namespace=<None>: OPTIONAL-Specify a supergraph sub-ontology to filter e.g. cellular_component.
    :param --subgraph_namespace=<None>: OPTIONAL-Specify a subgraph sub-ontology to filter e.g. cellular_component.
    :param --supergraph_relationships=[]: OPTIONAL-Specify a list of relationships to limit in the supergraph e.g. [is_a, part_of].
    :param --subgraph_relationships=[]: OPTIONAL-Specify a list of relationships to limit in subgraphs e.g. [is_a, part_of].
    :param --map_supersets: OPTIONAL-Allow subgraphs to subsume other subgraphs.
    :param --output_termlist: OPTIONAL-Create a translation of ontology terms to their names to improve interpretability of dev test results.
    :param --go-basic-scoping: OPTIONAL-Creates a GO graph similar to go-basic with only scoping-type relationships (is_a and part_of).
    :param --network_table_name=<None>: OPTIONAL-Make a specific name for the network table produced from the subgraphs (defaults to NetworkTable.csv)
    :param --test: OPTIONAL-Additionally serialize the supergraph and all subgraphs (JSON and jsonpickle) for comparing graph output between GOcats versions.
    :return: None
    :rtype: :py:obj:`None`
    """
    # Resolve optional CLI flags to None when absent/empty.
    if args['--supergraph_namespace']:
        supergraph_namespace = args['--supergraph_namespace']
    else:
        supergraph_namespace = None
    if args['--subgraph_namespace']:
        subgraph_namespace = args['--subgraph_namespace']
    else:
        subgraph_namespace = None
    if args['--supergraph_relationships']:
        supergraph_relationships = args['--supergraph_relationships']
    else:
        supergraph_relationships = None
    if args['--subgraph_relationships']:
        subgraph_relationships = args['--subgraph_relationships']
    else:
        subgraph_relationships = None
    # NOTE: --go-basic-scoping overrides any relationship lists given above.
    if args['--go-basic-scoping']:
        supergraph_relationships = ['is_a', 'part_of']
        subgraph_relationships = ['is_a', 'part_of']
    if args['--test']:
        test = True
    else:
        test = False

    # Building the supergraph
    database = open(args['<database_file>'], 'r')
    output_directory = args['<output_directory>']
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    database_name = os.path.basename(args['<database_file>'])
    # The database filename selects the graph/parser implementation; only
    # 'go.obo' is currently supported.
    graph_class = {'go.obo': godag.GoGraph(supergraph_namespace, supergraph_relationships)}
    try:
        supergraph = graph_class[database_name]
    except KeyError:
        print("The provided ontology filename was not recognized. Please do not rename ontology files. The accepted list of file names are as follows: \n", graph_class.keys())
        sys.exit()
    parsing_class = {'go.obo': ontologyparser.GoParser(database, supergraph)}
    try:
        parsing_class[database_name].parse()
    except KeyError:
        print("The provided ontology filename was not recognized. Please do not rename ontology files. The accepted list of file names are as follows: \n", graph_class.keys())
        sys.exit()
    if args['--output_termlist']:
        tools.jsonpickle_save(list(supergraph.id_index.keys()), os.path.join(args['<output_directory>'], "termlist"))
    # Save a GO id -> term name translation table alongside the results.
    id_translation = dict()
    for id, node in supergraph.id_index.items():
        id_translation[id] = node.name
    tools.jsonpickle_save(id_translation, os.path.join(args['<output_directory>'], "id_translation"))
    database.close()

    # Building and collecting subgraphs
    subgraph_collection = dict()
    with open(args['<keyword_file>'], newline='') as file:
        reader = csv.reader(file, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for row in reader:
            # column 0: category name; column 1: semicolon-separated keywords
            subgraph_name = row[0]
            keyword_list = [keyword for keyword in re.split(';', row[1])]
            subgraph_collection[subgraph_name] = subdag.SubGraph.from_filtered_graph(supergraph, subgraph_name, keyword_list, subgraph_namespace, subgraph_relationships)

    # Handling superset mapping
    if not args['--map_supersets']:
        category_subsets = find_category_subsets(subgraph_collection)
    else:
        print("NOTE: supersets were mapped.")
        category_subsets = None

    # Merge every subgraph's term->category mappings into collection-wide maps.
    collection_id_mapping = dict()
    collection_node_mapping = dict()
    collection_content_mapping = dict()
    for subgraph_name, subgraph in subgraph_collection.items():
        for node_id, category_node_id in subgraph.root_id_mapping.items():
            try:
                collection_id_mapping[node_id].update([category_node_id])
            except KeyError:
                collection_id_mapping[node_id] = set([category_node_id])
        for node, category_node in subgraph.root_node_mapping.items():
            try:
                collection_node_mapping[node].update(category_node)
            except KeyError:
                collection_node_mapping[node] = {category_node}
        for rep_node, content in subgraph.content_mapping.items():
            collection_content_mapping[rep_node] = content

    # Remove root nodes that are subsets of existing root nodes from mapping
    if category_subsets:
        for node_id, root_id_list in collection_id_mapping.items():
            for subset_id, superset_ids in category_subsets.items():
                if subset_id in root_id_list:
                    # drop the superset categories in place; the set itself is
                    # mutated, not the dict being iterated
                    [root_id_list.remove(node) for node in superset_ids if node in root_id_list]
    # TODO: do the same for node_object_mapping

    # Save mapping files and create report
    tools.jsonpickle_save(collection_id_mapping, os.path.join(args['<output_directory>'], "GC_id_mapping"))
    tools.json_save(collection_id_mapping, os.path.join(args['<output_directory>'], "GC_id_mapping"))
    tools.jsonpickle_save(collection_content_mapping, os.path.join(args['<output_directory>'], "GC_content_mapping"))
    tools.json_save(collection_content_mapping, os.path.join(args['<output_directory>'], "GC_content_mapping"))

    with open(os.path.join(output_directory, 'subgraph_report.txt'), 'w') as report_file:
        report_file.write(
            'Subgraph data\nSupergraph filter: {}\nSubgraph filter: {}\nGO terms in the supergraph: {}\nGO terms in subgraphs: {}\nRelationship prevalence: {}'.format(
                supergraph_namespace, subgraph_namespace, len(set(supergraph.node_list)),
                len(set(collection_id_mapping.keys())), supergraph.relationship_count))
        for subgraph_name, subgraph in subgraph_collection.items():
            out_string = """
    -------------------------
    {}
    Subgraph relationships: {}
    Seeded size: {}
    Representative node: {}
    Nodes added: {}
    Non-subgraph hits (orphans): {}
    Total nodes: {}
    """.format(subgraph_name, subgraph.relationship_count, subgraph.seeded_size,
               [node.name for node in subgraph.category_node.child_node_set], len(subgraph.node_list) - subgraph.seeded_size,
               len(subgraph.node_list) - len(subgraph.root_id_mapping.keys()),
               len(subgraph.root_id_mapping.keys()))
            report_file.write(out_string)

    # FIXME: cannot json save due to recursion of objects within objects...
    # tools.jsonpickle_save(collection_node_mapping, os.path.join(args['<output_directory>'], "GC_node_mapping"))

    if args['--network_table_name']:
        network_table_name = args['--network_table_name']
    else:
        network_table_name = "Network_table.csv"
    # Making a file for network visualization via Cytoscape 3.0
    with open(os.path.join(args['<output_directory>'], network_table_name), 'w', newline='') as network_table:
        edgewriter = csv.writer(network_table, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for term_id, root_id_list in collection_id_mapping.items():
            for root_id in root_id_list:
                edgewriter.writerow([term_id, root_id])

    # --test: serialize graphs for cross-version comparison (see module tests).
    if test:
        json_graph_destination_file = os.path.join(args['<output_directory>'], str(__version__)+'_graph_output.json')
        with open(json_graph_destination_file, 'w') as destfile:
            destfile.write(jsonpickle.encode(json_format_graph(supergraph, 'supergraph')))
            for subgraph_name, subgraph in subgraph_collection.items():
                destfile.write(jsonpickle.encode(json_format_graph(subgraph, subgraph_name)))
        jsonpickle_supergraph_destination_file = os.path.join(args['<output_directory>'], str(__version__)+'_supergraph_output.jsonpickle')
        # NOTE: jsonpickle_clean_graph mutates the graphs in place, so they are
        # no longer usable as graphs after this point.
        jsonpickle_clean_graph(supergraph)
        with open(jsonpickle_supergraph_destination_file, 'w') as destfile:
            destfile.write(jsonpickle.encode(supergraph))
        for subgraph_name, subgraph in subgraph_collection.items():
            jsonpickle_clean_graph(subgraph)
            jsonpickle_subgraph_destination_file = os.path.join(args['<output_directory>'], str(__version__)+'_'+subgraph_name+'_output.jsonpickle')
            with open(jsonpickle_subgraph_destination_file, 'w') as destfile:
                destfile.write(jsonpickle.encode(subgraph))
def jsonpickle_clean_graph(graph):
    """Strip circular references from *graph* in place so jsonpickle can
    encode it without hitting the recursion limit.

    Object references (parents, children, ancestors, orphans, ...) are
    replaced by sorted lists of node-id strings; edge lists are dropped.
    """
    def _id_strings(members):
        # replace object references with their sorted string ids
        return sorted([str(member.id) for member in members])

    # flatten per-node references
    for node in graph.node_list:
        node.edges = None
        if node.parent_node_set:
            node.parent_node_set = _id_strings(node.parent_node_set)
        if node.child_node_set:
            node.child_node_set = _id_strings(node.child_node_set)
        if node._descendants:
            node._descendants = _id_strings(node._descendants)
        if node._ancestors:
            node._ancestors = _id_strings(node._ancestors)

    # flatten graph-level sets and drop the back-reference to the supergraph
    if getattr(graph, "super_graph", None):
        graph.super_graph = None
    if graph._orphans:
        graph._orphans = _id_strings(graph._orphans)
    if graph._leaves:
        graph._leaves = _id_strings(graph._leaves)
    for vocab in graph.vocab_index:
        graph.vocab_index[vocab] = _id_strings(graph.vocab_index[vocab])
    if graph.used_relationship_set:
        graph.used_relationship_set = sorted(graph.used_relationship_set)
# TODO: the workaround for accessing the sole item in the set here is hacky, fix this later.
def find_category_subsets(subgraph_collection):
    """Finds subgraphs which are subsets of other subgraphs to remove redundancy, when specified.

    :param subgraph_collection: A dictionary of subgraph objects (keys: subgraph name, values: subgraph object).
    :return: A dictionary relating which subgraph objects are subsets of other subgraphs (keys: subset subgraph, values: superset subgraphs).
    :rtype: :py:obj:`dict`
    """
    def _sole_category_id(sg):
        # a subgraph only participates when its category node has exactly one child
        children = sg.category_node.child_node_set
        return next(iter(children)).id if len(children) == 1 else None

    is_subset_of = dict()
    subgraphs = list(subgraph_collection.values())
    for inner in subgraphs:
        inner_id = _sole_category_id(inner)
        if inner_id is None:
            continue
        for outer in subgraphs:
            outer_id = _sole_category_id(outer)
            if outer_id is None:
                continue
            # inner's category term is contained in outer -> inner is a subset of outer
            if inner_id != outer_id and inner_id in outer.root_id_mapping.keys():
                is_subset_of.setdefault(inner_id, set()).add(outer_id)
    return is_subset_of
def categorize_dataset(args):
    """Reads in a Gene Annotation File (GAF) and maps the annotations contained therein to the categories organized by
    GOcats or other methods. Outputs a mapped GAF and a list of unmapped genes in the specified output directory.
    :param dataset_file: A file containing gene annotations.
    :param term_mapping: A dictionary mapping category-defining ontology terms to their subgraph children terms. May be produced by GOcats or another method.
    :param output_directory: Specify the directory where the output file will be stored.
    :param mapped_dataset_filename: Specify the desired name of the mapped GAF.
    :param --dataset_type: Enter file type for dataset [GAF|TSV|CSV]. Defaults to GAF.
    :param --entity_col=<0>: If CSV or TSV file type, indicate which column the entity IDs are listed. Defaults to 0.
    :param --go_col: If CSV or TSV file type, indicate which column the GO IDs are listed. Defaults to 1.
    :param --retain_unmapped_annotations: If specified, annotations that are not mapped to a concept are copied into the mapped dataset output file with its original annotation.
    :return: None
    :rtype: :py:obj:`None`
    """
    # Resolve optional docopt-style arguments to their documented defaults.
    if args['--dataset_type']:
        dataset_type = args['--dataset_type']
    else:
        dataset_type = None
    if args['--entity_col']:
        entity_id_index = int(args['--entity_col'])
    else:
        entity_id_index = 0
    if args['--go_col']:
        go_id_index = int(args['--go_col'])
    else:
        go_id_index = 1
    mapping_dict = tools.jsonpickle_load(args['<term_mapping>'])
    output_directory = os.path.realpath(args['<output_directory>'])
    mapped_dataset_filename = args['<mapped_dataset_filename>']
    unmapped_entities = set()
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    # GAF is the default format when --dataset_type is omitted.
    if dataset_type == "GAF" or not dataset_type:
        loaded_gaf_array = tools.parse_gaf(args['<dataset_file>'])
        mapped_gaf_array = list()
        for line in loaded_gaf_array:
            # Column 4 of a GAF line holds the GO ID; emit one output line
            # per mapped concept term.
            if line[4] in mapping_dict.keys():
                mapped_terms = mapping_dict[line[4]]
                for term in mapped_terms:
                    # NOTE(review): line[5:-1] drops the final GAF column —
                    # presumably a trailing empty field from parsing; confirm
                    # against tools.parse_gaf.
                    mapped_gaf_array.append(line[0:4] + [term] + line[5:-1])
            else:
                # Column 2 is the gene symbol; fall back to the DB object ID
                # (column 1) when it is empty.
                if line[2] == '':
                    unmapped_entities.add('NO_GENE:' + line[1])
                else:
                    unmapped_entities.add(line[2])
        tools.write_out_gaf(mapped_gaf_array, os.path.join(output_directory, mapped_dataset_filename))
        tools.list_to_file(os.path.join(output_directory, mapped_dataset_filename + '_unmappedEntities'), unmapped_entities)
    elif dataset_type == "CSV":
        mapped_rows = []
        with open(os.path.realpath(args['<dataset_file>']), 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            header = next(csv_reader)
            for row in csv_reader:
                mapped_row = row
                if mapped_row[go_id_index] in mapping_dict.keys():
                    # NOTE(review): only the LAST concept term survives here
                    # (each iteration overwrites go_id_index), unlike the GAF
                    # branch which emits one row per term — confirm intended.
                    for concept_term in mapping_dict[mapped_row[go_id_index]]:
                        mapped_row[go_id_index] = concept_term
                    mapped_rows.append(mapped_row)
                else:
                    unmapped_entities.add(mapped_row[entity_id_index])
                    if args['--retain_unmapped_annotations']:
                        mapped_rows.append(mapped_row)
        mapped_rows.insert(0, header)
        with open(os.path.join(output_directory, mapped_dataset_filename), 'w') as output_csv:
            csv_writer = csv.writer(output_csv, delimiter=',')
            for row in mapped_rows:
                csv_writer.writerow([item for item in row])
        # NOTE(review): filename here lacks the mapped_dataset_filename prefix
        # used by the GAF branch — confirm the inconsistency is intended.
        tools.list_to_file(os.path.join(output_directory, 'unmappedEntities'), unmapped_entities)
# ---
#!/usr/bin/env python
# coding: utf8
# ____ _____
# ________ _________ ____ / __ \/ ___/
# / ___/ _ \/ ___/ __ \/ __ \/ / / /\__ \
# / / / __/ /__/ /_/ / / / / /_/ /___/ /
# /_/ \___/\___/\____/_/ /_/\____//____/
#
# ======================================================================
#
# project: ReconOS
# author: <NAME>, University of Paderborn
# <NAME>, University of Paderborn
# <NAME>, University of Paderborn
# description: Script to add the ReconOS cores and HWTs to an mhs.
#
# ======================================================================
import sys
import mhstools
# FIFO depths (in words) for the memory and OS interfaces
DEFAULT_MEMIF_FIFO_DEPTH = 128
DEFAULT_OSIF_FIFO_DEPTH = 32
# AXI memory map of the ReconOS control cores
OSIF_FIFO_BASE_ADDR = 0x75A00000
OSIF_FIFO_MEM_SIZE = 0x10000
OSIF_INTC_BASE_ADDR = 0x7B400000
OSIF_INTC_MEM_SIZE = 0x10000
PROC_CONTROL_BASE_ADDR = 0x6FE00000
PROC_CONTROL_MEM_SIZE = 0x10000
# set default to zynq (architecture-specific names selected in main())
HWT_BUS = "axi_hwt"
MEMORY_BUS = "axi_acp"
DEFAULT_CLK = "processing_system7_0_FCLK_CLK0"
DEFAULT_RST = "processing_system7_0_FCLK_RESET0_N"
DEFAULT_RST_POLARITY = 0 # 0 for negative, 1 for positive
def hwt_reconf(name, num, version, use_mem, num_static_hwts):
    """Build the MHS pcore for a reconfigurable hardware-thread slot."""
    core = mhstools.MHSPCore(name)
    add = core.addEntry
    add("PARAMETER", "INSTANCE", "hwt_reconf_%d" % (num - num_static_hwts))
    add("PARAMETER", "HW_VER", version)
    add("BUS_INTERFACE", "OSIF_FIFO_Sw2Hw", "reconos_osif_fifo_%d_sw2hw_FIFO_S" % num)
    add("BUS_INTERFACE", "OSIF_FIFO_Hw2Sw", "reconos_osif_fifo_%d_hw2sw_FIFO_M" % num)
    if use_mem:
        # only wire the memory interface when a memory subsystem exists
        add("BUS_INTERFACE", "MEMIF_FIFO_Hwt2Mem", "reconos_memif_fifo_%d_hwt2mem_FIFO_M" % num)
        add("BUS_INTERFACE", "MEMIF_FIFO_Mem2Hwt", "reconos_memif_fifo_%d_mem2hwt_FIFO_S" % num)
    add("PORT", "HWT_Clk", DEFAULT_CLK)
    add("PORT", "HWT_Rst", "reconos_proc_control_0_PROC_Hwt_Rst_%d" % num)
    return core
def hwt_static(name, num, version, use_mem):
    """Build the MHS pcore for a statically placed hardware thread."""
    core = mhstools.MHSPCore(name)
    entries = [
        ("PARAMETER", "INSTANCE", "hwt_static_%d" % num),
        ("PARAMETER", "HW_VER", version),
        ("BUS_INTERFACE", "OSIF_FIFO_Sw2Hw", "reconos_osif_fifo_%d_sw2hw_FIFO_S" % num),
        ("BUS_INTERFACE", "OSIF_FIFO_Hw2Sw", "reconos_osif_fifo_%d_hw2sw_FIFO_M" % num),
    ]
    if use_mem:
        # only wire the memory interface when a memory subsystem exists
        entries.append(("BUS_INTERFACE", "MEMIF_FIFO_Hwt2Mem", "reconos_memif_fifo_%d_hwt2mem_FIFO_M" % num))
        entries.append(("BUS_INTERFACE", "MEMIF_FIFO_Mem2Hwt", "reconos_memif_fifo_%d_mem2hwt_FIFO_S" % num))
    entries.append(("PORT", "HWT_Clk", DEFAULT_CLK))
    entries.append(("PORT", "HWT_Rst", "reconos_proc_control_0_PROC_Hwt_Rst_%d" % num))
    for kind, key, value in entries:
        core.addEntry(kind, key, value)
    return core
def osif_fifo(num, direction):
    """Build an OSIF FIFO pcore for slot *num* (direction 'sw2hw' or 'hw2sw')."""
    prefix = "reconos_osif_fifo_%d_" % num + direction
    fifo = mhstools.MHSPCore("reconos_fifo")
    fifo.addEntry("PARAMETER", "INSTANCE", prefix)
    fifo.addEntry("PARAMETER", "HW_VER", "1.00.a")
    fifo.addEntry("PARAMETER", "C_FIFO_DEPTH", DEFAULT_OSIF_FIFO_DEPTH)
    fifo.addEntry("BUS_INTERFACE", "FIFO_M", prefix + "_FIFO_M")
    fifo.addEntry("BUS_INTERFACE", "FIFO_S", prefix + "_FIFO_S")
    if direction == "hw2sw":
        # only the hw2sw FIFO exposes a data-available signal (feeds the INTC)
        fifo.addEntry("PORT", "FIFO_Has_Data", prefix + "_FIFO_Has_Data")
    fifo.addEntry("PORT", "FIFO_Rst", "reconos_proc_control_0_PROC_Hwt_Rst_%d" % num)
    fifo.addEntry("PORT", "FIFO_S_Clk", DEFAULT_CLK)
    return fifo
# HW_VER, C_FIFO_WIDTH, OSIF_FIFO_BASE_ADDR, OSIF_FIFO_MEM_SIZE
def osif(num_hwts):
    """Build the OSIF pcore multiplexing *num_hwts* FIFO pairs onto the HWT bus."""
    core = mhstools.MHSPCore("reconos_osif")
    add = core.addEntry
    add("PARAMETER", "INSTANCE", "reconos_osif_0")
    add("PARAMETER", "HW_VER", "1.00.a")
    add("PARAMETER", "C_BASEADDR", "0x%x" % OSIF_FIFO_BASE_ADDR)
    add("PARAMETER", "C_HIGHADDR", "0x%x" % (OSIF_FIFO_BASE_ADDR + OSIF_FIFO_MEM_SIZE - 1))
    add("PARAMETER", "C_NUM_FIFOS", num_hwts)
    add("PARAMETER", "C_FIFO_WIDTH", "32")
    for slot in range(num_hwts):
        add("BUS_INTERFACE", "FIFO_S_%d" % slot, "reconos_osif_fifo_%d_hw2sw_FIFO_S" % slot)
        add("BUS_INTERFACE", "FIFO_M_%d" % slot, "reconos_osif_fifo_%d_sw2hw_FIFO_M" % slot)
    add("BUS_INTERFACE", "S_AXI", HWT_BUS)
    add("PORT", "S_AXI_ACLK", DEFAULT_CLK)
    return core
# HW_VER, OSIF_INTC_BASE_ADDR, OSIF_INCT_MEM_SIZE
def osif_intc(num_hwts):
    """Build the OSIF interrupt controller aggregating one interrupt per HWT."""
    core = mhstools.MHSPCore("reconos_osif_intc")
    add = core.addEntry
    add("PARAMETER", "INSTANCE", "reconos_osif_intc_0")
    add("PARAMETER", "HW_VER", "1.00.a")
    add("PARAMETER", "C_BASEADDR", "0x%x" % OSIF_INTC_BASE_ADDR)
    add("PARAMETER", "C_HIGHADDR", "0x%x" % (OSIF_INTC_BASE_ADDR + OSIF_INTC_MEM_SIZE - 1))
    add("PARAMETER", "C_NUM_INTERRUPTS", num_hwts)
    add("BUS_INTERFACE", "S_AXI", HWT_BUS)
    add("PORT", "S_AXI_ACLK", DEFAULT_CLK)
    add("PORT", "OSIF_INTC_Out", "reconos_osif_intc_0_OSIF_INTC_Out")
    for slot in range(num_hwts):
        # each hw2sw FIFO's data-available line is one interrupt source
        add("PORT", "OSIF_INTC_In_%d" % slot, "reconos_osif_fifo_%d_hw2sw_FIFO_Has_Data" % slot)
    add("PORT", "OSIF_INTC_Rst", "reconos_proc_control_0_PROC_Sys_Rst")
    return core
# C_FIFO_DEPTH
def memif_fifo(num, direction):
    """Build a MEMIF FIFO pcore for slot *num* (direction 'hwt2mem' or 'mem2hwt')."""
    prefix = "reconos_memif_fifo_%d_" % num + direction
    fifo = mhstools.MHSPCore("reconos_fifo")
    fifo.addEntry("PARAMETER", "INSTANCE", prefix)
    fifo.addEntry("PARAMETER", "HW_VER", "1.00.a")
    fifo.addEntry("PARAMETER", "C_FIFO_DEPTH", DEFAULT_MEMIF_FIFO_DEPTH)
    fifo.addEntry("BUS_INTERFACE", "FIFO_M", prefix + "_FIFO_M")
    fifo.addEntry("BUS_INTERFACE", "FIFO_S", prefix + "_FIFO_S")
    fifo.addEntry("PORT", "FIFO_Rst", "reconos_proc_control_0_PROC_Hwt_Rst_%d" % num)
    fifo.addEntry("PORT", "FIFO_S_Clk", DEFAULT_CLK)
    return fifo
# HW_VER, PROC_CONTROL_BASE_ADDR, PROC_CONTROL_MEM_SIZE
def proc_control(num_hwts, use_mmu):
    """Build the proc_control pcore (per-HWT resets, system reset, optional MMU wiring)."""
    core = mhstools.MHSPCore("reconos_proc_control")
    add = core.addEntry
    add("PARAMETER", "INSTANCE", "reconos_proc_control_0")
    add("PARAMETER", "HW_VER", "1.00.a")
    add("PARAMETER", "C_BASEADDR", "0x%x" % PROC_CONTROL_BASE_ADDR)
    add("PARAMETER", "C_HIGHADDR", "0x%x" % (PROC_CONTROL_BASE_ADDR + PROC_CONTROL_MEM_SIZE - 1))
    add("PARAMETER", "C_NUM_HWTS", num_hwts)
    add("BUS_INTERFACE", "S_AXI", HWT_BUS)
    add("PORT", "S_AXI_ACLK", DEFAULT_CLK)
    add("PORT", "PROC_Clk", DEFAULT_CLK)
    add("PORT", "PROC_Rst", DEFAULT_RST)
    for slot in range(num_hwts):
        add("PORT", "PROC_Hwt_Rst_%d" % slot, "reconos_proc_control_0_PROC_Hwt_Rst_%d" % slot)
    add("PORT", "PROC_Sys_Rst", "reconos_proc_control_0_PROC_Sys_Rst")
    if use_mmu:
        # page-fault interrupt plus the MMU status/control signals
        add("PORT", "PROC_Pgf_Int", "reconos_proc_control_0_PROC_Pgf_Int")
        add("PORT", "MMU_Pgf", "reconos_memif_mmu_0_MMU_Pgf")
        add("PORT", "MMU_Fault_Addr", "reconos_memif_mmu_0_MMU_Fault_Addr")
        add("PORT", "MMU_Retry", "reconos_proc_control_0_MMU_Retry")
        add("PORT", "MMU_Pgd", "reconos_proc_control_0_MMU_Pgd")
        add("PORT", "MMU_Tlb_Hits", "reconos_memif_mmu_0_MMU_Tlb_Hits")
        add("PORT", "MMU_Tlb_Misses", "reconos_memif_mmu_0_MMU_Tlb_Misses")
    return core
# HW_VER
def arbiter(num_hwts):
    """Build the MEMIF arbiter multiplexing all per-HWT memory FIFOs."""
    core = mhstools.MHSPCore("reconos_memif_arbiter")
    add = core.addEntry
    add("PARAMETER", "INSTANCE", "reconos_memif_arbiter_0")
    add("PARAMETER", "HW_VER", "1.00.a")
    add("PARAMETER", "C_NUM_HWTS", num_hwts)
    for slot in range(num_hwts):
        add("BUS_INTERFACE", "MEMIF_FIFO_In_Hwt2Mem_%d" % slot, "reconos_memif_fifo_%d_hwt2mem_FIFO_S" % slot)
        add("BUS_INTERFACE", "MEMIF_FIFO_In_Mem2Hwt_%d" % slot, "reconos_memif_fifo_%d_mem2hwt_FIFO_M" % slot)
    add("BUS_INTERFACE", "MEMIF_FIFO_Out_Mem2Hwt", "reconos_memif_arbiter_0_MEMIF_FIFO_Out_Mem2Hwt")
    add("BUS_INTERFACE", "MEMIF_FIFO_Out_Hwt2Mem", "reconos_memif_arbiter_0_MEMIF_FIFO_Out_Hwt2Mem")
    add("BUS_INTERFACE", "CTRL_FIFO_Out", "reconos_memif_arbiter_0_CTRL_FIFO_Out")
    add("PORT", "TCTRL_Clk", DEFAULT_CLK)
    add("PORT", "TCTRL_Rst", "reconos_proc_control_0_PROC_Sys_Rst")
    return core
# HW_VER, C_PAGE_SIZE, C_MAX_BURST_SIZE
def burst_converter():
    """Build the burst converter sitting between the arbiter and MMU/memory."""
    core = mhstools.MHSPCore("reconos_memif_burst_converter")
    add = core.addEntry
    add("PARAMETER", "INSTANCE", "reconos_memif_burst_converter_0")
    add("PARAMETER", "HW_VER", "1.00.a")
    add("PARAMETER", "C_PAGE_SIZE", 4096)
    add("PARAMETER", "C_MAX_BURST_SIZE", 256)
    add("BUS_INTERFACE", "CTRL_FIFO_In", "reconos_memif_arbiter_0_CTRL_FIFO_Out")
    add("BUS_INTERFACE", "CTRL_FIFO_Out", "reconos_memif_burst_converter_0_CTRL_FIFO_Out")
    add("PORT", "BCONV_Clk", DEFAULT_CLK)
    add("PORT", "BCONV_Rst", "reconos_proc_control_0_PROC_Sys_Rst")
    return core
# HW_VER, C_TLB_SIZE
def mmu(arch):
    """Build the architecture-specific MEMIF MMU pcore."""
    core = mhstools.MHSPCore("reconos_memif_mmu_" + arch)
    add = core.addEntry
    add("PARAMETER", "INSTANCE", "reconos_memif_mmu_0")
    add("PARAMETER", "HW_VER", "1.00.a")
    add("PARAMETER", "C_TLB_SIZE", 16)
    add("BUS_INTERFACE", "CTRL_FIFO_In", "reconos_memif_burst_converter_0_CTRL_FIFO_Out")
    add("BUS_INTERFACE", "CTRL_FIFO_Out", "reconos_memif_mmu_0_CTRL_FIFO_Out")
    add("BUS_INTERFACE", "CTRL_FIFO_Mmu", "reconos_memif_mmu_0_CTRL_FIFO_Mmu")
    add("BUS_INTERFACE", "MEMIF_FIFO_Mmu", "reconos_memif_memory_controller_0_MEMIF_FIFO_Mmu")
    add("PORT", "MMU_Pgf", "reconos_memif_mmu_0_MMU_Pgf")
    add("PORT", "MMU_Fault_Addr", "reconos_memif_mmu_0_MMU_Fault_Addr")
    add("PORT", "MMU_Retry", "reconos_proc_control_0_MMU_Retry")
    add("PORT", "MMU_Pgd", "reconos_proc_control_0_MMU_Pgd")
    add("PORT", "MMU_Tlb_Hits", "reconos_memif_mmu_0_MMU_Tlb_Hits")
    add("PORT", "MMU_Tlb_Misses", "reconos_memif_mmu_0_MMU_Tlb_Misses")
    add("PORT", "MMU_Clk", DEFAULT_CLK)
    add("PORT", "MMU_Rst", "reconos_proc_control_0_PROC_Sys_Rst")
    return core
# HW_VER
def memory_controller(use_mmu):
    """Build the MEMIF memory controller bridging to the system memory bus."""
    core = mhstools.MHSPCore("reconos_memif_memory_controller")
    add = core.addEntry
    add("PARAMETER", "INSTANCE", "reconos_memif_memory_controller_0")
    add("PARAMETER", "HW_VER", "1.00.a")
    if use_mmu:
        # the control path runs through the MMU when one is present
        add("PARAMETER", "C_USE_MMU_PORT", "TRUE")
        add("BUS_INTERFACE", "CTRL_FIFO_Mmu", "reconos_memif_mmu_0_CTRL_FIFO_Mmu")
        add("BUS_INTERFACE", "MEMIF_FIFO_Mmu", "reconos_memif_memory_controller_0_MEMIF_FIFO_Mmu")
        add("BUS_INTERFACE", "CTRL_FIFO_Hwt", "reconos_memif_mmu_0_CTRL_FIFO_Out")
    else:
        # without an MMU the burst converter feeds the controller directly
        add("PARAMETER", "C_USE_MMU_PORT", "FALSE")
        add("BUS_INTERFACE", "CTRL_FIFO_Hwt", "reconos_memif_burst_converter_0_CTRL_FIFO_Out")
    add("BUS_INTERFACE", "MEMIF_FIFO_Mem2Hwt", "reconos_memif_arbiter_0_MEMIF_FIFO_Out_Mem2Hwt")
    add("BUS_INTERFACE", "MEMIF_FIFO_Hwt2Mem", "reconos_memif_arbiter_0_MEMIF_FIFO_Out_Hwt2Mem")
    add("BUS_INTERFACE", "M_AXI", MEMORY_BUS)
    add("PORT", "M_AXI_ACLK", DEFAULT_CLK)
    add("PORT", "MEMCTRL_Clk", DEFAULT_CLK)
    add("PORT", "MEMCTRL_Rst", "reconos_proc_control_0_PROC_Sys_Rst")
    return core
def print_help():
    """Write usage information for this script to stderr."""
    usage = ("Usage: mhsaddhwts.py [-nommu] [-reconf] [-nomem] <architecture> <system.mhs> "
             "<num_static_hwts> <num_reconf_regions> <hwt0_version>[#<count>] <hwt1_version>[#<count>] ...\n")
    sys.stderr.write(usage)
    sys.stderr.write("Output: new mhs-file with added ReconOS system and hardware threads.\n")
class HWT:
    """
    This class represents a HWT
    fields: self.name
            self.version
            self.count
            self.is_reconf
            self.slots
    """
    def __init__(self, hwt_str, is_reconf):
        # parsed from hwt_str by self.parse()
        self.name = ""
        self.version = ""
        self.count = 0          # number of instances (static) / used slots (reconf)
        self.is_reconf = is_reconf
        self.slots = []         # reconf only: region indices this thread occupies
        self.parse(hwt_str)
        # a static thread without an explicit #count defaults to one instance
        if not self.is_reconf and self.count == 0:
            self.count = 1
    def parse(self, hwt_str):
        # Parses strings of the form "<name>_v<a>_<bb>_<c>[#<count|slot,slot,...>]".
        # NOTE(review): the fixed offsets below assume the version suffix is
        # exactly 8 characters ("_vA_BB_C") before '#' or end of string —
        # confirm against the expected command-line format.
        STATE_NAME = 0
        STATE_STATIC_OPTION = 1
        STATE_RECONF_OPTION = 2
        state = STATE_NAME
        for i in range(len(hwt_str)):
            last = (i == len(hwt_str) - 1)
            if state == STATE_NAME:
                if hwt_str[i] == '#':
                    # name ends 8 chars before '#'; version is the 6 chars "A_BB_C"
                    self.name = hwt_str[:i - 8]
                    self.version = hwt_str[i - 6:i].replace("_", ".")
                    if self.is_reconf:
                        start = i + 1
                        state = STATE_RECONF_OPTION
                    else:
                        state = STATE_STATIC_OPTION
                elif last:
                    # no '#': the whole string is name + version suffix
                    self.name = hwt_str[:i - 7]
                    self.version = hwt_str[i - 5:].replace("_", ".")
            elif state == STATE_STATIC_OPTION:
                # static: everything after '#' is the instance count
                self.count = int(hwt_str[i:])
                break
            elif state == STATE_RECONF_OPTION:
                # reconf: comma-separated list of slot indices after '#'
                if hwt_str[i] == ',':
                    self.slots.append(int(hwt_str[start:i]))
                    self.count += 1
                    start = i + 1
                elif last:
                    self.slots.append(int(hwt_str[start:]))
                    self.count += 1
    def __str__(self):
        return "HWT: " + self.name + ", Version: " + self.version + ", Count: " + str(self.count) + ", Reconfigurable: " + str(self.is_reconf)
def main():
    """Parse the command line, add the ReconOS cores/HWTs to the MHS and print it.

    Usage (see print_help): mhsaddhwts.py [-nommu] [-reconf] [-nomem]
    <architecture> <system.mhs> <num_static_hwts> <num_reconf_regions> <hwt...>
    """
    # BUG FIX: these names are read as module globals by the pcore factory
    # functions at call time; without this `global` statement the
    # architecture-specific assignments below only created locals, so the
    # microblaze bus/clock names were silently ignored.
    global HWT_BUS, MEMORY_BUS, DEFAULT_CLK, DEFAULT_RST, DEFAULT_RST_POLARITY
    # at first parse all parameters
    if len(sys.argv) < 6:
        print_help()
        sys.exit(1)
    arg_pos = 1
    if sys.argv[arg_pos] == "-nommu":
        use_mmu = False
        arg_pos += 1
    else:
        use_mmu = True
    if sys.argv[arg_pos] == "-reconf":
        use_reconf = True
        arg_pos += 1
    else:
        use_reconf = False
    if sys.argv[arg_pos] == "-nomem":
        # no memory subsystem implies no MMU either
        use_mem = False
        use_mmu = False
        arg_pos += 1
    else:
        use_mem = True
    arch = sys.argv[arg_pos]
    mhs = mhstools.MHS(sys.argv[arg_pos + 1])
    num_static_hwts = int(sys.argv[arg_pos + 2])
    num_reconf_regions = int(sys.argv[arg_pos + 3])
    hwts_str = sys.argv[arg_pos + 4:]
    # select architecture-dependent bus/clock/reset names
    if arch == "zynq":
        HWT_BUS = "axi_hwt"
        MEMORY_BUS = "axi_acp"
        DEFAULT_CLK = "processing_system7_0_FCLK_CLK0"
        DEFAULT_RST = "processing_system7_0_FCLK_RESET0_N"
        DEFAULT_RST_POLARITY = 0  # 0 for negative, 1 for positive
    elif arch == "microblaze":
        HWT_BUS = "axi_sys"
        MEMORY_BUS = "axi_mem"
        DEFAULT_CLK = "clk_100_0000MHzMMCM0"
        DEFAULT_RST = "proc_sys_reset_0_Peripheral_aresetn"
        DEFAULT_RST_POLARITY = 0  # 0 for negative, 1 for positive
    else:
        sys.stderr.write("ERROR: Architecture not supported\n")
        sys.exit(1)
    # insert reconos system
    if num_static_hwts < 1 and num_reconf_regions < 1:
        sys.stderr.write("ERROR: you must specify at least 1 hardware thread\n")
        sys.exit(1)
    num_hwts = num_static_hwts + num_reconf_regions
    # parse HWT strings; with -reconf the LAST entry describes the
    # reconfigurable threads, all others are static
    hwts = []
    for i in range(len(hwts_str)):
        if use_reconf and i == len(hwts_str) - 1:
            hwts.append(HWT(hwts_str[i], True))
        else:
            hwts.append(HWT(hwts_str[i], False))
    # insert HWTs
    count = 0
    # add all static hardware threads, each with its OSIF (and MEMIF) FIFOs
    for hwt in hwts:
        if not hwt.is_reconf:
            for _ in range(hwt.count):
                mhs.addPCore(hwt_static(hwt.name, count, hwt.version, use_mem))
                mhs.addPCore(osif_fifo(count, "sw2hw"))
                mhs.addPCore(osif_fifo(count, "hw2sw"))
                if use_mem:
                    mhs.addPCore(memif_fifo(count, "hwt2mem"))
                    mhs.addPCore(memif_fifo(count, "mem2hwt"))
                count += 1
    # add all reconfigurable hardware threads; unused regions get an idle thread
    for region in range(num_reconf_regions):
        for hwt in hwts:
            if hwt.is_reconf:
                if hwt.slots.count(region) == 0 and hwt.count != 0:
                    mhs.addPCore(hwt_reconf("reconos_hwt_idle", count, "1.00.a", use_mem, num_static_hwts))
                else:
                    mhs.addPCore(hwt_reconf(hwt.name, count, hwt.version, use_mem, num_static_hwts))
                mhs.addPCore(osif_fifo(count, "sw2hw"))
                mhs.addPCore(osif_fifo(count, "hw2sw"))
                mhs.addPCore(memif_fifo(count, "hwt2mem"))
                mhs.addPCore(memif_fifo(count, "mem2hwt"))
                count += 1
    # add osif
    mhs.addPCore(osif(num_hwts))
    mhs.addPCore(osif_intc(num_hwts))
    # insert proc control
    mhs.addPCore(proc_control(num_hwts, use_mmu))
    # add memory subsystem
    if use_mem:
        mhs.addPCore(arbiter(num_hwts))
        mhs.addPCore(burst_converter())
        if use_mmu:
            mhs.addPCore(mmu(arch))
        mhs.addPCore(memory_controller(use_mmu))
    if arch == "zynq":
        ps7 = mhs.getPCore("processing_system7_0")
        # BUG FIX: check for None before using the pcore (the original ran
        # `assert isinstance(ps7, ...)` first, which raised before the
        # friendly error message could be printed)
        if ps7 is None:
            sys.stderr.write("ERROR: no processing system found\n")
            sys.exit(1)
        if use_mem:
            # append our interrupt lines to the existing IRQ_F2P value
            # instead of overwriting it
            cur_irq = ps7.getValue("IRQ_F2P")
            cur_irq = cur_irq + " & reconos_proc_control_0_PROC_Pgf_Int & reconos_osif_intc_0_OSIF_INTC_Out"
            ps7.setValue("IRQ_F2P", cur_irq)
            ps7.addEntry("BUS_INTERFACE", "S_AXI_ACP", "axi_acp")
            ps7.addEntry("PARAMETER", "C_INTERCONNECT_S_AXI_ACP_MASTERS", "reconos_memif_memory_controller_0.M_AXI")
        else:
            ps7.addEntry("PORT", "IRQ_F2P", "reconos_osif_intc_0_OSIF_INTC_Out")
    elif arch == "microblaze":
        intc = mhs.getPCore("microblaze_0_intc")
        if intc is None:
            sys.stderr.write("ERROR: no interupt controller found\n")
            sys.exit(1)
        intr = intc.getValue("INTR")
        if use_mmu:
            intr = "reconos_proc_control_0_PROC_Pgf_Int & reconos_osif_intc_0_OSIF_INTC_Out & " + intr
        else:
            intr = "reconos_osif_intc_0_OSIF_INTC_Out & " + intr
        intc.setValue("INTR", intr)
        memctrl = mhs.getPCore("DDR3_SDRAM")
        # BUG FIX: the original re-tested `intc` here; the memory controller
        # lookup is what must be validated
        if memctrl is None:
            sys.stderr.write("ERROR: no memory controller found\n")
            sys.exit(1)
        memslv = memctrl.getValue("C_INTERCONNECT_S_AXI_MASTERS")
        memslv = "reconos_memif_memory_controller_0.M_AXI & " + memslv
        memctrl.setValue("C_INTERCONNECT_S_AXI_MASTERS", memslv)
    sys.stdout.write(str(mhs))
# script entry point: only run when executed directly, not when imported
if __name__ == "__main__":
    main()
# --- libs/strings.py ---
#!/usr/bin/env python3
"""Strings based routines"""
import os
import logging
import me.libs.decorators
import string
logger = logging.getLogger("STRINGS")
def strings(args=None):
    """Run every string demo in this module, in order."""
    logger.debug("strings")
    system_string_funcs()
    lc_alphabet()
    uc_alphabet()
    str_digits()
    remove_digits("ABC123DEF456GHI789JKL")
    anagram("thistt", "histt")
    rewrite_immutable()
    longstrings()
    quote_strings()
    format_strings()
    slice_strings()
    join_objects_as_strings([1, 2, 3, 4, 5, 6, 7])
def system_string_funcs():
    """Invoke each per-str-method demo stub in turn."""
    demos = (
        str_capitalize, str_casefold, str_center, str_count, str_encode,
        str_endswith, str_expandtabs, str_find, str_format, str_format_map,
        str_index, str_isalnum, str_isalpha, str_isascii, str_isdecimal,
        str_isdigit, str_isidentifier, str_islower, str_isnumeric,
        str_isprintable, str_isspace, str_istitle, str_isupper, str_join,
        str_ljust, str_lower, str_lstrip, str_maketrans, str_partition,
        str_removeprefix, str_removesuffix, str_replace, str_rfind,
        str_rindex, str_rjust, str_rpartition, str_rsplit, str_rstrip,
        str_split, str_splitlines, str_startswith, str_strip, str_swapcase,
        str_title, str_translate, str_upper, str_zfill,
    )
    for demo in demos:
        demo()
# NOTE(review): the stubs below are unimplemented placeholders, one per str
# method; only str_replace() currently demonstrates its method.
def str_capitalize():
    pass
def str_casefold():
    pass
def str_center():
    pass
def str_count():
    pass
def str_encode():
    pass
def str_endswith():
    pass
def str_expandtabs():
    pass
def str_find():
    pass
def str_format():
    pass
def str_format_map():
    pass
def str_index():
    pass
def str_isalnum():
    pass
def str_isalpha():
    pass
def str_isascii():
    pass
def str_isdecimal():
    pass
def str_isdigit():
    pass
def str_isidentifier():
    pass
def str_islower():
    pass
def str_isnumeric():
    pass
def str_isprintable():
    pass
def str_isspace():
    pass
def str_istitle():
    pass
def str_isupper():
    pass
def str_join():
    pass
def str_ljust():
    pass
def str_lower():
    pass
def str_lstrip():
    pass
def str_maketrans():
    pass
def str_partition():
    pass
def str_removeprefix():
    pass
def str_removesuffix():
    pass
def str_replace():
    # str.replace returns a new string; the original is unchanged (immutable)
    quote = "to be or not to be"
    logger.info(quote.replace("be", "me"))
    logger.info(quote)
def str_rfind():
    pass
def str_rindex():
    pass
def str_rjust():
    pass
def str_rpartition():
    pass
def str_rsplit():
    pass
def str_rstrip():
    pass
def str_split():
    pass
def str_splitlines():
    pass
def str_startswith():
    pass
def str_strip():
    pass
def str_swapcase():
    pass
def str_title():
    pass
def str_translate():
    pass
def str_upper():
    pass
def str_zfill():
    pass
def longstrings():
    """Demonstrate literal concatenation and triple-quoted strings."""
    secret = "".join(("<PASSWORD>", "et", " ", "agent ", "man", "!"))
    logger.info(secret)
    banner = """
    WOW
    0 0
    ---
    """
    logger.info(banner)
# strings are immutable: "rewriting" means building new strings from slices
def rewrite_immutable():
    """Split an immutable string around a colon, by index and by partition."""
    title = "Recipe 5: some immutable string, with, punctuation"
    colon_position = title.index(":")
    logger.info("colon_position: %s", colon_position)
    before = title[:colon_position]
    after = title[colon_position + 1 :]
    logger.info("pre_colon: %s", before)
    logger.info("post_colon: %s", after)
    before, separator, after = title.partition(":")
    logger.info("pre_colon: %s", before)
    logger.info("middle: %s", separator)
    logger.info("post_colon: %s", after)
    print()
def anagram(s1, s2):
    """Log whether s1 and s2 are anagrams (same multiset of characters)."""
    from collections import Counter
    logger.info(Counter(s1))
    logger.info(Counter(s2))
    # FIX: use a real if/else instead of a conditional expression evaluated
    # purely for its side effects (an anti-pattern)
    if Counter(s1) == Counter(s2):
        logger.info("anagram")
    else:
        logger.info("NOT anagram")
    print()
def remove_digits(text):
    """Log *text* with every non-alphabetic character removed.

    FIX: the parameter was named ``str``, shadowing the builtin.
    """
    res = "".join(ch for ch in text if ch.isalpha())
    logger.info("str with no digits: %s", res)
    print()
def lc_alphabet():
    """Log the lowercase ASCII alphabet."""
    logger.info("%s", string.ascii_lowercase)
def uc_alphabet():
    """Log the uppercase ASCII alphabet."""
    logger.info("%s", string.ascii_uppercase)
def str_digits():
    """Log the decimal digit characters."""
    logger.info("%s", string.digits)
def from_practice(args):
    """Scratchpad of practice snippets; currently demos stutter().

    FIX: removed the block of dead commented-out split/splitlines/strip
    experiments that only obscured the live code.
    """
    logger.info("hello as a stutter : %s", stutter("hello"))
def join_objects_as_strings(lst):
    """Run each join-objects-as-strings technique over *lst*.

    https://blog.finxter.com/how-to-use-pythons-join-function-on-a-list-of-objects-rather-than-strings/
    """
    for method in (join_method_1, join_method_2, join_method_3,
                   join_method_4, join_method_5, join_method_6):
        method(lst)
@me.libs.decorators.timer
def join_method_1(lst):
    """Join via list comprehension materialized before join."""
    # Method 1 - list comprehension with join
    logger.info("".join([str(x) for x in lst]))
    # ''.join([str(x) for x in lst])
    # 0124
@me.libs.decorators.timer
def join_method_2(lst):
    """Join via a lazy generator expression."""
    # Method 2 - generator expression
    logger.info("".join(str(x) for x in lst))
    # ''.join(str(x) for x in lst)
    # 0124
@me.libs.decorators.timer
def join_method_3(lst):
    """Disabled: joins via a custom .val attribute, which ints lack."""
    # todo: this doesn't work with int types... so exclude it
    # this is because there is no int.val attr.
    return
    # Method 3 - generator expression with custom string repr
    logger.info("".join(str(x.val) for x in lst))
    # ''.join(str(x.val) for x in lst)
    # 0124
@me.libs.decorators.timer
def join_method_4(lst):
    """Join via map with a lambda wrapper around str."""
    # Method 4 - lambda + str
    logger.info("".join(map(lambda x: str(x), lst)))
    # ''.join(map(lambda x: str(x), lst))
    # 0124
@me.libs.decorators.timer
def join_method_5(lst):
    """Join via map(str, ...) directly (fastest idiom)."""
    # Method 5 - map + str
    logger.info("".join(map(str, lst)))
    # ''.join(map(str, lst))
    # 0124
@me.libs.decorators.timer
def join_method_6(lst):
    """Join via naive += concatenation (quadratic for long lists)."""
    # Method 6 - naive loop
    s = ""
    for x in lst:
        s += str(x)
    logger.info(s)
    # 0124
class Obj:
    """Minimal wrapper holding a single value, used by the join demos."""
    def __init__(self, val):
        self.val = val
    def __str__(self):
        return str(self.val)
    def __repr__(self):
        # added for debuggability: show the wrapped value unambiguously
        return "Obj(%r)" % (self.val,)
# lst = [Obj(0), Obj(1), Obj(2), Obj(4)]
# NOTE(review): module-level demo fixture, built as an import-time side effect
lst = [Obj(x) for x in range(0, 10)]
# STRING
# create a stuttering type string response
def stutter(word):
    """Return *word* spoken with a stutter, e.g. 'he...he...hello?'."""
    opening = word[:2]
    return "{0}...{0}...{1}?".format(opening, word)
def search_help(keyword):
    """Search pydoc synopses for *keyword* (shells out to `pydoc -k`)."""
    os.system("pydoc -k %s" % keyword)
def quote_strings():
    """Show escaped single quotes inside a single-quoted literal."""
    forecast = 'It\'s "kind of" Sunny'
    logger.info(forecast)
def format_strings():
    """Contrast plain concatenation with f-string formatting."""
    name = "johnny"
    age = 55
    concatenated = "Hi " + name + " you are " + str(age) + " years old"
    print(concatenated)
    logger.info(concatenated)
    interpolated = f"hi {name}. You are {age} years old"
    print(interpolated)
    logger.info(interpolated)
def slice_strings():
    """Demonstrate extended slicing with a negative step."""
    digit_run = "0123456789"
    # even digits, highest first: start at index -2, step backwards by 2
    logger.info(digit_run[-2::-2])
# --- 2019/25/py/run.py ---
#! /usr/bin/python3
import sys
import os
import time
from typing import Dict, List, Tuple
from collections import defaultdict
import re
from itertools import combinations
class IntCodeComputer():
    """Intcode interpreter (AoC 2019) with pausable, pollable ASCII I/O.

    The machine runs until it halts (opcode 99) or pauses waiting for input;
    outputs accumulate in ``self.outputs``.
    """

    def __init__(self, memory: List[int], inputs: List[int] = None,
                 defaultInput: bool = False, defaultValue: int = -1):
        # sparse memory: addresses beyond the loaded program default to 0
        self.memory = defaultdict(int, [(index, value)
                                        for index, value in enumerate(memory)])
        self.pointer = 0
        # BUG FIX: the original used a mutable default argument ([]), so every
        # computer created without explicit inputs shared one input queue
        self.inputs = inputs if inputs is not None else []
        self.outputs: List[int] = []
        self.base = 0            # relative base for mode-2 parameters
        self.running = True
        self.polling = False     # True while consuming the default input value
        self.paused = False      # True when blocked waiting for input
        self.outputing = False
        self.default_input = defaultInput
        self.default_value = defaultValue

    def set_input(self, value: int):
        """Push *value* to the FRONT of the input queue."""
        self.inputs.insert(0, value)

    def run(self) -> List[int]:
        """Run until halt; return all accumulated outputs."""
        while self.running:
            self.tick()
        return self.outputs

    def run_until_paused(self):
        """Run until the machine halts or blocks on input; return self."""
        self.paused = False
        while not self.paused and self.running:
            self.tick()
        return self

    def get_parameter(self, offset: int, mode: int) -> int:
        """Read the parameter at *offset* from the pointer, honoring *mode*."""
        value = self.memory[self.pointer + offset]
        if mode == 0:  # POSITION
            return self.memory[value]
        if mode == 1:  # IMMEDIATE
            return value
        elif mode == 2:  # RELATIVE
            return self.memory[self.base + value]
        raise Exception("Unrecognized parameter mode", mode)

    def get_address(self, offset: int, mode: int) -> int:
        """Resolve a write address (immediate mode is invalid for writes)."""
        value = self.memory[self.pointer + offset]
        if mode == 0:  # POSITION
            return value
        if mode == 2:  # RELATIVE
            return self.base + value
        raise Exception("Unrecognized address mode", mode)

    def get_output(self) -> int:
        """Pop and return the most recent output value."""
        self.outputing = False
        return self.outputs.pop()

    def add_input(self, value: int):
        """Append *value* to the end of the input queue."""
        self.inputs.append(value)

    def tick(self):
        """Execute a single instruction."""
        instruction = self.memory[self.pointer]
        # decode opcode (last two digits) and the three parameter modes
        opcode, p1_mode, p2_mode, p3_mode = instruction % 100, (
            instruction // 100) % 10, (instruction // 1000) % 10, (instruction // 10000) % 10
        if not self.running:
            return
        if opcode == 1:  # ADD
            self.memory[self.get_address(3, p3_mode)] = self.get_parameter(
                1, p1_mode) + self.get_parameter(2, p2_mode)
            self.pointer += 4
        elif opcode == 2:  # MUL
            self.memory[self.get_address(3, p3_mode)] = self.get_parameter(
                1, p1_mode) * self.get_parameter(2, p2_mode)
            self.pointer += 4
        elif opcode == 3:  # INPUT
            if self.inputs:
                self.polling = False
                self.memory[self.get_address(1, p1_mode)] = self.inputs.pop(0)
                self.pointer += 2
            elif self.default_input:
                # no queued input: consume the configured default instead
                self.memory[self.get_address(1, p1_mode)] = self.default_value
                self.pointer += 2
                self.polling = True
            else:
                # block until the caller supplies input
                self.paused = True
        elif opcode == 4:  # OUTPUT
            self.outputing = True
            self.outputs.append(self.get_parameter(1, p1_mode))
            self.pointer += 2
        elif opcode == 5:  # JMP_TRUE
            if self.get_parameter(1, p1_mode):
                self.pointer = self.get_parameter(2, p2_mode)
            else:
                self.pointer += 3
        elif opcode == 6:  # JMP_FALSE
            if not self.get_parameter(1, p1_mode):
                self.pointer = self.get_parameter(2, p2_mode)
            else:
                self.pointer += 3
        elif opcode == 7:  # LESS_THAN
            self.memory[self.get_address(3, p3_mode)] = 1 if self.get_parameter(
                1, p1_mode) < self.get_parameter(2, p2_mode) else 0
            self.pointer += 4
        elif opcode == 8:  # EQUALS
            self.memory[self.get_address(3, p3_mode)] = 1 if self.get_parameter(
                1, p1_mode) == self.get_parameter(2, p2_mode) else 0
            self.pointer += 4
        elif opcode == 9:  # SET_BASE
            self.base += self.get_parameter(1, p1_mode)
            self.pointer += 2
        elif opcode == 99:  # HALT
            self.running = False
        else:
            raise Exception(f"Unknown instruction", self.pointer,
                            instruction, opcode, p1_mode, p2_mode, p3_mode)
def parse_room_output(output: str) -> Tuple[str, List[str], List[str]]:
    """Parse a droid status screen into (room name, doors, items)."""
    room_pattern = re.compile(r"^== (?P<room>.*) ==")
    entry_pattern = re.compile(r"- (?P<entry>.*)")
    room = ""
    doors: List[str] = []
    items: List[str] = []
    # simple state machine: 0=find room header, 1=find "Doors", 2=collect
    # doors, 3=find "Items", 4=collect items
    state = 0
    for line in output.split("\n"):
        if state == 0:
            header = room_pattern.match(line)
            if header:
                room = header.group("room")
                state = 1
        elif state == 1:
            if line.startswith("Doors"):
                state = 2
        elif state == 2:
            entry = entry_pattern.match(line)
            if entry:
                doors.append(entry.group("entry"))
            else:
                state = 3
        elif state == 3:
            if line.startswith("Items"):
                state = 4
        elif state == 4:
            entry = entry_pattern.match(line)
            if entry:
                items.append(entry.group("entry"))
            else:
                break
    return room, doors, items
def run_command(droid: IntCodeComputer, command: str) -> str:
    """Feed *command* (plus newline) to the droid as ASCII, run until it
    pauses, and return its accumulated ASCII output."""
    if command != "":
        droid.inputs.extend(ord(ch) for ch in command + '\n')
    droid.run_until_paused()
    response = "".join(map(chr, droid.outputs))
    droid.outputs.clear()
    return response
# use to play manually
def manual_scout(memory: List[int]):
    """Interactive REPL over the droid: type commands, 'quit' to stop."""
    print("Scounting")
    droid = IntCodeComputer(memory)
    command = ""
    while command != "quit" and droid.running:
        print(parse_room_output(run_command(droid, command)))
        command = input("$ ")
# opposite of each movement command, used to backtrack ("" = starting room)
WAY_INVERSE = {
    "": "",
    "north": "south",
    "south": "north",
    "west": "east",
    "east": "west"
}
# items that trap or kill the droid when picked up
# (identifier spelling "FORBIDEN" kept as-is; it is referenced below)
FORBIDEN_ITEMS = [
    "molten lava",
    "photons",
    "infinite loop",
    "giant electromagnet",
    "escape pod"
]
PRESSURE_ROOM = "Pressure-Sensitive Floor"
SECURITY_CHECKPOINT = "Security Checkpoint"
TAKE = "take "
def navigate_rooms(droid: IntCodeComputer, command: str, destination: str, pickup_items: bool) -> Tuple[str, List[str], str]:
    """Depth-first walk of the ship's rooms.

    Starting by sending *command* ("" sends nothing), keeps walking through
    unvisited doors until a room named *destination* is reached ("" means:
    explore everything until the walk wraps back to the start room).  When
    *pickup_items* is True, every item not in FORBIDEN_ITEMS is taken.

    Returns a tuple of:
      - the next command to send (a door of the start room when the full
        walk wrapped around),
      - the list of items collected,
      - the direction that led from the Security Checkpoint onto the
        Pressure-Sensitive Floor ("" if it was never stepped on).
    """
    visited: List[Tuple[str, str]] = []   # (room, door) pairs already tried
    way_in: Dict[str, str] = {}           # direction used to first enter each room
    last_direction = ""
    pressure_room_way_in = ""
    inventory: List[str] = []
    security_doors: List[str] = []
    while droid.running:
        output = run_command(droid, command)
        room, doors, items = parse_room_output(output)
        if room == destination:
            break
        if room == PRESSURE_ROOM:
            # The floor ejects us back to the checkpoint: remember how we
            # got in and continue as if still standing at the checkpoint.
            pressure_room_way_in = last_direction
            room = SECURITY_CHECKPOINT
            doors = security_doors
        if room == SECURITY_CHECKPOINT:
            security_doors = doors
        if room not in way_in:
            way_in[room] = last_direction
        if pickup_items:
            for item in items:
                if item not in FORBIDEN_ITEMS:
                    run_command(droid, TAKE + item)
                    inventory.append(item)
        new_door = False
        for door in doors:
            if (room, door) not in visited:
                if door == WAY_INVERSE[way_in[room]]:
                    # Never walk straight back the way we came in.
                    continue
                new_door = True
                visited.append((room, door))
                command = last_direction = door
                break
        if not new_door:
            if way_in[room] == "":
                # in first room
                command = doors[0]
                break
            # Dead end: backtrack towards the room we entered from.
            command = WAY_INVERSE[way_in[room]]
    return command, inventory, pressure_room_way_in
# Prefix of the "drop <item>" droid command.
DROP = "drop "


def find_password(memory: List[int]) -> str:
    """Explore the ship, then brute-force item subsets at the pressure plate.

    Collects every safe item, parks the droid at the Security Checkpoint and
    tries each 4-item subset of the inventory until the pressure-sensitive
    floor accepts the droid's weight and prints the password.
    """
    droid = IntCodeComputer(memory)
    # Walk every room and grab anything that is safe to carry.
    command, inventory, pressure_room_way_in = navigate_rooms(droid, "", "", True)
    # Head back to the Security Checkpoint next to the pressure plate.
    navigate_rooms(droid, command, SECURITY_CHECKPOINT, False)
    # Swap items in/out until one 4-item combination has the right weight.
    for candidate in combinations(inventory, 4):
        for item in candidate:
            if item not in inventory:
                run_command(droid, TAKE + item)
        for item in inventory:
            if item not in candidate:
                run_command(droid, DROP + item)
        reply = run_command(droid, pressure_room_way_in)
        found = re.search(r"typing (?P<password>\d+)", reply)
        if found:
            return found.group("password")
        inventory = candidate
    raise Exception("Password not found")
def solve(memory: List[int]) -> Tuple[str, str]:
    """Day-25 has only one real part: the airlock password."""
    password = find_password(memory)
    return password, ""
def get_input(file_path: str) -> List[int]:
    """Read a comma-separated Intcode program from *file_path*.

    Returns the program as a list of ints; an empty file yields an empty
    list instead of crashing on ``int("")``.

    Raises:
        FileNotFoundError: if *file_path* is not an existing file.
    """
    if not os.path.isfile(file_path):
        raise FileNotFoundError(file_path)
    with open(file_path, "r") as file:
        content = file.read().strip()
    # int() tolerates surrounding whitespace, so no per-token strip needed.
    return [int(i) for i in content.split(",")] if content else []
def main():
    """CLI entry point: solve the puzzle for the input file named in argv[1]."""
    if len(sys.argv) != 2:
        raise Exception("Please, add input file path as parameter")
    started = time.perf_counter()
    part1, part2 = solve(get_input(sys.argv[1]))
    elapsed = time.perf_counter() - started
    print("P1:", part1)
    print("P2:", part2)
    print()
    print(f"Time: {elapsed:.7f}")


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `precisionmapper` package."""
# To be tested with : python3 -m pytest -vs tests/test_precisionmapper.py
import pytest
import os
from precisionmapper import Survey, PrecisionMapper
from requests import ConnectionError
# Get useful environment variables
# Live-API credentials; tests that sign in require both
# PRECISIONMAPPER_LOGIN and PRECISIONMAPPER_PASSWORD to be set,
# otherwise they run with None and will fail at sign-in.
_LOGIN = os.environ.get('PRECISIONMAPPER_LOGIN', None)
_PASSWORD = os.environ.get('PRECISIONMAPPER_PASSWORD', None)
def test_class_Survey():
    """A Survey exposes its id and renders str()/repr() as documented,
    both fully populated and with only the mandatory parameters."""
    survey = Survey(
        id=123, name="My survey", url="https://url.com",
        drone_platform="DJI",
        sensor="RGB", location="Toulouse, France",
        date="2018-08-03T17:00:00.001Z", image_nb=3, size="150 MB",
        thumbnail="https://url_to_thumbnail.com", altitude_in_m=90,
        resolution_in_cm=2.5, area_in_ha=1.8)
    assert survey.id == 123
    assert str(survey) == "[My survey] (Toulouse, France - 03/08/2018 17:00) \
: 3 images, 150 MB, sensor : RGB, id : 123"
    assert repr(survey) == "Survey(id=123, name=My survey)"
    print()
    print(survey)
    # Test with the mandatory parameters only
    survey = Survey(
        id=456, name="My survey 2", url="https://url.com",
        date="2018-08-03T16:00:00.001Z")
    assert survey.id == 456
    assert str(survey) == "[My survey 2] ( - 03/08/2018 16:00) \
: 0 images, 0 MB, sensor : , id : 456"
    print(survey)
def test_class_Survey_errors():
    """Survey rejects constructor arguments of the wrong type."""
    bad_argument_sets = [
        # Bad type for image number
        dict(
            id=123, name="My survey", url="https://url.com",
            drone_platform="DJI",
            sensor="RGB", location="Toulouse, France",
            date="2018-08-03T17:00:00.001Z", image_nb="ABC", size="150 MB",
            thumbnail="https://url_to_thumbnail.com", altitude_in_m=90,
            resolution_in_cm=2.5, area_in_ha=1.8),
        # Bad type for date
        dict(
            id=123, name="My survey", url="https://url.com",
            drone_platform="DJI",
            sensor="RGB", location="Toulouse, France",
            date="not a date", image_nb=3, size="150 MB",
            thumbnail="https://url_to_thumbnail.com", altitude_in_m=90,
            resolution_in_cm=2.5, area_in_ha=1.8),
        # Bad type for id
        dict(
            id="abc", name="My survey 2", url="https://url.com",
            date="2018-08-03T16:00:00.001Z"),
    ]
    for bad_arguments in bad_argument_sets:
        with pytest.raises(TypeError):
            Survey(**bad_arguments)
def test_class_PrecisionMapper():
    """str() and repr() of a client expose the login but never the password."""
    mapper = PrecisionMapper(login="username", password="password")
    expected = "PrecisionMapper(login=username)"
    assert repr(mapper) == expected
    assert str(mapper) == expected
def test_get_authenticity_token():
    """A fresh client can scrape a non-empty authenticity token."""
    mapper = PrecisionMapper(login=_LOGIN, password=_PASSWORD)
    token = mapper.get_authenticity_token()
    assert token != ""
    print()
    print("authenticity_token = {}".format(token))
def test_get_authenticity_token_errors():
    """Scraping a page that has no sign-in form raises ValueError."""
    with pytest.raises(ValueError):
        # example.com serves no token, so the scrape must fail
        mapper = PrecisionMapper(login=_LOGIN, password=_PASSWORD)
        mapper.get_authenticity_token(url="https://example.com")
def test_signin():
    """Valid credentials produce an HTTP 302 on sign-in."""
    mapper = PrecisionMapper(login=_LOGIN, password=_PASSWORD)
    response = mapper.sign_in()
    # A successful sign-in redirects, hence 302 rather than 200.
    assert response.status_code == 302
def test_signin_errors():
    """Wrong credentials make sign_in raise a ConnectionError."""
    with pytest.raises(ConnectionError):
        # Bad login and password
        mapper = PrecisionMapper(login="bad_login", password="<PASSWORD>")
        mapper.sign_in()
def test_get_surveys():
    """After sign-in the account lists at least one Survey instance."""
    pm = PrecisionMapper(login=_LOGIN, password=_PASSWORD)
    pm.sign_in()
    surveys = pm.get_surveys()
    assert len(surveys) > 0
    # isinstance is the idiomatic type check (also accepts subclasses),
    # replacing the fragile `type(x) == Survey` comparison.
    assert isinstance(surveys[0], Survey)
    print()
    for survey in surveys:
        print(survey)
def test_get_shared_surveys():
    """The account exposes at least one shared Survey instance."""
    pm = PrecisionMapper(login=_LOGIN, password=_PASSWORD)
    pm.sign_in()
    surveys = pm.get_shared_surveys()
    assert len(surveys) > 0
    # isinstance is the idiomatic type check (also accepts subclasses),
    # replacing the fragile `type(x) == Survey` comparison.
    assert isinstance(surveys[0], Survey)
    print()
    for survey in surveys:
        print(survey)
|
# repository: danielfrascarelli/esys-particle
#############################################################
## ##
## Copyright (c) 2003-2017 by The University of Queensland ##
## Centre for Geoscience Computing ##
## http://earth.uq.edu.au/centre-geoscience-computing ##
## ##
## Primary Business: Brisbane, Queensland, Australia ##
## Licensed under the Open Software License version 3.0 ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
#############################################################
"""
Defines the L{CylinderExtractor} class.
"""
from .color import Colors
from .extractor import Extractor
class CylinderExtractor(Extractor):
    """
    Objects of this class can be used in conjunction with the
    L{esys.lsm.vis.core.GlyphData} class for extracting cylinder
    info from data-records.
    """
    def __init__(
        self,
        radiusMap = lambda dataRecord: dataRecord.getRadius(),
        endPt1Map = lambda dataRecord: dataRecord.getEndPt1(),
        endPt2Map = lambda dataRecord: dataRecord.getEndPt2(),
        modifierMap = lambda dataRecord: None,
        radiusScale = 1.0
    ):
        """
        Constructs the extractor.
        @type radiusMap: callable
        @param radiusMap: A callable which accepts a single data-record
        argument and returns a radius (float) value.
        @type endPt1Map: callable
        @param endPt1Map: A callable which accepts a single data-record
        argument and returns a 3 float-element sequence (ie a 3D coordinate).
        @type endPt2Map: callable
        @param endPt2Map: A callable which accepts a single data-record
        argument and returns a 3 float-element sequence (ie a 3D coordinate).
        @type modifierMap: callable
        @param modifierMap: A callable which accepts a single data-record
        argument and returns an object modifier (or sequence of modifiers).
        @type radiusScale: float
        @param radiusScale: Radii returned by C{radiusMap} are multiplied
        by this scale factor in L{getRadius}.
        """
        self.radiusMap = radiusMap
        self.endPt1Map = endPt1Map
        self.endPt2Map = endPt2Map
        self.modifierMap = modifierMap
        self.radiusScale = radiusScale
    def getRadius(self, dataRecord):
        """Returns the scaled cylinder radius for C{dataRecord}."""
        return self.radiusMap(dataRecord)*self.radiusScale
    def getEndPt1(self, dataRecord):
        """Returns the first cylinder end-point for C{dataRecord}."""
        return self.endPt1Map(dataRecord)
    def getEndPt2(self, dataRecord):
        """Returns the second cylinder end-point for C{dataRecord}."""
        return self.endPt2Map(dataRecord)
    def getModifier(self, dataRecord):
        """Returns the modifier(s) for C{dataRecord}."""
        return self.modifierMap(dataRecord)
    def getRadiusScale(self):
        """Returns the radius scale factor applied by L{getRadius}."""
        return self.radiusScale
class DiskExtractor(Extractor):
    """
    Objects of this class can be used in conjunction with the
    L{esys.lsm.vis.core.GlyphData} class for extracting disk
    (squat cylinder) info from data-records.
    """
    def __init__(
        self,
        radiusMap = lambda dataRecord: dataRecord.getRadius(),
        centerMap = lambda dataRecord: dataRecord.getCenter(),
        heightMap = lambda dataRecord: 0.001,
        directionMap = lambda dataRecord: None,
        modifierMap = lambda dataRecord: None,
        radiusScale = 1.0
    ):
        """
        Constructs the extractor.
        @type radiusMap: callable
        @param radiusMap: A callable which accepts a single data-record
        argument and returns a radius (float) value.
        @type centerMap: callable
        @param centerMap: A callable which accepts a single data-record
        argument and returns a 3 float-element sequence (ie a 3D coordinate).
        @type heightMap: callable
        @param heightMap: A callable which accepts a single data-record
        argument and returns a float (height of the cylinder).  Defaults
        to a constant 0.001, ie a near-flat disk.
        @type directionMap: callable
        @param directionMap: A callable which accepts a single data-record
        argument and returns the disk axis direction (or C{None}).
        @type modifierMap: callable
        @param modifierMap: A callable which accepts a single data-record
        argument and returns a color.
        @type radiusScale: float
        @param radiusScale: Radii returned by C{radiusMap} are multiplied
        by this scale factor in L{getRadius}.
        """
        self.radiusMap = radiusMap
        self.centerMap = centerMap
        self.heightMap = heightMap
        self.directionMap = directionMap
        self.modifierMap = modifierMap
        self.radiusScale = radiusScale
    def getRadius(self, dataRecord):
        """Returns the scaled disk radius for C{dataRecord}."""
        return self.radiusMap(dataRecord)*self.radiusScale
    def getCenter(self, dataRecord):
        """Returns the disk centre coordinate for C{dataRecord}."""
        return self.centerMap(dataRecord)
    def getHeight(self, dataRecord):
        """Returns the disk height (thickness) for C{dataRecord}."""
        return self.heightMap(dataRecord)
    def getDirection(self, dataRecord):
        """Returns the disk axis direction for C{dataRecord}."""
        return self.directionMap(dataRecord)
    def getModifier(self, dataRecord):
        """Returns the modifier(s) for C{dataRecord}."""
        return self.modifierMap(dataRecord)
    def getRadiusScale(self):
        """Returns the radius scale factor applied by L{getRadius}."""
        return self.radiusScale
|
# Generated by Django 4.0.2 on 2022-02-12 20:03
from django.db import migrations
def seed(apps, schema_editor):
Player = apps.get_model('blacktop', 'Player')
Player(name="<NAME>",
nickname="<NAME>",
height=78,
attributes={
'offense': {
'layup': 98,
'post_shot': 70,
'mid_range': 99,
'three': 78,
'handles': 88,
'pass': 82,
'off_rebound': 58,
'dunk': 97
},
'defense': {
'def_rebound': 63,
'inside_defense': 70,
'outside_defense': 98,
'steal': 91,
'block': 59,
'post_defense': 70
},
'physical': {
'speed': 86,
'vertical': 96,
'strength': 68
}
}, tendencies={
'offense': {
'shoot_mid': 77,
'shoot_three': 47,
'pass': 72,
'attack_rim': 94,
'post_up': 95
},
'defense': {
'steal': 65,
'block': 70,
'intercept': 70
}
}, avatar="https://www.gannett-cdn.com/presto/2020/04/18/USAT/f5e5f735-2eff-426c-a783-baca6d2872ab-05.JPG?crop=984,1012,x0,y0&width=1588",
description="<NAME> is a former professional American basketball player, Olympic athlete, businessperson and actor. Considered one of the best basketball players ever, he dominated the sport from the mid-1980s to the late 1990s",
image="https://imagesvc.meredithcorp.io/v3/mm/image?url=https%3A%2F%2Fstatic.onecms.io%2Fwp-content%2Fuploads%2Fsites%2F20%2F2020%2F04%2F21%2Fmichael-jordan-3-1.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="King",
height=81,
attributes={
'offense': {
'layup': 98,
'post_shot': 88,
'mid_range': 81,
'three': 76,
'handles': 86,
'pass': 87,
'off_rebound': 75,
'dunk': 95
},
'defense': {
'def_rebound': 79,
'inside_defense': 80,
'outside_defense': 96,
'steal': 62,
'block': 52,
'post_defense': 79
},
'physical': {
'speed': 89,
'vertical': 84,
'strength': 92
}
}, tendencies={
'offense': {
'shoot_mid': 47,
'shoot_three': 57,
'pass': 65,
'attack_rim': 81,
'post_up': 55
},
'defense': {
'steal': 31,
'block': 70,
'intercept': 69
}
}, avatar="https://sportshub.cbsistatic.com/i/r/2020/03/25/5a9bf93f-1688-4251-af07-6caace3ef679/thumbnail/1200x675/8016dc48407290c004104a97fef5cb1b/lebron.jpg",
description="LeBron James is an American basketball player with the Los Angeles Lakers. James first garnered national attention as the top high school basketball player in the country. With his unique combination of size, athleticism and court vision, he became a four-time NBA MVP.",
image="https://static01.nyt.com/images/2020/06/10/us/politics/10lebron-voters/10lebron-voters-mediumSquareAt3X.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Big Fundamental",
height=83,
attributes={
'offense': {
'layup': 79,
'post_shot': 93,
'mid_range': 95,
'three': 76,
'handles': 59,
'pass': 82,
'off_rebound': 94,
'dunk': 85
},
'defense': {
'def_rebound': 95,
'inside_defense': 99,
'outside_defense': 69,
'steal': 79,
'block': 96,
'post_defense': 95
},
'physical': {
'speed': 68,
'vertical': 80,
'strength': 96
}
}, tendencies={
'offense': {
'shoot_mid': 67,
'shoot_three': 37,
'pass': 65,
'attack_rim': 90,
'post_up': 99
},
'defense': {
'steal': 40,
'block': 80,
'intercept': 75
}
}, avatar="https://www.nydailynews.com/resizer/IT80S2462HG71fa8Gn-dW82rrGI=/415x276/top/arc-anglerfish-arc2-prod-tronc.s3.amazonaws.com/public/CEZW2RCG7LMRM2QXPE2OQL3F5Y.jpg",
description="<NAME> (born April 25, 1976) is an American former professional basketball player and coach. Nicknamed ' the Big Fundamental ', he is widely regarded as the greatest power forward of all time and one of the greatest players in NBA history.",
image="https://media.newyorker.com/photos/590978241c7a8e33fb38fb1d/master/w_2560%2Cc_limit/Crouch-Duncan.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="<NAME>",
height=83,
attributes={
'offense': {
'layup': 98,
'post_shot': 79,
'mid_range': 79,
'three': 70,
'handles': 87,
'pass': 84,
'off_rebound': 58,
'dunk': 91
},
'defense': {
'def_rebound': 92,
'inside_defense': 91,
'outside_defense': 95,
'steal': 59,
'block': 82,
'post_defense': 92
},
'physical': {
'speed': 90,
'vertical': 85,
'strength': 94
}
}, tendencies={
'offense': {
'shoot_mid': 67,
'shoot_three': 47,
'pass': 70,
'attack_rim': 95,
'post_up': 84
},
'defense': {
'steal': 50,
'block': 90,
'intercept': 69
}
}, avatar="https://library.sportingnews.com/2021-12/giannis-antetokounmpo-milwaukee-bucks_yvv75rnfa24r105azbf093kjx.png",
description="<NAME> is a Greek professional basketball player for the Milwaukee Bucks of the National Basketball Association (NBA). Antetokounmpo's nationality, in addition to his size, speed and ball-handling skills have earned him the nickname '<NAME>'",
image="https://library.sportingnews.com/2021-11/giannis-antetokounmpo-11122021-ftr-getty_s33eyyrb2ogz1gf18h024rfp9.jpeg",
tier=1).save()
Player(name="<NAME>",
nickname="Legend",
height=81,
attributes={
'offense': {
'layup': 72,
'post_shot': 94,
'mid_range': 96,
'three': 96,
'handles': 79,
'pass': 94,
'off_rebound': 61,
'dunk': 45
},
'defense': {
'def_rebound': 77,
'inside_defense': 85,
'outside_defense': 88,
'steal': 75,
'post_defense': 85,
'block': 47
},
'physical': {
'speed': 90,
'vertical': 85,
'strength': 94
}
}, tendencies={
'offense': {
'shoot_mid': 84,
'shoot_three': 84,
'pass': 75,
'attack_rim': 70,
'post_up': 84
},
'defense': {
'steal': 65,
'block': 40,
'intercept': 69
}
}, avatar="https://m.media-amazon.com/images/M/MV5BMTY0NzUyNzkyOF5BMl5BanBnXkFtZTcwNzczNDk5Nw@@._V1_.jpg",
description="<NAME> (born December 7, 1956) is an American former professional basketball player, coach and executive in the National Basketball Association (NBA). Nicknamed '<NAME> from <NAME>' and '<NAME>,' Bird is widely regarded as one of the greatest basketball players of all time",
image="https://fadeawayworld.net/.image/ar_1:1%2Cc_fill%2Ccs_srgb%2Cfl_progressive%2Cq_auto:good%2Cw_1200/MTgwMzI0NDk3MjAxNzAyNzY0/larry-bird-i-wasnt-real-quick-and-i-wasnt-real-strong-some-guys-will-just-take-off-and-its-like-whoa-so-i-beat-them-with-my-mind-and-my-fundamentals.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Magic",
height=81,
attributes={
'offense': {
'layup': 93,
'post_shot': 90,
'mid_range': 87,
'three': 79,
'handles': 97,
'pass': 99,
'off_rebound': 74,
'dunk': 75
},
'defense': {
'def_rebound': 84,
'inside_defense': 80,
'outside_defense': 80,
'steal': 69,
'block': 62,
'post_defense': 80
},
'physical': {
'speed': 86,
'vertical': 84,
'strength': 76
}
}, tendencies={
'offense': {
'shoot_mid': 67,
'shoot_three': 57,
'pass': 99,
'attack_rim': 95,
'post_up': 76
},
'defense': {
'steal': 60,
'block': 60,
'intercept': 67
}
}, avatar="https://miro.medium.com/max/600/0*-1Xu3h__9dKnoAYK.jpg",
description="Johnson's career achievements include three NBA MVP Awards, nine NBA Finals appearances, twelve All-Star games, and ten All-NBA First and Second Team nominations. He led the league in regular season assists four times, and is the NBA's all-time leader in average assists per game, at 11.2.",
image="https://d.newsweek.com/en/full/1614084/newsweek-amplify-magic-johnson-nba-store.png",
tier=1).save()
Player(name="<NAME>",
nickname="The Dream",
height=83,
attributes={
'offense': {
'layup': 78,
'post_shot': 98,
'mid_range': 77,
'three': 63,
'handles': 61,
'pass': 60,
'off_rebound': 92,
'dunk': 95
},
'defense': {
'def_rebound': 95,
'inside_defense': 99,
'outside_defense': 49,
'steal': 65,
'block': 88,
'post_defense': 99
},
'physical': {
'speed': 53,
'vertical': 60,
'strength': 88
}
}, tendencies={
'offense': {
'shoot_mid': 57,
'shoot_three': 37,
'pass': 60,
'attack_rim': 99,
'post_up': 99
},
'defense': {
'steal': 60,
'block': 80,
'intercept': 59
}
}, avatar="https://i.guim.co.uk/img/media/58fcd5a6270acc3d7f72f150c476765bf39f924d/0_202_3589_2152/3589.jpg?width=700&quality=85&auto=format&fit=max&s=20dd508fa9848d088ffc4fbc6b81c58c",
description="Despite very nearly being traded during a bitter contract dispute before the 1992–93 season, he remained in Houston. He became the first non-American to be an All-Star and start in an All-Star Game, the first non-American to win NBA MVP, the first non-American to win NBA Defensive Player of the Year, and in the 1993–94 season, he became the only player in NBA history to win the NBA MVP, Defensive Player of the Year, and Finals MVP awards in the same season.",
image="https://cdn.britannica.com/19/200219-050-A33962F1/Hakeem-Olajuwon-1994.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Mamba",
height=78,
attributes={
'offense': {
'layup': 98,
'post_shot': 74,
'mid_range': 94,
'three': 79,
'handles': 89,
'pass': 78,
'off_rebound': 55,
'dunk': 95
},
'defense': {
'def_rebound': 59,
'inside_defense': 68,
'outside_defense': 95,
'steal': 77,
'post_defense': 68,
'block': 51
},
'physical': {
'speed': 88,
'vertical': 88,
'strength': 75
}
}, tendencies={
'offense': {
'shoot_mid': 90,
'shoot_three': 70,
'pass': 40,
'attack_rim': 90,
'post_up': 86
},
'defense': {
'steal': 75,
'block': 60,
'intercept': 80
}
}, avatar="https://live-production.wcms.abc-cdn.net.au/6a0e2b28bb4567afb28e6d1d8aed521d?impolicy=wcms_crop_resize&cropH=1688&cropW=3000&xPos=0&yPos=77&width=862&height=485",
description="Widely regarded as one of the greatest basketball players of all time Bryant won five NBA championships, was an 18-time All-Star, a 15-time member of the All-NBA Team, a 12-time member of the All-Defensive Team, the 2008 NBA Most Valuable Player (MVP), and a two-time NBA Finals MVP.",
image="https://cdn.bleacherreport.net/images_root/slides/photos/000/368/570/102184753_original.jpg?1283212258",
tier=1).save()
Player(name="<NAME>",
nickname="Diesel",
height=84,
attributes={
'offense': {
'layup': 89,
'post_shot': 97,
'mid_range': 35,
'three': 26,
'handles': 64,
'pass': 77,
'off_rebound': 98,
'dunk': 99
},
'defense': {
'def_rebound': 97,
'inside_defense': 95,
'outside_defense': 49,
'steal': 74,
'post_defense': 49,
'block': 95
},
'physical': {
'speed': 76,
'vertical': 80,
'strength': 99
}
}, tendencies={
'offense': {
'shoot_mid': 30,
'shoot_three': 10,
'pass': 50,
'attack_rim': 99,
'post_up': 99
},
'defense': {
'steal': 40,
'block': 70,
'intercept': 45
}
}, avatar="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQTZmBYy9bn0ot_UkgrlgYJANVBNbEmIUkLMQ&usqp=CAU",
description="O'Neal was drafted by the Orlando Magic with the first overall pick in the 1992 NBA draft. He quickly became one of the best centers in the league, winning Rookie of the Year in 1992–93 and leading his team to the 1995 NBA Finals. After four years with the Magic, O'Neal signed as a free agent with the Los Angeles Lakers. They won three consecutive championships in 2000, 2001, and 2002.",
image="https://www.basketballnetwork.net/app/uploads/2021/05/shaq-copy-4.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Big Ticket",
height=83,
attributes={
'offense': {
'layup': 82,
'post_shot': 89,
'mid_range': 94,
'three': 69,
'handles': 70,
'pass': 68,
'off_rebound': 93,
'dunk': 88
},
'defense': {
'def_rebound': 94,
'inside_defense': 97,
'post_defense': 97,
'outside_defense': 61,
'block': 80,
'steal': 65
},
'physical': {
'speed': 87,
'vertical': 87,
'strength': 88
}
}, tendencies={
'offense': {
'shoot_mid': 70,
'shoot_three': 30,
'pass': 40,
'attack_rim': 80,
'post_up': 85
},
'defense': {
'steal': 60,
'block': 75,
'intercept': 65
}
}, avatar="https://cdn.bleacherreport.net/images_root/slides/photos/000/739/416/108623866_original.jpg?1298304188",
description="<NAME> is an American former professional basketball player who played for 21 seasons in the National Basketball Association (NBA). Known for his intensity, defensive ability, and versatility, Garnett is considered one of the greatest power forwards of all time.",
image="https://sportshub.cbsistatic.com/i/r/2021/11/10/a6c845b1-9c30-415f-87af-d74d4169896f/thumbnail/1200x675/c522ef80fee9ee3cf13ecdfc0244d4eb/kevin-garnett.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="<NAME>",
height=74,
attributes={
'offense': {
'layup': 97,
'post_shot': 52,
'mid_range': 93,
'three': 99,
'handles': 97,
'pass': 96,
'off_rebound': 93,
'dunk': 36
},
'defense': {
'def_rebound': 53,
'inside_defense': 30,
'post_defense': 30,
'outside_defense': 82,
'block': 37,
'steal': 77
},
'physical': {
'speed': 85,
'vertical': 78,
'strength': 48
}
}, tendencies={
'offense': {
'shoot_mid': 70,
'shoot_three': 99,
'pass': 75,
'attack_rim': 80,
'post_up': 20
},
'defense': {
'steal': 70,
'block': 30,
'intercept': 40
}
}, avatar="https://static01.nyt.com/images/2019/11/01/sports/01CURRYalert/01CURRYalert-mobileMasterAt3x.jpg",
description="In 2014–15, Curry won his first NBA Most Valuable Player award and led the Warriors to their first championship since 1975. The following season, he became the first player in NBA history to be elected MVP by a unanimous vote and to lead the league in scoring while shooting above 50–40–90. That same year, the Warriors broke the record for the most wins in an NBA season en route to reaching the 2016 NBA Finals, which they lost to the Cleveland Cavaliers in seven games.",
image="https://s.hdnux.com/photos/01/17/15/02/20762796/14/rawImage.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="KD",
height=83,
attributes={
'offense': {
'layup': 94,
'post_shot': 88,
'mid_range': 98,
'three': 88,
'handles': 86,
'pass': 79,
'off_rebound': 32,
'dunk': 85
},
'defense': {
'def_rebound': 69,
'inside_defense': 70,
'post_defense': 70,
'outside_defense': 86,
'block': 66,
'steal': 58
},
'physical': {
'speed': 80,
'vertical': 77,
'strength': 63
}
}, tendencies={
'offense': {
'shoot_mid': 70,
'shoot_three': 80,
'pass': 40,
'attack_rim': 80,
'post_up': 50
},
'defense': {
'steal': 60,
'block': 50,
'intercept': 45
}
}, avatar="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRHkkdj1b_U9TuTtAq4gRfxUTyokprM44yD9A&usqp=CAU",
description="As a professional, he has won two NBA championships, an NBA Most Valuable Player Award, two Finals MVP Awards, two NBA All-Star Game Most Valuable Player Awards, four NBA scoring titles, the NBA Rookie of the Year Award, been named to nine All-NBA teams (including six First Teams), and selected 12 times as an NBA All-Star.",
image="https://www.si.com/.image/ar_4:3%2Cc_fill%2Ccs_srgb%2Cfl_progressive%2Cq_auto:good%2Cw_1200/MTg1NTEzODAyMzE2MzkxNTg1/usatsi_16965380_168388303_lowres.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="The Process",
height=84,
attributes={
'offense': {
'layup': 87,
'post_shot': 92,
'mid_range': 80,
'three': 82,
'handles': 57,
'pass': 55,
'off_rebound': 75,
'dunk': 90
},
'defense': {
'def_rebound': 90,
'inside_defense': 96,
'post_defense': 96,
'outside_defense': 64,
'block': 78,
'steal': 49
},
'physical': {
'speed': 69,
'vertical': 68,
'strength': 95
}
}, tendencies={
'offense': {
'shoot_mid': 70,
'shoot_three': 50,
'pass': 35,
'attack_rim': 80,
'post_up': 95
},
'defense': {
'steal': 50,
'block': 90,
'intercept': 36
}
}, avatar="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSv0KliYvyfNXV3FZI_wLAzvjFM5eb1pwsGUQ&usqp=CAU",
description="<NAME> is a Cameroonian professional basketball player for the Philadelphia 76ers of the National Basketball Association (NBA). After one year of college basketball with the Kansas Jayhawks, he was drafted with the third overall pick in the 2014 NBA draft by the 76ers.",
image="https://www.si.com/.image/ar_1:1%2Cc_fill%2Ccs_srgb%2Cfl_progressive%2Cq_auto:good%2Cw_1200/MTg2ODIwOTk1NDc3MTUzMjYz/joel-embiid.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="The Claw",
height=81,
attributes={
'offense': {
'layup': 93,
'post_shot': 88,
'mid_range': 87,
'three': 83,
'handles': 86,
'pass': 82,
'off_rebound': 70,
'dunk': 86
},
'defense': {
'def_rebound': 72,
'inside_defense': 83,
'post_defense': 83,
'outside_defense': 97,
'block': 50,
'steal': 92
},
'physical': {
'speed': 81,
'vertical': 79,
'strength': 79
}
}, tendencies={
'offense': {
'shoot_mid': 78,
'shoot_three': 60,
'pass': 50,
'attack_rim': 87,
'post_up': 67
},
'defense': {
'steal': 90,
'block': 76,
'intercept': 78
}
}, avatar="https://www.si.com/.image/t_share/MTY4NDg4NzE5OTk1NjQzNzM1/kawhi-leonard.jpg",
description="With the Spurs, Leonard won an NBA championship in 2014, where he was named the Finals Most Valuable Player. After seven seasons with the Spurs, Leonard was traded to the Toronto Raptors in 2018. In 2019, he led the Raptors to their first NBA championship in franchise history and won his second Finals MVP award",
image="https://image.cnbcfm.com/api/v1/image/106797780-1605566208011-gettyimages-1272714063-_dd15420_20200915104212178.jpeg?v=1605566326",
tier=1).save()
Player(name="<NAME>",
nickname="Pip",
height=80,
attributes={
'offense': {
'layup': 88,
'post_shot': 80,
'mid_range': 84,
'three': 87,
'handles': 80,
'pass': 89,
'off_rebound': 66,
'dunk': 91
},
'defense': {
'def_rebound': 69,
'inside_defense': 79,
'post_defense': 79,
'outside_defense': 98,
'block': 70,
'steal': 79
},
'physical': {
'speed': 87,
'vertical': 84,
'strength': 80
}
}, tendencies={
'offense': {
'shoot_mid': 60,
'shoot_three': 50,
'pass': 50,
'attack_rim': 89,
'post_up': 60
},
'defense': {
'steal': 90,
'block': 76,
'intercept': 78
}
}, avatar="https://heavy.com/wp-content/uploads/2020/04/gettyimages-288433-e1587404676462.jpg?quality=65&strip=all&w=780",
description="Considered one of the greatest small forwards of all time, Pippen was named to the NBA All-Defensive First Team eight consecutive times and the All-NBA First Team three times. He was a seven-time NBA All-Star and was the NBA All-Star Game MVP in 1994. He was named one of the 50 Greatest Players in NBA History during the 1996–97 season, and is one of four players to have his jersey retired by the Chicago Bulls",
image="https://www.nba.com/bulls/sites/bulls/files/200511_pippen_cp.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Flash",
height=76,
attributes={
'offense': {
'layup': 95,
'post_shot': 79,
'mid_range': 89,
'three': 79,
'handles': 89,
'pass': 95,
'off_rebound': 59,
'dunk': 95
},
'defense': {
'def_rebound': 61,
'inside_defense': 67,
'post_defense': 67,
'outside_defense': 94,
'block': 59,
'steal': 79
},
'physical': {
'speed': 97,
'vertical': 85,
'strength': 79
}
}, tendencies={
'offense': {
'shoot_mid': 70,
'shoot_three': 50,
'pass': 60,
'attack_rim': 99,
'post_up': 45
},
'defense': {
'steal': 75,
'block': 50,
'intercept': 69
}
}, avatar="https://www.thefamouspeople.com/profiles/images/dwyane-wade-7.jpg",
description="<NAME> is an American former professional basketball player. Wade spent the majority of his 16-year career playing for the Miami Heat of the National Basketball Association (NBA) and won three NBA championships, was a 13-time NBA All-Star, an 8-time member of the All-NBA Team, and a 3-time member of the All-Defensive Team.",
image="https://static01.nyt.com/images/2018/09/17/sports/17wade/merlin_136020240_d603bb45-5616-41df-9230-6cb59368b979-superJumbo.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="<NAME>",
height=84,
attributes={
'offense': {
'layup': 78,
'post_shot': 99,
'mid_range': 89,
'three': 90,
'handles': 60,
'pass': 69,
'off_rebound': 83,
'dunk': 75
},
'defense': {
'def_rebound': 84,
'inside_defense': 80,
'post_defense': 80,
'outside_defense': 43,
'block': 69,
'steal': 50
},
'physical': {
'speed': 46,
'vertical': 40,
'strength': 80
}
}, tendencies={
'offense': {
'shoot_mid': 80,
'shoot_three': 80,
'pass': 50,
'attack_rim': 69,
'post_up': 96
},
'defense': {
'steal': 40,
'block': 40,
'intercept': 35
}
}, avatar="https://dsf-img-storage.s3.us-east-2.amazonaws.com/wp-content/uploads/2017/01/05130731/Mavs-Fanatic-10-of-52-e1510873435146.jpg",
description="Nowitzki is the only player ever to play for a single NBA franchise for 21 seasons. He is a 14-time All-Star, a 12-time All-NBA Team member,[10] the first European player to start in an All-Star Game, and the first European player to receive the NBA Most Valuable Player Award. Nowitzki is the highest-scoring foreign-born player in NBA history.",
image="https://th.bing.com/th/id/R.40ef4aa877f401f06511757a4992b69e?rik=1lgAORZN%2fgXNLA&riu=http%3a%2f%2fimg.bleacherreport.net%2fimg%2fimages%2fphotos%2f002%2f821%2f208%2fhi-res-94543bac4bf7dee10e5ffecbf8d2037a_crop_exact.jpg%3fw%3d1200%26h%3d1200%26q%3d75&ehk=uec%2bDuybaNxpmG%2fMcDFq8J8LTy77z2xaN2Z6k4oWC%2bM%3d&risl=&pid=ImgRaw&r=0",
tier=1).save()
Player(name="<NAME>",
nickname="The Answer",
height=72,
attributes={
'offense': {
'layup': 98,
'post_shot': 60,
'mid_range': 87,
'three': 79,
'handles': 97,
'pass': 90,
'off_rebound': 49,
'dunk': 65
},
'defense': {
'def_rebound': 55,
'inside_defense': 35,
'post_defense': 35,
'outside_defense': 88,
'block': 39,
'steal': 95
},
'physical': {
'speed': 97,
'vertical': 97,
'strength': 69
}
}, tendencies={
'offense': {
'shoot_mid': 70,
'shoot_three': 50,
'pass': 65,
'attack_rim': 90,
'post_up': 45
},
'defense': {
'steal': 90,
'block': 40,
'intercept': 84
}
}, avatar="http://assets.rappler.com/612F469A6EA84F6BAE882D2B94A4B421/img/FCDA36695F0C4003803EC177E3F4269F/allen-iverson-sixers-epa-file-20140801_FCDA36695F0C4003803EC177E3F4269F.jpg",
description="<NAME> is an American former professional basketball player. Nicknamed 'the Answer' and 'AI', he played 14 seasons in the National Basketball Association (NBA) at both the shooting guard and point guard positions. Iverson won NBA Rookie of the Year Award in 1997 and was an 11-time NBA All-Star, won the All-Star game MVP award in 2001 and 2005, and was the NBA's Most Valuable Player (MVP) in 2001. He was inducted into the Naismith Memorial Basketball Hall of Fame in 2016. In October 2021, Iverson was honored as one of the league's greatest players of all time by being named to the NBA 75th Anniversary Team.",
image="https://buysidesports.com/wp-content/uploads/2020/06/allen-iverson-surprising-facts-1-e1596251286150.jpeg",
tier=1).save()
Player(name="<NAME>",
nickname="CP3",
height=72,
attributes={
'offense': {
'layup': 92,
'post_shot': 55,
'mid_range': 86,
'three': 82,
'handles': 94,
'pass': 95,
'off_rebound': 37,
'dunk': 27
},
'defense': {
'def_rebound': 63,
'inside_defense': 65,
'post_defense': 65,
'outside_defense': 93,
'block': 32,
'steal': 85
},
'physical': {
'speed': 84,
'vertical': 78,
'strength': 58
}
}, tendencies={
'offense': {
'shoot_mid': 90,
'shoot_three': 70,
'pass': 80,
'attack_rim': 69,
'post_up': 30
},
'defense': {
'steal': 90,
'block': 40,
'intercept': 99
}
}, avatar="https://img.bleacherreport.net/img/slides/photos/003/499/468/459774635-chris-paul-of-the-los-angeles-clippers-looks-on-against_crop_exact.jpg?w=2975&h=2048&q=85",
description="<NAME> nicknamed 'CP3', is an American professional basketball player who plays for the Phoenix Suns of the National Basketball Association (NBA). Paul is a point guard who has won the NBA Rookie of the Year Award, an NBA All-Star Game Most Valuable Player Award, two Olympic gold medals, and led the NBA in assists four times and steals a record six times. He has also been selected to twelve NBA All-Star teams, ten All-NBA teams, and nine NBA All-Defensive teams. In 2021, he was named to the NBA 75th Anniversary Team.",
image="https://ewscripps.brightspotcdn.com/02/fd/2842c30f426e80a52e800548b7b7/ap21165821777548.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="<NAME>",
height=74,
attributes={
'offense': {
'layup': 96,
'post_shot': 64,
'mid_range': 97,
'three': 90,
'handles': 99,
'pass': 84,
'off_rebound': 36,
'dunk': 30
},
'defense': {
'def_rebound': 43,
'inside_defense': 32,
'post_defense': 32,
'outside_defense': 62,
'block': 45,
'steal': 66
},
'physical': {
'speed': 86,
'vertical': 81,
'strength': 37
}
}, tendencies={
'offense': {
'shoot_mid': 80,
'shoot_three': 80,
'pass': 50,
'attack_rim': 94,
'post_up': 45
},
'defense': {
'steal': 65,
'block': 30,
'intercept': 45
}
}, avatar="https://nypost.com/wp-content/uploads/sites/2/2020/02/kyrie-irving-4.jpg?quality=80&strip=all",
description="<NAME> is an American professional basketball player for the Brooklyn Nets of the National Basketball Association (NBA). He was named the Rookie of the Year after being selected by the Cleveland Cavaliers with the first overall pick in the 2011 NBA draft. A seven-time All-Star and three-time member of the All-NBA Team, he won an NBA championship with the Cavaliers in 2016.",
image="https://static01.nyt.com/images/2021/10/08/lens/08nba-kyrie-01/merlin_195444735_cfee78bc-ed13-4a08-9267-501cc59d7a9c-superJumbo.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Cap",
height=86,
attributes={
'offense': {
'layup': 84,
'post_shot': 94,
'mid_range': 88,
'three': 30,
'handles': 48,
'pass': 58,
'off_rebound': 95,
'dunk': 95
},
'defense': {
'def_rebound': 96,
'inside_defense': 96,
'post_defense': 96,
'outside_defense': 37,
'block': 88,
'steal': 56
},
'physical': {
'speed': 78,
'vertical': 68,
'strength': 83
}
}, tendencies={
'offense': {
'shoot_mid': 50,
'shoot_three': 20,
'pass': 40,
'attack_rim': 99,
'post_up': 99
},
'defense': {
'steal': 50,
'block': 95,
'intercept': 65
}
}, avatar="https://fadeawayworld.net/.image/ar_1:1%2Cc_fill%2Ccs_srgb%2Cfl_progressive%2Cq_auto:good%2Cw_1200/MTg0NDg1MzE4NzEwMjczMTQ0/kareem-abdul-jabbar-iso-1981.jpg",
description="<NAME> is an American former professional basketball player for the Milwaukee Bucks and the Los Angeles Lakers. During his career as a center, Abdul-Jabbar was a record six-time NBA Most Valuable Player (MVP), a record 19-time NBA All-Star, a 15-time All-NBA selection, and an 11-time NBA All-Defensive Team member. A member of six NBA championship teams as a player and two more as an assistant coach, Abdul-Jabbar twice was voted NBA Finals MVP.",
image="https://www.doubleclutch.uk/wp-content/uploads/2020/04/Kareem_Abdul_Jabbar-e1587670066296.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Melo",
height=80,
attributes={
'offense': {
'layup': 89,
'post_shot': 95,
'mid_range': 97,
'three': 80,
'handles': 94,
'pass': 84,
'off_rebound': 45,
'dunk': 85
},
'defense': {
'def_rebound': 78,
'inside_defense': 73,
'post_defense': 73,
'outside_defense': 72,
'block': 68,
'steal': 78
},
'physical': {
'speed': 92,
'vertical': 85,
'strength': 82
}
}, tendencies={
'offense': {
'shoot_mid': 75,
'shoot_three': 60,
'pass': 30,
'attack_rim': 80,
'post_up': 95
},
'defense': {
'steal': 40,
'block': 60,
'intercept': 39
}
}, avatar="https://a.espncdn.com/combiner/i?img=%2Fphoto%2F2021%2F0224%2Fr818864_1296x729_16%2D9.jpg",
description="<NAME> is an American professional basketball player for the Los Angeles Lakers of the National Basketball Association (NBA). He has been named an NBA All-Star ten times and an All-NBA Team member six times. He played college basketball for the Syracuse Orange, winning a national championship as a freshman in 2003 while being named the NCAA Tournament's Most Outstanding Player. During the NBA's 75th anniversary, he was named one of the 75 Greatest Players in NBA History.[2]",
image="https://vip.nypost.com/wp-content/uploads/sites/2/2020/03/carmelo-anthony.jpg?quality=90&strip=all",
tier=1).save()
Player(name="<NAME>",
nickname="Splash Brother",
height=78,
attributes={
'offense': {
'layup': 88,
'post_shot': 78,
'mid_range': 94,
'three': 97,
'handles': 76,
'pass': 77,
'off_rebound': 36,
'dunk': 65
},
'defense': {
'def_rebound': 47,
'inside_defense': 64,
'post_defense': 64,
'outside_defense': 91,
'block': 51,
'steal': 59
},
'physical': {
'speed': 69,
'vertical': 67,
'strength': 55
}
}, tendencies={
'offense': {
'shoot_mid': 80,
'shoot_three': 90,
'pass': 30,
'attack_rim': 70,
'post_up': 40
},
'defense': {
'steal': 80,
'block': 50,
'intercept': 76
}
}, avatar="https://cdn.nba.com/manage/2020/11/klay-thompson-iso-1568x1045.jpg",
description="<NAME> is an American professional basketball player for the Golden State Warriors of the National Basketball Association (NBA). He is credited as one of the greatest shooters in NBA history.[2][3] A three-time NBA champion with the Warriors, he is a five-time NBA All-Star and a two-time All-NBA Third Team honoree. He has also been named to the NBA All-Defensive Second Team.",
image="https://www.nbcsports.com/sites/rsnunited/files/article/hero/Klay-Thompson-Shooting-USATSI-17087442.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="CB4",
height=83,
attributes={
'offense': {
'layup': 88,
'post_shot': 80,
'mid_range': 84,
'three': 79,
'handles': 65,
'pass': 69,
'off_rebound': 87,
'dunk': 85
},
'defense': {
'def_rebound': 87,
'inside_defense': 85,
'post_defense': 85,
'outside_defense': 60,
'block': 78,
'steal': 59
},
'physical': {
'speed': 48,
'vertical': 65,
'strength': 85
}
}, tendencies={
'offense': {
'shoot_mid': 70,
'shoot_three': 50,
'pass': 55,
'attack_rim': 88,
'post_up': 80
},
'defense': {
'steal': 60,
'block': 85,
'intercept': 45
}
}, avatar="http://images.thepostgame.com/assets/public/styles/slideshow_image/public/GettyImages-150590242-compressor.jpg",
description="While at Toronto, Bosh became a five-time NBA All-Star, was named to the All-NBA Second Team once, played for the U.S. national team (with whom he won a gold medal at the 2008 Summer Olympics), and supplanted former fan favorite Vince Carter as the face and leader of the Raptors franchise. In the 2006–07 season, Bosh led the Raptors to their first playoff appearance in five years and their first-ever division title. He left Toronto in 2010 as the franchise's all-time leader in points, rebounds, blocks, and minutes played.",
image="https://www.cleveland.com/resizer/MH_H8zEUC6FWyOtbQDo1bzWCuaY=/1280x0/smart/advancelocal-adapter-image-uploads.s3.amazonaws.com/image.cleveland.com/home/cleve-media/width2048/img/cavs_impact/photo/chris-bosh-2f5fc84c60ad8d8f.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Russ",
height=82,
attributes={
'offense': {
'layup': 76,
'post_shot': 65,
'mid_range': 45,
'three': 29,
'handles': 45,
'pass': 78,
'off_rebound': 97,
'dunk': 95
},
'defense': {
'def_rebound': 98,
'inside_defense': 98,
'post_defense': 98,
'outside_defense': 55,
'block': 99,
'steal': 72
},
'physical': {
'speed': 84,
'vertical': 88,
'strength': 94
}
}, tendencies={
'offense': {
'shoot_mid': 30,
'shoot_three': 10,
'pass': 60,
'attack_rim': 80,
'post_up': 75
},
'defense': {
'steal': 70,
'block': 99,
'intercept': 87
}
}, avatar="https://www.giantbomb.com/a/uploads/square_medium/14/141373/2681643-0711686871-dna06.jpg",
description="A five-time NBA Most Valuable Player and a 12-time All-Star, he was the centerpiece of the Celtics dynasty that won eleven NBA championships during his 13-year career. Russell and <NAME> of the National Hockey League are tied for the record of the most championships won by an athlete in a North American sports league. Russell led the San Francisco Dons to two consecutive NCAA championships in 1955 and 1956, and he captained the gold-medal winning U.S. national basketball team at the 1956 Summer Olympics.",
image="https://s-i.huffpost.com/gadgets/slideshows/304026/slide_304026_2589444_free.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="The Worm",
height=79,
attributes={
'offense': {
'layup': 55,
'post_shot': 48,
'mid_range': 65,
'three': 62,
'handles': 45,
'pass': 61,
'off_rebound': 99,
'dunk': 90
},
'defense': {
'def_rebound': 98,
'inside_defense': 98,
'post_defense': 98,
'outside_defense': 77,
'block': 50,
'steal': 57
},
'physical': {
'speed': 78,
'vertical': 88,
'strength': 86
}
}, tendencies={
'offense': {
'shoot_mid': 30,
'shoot_three': 10,
'pass': 30,
'attack_rim': 40,
'post_up': 60
},
'defense': {
'steal': 70,
'block': 99,
'intercept': 87
}
}, avatar="https://images.7news.com.au/publication/C-1046789/59774a3762246f68c2bceca98719361c6a8d46cc.jpg?imwidth=650&impolicy=sevennews_v2",
description="Rodman played at the small forward position in his early years before becoming a power forward. He earned NBA All-Defensive First Team honors seven times and won the NBA Defensive Player of the Year Award twice. He also led the NBA in rebounds per game for a record seven consecutive years and won five NBA championships.",
image="https://www.rollingstone.com/wp-content/uploads/2020/04/dennis-rodman-tattoo-t-shirt.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Zeke",
height=73,
attributes={
'offense': {
'layup': 97,
'post_shot': 29,
'mid_range': 84,
'three': 72,
'handles': 97,
'pass': 94,
'off_rebound': 35,
'dunk': 65
},
'defense': {
'def_rebound': 45,
'inside_defense': 40,
'post_defense': 40,
'outside_defense': 81,
'block': 35,
'steal': 71
},
'physical': {
'speed': 92,
'vertical': 74,
'strength': 60
}
}, tendencies={
'offense': {
'shoot_mid': 80,
'shoot_three': 65,
'pass': 79,
'attack_rim': 90,
'post_up': 40
},
'defense': {
'steal': 85,
'block': 30,
'intercept': 74
}
}, avatar="https://th.bing.com/th/id/OIP.xkNKvPjHyXt-whUQbzK6bwHaEK?pid=ImgDet&rs=1",
description="A point guard, the 12-time NBA All-Star was named one of the 50 Greatest Players in NBA History as well as the 75 Greatest Players, and inducted into the Naismith Memorial Basketball Hall of Fame. He played his entire professional career for the Detroit Pistons of the National Basketball Association (NBA).",
image="https://th.bing.com/th/id/R.57c9dbd49ba36969e21aef35228a4467?rik=7lGk1xMz0OFqGA&riu=http%3a%2f%2fimages.complex.com%2fcomplex%2fimage%2fupload%2fc_limit%2cw_680%2ff_auto%2cfl_lossy%2cpg_1%2cq_auto%2fp7pkha6ef63hoq2vyjhr.jpg&ehk=wkCsEHX%2foJ4yXQ3PyY%2bzbeufl26SuWgU%2ffeSJL0bsnQ%3d&risl=&pid=ImgRaw&r=0",
tier=1).save()
Player(name="<NAME>",
nickname="Big Pat",
height=84,
attributes={
'offense': {
'layup': 69,
'post_shot': 98,
'mid_range': 83,
'three': 35,
'handles': 40,
'pass': 64,
'off_rebound': 67,
'dunk': 95
},
'defense': {
'def_rebound': 92,
'inside_defense': 94,
'post_defense': 94,
'outside_defense': 35,
'block': 90,
'steal': 59
},
'physical': {
'speed': 45,
'vertical': 46,
'strength': 92
}
}, tendencies={
'offense': {
'shoot_mid': 40,
'shoot_three': 10,
'pass': 50,
'attack_rim': 78,
'post_up': 99
},
'defense': {
'steal': 55,
'block': 89,
'intercept': 59
}
}, avatar="https://comicvine.gamespot.com/a/uploads/square_medium/11/114183/5167058-20150805-1-ewing.jpg",
description="He had a seventeen-year NBA career, predominantly playing for the New York Knicks, where he was an eleven-time all-star and named to seven All-NBA teams. The Knicks appeared in the NBA Finals twice (1994 and 1999) during his tenure. He won Olympic gold medals as a member of the 1984 and 1992 United States men's Olympic basketball teams. Ewing was selected as one of the 50 Greatest Players in NBA History in 1996 and as one of the 75 Greatest Players in NBA History in 2021.",
image="https://images.pristineauction.com/129/1290754/main_1566511888-Patrick-Ewing-Signed-New-York-Knicks-16x20-Photo-Steiner-COA-PristineAuction.com.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="T.P",
height=74,
attributes={
'offense': {
'layup': 97,
'post_shot': 58,
'mid_range': 87,
'three': 73,
'handles': 95,
'pass': 94,
'off_rebound': 30,
'dunk': 30
},
'defense': {
'def_rebound': 47,
'inside_defense': 54,
'post_defense': 54,
'outside_defense': 80,
'block': 35,
'steal': 85
},
'physical': {
'speed': 99,
'vertical': 85,
'strength': 50
}
}, tendencies={
'offense': {
'shoot_mid': 80,
'shoot_three': 70,
'pass': 60,
'attack_rim': 75,
'post_up': 20
},
'defense': {
'steal': 75,
'block': 20,
'intercept': 67
}
}, avatar="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQK-dEHtfzaUKXkUxDBemnOR80cl7UmRDB-SA&usqp=CAU",
description="Parker was named to six NBA All-Star games, three All-NBA Second Teams, an All-NBA Third Team and was named MVP of the 2007 NBA Finals. He was also a member of the All-Rookie Second Team and had his No. 9 retired by the Spurs. He is widely regarded as one of the greatest European players of all time.",
image="https://s.hdnux.com/photos/01/11/36/32/19261106/14/rawImage.jpg",
tier=1).save()
Player(name="<NAME>",
nickname="Gino",
height=78,
attributes={
'offense': {
'layup': 92,
'post_shot': 65,
'mid_range': 80,
'three': 84,
'handles': 86,
'pass': 82,
'off_rebound': 52,
'dunk': 50
},
'defense': {
'def_rebound': 62,
'inside_defense': 45,
'post_defense': 45,
'outside_defense': 75,
'block': 51,
'steal': 70
},
'physical': {
'speed': 80,
'vertical': 67,
'strength': 43
}
}, tendencies={
'offense': {
'shoot_mid': 80,
'shoot_three': 60,
'pass': 40,
'attack_rim': 80,
'post_up': 30
},
'defense': {
'steal': 65,
'block': 40,
'intercept': 67
}
}, avatar="https://img.bleacherreport.net/img/images/photos/003/100/456/hi-res-86e2d4eeda744238fcdd7b0fe590af27_crop_north.jpg?1412881622&w=3072&h=2048",
description="Selected as the 57th overall pick in the 1999 NBA draft, Ginóbili joined the Spurs in 2002 and soon became a key player for the team. In addition to his four NBA championships, Ginóbili was named an All-Star in 2005 and 2011 and was selected twice for the All-NBA Third Team. In 2007–08, he was named the NBA Sixth Man of the Year. Ginóbili announced his retirement from the NBA on 27 August 2018.",
image="https://media.newyorker.com/photos/5b845a96244246652b65882f/4:3/w_2559,h_1919,c_limit/Cunningham-Manu-Ginobili.jpg",
tier=1).save()
def fallow(apps, schema_editor):
    """Reverse hook for the seed migration: remove every seeded Player row."""
    player_model = apps.get_model('blacktop', 'Player')
    player_model.objects.all().delete()
class Migration(migrations.Migration):
    """Data migration: seed the Player table (forward) / wipe it (backward)."""
    # Must run after the initial schema migration that creates the Player model.
    dependencies = [
        ('blacktop', '0001_initial'),
    ]
    # RunPython pairs the forward seeder with its reverse (delete-all) hook,
    # making the migration fully reversible.
    operations = [
        migrations.RunPython(seed, fallow)
    ]
|
"""
Mandelbrot & Julia set class for tkinter application.
This handles the computation of the Mandelbrot or Julia set as a numpy rgb array
which is loaded into a ImageTk.PhotoImage (and ultimately into a Tk.Canvas
for viewing in the GUI).
NB: use of Numba @jit decorators and pranges to improve performance.
For more information refer to http://numba.pydata.org.
Created on 29 Mar 2020
@author: semuadmin
"""
# pylint: disable=invalid-name
from math import log, sin, sqrt, pi, floor, ceil
from PIL import Image
from numba import jit, prange
import numpy as np
from colormaps.cet_colormap import cet_CBC1, cet_CBTC1, cet_C1, cet_C4s, BlueBrown16
from colormaps.landscape256_colormap import landscape256
from colormaps.metallic256_colormap import metallic256
from colormaps.pastels256_colormap import pastels256
from colormaps.tropical_colormap import tropical16, tropical256
from colormaps.twilight256_colormap import twilight256
from colormaps.twilights512_colormap import twilights512
from colormaps.hsv256_colormap import hsv256
PERIODCHECK = True # Turn periodicity check optimisation on/off
# Set-type selectors (indices into MODES).
MANDELBROT = 0
JULIA = 1
# Iteration variants (indices into VARIANTS).
STANDARD = 0
BURNINGSHIP = 1
TRICORN = 2
MODES = ("Mandelbrot", "Julia")
VARIANTS = ("Standard", "BurningShip", "Tricorn")
# Colour rendering themes selectable in the GUI; each name maps onto a
# branch in get_color() / sel_colormap().
THEMES = [
    "Default",
    "BlueBrown16",
    "Tropical16",
    "Tropical256",
    "Pastels256",
    "Metallic256",
    "Twilight256",
    "Twilights512",
    "Landscape256",
    "Colorcet_CET_C1",
    "Colorcet_CET_CBC1",
    "Colorcet_CET_CBTC1",
    "Colorcet_CET_C4s",
    "HSV256",
    "Monochrome",
    "BasicGrayscale",
    "BasicHue",
    "NormalizedHue",
    "SqrtHue",
    "LogHue",
    "SinHue",
    "SinSqrtHue",
    "BandedRGB",
]
@jit(nopython=True, parallel=True, cache=True)
def plot(
    imagemap,
    settype,
    setvar,
    width,
    height,
    zoom,
    radius,
    exponent,
    zxoff,
    zyoff,
    maxiter,
    theme,
    shift,
    cxoff,
    cyoff,
):
    """
    Render the selected fractal into the numpy rgb array 'imagemap'.

    Each pixel is computed independently (hence parallel=True): the core
    escape-time algorithm yields the escape scalars, which are then mapped
    to an rgb colour and written into the array.
    """
    for col in prange(width):  # pylint: disable=not-an-iterable
        for row in prange(height):  # pylint: disable=not-an-iterable
            # Escape scalars for this pixel drive the colour lookup below.
            iters, zabs = fractal(
                settype,
                setvar,
                width,
                height,
                col,
                row,
                zxoff,
                zyoff,
                zoom,
                maxiter,
                radius,
                exponent,
                cxoff,
                cyoff,
            )
            imagemap[row, col] = get_color(iters, zabs, radius, maxiter, theme, shift)
@jit(nopython=True, cache=True)
def fractal(
    settype,
    setvar,
    width,
    height,
    x_axis,
    y_axis,
    zxoff,
    zyoff,
    zoom,
    maxiter,
    radius,
    exponent,
    cxoff,
    cyoff,
):
    """
    Core escape-time iteration for a single image pixel.

    Returns the escape scalars (iteration count i, |z| at exit) used by
    the colour rendering routines.
    """
    re0, im0 = ptoc(width, height, x_axis, y_axis, zxoff, zyoff, zoom)
    z = complex(re0, im0)
    if settype == JULIA:
        # Julia and variants: c is fixed, z is seeded from the pixel.
        c = complex(cxoff, cyoff)
    else:
        # Mandelbrot and variants: c is the pixel itself.
        c = z
    z_prev = complex(0, 0)
    period = 0
    for i in prange(maxiter + 1):  # pylint: disable=not-an-iterable
        # Iterate until z leaves the escape radius.
        if setvar == BURNINGSHIP:
            z = complex(abs(z.real), -abs(z.imag))
        elif setvar == TRICORN:
            z = z.conjugate()
        z = z ** exponent + c
        # Optimisation: points inside the set often settle into a cycle;
        # spotting a repeated value lets us bail out at full iteration count.
        if PERIODCHECK:
            if z == z_prev:
                i = maxiter
                break
            period += 1
            if period > 20:
                period = 0
                z_prev = z
        # ... end of optimisation
        if abs(z) > radius ** 2:
            break
    return i, abs(z)  # i, za
@jit(nopython=True, cache=True)
def ptoc(width, height, x, y, zxoff, zyoff, zoom):
    """
    Map an actual pixel coordinate (x, y) onto the complex plane
    (zxoff, zyoff are always the complex offsets).
    """
    aspect = width / height
    half_w = width / 2
    half_h = height / 2
    zx_coord = zxoff + aspect * (x - half_w) / (zoom * half_w)
    # y axis is flipped: pixel rows grow downwards, imaginary axis upwards.
    zy_coord = zyoff - (y - half_h) / (zoom * half_h)
    return zx_coord, zy_coord
@jit(nopython=True, cache=True)
def ctop(width, height, zx_coord, zy_coord, zxoff, zyoff, zoom):
    """
    Converts complex space coordinates to actual pixel coordinates
    (zxoff, zyoff are always the complex offsets).

    NOTE(review): this is not an exact inverse of ptoc() — it *adds* the
    offsets where ptoc subtracts them, and omits the half-width/half-height
    recentring and the y-axis sign flip. Callers may compensate elsewhere;
    confirm before relying on ptoc()/ctop() round-tripping.
    """
    x_coord = (zx_coord + zxoff) * zoom * width / 2 / (width / height)
    y_coord = (zy_coord + zyoff) * zoom * height / 2
    return x_coord, y_coord
@jit(nopython=True, cache=True)
def get_color(i, za, radius, maxiter, theme, shift):
    """
    Uses escape scalars i, za from the fractal algorithm to drive a variety
    of color rendering algorithms or 'themes'.

    NB: If you want to add more rendering algorithms, this is where to add them,
    but you'll need to ensure they are 'Numba friendly' (i.e. limited to
    Numba-recognised data types and suitably decorated library functions).

    :param i: iteration count at escape (== maxiter when the point never escaped)
    :param za: |z| at exit, used by the smooth-colouring themes
    :param radius: escape radius used by the iteration
    :param maxiter: iteration limit
    :param theme: rendering algorithm name (one of THEMES)
    :param shift: palette/hue shift in percent
    :return: (r, g, b) tuple
    """
    if i == maxiter and theme != "BasicGrayscale": # Inside Mandelbrot set, so black
        return 0, 0, 0
    if theme == "Default":
        theme = "BlueBrown16"
    if theme == "Monochrome":
        # Hue fixed by shift; shift == 0 renders pure grayscale (s = 0).
        if shift == 0:
            h = 0.0
            s = 0.0
        else:
            h = 0.5 + shift / -200
            s = 1.0
        r, g, b = hsv_to_rgb(h, s, 1.0)
    elif theme == "BasicGrayscale":
        if i == maxiter:
            return 255, 255, 255
        r = 256 * i / maxiter
        b = g = r
    elif theme == "BasicHue":
        h = ((i / maxiter) + (shift / 100)) % 1
        r, g, b = hsv_to_rgb(h, 0.75, 1)
    elif theme == "BandedRGB":
        # Each channel cycles through 4 fixed intensity bands at a
        # different rate, giving a banded pseudo-colour effect.
        bands = [0, 32, 96, 192]
        r = bands[(i // 4) % 4]
        g = bands[i % 4]
        b = bands[(i // 16) % 4]
    elif theme == "NormalizedHue":
        # The remaining hue themes use the smoothed iteration count from
        # normalize() to avoid visible banding between integer steps.
        h = ((normalize(i, za, radius) / maxiter) + (shift / 100)) % 1
        r, g, b = hsv_to_rgb(h, 0.75, 1)
    elif theme == "SqrtHue":
        h = ((normalize(i, za, radius) / sqrt(maxiter)) + (shift / 100)) % 1
        r, g, b = hsv_to_rgb(h, 0.75, 1)
    elif theme == "LogHue":
        h = ((normalize(i, za, radius) / log(maxiter)) + (shift / 100)) % 1
        r, g, b = hsv_to_rgb(h, 0.75, 1)
    elif theme == "SinHue":
        h = normalize(i, za, radius) * sin(((shift + 1) / 100) * pi / 2)
        r, g, b = hsv_to_rgb(h, 0.75, 1)
    elif theme == "SinSqrtHue":
        steps = 1 + shift / 100
        h = 1 - (sin((normalize(i, za, radius) / sqrt(maxiter) * steps) + 1) / 2)
        r, g, b = hsv_to_rgb(h, 0.75, 1)
    else: # Indexed colormap arrays
        r, g, b = sel_colormap(i, za, radius, shift, theme)
    return r, g, b
@jit(nopython=True, cache=True)
def sel_colormap(i, za, radius, shift, theme):
    """
    Select from indexed colormap theme.

    Dispatches on the theme name to the matching palette array and delegates
    the actual lookup/interpolation to get_colormap().

    NOTE(review): if ``theme`` matches none of the names below, r, g, b are
    never assigned and the return fails. Callers appear to restrict ``theme``
    to the THEMES list — confirm before exposing new theme names.
    """
    if theme == "Colorcet_CET_CBC1":
        r, g, b = get_colormap(i, za, radius, shift, cet_CBC1)
    elif theme == "Colorcet_CET_CBTC1":
        r, g, b = get_colormap(i, za, radius, shift, cet_CBTC1)
    elif theme == "Colorcet_CET_C1":
        r, g, b = get_colormap(i, za, radius, shift, cet_C1)
    elif theme == "Colorcet_CET_C4s":
        r, g, b = get_colormap(i, za, radius, shift, cet_C4s)
    elif theme == "BlueBrown16":
        r, g, b = get_colormap(i, za, radius, shift, BlueBrown16)
    elif theme == "Tropical16":
        r, g, b = get_colormap(i, za, radius, shift, tropical16)
    elif theme == "Tropical256":
        r, g, b = get_colormap(i, za, radius, shift, tropical256)
    elif theme == "Pastels256":
        r, g, b = get_colormap(i, za, radius, shift, pastels256)
    elif theme == "Metallic256":
        r, g, b = get_colormap(i, za, radius, shift, metallic256)
    elif theme == "Twilight256":
        r, g, b = get_colormap(i, za, radius, shift, twilight256)
    elif theme == "Twilights512":
        r, g, b = get_colormap(i, za, radius, shift, twilights512)
    elif theme == "Landscape256":
        r, g, b = get_colormap(i, za, radius, shift, landscape256)
    elif theme == "HSV256":
        r, g, b = get_colormap(i, za, radius, shift, hsv256)
    return r, g, b
@jit(nopython=True, cache=True)
def get_colormap(i, za, radius, shift, colmap):
    """
    Look up a pixel colour from an indexed colormap, blending the two
    adjacent palette entries for a smooth gradient.
    """
    size = len(colmap)
    ni = normalize(i, za, radius)  # normalised iteration count
    offset = ceil(shift * size / 100)  # palette shift
    base = floor(ni) + offset
    lower = colmap[base % size]
    upper = colmap[(base + 1) % size]
    return interpolate(lower, upper, ni)
@jit(nopython=True, cache=True)
def normalize(i, za, radius):
    """
    Derive a smooth (fractional) iteration count from the escape scalars,
    removing the colour banding of the raw integer count.
    """
    log_zn = log(za ** 2) / 2
    frac = log(log_zn / log(radius)) / log(2)
    return i + 1 - frac
@jit(nopython=True, cache=True)
def interpolate(col1, col2, ni):
    """
    Linearly blend two adjacent palette colours using the fractional part
    of the normalised iteration count.
    """
    frac = ni % 1
    blended = [
        (col2[0] - col1[0]) * frac + col1[0],
        (col2[1] - col1[1]) * frac + col1[1],
        (col2[2] - col1[2]) * frac + col1[2],
    ]
    return blended
@jit(nopython=True, cache=True)
def hsv_to_rgb(h, s, v):
    """
    Convert HSV values (each in range 0-1) to RGB (each in range 0-255).
    """
    v = int(v * 255)
    if s == 0.0:
        # Zero saturation: achromatic grey.
        return v, v, v
    i = int(h * 6.0)
    f = (h * 6.0) - i
    p = int(v * (1.0 - s))
    q = int(v * (1.0 - s * f))
    t = int(v * (1.0 - s * (1.0 - f)))
    i %= 6
    # One sector of the hue wheel per value of i (0..5).
    if i == 0:
        r, g, b = v, t, p
    elif i == 1:
        r, g, b = q, v, p
    elif i == 2:
        r, g, b = p, v, t
    elif i == 3:
        r, g, b = p, q, v
    elif i == 4:
        r, g, b = t, p, v
    else:
        r, g, b = v, p, q
    return r, g, b
class Mandelbrot:
    """
    Main computation and imaging class.

    Holds the rendered PIL image and a cancellation flag; the numeric
    heavy lifting is delegated to the module-level jit-compiled plot().
    """

    def __init__(self, master):
        """
        Constructor.
        """
        self.__master = master
        self._kill = False
        self._image = None

    def plot_image(
        self,
        settype,
        setvar,
        width,
        height,
        zoom,
        radius,
        exp,
        zxoff,
        zyoff,
        maxiter,
        theme,
        shift,
        cxoff,
        cyoff,
    ):
        """
        Allocate an empty numpy rgb array, hand it to the fractal routine
        for populating, then wrap the result in a PIL Image.
        """
        self._kill = False
        pixels = np.zeros((height, width, 3), dtype=np.uint8)
        plot(
            pixels,
            settype,
            setvar,
            width,
            height,
            zoom,
            radius,
            exp,
            zxoff,
            zyoff,
            maxiter,
            theme,
            shift,
            cxoff,
            cyoff,
        )
        self._image = Image.fromarray(pixels, "RGB")

    def get_image(self):
        """
        Return the populated image for display or saving.
        """
        return self._image

    def get_cancel(self):
        """
        Return the kill flag.
        """
        return self._kill

    def cancel_plot(self):
        """
        Request cancellation of an in-flight plot (plots normally finish
        so quickly that this is largely redundant).
        """
        self._kill = True
|
<reponame>jakgel/clusterbuster
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 18:54:38 2017
@author: jakobg
"""
#!/usr/bin/env python
# Some classes for the relic list
# ...
from __future__ import division,print_function
import csv
import numpy as np
import clusterbuster.surveyclasses as cbclass
import clusterbuster.dbclasses as dbclass
import clusterbuster.iout.misc as iom
import os
def findshort(phrase, shortlist):
    """Translate a long reference name into its short form.

    Looks *phrase* up in the ``Long`` column of the CSV file *shortlist* and
    returns the matching ``Short`` entry.  Falls back to *phrase* itself when
    no shortlist is given or no match is found; an empty phrase maps to a
    LaTeX placeholder of matching width.

    Parameters
    ----------
    phrase : str
        Long-form name to translate (may be empty).
    shortlist : str or None
        Path to a CSV file with ``Long`` and ``Short`` columns, or None to
        disable translation.
    """
    if shortlist is not None:
        # BUG FIX: was ``phrase is ''`` -- identity comparison with a string
        # literal only worked by CPython interning accident (and raises a
        # SyntaxWarning on Python >= 3.8); use equality instead.
        if phrase == '':
            return '\phantom{AAA00}'
        # NOTE(review): binary mode is the Python-2 csv convention (matching
        # the module's shebang); under Python 3 this would need
        # ``open(shortlist, 'r', newline='')`` instead -- confirm target runtime.
        with open(shortlist, 'rb') as csvfile:
            # Use a distinct name for the reader instead of shadowing the
            # ``shortlist`` parameter.
            reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
            for row in reader:
                if phrase == row['Long']:
                    return row['Short']
    return phrase
def ClusterVol_lum(surveys, location):
    """ Output to start a MCMC based analysis
    Work in progress

    Collects one row per galaxy cluster (survey index, power, M200, simu
    flag, cluster count) across all surveys and dumps them as a CSV via
    numpy.savetxt into the first survey's output folder.
    """
    header = 'SurveyID, PowerCluster, M200, Simu?'
    rows = []
    for idx, survey in enumerate(surveys):
        print('ClusterVol_lum:', survey.name)
        n_cl = len(survey.GCls)
        for GCl in survey.GCls:
            if not survey.Rmodel.simu:
                # Observed survey: use the rest-frame power measurand value.
                rows.append([idx, GCl.P_rest.value, GCl.M200.value, survey.Rmodel.simu, n_cl])
            elif 'MUSIC2' in survey.name:
                # MUSIC2 simulations store the volume power directly (no .value).
                rows.append([idx, GCl.Prest_vol, GCl.M200.value, survey.Rmodel.simu, n_cl])
            else:
                rows.append([idx, GCl.Prest_vol.value, GCl.M200.value, survey.Rmodel.simu, n_cl])
    print("Save results in %s%s.csv" % (surveys[0].outfolder, location))
    np.savetxt("%s%s.csv" % (surveys[0].outfolder, location), rows, delimiter=",", fmt='%.3e', header=header)
    return 0
def create_table_frame(protoobj, caption, dictionary, delimiter='&', ender='\\', longtab=False, outer=True):
    """Build the LaTeX head and foot strings framing a table body.

    ``protoobj`` is a prototype row object used to derive column labels and
    units when a column accessor yields a measurand; otherwise the labels
    come from dictionary entries 3 and 4.  Returns ``(head, foot)``.
    """
    # l r r r r r r r r r r}
    tabline = ''
    idline = ''
    unline = ''
    sizeline = '\\scriptsize\n'
    labeline = "" #"'\label{tab:%s}\n' % (label)
    capline = '\caption{%s} \n' % (caption)
    delime = '\hline\hline\n'
    # Accumulate column alignment spec, header labels and unit labels.
    for ii, entry in enumerate(dictionary):
        tabline += ' %s ' % entry[2]
        if not isinstance(entry[0], list) and isinstance(entry[0](protoobj), dbclass.measurand):
            # Measurand column: label and unit come from the measurand itself.
            idline += entry[0](protoobj).label
            unline += entry[0](protoobj).unit_string()
        else:
            # Plain column: label and unit are given explicitly in the entry.
            idline += entry[3]
            unline += entry[4]
        if ii < len(dictionary) - 1:
            idline += delimiter
            unline += delimiter
    idline += ender + ender + '\n'
    unline += ender + ender #+ '\hline\hline'
    tabline += '} \n'
    if longtab:
        # longtable layout with repeated headers and continuation footers,
        # wrapped in a landscape environment.
        tabline = '\\begin{longtable}{'+tabline
        capline += """\\\\"""
        head = """\\begin{landscape}\n\\begin{center}""" + sizeline + tabline + capline + delime + labeline +idline +unline + """
\\endfirsthead
\multicolumn{11}{l}{\\tablename\ \\thetable\ -- \\textit{Continued from previous page}} \\\\
\hline""" + idline +unline + """
\\endhead
\hline\hline \multicolumn{11}{r}{\\textit{Continued on next page}} \\\\
\\endfoot""" + delime + '\\endlastfoot'
        foot = '\hline\n\\end{longtable}\n\\end{center}\n\\end{landscape}'
    else:
        # Plain tabular; ``outer`` controls whether the table*/center
        # wrappers are emitted around it.
        tabline = '\\begin{tabular}{' + tabline
        head = ("""\\begin{table*}\n\\begin{center}""" + capline + sizeline) * int(outer) + tabline + delime +labeline + idline + unline + delime
        foot = '\hline\hline\n\\end{tabular}\n'+int(outer)*'\\end{center}\n\\end{table*}'
    return head, foot
def create_table_columns(objectlist, dictionary, delimiter='&', ender='\\\\'):
    """Render one LaTeX table row per object and join them into one string.

    Each *dictionary* entry describes a column: entry[0] is an accessor
    (a lambda, a list of lambdas for multi-value columns, or a measurand
    accessor whose result is called) and entry[1] holds the matching
    %-format string(s).
    """
    last_col = len(dictionary) - 1
    rows = []
    for obj in objectlist:
        cells = ''
        for pos, entry in enumerate(dictionary):
            accessor, fmt = entry[0], entry[1]
            if isinstance(accessor, list):
                # Several values folded into one column; NaN values are skipped.
                for getter, sub_fmt in zip(accessor, fmt):
                    value = getter(obj)
                    if not np.isnan(value):
                        cells += sub_fmt % value
            elif len(entry) > 3:
                # Plain single value.
                cells += fmt % accessor(obj)
            else:
                # Measurand: the accessor returns a callable yielding the value.
                cells += fmt % accessor(obj)()
            if pos < last_col:
                cells += delimiter
        cells += '%s \n' % ender
        # Collapse empty math groups produced by adjacent $-delimited formats.
        rows.append(cells.replace('$$', ''))
    return "".join(rows)
def create_table(objectlist, dictionary, caption='nocap', outer=False, longtab=False):
    """Create a LaTeX table from an object list and a column dictionary.

    Parameters
    ----------
    objectlist : list
        Objects rendered one per row; the first one serves as the prototype
        for deriving column labels/units in the frame.
    dictionary : list
        Column specification (see create_table_columns / create_table_frame).
    caption : str
        Table caption.
    outer : bool
        Emit the outer table*/center wrappers.
    longtab : bool
        Use the longtable layout instead of a plain tabular.

    Returns
    -------
    str
        Complete LaTeX table (head + rows + foot).
    """
    # BUG FIX: the frame was previously built with ``longtab=outer`` instead
    # of the ``longtab`` argument, so the longtable layout could never be
    # selected independently of ``outer``.  Leftover debug prints removed.
    header, ender = create_table_frame(objectlist[0], caption, dictionary,
                                       outer=outer, longtab=longtab)
    columns = create_table_columns(objectlist, dictionary)
    return header + columns + ender
def RList2table_paper(location, survey, longtab=False):
    """Write the per-relic LaTeX table for *survey* to the file *location*.

    Applies the survey's cluster filter, gathers all relics, sorts them in
    natural name order and renders one table row per relic via create_table().

    Parameters
    ----------
    location : str
        Output file path for the rendered table.
    survey : clusterbuster survey object
        Provides the relic list and the survey name used in the caption.
    longtab : bool
        Currently unused; kept for interface symmetry with
        GClList2table_paper.
    """
    survey.FilterCluster(**survey.cluster_filter_kwargs)
    RList = survey.fetch_totalRelics()
    RList.sort(key=iom.Object_natural_keys)
    # Column spec: accessor(s), %-format(s), alignment[, header, unit].
    dictionary = [ [lambda x: x.name.replace('_', ' '), '%25s', 'l', 'Identifier', ''],
                   [lambda x: x.RA , '%5.2f', 'r'],
                   [lambda x: x.Dec, '%+7.2f', 'r'],
                   [[lambda x: x.alpha(), lambda x: x.alpha.std[0]], ['$%.2f$', '$\pm%0.2f$'], 'l', '$\\alpha_\mathrm{int}$', ''],
                   [[lambda x: x.flux(), lambda x: x.flux.std[0]], ['$%7.1f$', '$\pm%5.1f$'], 'r', '$S_{1.4}$', '[mJy]'],
                   [[lambda x: np.log10(x.P_rest()), lambda x: np.log10((x.P_rest()+x.P_rest.std[0])/x.P_rest()),
                     lambda x: np.log10((x.P_rest()-x.P_rest.std[0])/x.P_rest())], ['$%5.2f$', '$^{+%4.2f}$', '$_{%4.2f}$'],
                    'r', 'log$_{10}(P_{1.4})$', '$\mathrm{[W/Hz^{-1}]}$'],
                   [lambda x: x.LAS, '%5.2f', 'r'],
                   [lambda x: x.LLS, '%5.0f', 'r'],
                   [lambda x: x.iner_rat(), '%.2f', 'r', '$\lambda_2/\lambda_1$', ''],
                   [lambda x: x.Dproj_pix, '%.0f', 'r'],
                   ]
    caption = 'Radio relic emission islands identified in the %s images' % (survey.name)
    table = create_table(RList, dictionary, caption=caption)
    # WRITE COMMON FILE -- ``with`` guarantees the handle is closed even if
    # the write raises (the original open/close pair leaked on error).
    with open(location, "w") as mf:
        mf.write(table)
def GClList2table_paper(location, survey, shortlist=None, longtab=False):
    """Write LaTeX tables of radio-relic hosting galaxy clusters to disk.

    Produces four files based at *location*:
      - ``location``            : main relic-cluster table,
      - ``location + "centre"`` : cluster centre coordinates,
      - ``location + "compacts"``: subtracted compact sources,
      - ``location + "phoenixes"``: clusters with no confirmed relic status.

    Parameters
    ----------
    location : str
        Base path for the output files.
    survey : object
        Survey object exposing ``name`` and a ``GCls`` iterable of
        galaxy-cluster objects.
    shortlist : optional
        Reference shortlist forwarded to ``findshort`` to resolve citation keys.
    longtab : bool, default False
        If True the main table uses a landscape ``longtable`` layout,
        otherwise a ``table*``/``tabular`` layout.
    """
    # ``outer`` toggles the table*/center wrappers: multiplying the wrapper
    # string by int(outer) (0 or 1) drops or keeps it. Currently disabled.
    outer = False
    n_clusters = 0
    n_compact_sources = 0
    if longtab:
        head = """\\begin{landscape}
\\begin{center}
\\scriptsize
\\begin{longtable}{ l r r r r c c}
% \centering
\caption{Radio relic hosting clusters}\\\\ \hline\hline
\label{tab:NVSSrelics}
Cluster & z & $M_{200}$ & $F_\mathrm{NVSS}$ & $F_\mathrm{1.4, lit}$ & Diff. Emi. & References\\\\
 & [] & [$10^{14} M_\odot$] & [mJy] & [mJy] & RHR* & mass $\\mid$ relics \\\\
(1) & (2) & (3) & (4) & (5) & (6) & (7) \\\\\hline\hline
\\endfirsthead
\multicolumn{11}{l}{\\tablename\ \\thetable\ -- \\textit{Continued from previous page}} \\\\
\hline
Cluster & z & $M_{200}$ & $F_\mathrm{NVSS}$ & $F_\mathrm{1.4, lit}$ & Diff. Emi. & References\\\\
 & [] & [$10^{14} M_\odot$] & [mJy] & [mJy] & RHR* & mass $\\mid$ relics \\\\
(1) & (2) & (3) & (4) & (5) & (6) & (7) \\\\\hline\hline
\\endhead
\hline\hline \multicolumn{11}{r}{\\textit{Continued on next page}} \\\\
\\endfoot
\hline\hline
\\endlastfoot"""
        foot = '\hline\n\\end{longtable}\n\\end{center}\n\\end{landscape}'
    else:
        head = """\\begin{table*}
\\begin{center}
\caption{Radio relic hosting clusters}
\\scriptsize""" * int(outer) + """
\\begin{tabular}{ l c r r r c c}
\hline\hline
\label{tab:NVSSrelics}
Cluster & z & $M_{200}$ & $F_\mathrm{NVSS}$ & $F_\mathrm{1.4, lit}$ & Diff. Emi. & References\\\\
 & [] & [$10^{14} M_\odot$] & [mJy] & [mJy] & RHR* & mass $\\mid$ relics \\\\
(1) & (2) & (3) & (4) & (5) & (6) & (7) \\\\\hline\hline
"""
        foot = '\hline\hline\n\\end{tabular}\n'+int(outer)*'\\end{center}\n\\label{tab:NVSSrelics}\n\\end{table*}'
    #RList.sort(key= iom.Object_natural_keys )
    """ Start with relic cluster """
    # --- Main table: one row per cluster with a confirmed relic status ----
    mf = open(location, "w")
    mf.write(head)
    for m in survey.GCls:
        # WRITE COMMON FILE
        # getstatusstring()[0] is None for clusters without confirmed relics
        if m.getstatusstring()[0] is not None:
            # PS$^1$ &
            # \'\'/Mpc &
            # %4.1f & 1000./(m.cosmoPS*60)
            # if m.P_rest() == 0:
            #     print '!!!!!!!! m.Prest == 0', m.name, m.P_rest
            string = "%25s & $%.3f$ & %.1f & %s & $%5.1f$ &" % (m.name, m.z.value, m.M200.value/1e14, m.getstatusstring()[1], m.flux_lit.value) + '%s' % (m.gettypestring()) + '& %s - %s\\\\' % (findshort(m.Lx.ref.ident, shortlist) , findshort(m.flux_lit.ref.ident, shortlist) )
            mf.write(string + '\n')
            n_clusters += 1
    print('n_clusters:', n_clusters)
    mf.write(foot)
    mf.close()
    """ Write relic clusters coordinates """
    # --- Coordinates table ------------------------------------------------
    mf = open(location + "centre", "w")
    head_mod = """\\begin{table*}
\\begin{center}
\\caption{Radio relic hosting clusters centre coordinates}
\\scriptsize""" * int(outer) + """
\\begin{tabular}{ l r r c c c c}
\hline\hline
\label{tab:NVSSrelics}
Cluster & RA & Dec & Method & \multicolumn{2}{c}{$\\Delta_\\mathrm{simbad}$} & References\\\\
 & [deg] & [deg] & & [\SI{}{\\arcsecond}] & $[R_{200}]$ & \\\\
(1) & (2) & (3) & (4) & (5) & (6) & (7) \\\\\hline\hline """
    foot_mod = '\hline\hline\n\\end{tabular}\n' + int(outer) * '\\end{center}\n\\label{tab:NVSSrelics}\n\\end{table*}'
    mf.write(head_mod)
    for m in survey.GCls:
        # WRITE COMMON FILE
        if m.getstatusstring()[0] is not None:
            # NOTE(review): Method/offset/reference columns are placeholders
            # ('Planck & 0.01 & 0.1 & cite') -- confirm before publication.
            string = "%25s & %.3f & %.3f &" % (m.name, m.RA.value, m.Dec.value) + 'Planck & 0.01 & 0.1 & cite \\\\'
            mf.write(string + '\n')
    mf.write(foot_mod)
    mf.close()
    """ Write subtracted sources """
    # --- Compact (subtracted) sources table -------------------------------
    mf = open(location + "compacts", "w")
    head_mod = """\\begin{table*}
\\begin{center}
\\scriptsize""" * int(outer) + """
\\begin{tabular}{ l r r r c c c c}
\hline\hline
\label{tab:NVSSrelics}
Cluster & RA & Dec & flux & type & $\\Theta_\\mathrm{major}$ & $\\Theta_\\mathrm{minor}$ & $\\theta$ \\\\
 & [deg] & [deg] & [mJ] & & [ \SI{}{\\arcminute}] & [\SI{}{\\arcminute}] & [deg] \\\\
(1) & (2) & (3) & (4) & (5) & (6) & (7) & (8) \\\\\hline\hline
"""
    foot_mod = '\hline\hline\n\\end{tabular}\n' + int(outer) * '\\end{center}\n\\label{tab:NVSS_compactsources}\n\\end{table*}'
    mf.write(head_mod)
    for m in survey.GCls:
        # WRITE COMMON FILE
        if m.getstatusstring()[0] is not None:
            for compact in m.compacts:
                # J2000 sexagesimal -> decimal degrees; the sign of the hour
                # field decides whether minute/second terms add or subtract.
                values = iom.J2000ToCoordinate(compact['dir'])
                values = [float(value) for value in values]
                if values[0] > 0:
                    RA = (values[0]+values[1]/60+values[2]/3600)*15
                else:
                    RA = (values[0]-values[1]/60-values[2]/3600)*15
                Dec = values[3]+values[4]/60+values[5]/3600
                string = "%25s & %.3f & %.3f &" % (m.name, RA, Dec)
                if compact['shape'] == "Point":
                    string += "%.1f & %s & & & \\\\" % (float(compact['flux']), compact['shape'])
                else:
                    string += "%.1f & %s & %.2f & %.2f & %.0f \\\\" % (float(compact['flux']), "Extended",
                                                                       float(compact['majoraxis']), float(compact['minoraxis']),
                                                                       float(compact['theta']))
                mf.write(string + '\n')
                n_compact_sources += 1
    print('n_compact_sources:', n_compact_sources)
    mf.write(foot_mod)
    mf.close()
    """ Do phoenix clusters """
    # --- Clusters without confirmed relic status ("phoenixes") ------------
    # Reuses the main-table header/footer chosen above.
    mf = open(location+'phoenixes',"w")
    mf.write(head)
    for m in survey.GCls:
        # if m.flux() == 0 or m.z < 0.05:
        if m.getstatusstring()[0] is None:
            string = "%25s & $%.3f$ & %.1f & %s & $%5.1f$ &" % (m.name, m.z.value, m.M200.value/1e14, m.getstatusstring()[1], m.flux_lit.value) + '%s' % (m.gettypestring()) + '& %s - %s\\\\' % (findshort(m.Lx.ref.ident, shortlist) , findshort(m.flux_lit.ref.ident, shortlist) )
            mf.write(string + '\n')
    mf.write( foot )
    mf.close()
def LOFARwiki(survey):
    """Build wiki markup embedding one NVSS PDF per cluster.

    Emits an ``<<EmbedObject(...)>>`` line for every cluster in
    ``survey.GCls``, inserting a blank line before every third entry so the
    wiki renders them in rows of three.
    """
    chunks = []
    for idx, cluster in enumerate(survey.GCls):
        if idx % 3 == 0:
            # start a new wiki row every three images
            chunks.append('\n')
        chunks.append('<<EmbedObject(NVSS-%s.pdf,width=500.0,height=500,menu = False)>>\n' % (cluster.name))
    return ''.join(chunks)
#=== Old===#
#==========#
def RList2nparray_csv(RList, AddInfo):
    """Flatten a list of relic objects into a (19, n_relics) array for CSV export.

    Parameters
    ----------
    RList : sequence
        Relic objects exposing ``GCl``, ``region``, ``dinfo``, ``Dproj`` etc.
    AddInfo : object
        Unused; kept for interface compatibility.

    Returns
    -------
    (numpy.ndarray, str)
        The per-relic columns and a semicolon-separated header line.
        NOTE(review): the header names more fields than the 19 rows the
        array carries -- confirm against the CSV consumer.
    """
    # FIX: dtype=str creates a '<U1' array, silently truncating every value
    # to a single character on assignment; use object to keep full strings.
    nparray = np.empty((19, len(RList)), dtype=object)
    for ii, relic in enumerate(RList):
        nparray[:, ii] = np.asarray([relic.GCl.name, relic.region.name, relic.GCl.z, relic.RA, relic.Dec,
                                     relic.region.rtype, relic.dinfo.limit, relic.flux, np.log10(relic.P),
                                     np.log10(relic.Prest), relic.Dproj[0], relic.Dproj[1], relic.LLS,
                                     relic.LAS, 0, 0, 0, 0, relic.area])
    header = "ClusterID; relic; z; RA; Dec; rtype; detlimit; flux; log10(Pobsframe[W/Hz/m^2]); log10(P1400MHz); Distance(arcsec); Distance(kpc); LLS; LAS; a (kpc); b (kpc); a(arcsec); b(arcsec); Area(kpc^2); Area(arcsec^2); angle(relic); relative angle; alpha; alpha?"
    return nparray, header
import pandas as pd
import pyodbc
import omfn.xdttm as odt
import omfn.vbafn as vbf
import requests
TOKEN = '<KEY>'
def custom_msg_sender(chatid, msg):
    """Send *msg* to Telegram chat *chatid* via the Bot API.

    FIX: the original concatenated the message straight into the URL, so any
    space, '&', '#' or non-ASCII character corrupted or truncated the request.
    Passing ``params=`` lets requests URL-encode both values.
    """
    url = "https://api.telegram.org/bot" + TOKEN + "/sendMessage"
    requests.get(url, params={"chat_id": chatid, "text": msg})
class mssq:
    """Thin pyodbc/pandas helper around the SOC_Roster SQL Server database.

    Read helpers return ``DataFrame.to_dict()`` dictionaries; write helpers
    issue qmark-parameterized statements and commit them.

    NOTE(review): table and column names are still interpolated into the SQL
    text (identifiers cannot be bound as parameters), so they must only come
    from trusted, hard-coded call sites.
    """

    def __init__(self):
        # NOTE(review): credentials are hard-coded in source; move to
        # configuration / environment variables.
        self.socdb = "Driver={SQL Server};SERVER=192.168.88.121;DATABASE=SOC_Roster;UID=sa;PWD=<PASSWORD>&"
        self.conx = pyodbc.connect(self.socdb)

    def check_existance_by_ref(self, tbl, colname, value):
        """Return the number of rows in *tbl* where *colname* equals *value*."""
        # FIX: value bound as a parameter instead of string concatenation
        # (prevents SQL injection and quoting bugs).
        qry = "select * from " + tbl + " where " + colname + "=?"
        print(qry)
        df = pd.read_sql(qry, self.conx, params=[value])
        return df.shape[0]

    def query_full_tbl(self, tbl):
        """Return the whole table as a dict (``DataFrame.to_dict()`` layout)."""
        qry = "select * from " + tbl
        print(qry)
        df = pd.read_sql(qry, self.conx)
        return df.to_dict()

    def insert_new_entry(self, tbl, colnames, values):
        """Insert a row built from raw SQL fragments *colnames*/*values*.

        NOTE(review): *values* is a caller-built SQL fragment and cannot be
        parameterized without changing the interface -- injection risk remains.
        """
        qry = "insert into " + tbl + " (" + colnames + ") values (" + values + ")"
        print(qry)
        curs = self.conx.cursor()
        rs = curs.execute(qry)
        print(rs)
        # FIX: pyodbc connections default to autocommit=False, so without an
        # explicit commit every insert was silently discarded on close.
        self.conx.commit()

    def apend_into(self, tbl, colname, value, refcolname, refvalue):
        """Overwrite *colname* with *value* where *refcolname* equals *refvalue*."""
        qry1 = "select " + colname + " from " + tbl + " where " + refcolname + "=?"
        print(qry1)
        curs = self.conx.cursor()
        rs = curs.execute(qry1, (refvalue,)).fetchall()  # old value, for the log
        print(rs)
        qry = "UPDATE " + tbl + " SET " + colname + "=? WHERE " + refcolname + "=?"
        print(qry)
        rs2 = curs.execute(qry, (value, refvalue))
        print(rs2)
        self.conx.commit()  # FIX: persist the update

    def query_by_single_ref(self, tbl, colname, value):
        """Return rows where *colname* equals *value*, as a dict."""
        qry = "select * from " + tbl + " where " + colname + "=?"
        print(qry)
        df = pd.read_sql(qry, self.conx, params=[value])
        return df.to_dict()

    def query_by_double_ref(self, tbl, colname1, value1, colname2, value2):
        """Return rows matching both column/value pairs, as a dict."""
        qry = "select * from " + tbl + " where " + colname1 + "=? AND " + colname2 + "=?"
        print(qry)
        df = pd.read_sql(qry, self.conx, params=[value1, value2])
        return df.to_dict()

    def query_string(self, tbl, colname, value):
        """LIKE query; *value* must be a caller-quoted SQL pattern.

        NOTE(review): kept as concatenation because callers pass an already
        quoted pattern (e.g. ``"'%foo%'"``); binding it as a parameter would
        change their contract. Injection risk flagged.
        """
        qry = "select * from " + tbl + " where " + colname + " like " + value
        print(qry)
        df = pd.read_sql(qry, self.conx)
        return df.to_dict()

    def upd_by_ref(self, tbl, colnames, values, ref, refvalue):
        """Set *colnames* to *values* where *ref* equals *refvalue*."""
        qry = "UPDATE " + tbl + " SET " + colnames + "=? WHERE " + ref + "=?"
        curs = self.conx.cursor()
        rs = curs.execute(qry, (values, refvalue))
        self.conx.commit()  # FIX: persist the update
        return 'updated'

    def del_by_ref(self, tbl, colname, value):
        """Delete rows where *colname* equals *value*."""
        qry = "DELETE FROM " + tbl + " WHERE " + colname + "=?"
        curs = self.conx.cursor()
        rs = curs.execute(qry, (value,))
        self.conx.commit()  # FIX: persist the delete
        return 'deleted'

    def bot_usr_add(self, nam, uid, pas, msisdn):
        """Register a bot user if *pas* matches today's passcode or the master secret."""
        td = odt.Now()
        tday = td.strftime('%Y-%m-%d')
        print(tday)
        dt = td.strftime('%d')
        mn = td.strftime("%m")
        wkdy = td.strftime('%a')
        # Daily passcode woven from weekday letters and day/month digits.
        ps = wkdy[2] + dt[0] + wkdy[1] + dt[1] + wkdy[0] + 'ao' + mn + 'io'
        print('psscode=', ps)
        if pas == ps or pas == '07085122':
            qry = ("insert into om_socbot_access "
                   "(NAME,UID,JOIN_DATE,MSISDN,Status,GroupEnabled,Special) "
                   "values (?,?,?,?,?,?,?)")
            print(qry)
            curs = self.conx.cursor()
            rs = curs.execute(qry, (nam, uid, tday, msisdn, 'Y', 'N', 'N'))
            print(rs)
            self.conx.commit()  # FIX: persist the new user
            custom_msg_sender(uid, 'congrats, write help to the secrat to use me')
        else:
            custom_msg_sender(uid, 'you send wrong passcode')
        # NOTE(review): closing the shared connection here makes every later
        # call on this instance fail -- preserved from the original; confirm intent.
        self.conx.close()

    def bot_usr_list(self, secrat):
        """Return a comma-separated dump of all bot users (requires the secret)."""
        secr = "07085122"
        if secrat == secr or secrat == 'jahid1998':
            qry = 'select * from om_socbot_access'
            df = pd.read_sql(qry, self.conx)
            dic = df.to_dict()
            x = vbf.pyvb(dic)
            return x.print_all_row_comm_seperated()
        # implicitly returns None when the secret is wrong (preserved)

    def bot_usr_delete(self, sl, secrat):
        """Delete bot user with serial *sl* (requires the secret)."""
        secr = "07085122"
        if secrat == secr or secrat == 'jahid1998':
            qry = "DELETE FROM om_socbot_access WHERE SL =?"
            print(qry)
            curs = self.conx.cursor()
            rs = curs.execute(qry, (sl,))
            self.conx.commit()  # FIX: persist the delete
            return 'user deleted success'

    def bot_today_pass(self, secrat):
        """Return today's passcode to an authorized caller."""
        if secrat == '07085122' or secrat == 'jahid1998':
            td = odt.Now()
            tday = td.strftime('%Y-%m-%d')
            print(tday)
            dt = td.strftime('%d')
            mn = td.strftime("%m")
            wkdy = td.strftime('%a')
            # Must mirror the construction in bot_usr_add.
            ps = wkdy[2] + dt[0] + wkdy[1] + dt[1] + wkdy[0] + 'ao' + mn + 'io'
            return ps
        else:
            return 'unauthorized attempt'

    def auth_check_db(self, uid, qryfrom):
        """Check whether *uid* may use the bot from context *qryfrom*.

        Return values preserved from the original (callers rely on them):
        ``'0'`` unknown user, ``'1'`` allowed, ``0`` (int) group query by a
        non-special user, ``None`` when no rule matches.
        """
        df1 = pd.read_sql("select * from om_socbot_access", self.conx)
        # NOTE(review): str.contains() is substring/regex matching, not
        # equality -- uid '12' would also match '123'; confirm intended.
        df = df1[df1['UID'].str.contains(uid)]
        x = df.shape[0]
        if x == 0:
            return str(x)
        else:
            Status = df['Status'].iloc[0]
            special = df['Special'].iloc[0]
            if qryfrom != 'private' and special != 'Y':
                return 0
            elif qryfrom == 'private' and Status == 'Y':
                return '1'
            elif special == 'Y':
                return '1'
# --- ad-hoc smoke test / script usage -----------------------------------
# NOTE(review): runs on import -- connects to the DB and prints the user
# list; consider guarding with `if __name__ == "__main__":`.
x = mssq()
# print(x.check_existance_by_ref('incident_tracker_v2','Incident_ID','INY00001138080'))
# df = pd.DataFrame(x.query_full_tbl('incident_tracker_v2'))
# x.bot_usr_delete('4','07085122')
print(x.bot_usr_list('07085122'))
#
# vl = ""
# x.insert_new_entry('om_socbot_access',colnm,vl)
# print(df)
|
from math import atan2
import numpy as np
from functools import partial
import dask.array as da
from numba import cuda
import xarray as xr
from xrspatial.utils import ngjit
from xrspatial.utils import cuda_args
from xrspatial.utils import ArrayTypeFunctionMapping
from xrspatial.utils import not_implemented_func
from typing import Optional
# 3rd-party
# Optional dependency: use the real cupy when installed; otherwise install a
# stub class whose `ndarray` attribute is falsy so backend type checks fail
# cleanly instead of raising NameError.
try:
    import cupy
except ImportError:
    class cupy(object):
        # stand-in for cupy.ndarray when cupy is absent
        ndarray = False

RADIAN = 180 / np.pi  # degrees per radian
@ngjit
def _run_numpy(data: np.ndarray):
    """Compute aspect (downslope direction, degrees clockwise from north)
    for a 2D elevation array using Horn's 3x3 finite-difference method.

    Border cells (no full 3x3 neighbourhood) stay NaN; flat cells get -1.
    Kept numba-nopython friendly (plain loops, no fancy indexing).
    """
    data = data.astype(np.float32)
    out = np.zeros_like(data, dtype=np.float32)
    out[:] = np.nan  # edges keep NaN
    rows, cols = data.shape
    for y in range(1, rows-1):
        for x in range(1, cols-1):
            # 3x3 neighbourhood, row-major: a b c / d e f / g h i
            # (the centre cell e is not used by Horn's formula)
            a = data[y-1, x-1]
            b = data[y-1, x]
            c = data[y-1, x+1]
            d = data[y, x-1]
            f = data[y, x+1]
            g = data[y+1, x-1]
            h = data[y+1, x]
            i = data[y+1, x+1]
            # Horn's weighted central differences (neighbour weights 1-2-1)
            dz_dx = ((c + 2 * f + i) - (a + 2 * d + g)) / 8
            dz_dy = ((g + 2 * h + i) - (a + 2 * b + c)) / 8
            if dz_dx == 0 and dz_dy == 0:
                # flat surface, slope = 0, thus invalid aspect
                out[y, x] = -1.
            else:
                _aspect = np.arctan2(dz_dy, -dz_dx) * RADIAN
                # convert to compass direction values (0-360 degrees)
                if _aspect < 0:
                    out[y, x] = 90.0 - _aspect
                elif _aspect > 90.0:
                    out[y, x] = 360.0 - _aspect + 90.0
                else:
                    out[y, x] = 90.0 - _aspect
    # NOTE(review): unlike _gpu, this CPU path does not wrap values >= 360
    # back to 0, so due north can appear as 360.0 here and 0.0 on GPU --
    # confirm whether that asymmetry is intended.
    return out
@cuda.jit(device=True)
def _gpu(arr):
    """CUDA device function: aspect of the centre cell of a 3x3 window.

    Mirrors _run_numpy's Horn's-method arithmetic; returns -1 for flat
    cells and wraps values within a small epsilon of 360 back to 0.
    """
    # 3x3 neighbourhood, row-major: a b c / d e f / g h i (centre unused)
    a = arr[0, 0]
    b = arr[0, 1]
    c = arr[0, 2]
    d = arr[1, 0]
    f = arr[1, 2]
    g = arr[2, 0]
    h = arr[2, 1]
    i = arr[2, 2]
    dz_dx = ((c + 2 * f + i) - (a + 2 * d + g)) / 8
    dz_dy = ((g + 2 * h + i) - (a + 2 * b + c)) / 8
    if dz_dx == 0 and dz_dy == 0:
        # flat surface, slope = 0, thus invalid aspect
        _aspect = -1
    else:
        # 57.29578 ~= 180/pi (radians -> degrees)
        _aspect = atan2(dz_dy, -dz_dx) * 57.29578
        # convert to compass direction values (0-360 degrees)
        if _aspect < 0:
            _aspect = 90 - _aspect
        elif _aspect > 90:
            _aspect = 360 - _aspect + 90
        else:
            _aspect = 90 - _aspect
    if _aspect > 359.999:  # lame float equality check...
        return 0
    else:
        return _aspect
@cuda.jit
def _run_gpu(arr, out):
    """CUDA kernel: one thread per cell, writing aspect into ``out``.

    Cells whose 3x3 window would fall outside the grid are skipped, so the
    border keeps whatever ``out`` was initialised with (NaN by the caller).
    """
    i, j = cuda.grid(2)
    # halo size of the 3x3 stencil in each direction
    di = 1
    dj = 1
    if (i-di >= 0 and
            i+di < out.shape[0] and
            j-dj >= 0 and
            j+dj < out.shape[1]):
        out[i, j] = _gpu(arr[i-di:i+di+1, j-dj:j+dj+1])
def _run_cupy(data: cupy.ndarray) -> cupy.ndarray:
    """Compute aspect on a CuPy array by launching the CUDA kernel.

    Output is pre-filled with NaN so border cells (skipped by the kernel)
    come back as NaN, matching the NumPy path.
    """
    data = data.astype(cupy.float32)
    griddim, blockdim = cuda_args(data.shape)
    out = cupy.empty(data.shape, dtype='f4')
    out[:] = cupy.nan
    _run_gpu[griddim, blockdim](data, out)
    return out
def _run_dask_numpy(data: da.Array) -> da.Array:
    """Compute aspect on a NumPy-backed dask array, chunk by chunk.

    ``map_overlap`` gives every chunk a 1-cell halo in each dimension so the
    3x3 stencil in ``_run_numpy`` can see neighbouring chunks' edge cells;
    the outermost boundary is padded with NaN, matching the NumPy path.
    """
    data = data.astype(np.float32)
    # FIX: the original wrapped _run_numpy in functools.partial with no bound
    # arguments -- a no-op indirection; pass the function directly.
    out = data.map_overlap(_run_numpy,
                           depth=(1, 1),
                           boundary=np.nan,
                           meta=np.array(()))
    return out
def aspect(agg: xr.DataArray,
           name: Optional[str] = 'aspect') -> xr.DataArray:
    """
    Calculates the aspect value of an elevation aggregate.

    Calculates, for all cells in the array, the downward slope direction
    of each cell based on the elevation of its neighbors in a 3x3 grid.
    The value is measured clockwise in degrees with 0 (due north), and 360
    (again due north). Values along the edges are not calculated.

    Direction of the aspect can be determined by its value:
    From 0 to 22.5: North
    From 22.5 to 67.5: Northeast
    From 67.5 to 112.5: East
    From 112.5 to 157.5: Southeast
    From 157.5 to 202.5: South
    From 202.5 to 247.5: Southwest
    From 247.5 to 292.5: West
    From 292.5 to 337.5: Northwest
    From 337.5 to 360: North

    Note that values of -1 denote flat areas.

    Parameters
    ----------
    agg : xarray.DataArray
        2D NumPy, CuPy, or Dask with NumPy-backed xarray DataArray
        of elevation values.
    name : str, default='aspect'
        Name of output DataArray.

    Returns
    -------
    aspect_agg : xarray.DataArray of the same type as `agg`
        2D aggregate array of calculated aspect values.
        All other input attributes are preserved.

    References
    ----------
        - arcgis: http://desktop.arcgis.com/en/arcmap/10.3/tools/spatial-analyst-toolbox/how-aspect-works.htm#ESRI_SECTION1_4198691F8852475A9F4BC71246579FAA # noqa

    Examples
    --------
    Aspect works with NumPy backed xarray DataArray
    .. sourcecode:: python
        >>> import numpy as np
        >>> import xarray as xr
        >>> from xrspatial import aspect
        >>> data = np.array([
            [1, 1, 1, 1, 1],
            [1, 1, 1, 2, 0],
            [1, 1, 1, 0, 0],
            [4, 4, 9, 2, 4],
            [1, 5, 0, 1, 4],
            [1, 5, 0, 5, 5]
        ], dtype=np.float32)
        >>> raster = xr.DataArray(data, dims=['y', 'x'], name='raster')
        >>> print(raster)
        <xarray.DataArray 'raster' (y: 6, x: 5)>
        array([[1., 1., 1., 1., 1.],
               [1., 1., 1., 2., 0.],
               [1., 1., 1., 0., 0.],
               [4., 4., 9., 2., 4.],
               [1., 5., 0., 1., 4.],
               [1., 5., 0., 5., 5.]])
        Dimensions without coordinates: y, x
        >>> aspect_agg = aspect(raster)
        >>> print(aspect_agg)
        <xarray.DataArray 'aspect' (y: 6, x: 5)>
        array([[ nan,  nan         ,  nan        ,  nan        , nan],
               [ nan,  -1.         , 225.        , 135.        , nan],
               [ nan, 343.61045967 ,   8.97262661,  33.69006753, nan],
               [ nan, 307.87498365 ,  71.56505118,  54.46232221, nan],
               [ nan, 191.30993247 , 144.46232221, 255.96375653, nan],
               [ nan,  nan         ,  nan        ,  nan        , nan]])
        Dimensions without coordinates: y, x

    Aspect works with Dask with NumPy backed xarray DataArray
    .. sourcecode:: python
        >>> import dask.array as da
        >>> data_da = da.from_array(data, chunks=(3, 3))
        >>> raster_da = xr.DataArray(data_da, dims=['y', 'x'], name='raster_da')
        >>> print(raster_da)
        <xarray.DataArray 'raster' (y: 6, x: 5)>
        dask.array<array, shape=(6, 5), dtype=int64, chunksize=(3, 3), chunktype=numpy.ndarray>
        Dimensions without coordinates: y, x
        >>> aspect_da = aspect(raster_da)
        >>> print(aspect_da)
        <xarray.DataArray 'aspect' (y: 6, x: 5)>
        dask.array<_trim, shape=(6, 5), dtype=float32, chunksize=(3, 3), chunktype=numpy.ndarray>
        Dimensions without coordinates: y, x
        >>> print(aspect_da.compute())  # compute the results
        <xarray.DataArray 'aspect' (y: 6, x: 5)>
        array([[ nan,  nan         ,  nan        ,  nan        , nan],
               [ nan,  -1.         , 225.        , 135.        , nan],
               [ nan, 343.61045967 ,   8.97262661,  33.69006753, nan],
               [ nan, 307.87498365 ,  71.56505118,  54.46232221, nan],
               [ nan, 191.30993247 , 144.46232221, 255.96375653, nan],
               [ nan,  nan         ,  nan        ,  nan        , nan]])
        Dimensions without coordinates: y, x

    Aspect works with CuPy backed xarray DataArray.
    Make sure you have a GPU and CuPy installed to run this example.
    .. sourcecode:: python
        >>> import cupy
        >>> data_cupy = cupy.asarray(data)
        >>> raster_cupy = xr.DataArray(data_cupy, dims=['y', 'x'])
        >>> aspect_cupy = aspect(raster_cupy)
        >>> print(type(aspect_cupy.data))
        <class 'cupy.core.core.ndarray'>
        >>> print(aspect_cupy)
        <xarray.DataArray 'aspect' (y: 6, x: 5)>
        array([[      nan,       nan,       nan,        nan, nan],
               [      nan,       -1.,      225.,       135., nan],
               [      nan, 343.61047,  8.972626,  33.690067, nan],
               [      nan, 307.87497,  71.56505,  54.462322, nan],
               [      nan, 191.30994, 144.46233,  255.96376, nan],
               [      nan,       nan,       nan,        nan, nan]],
              dtype=float32)
        Dimensions without coordinates: y, x
    """
    # Pick the implementation matching agg's array backend (numpy / dask /
    # cupy); dask-on-cupy is explicitly unsupported.
    mapper = ArrayTypeFunctionMapping(
        numpy_func=_run_numpy,
        dask_func=_run_dask_numpy,
        cupy_func=_run_cupy,
        dask_cupy_func=lambda *args: not_implemented_func(
            *args, messages='aspect() does not support dask with cupy backed DataArray')  # noqa
    )
    out = mapper(agg)(agg.data)
    # Wrap the raw array, preserving the input's coords/dims/attrs.
    return xr.DataArray(out,
                        name=name,
                        coords=agg.coords,
                        dims=agg.dims,
                        attrs=agg.attrs)
|
<gh_stars>1-10
from numpy.core.multiarray import normalize_axis_index
from scipy import ndimage
import tensorflow as tf
import numpy as np
def random_rotation(
    image,
    rotation_range,
    channel_axis=2,
    fill_mode='nearest',
    cval=255,
    interpolation_order=1
):
    """Rotate *image* by an angle drawn uniformly from
    ``[-rotation_range, +rotation_range]`` degrees.

    :param image: image as numpy array with three dimensions
    :param rotation_range: maximum absolute rotation in degrees
    :param channel_axis: index of the channel axis in the input tensor
    :param fill_mode: how points outside the input boundaries are filled
        (one of ``{'constant', 'nearest', 'reflect', 'wrap'}``)
    :param cval: fill value used when ``fill_mode='constant'``
    :param interpolation_order: spline interpolation order, see
        ``ndimage.interpolation.affine_transform``
    :return: the rotated image as a numpy array
    """
    angle = np.random.uniform(-rotation_range, rotation_range)
    return apply_affine_transform(
        image,
        theta=angle,
        channel_axis=channel_axis,
        fill_mode=fill_mode,
        cval=cval,
        order=interpolation_order,
    )
@tf.function
def tf_rotate_image_numpy(image, rotation, fill_mode, cval):
    """Randomly rotate an image (or a batch of images) via ``random_rotation``.

    :param image: 3D (H, W, C) tensor or 4D (N, H, W, C) batch
    :param rotation: max absolute rotation angle in degrees
    :param fill_mode: fill mode forwarded to ``random_rotation``
    :param cval: constant fill value forwarded to ``random_rotation``
    :return: tensor with the same static shape as *image*
    """
    img_shape = image.shape
    if len(img_shape) == 4:
        # Rotate each image of the batch independently; the concat/slice
        # dance keeps a tensor of constant batch size inside the tf.function.
        imgs = tf.zeros_like(image)
        for img in image:
            img = tf.numpy_function(random_rotation, [img, rotation, 2, fill_mode,
                                                      cval, 1], tf.float32)
            imgs = tf.concat([imgs, tf.expand_dims(img, 0)], 0)[1:, ...]
        imgs = tf.reshape(imgs, img_shape)
        img = imgs
    else:
        # FIX: the original passed 8 positional args ([image, rotation, 0, 1,
        # 2, fill_mode, cval, 1]) to random_rotation, which takes only 6 --
        # a guaranteed TypeError at runtime. For a single (H, W, C) image the
        # channel axis is 2, matching the per-image call in the batch branch.
        img = tf.numpy_function(random_rotation, [image, rotation, 2, fill_mode,
                                                  cval, 1], tf.float32)
    return tf.reshape(img, img_shape)
def tf_rollaxis(matrix, axis, start):
    """Port of ``numpy.rollaxis`` that also works on ``tf.Tensor`` inputs.

    Moves *axis* so it ends up at position *start*, using ``tf.transpose``
    for the actual permutation (numpy.rollaxis cannot handle tensors).

    :param matrix: array/tensor exposing ``ndim``
    :param axis: axis to move (may be negative)
    :param start: target position (may be negative)
    :return: the input with *axis* rolled to *start*
    """
    ndim = matrix.ndim
    axis = normalize_axis_index(axis, ndim)
    if start < 0:
        start += ndim
    msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
    if not (0 <= start <= ndim):
        raise np.AxisError(msg % ('start', -ndim, 'start', ndim + 1, start))
    if axis < start:
        # the axis is removed from before `start`, shifting the target left
        start -= 1
    if axis == start:
        return matrix[...]
    # build the permutation: all axes except `axis`, with `axis` re-inserted
    order = [dim for dim in range(ndim) if dim != axis]
    order.insert(start, axis)
    return tf.transpose(matrix, order)
def transform_matrix_offset_center(matrix, x, y):
    """Re-centre a 3x3 homogeneous transform on the middle of an image.

    Conjugates *matrix* with a translation to ``(x/2 + 0.5, y/2 + 0.5)`` so
    the transform rotates/scales about the image centre instead of the origin.

    :param matrix: 3x3 homogeneous affine matrix
    :param x: image extent along the first offset axis
    :param y: image extent along the second offset axis
    :return: the re-centred 3x3 transform matrix
    """
    cx = float(x) / 2 + 0.5
    cy = float(y) / 2 + 0.5
    shift_to_center = np.array([[1, 0, cx], [0, 1, cy], [0, 0, 1]])
    shift_back = np.array([[1, 0, -cx], [0, 1, -cy], [0, 0, 1]])
    # T(center) @ M @ T(-center)
    return shift_to_center @ matrix @ shift_back
def apply_affine_transform(
    x,
    theta=0,
    row_axis=0,
    col_axis=1,
    channel_axis=2,
    fill_mode='constant',
    cval=255,
    order=1
):
    """Apply a (currently rotation-only) affine transform to one image.

    :param x: single image as an array with row/col/channel axes
    :param theta: rotation angle in degrees; 0 or None means no-op
    :param row_axis: index of the row axis in *x*
    :param col_axis: index of the column axis in *x*
    :param channel_axis: index of the channel axis in *x*
    :param fill_mode: how points outside the input are filled
        (one of ``{'constant', 'nearest', 'reflect', 'wrap'}``)
    :param cval: fill value used when ``fill_mode='constant'``
    :param order: spline interpolation order
    :return: the transformed image (the input itself when theta is 0/None)
    """
    if theta is None or theta == 0:
        # no rotation requested -> nothing to do
        return x
    rad = np.deg2rad(theta)
    cos_t = np.cos(rad)
    sin_t = np.sin(rad)
    transform_matrix = np.array([[cos_t, -sin_t, 0],
                                 [sin_t, cos_t, 0],
                                 [0, 0, 1]])
    # cast shape entries for use inside tf.numpy_function
    h = tf.cast(x.shape[row_axis], tf.float32)
    w = tf.cast(x.shape[col_axis], tf.float32)
    transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
    # move channels first so we can transform each channel plane separately
    x = tf_rollaxis(x, channel_axis, 0)
    affine = transform_matrix[:2, :2]
    offset = transform_matrix[:2, 2]
    if not isinstance(fill_mode, str):
        # tf.numpy_function hands strings over as bytes
        fill_mode = fill_mode.decode("utf-8")
    planes = [
        ndimage.interpolation.affine_transform(
            plane,
            affine,
            offset,
            order=order,
            mode=fill_mode,
            cval=cval
        ) for plane in x
    ]
    x = np.stack(planes, axis=0)
    # x is a plain ndarray again, so numpy's rollaxis works here
    return np.rollaxis(x, 0, channel_axis + 1)
@tf.function
def tf_shift_image_numpy(image, width_shift, height_shift, fill_mode, cval):
    """Shift an image by fractions of its width/height via ``shift_numpy``.

    :param image: (..., H, W, C) tensor with a fully known static shape
    :param width_shift: horizontal shift as a fraction of the width
    :param height_shift: vertical shift as a fraction of the height
    :param fill_mode: accepted for interface symmetry with the rotation
        helper; not forwarded to shift_numpy (which always fills with cval)
    :param cval: fill value for the vacated region
    :return: shifted tensor reshaped back to the input's static shape
    """
    img_shape = image.shape
    # convert fractional shifts to whole pixels (truncated toward zero);
    # NOTE(review): assumes static H/W dims -- int() fails on None dims.
    width_shift = int(img_shape[-2] * width_shift)
    height_shift = int(img_shape[-3] * height_shift)
    img = tf.numpy_function(shift_numpy, [image, width_shift, height_shift, cval], tf.float32)
    return tf.reshape(img, img_shape)
def shift_numpy(image, width_shift, height_shift, cval):
    """Shift an (..., H, W, C) image by whole pixels, filling with *cval*.

    Positive shifts move content down/right; the vacated band is filled
    with *cval*. A zero/zero shift returns the input unchanged.

    :param image: numpy array laid out as (..., height, width, channels)
    :param width_shift: shift in pixels along the width axis
    :param height_shift: shift in pixels along the height axis
    :param cval: fill value for the newly exposed region
    :return: the shifted image
    """
    # ones_like * cval (not full_like) so a float cval promotes the dtype,
    # matching the original behaviour
    shifted = np.ones_like(image) * cval
    h = height_shift
    w = width_shift
    if h > 0:
        if w > 0:
            shifted[..., h:, w:, :] = image[..., :-h, :-w, :]
        elif w < 0:
            shifted[..., h:, :w, :] = image[..., :-h, -w:, :]
        else:
            shifted[..., h:, :, :] = image[..., :-h, :, :]
    elif h < 0:
        if w > 0:
            shifted[..., :h, w:, :] = image[..., -h:, :-w, :]
        elif w < 0:
            shifted[..., :h, :w, :] = image[..., -h:, -w:, :]
        else:
            shifted[..., :h, :, :] = image[..., -h:, :, :]
    else:
        # height unchanged: shift along the width (second-to-last) axis only
        if w > 0:
            shifted[..., w:, :] = image[..., :-w, :]
        elif w < 0:
            shifted[..., :w, :] = image[..., -w:, :]
        else:
            shifted = image
    return shifted
|
<reponame>abstractvector/python-victron<filename>victron/__init__.py
"""Python API wrapper for Victron"""
from json import loads as decode
import logging
import re
from victron.gateway_mqtt import MQTTGateway
from victron.system import System
_LOGGER = logging.getLogger(__name__)
class Victron:
    """Victron Venus-device client built on an MQTT gateway.

    Learns the installation serial from the broker, auto-discovers devices
    per supported system, and forwards state updates. Override or assign the
    ``on_connect`` / ``on_device`` / ``on_state`` callbacks to react.
    """

    def __init__(self, broker="venus.local", port=1883, client_id=None):
        # the gateway pushes every raw MQTT message into self.recv
        self.gateway = MQTTGateway(broker, port, client_id)
        self.gateway.recv = self.recv
        self.serial = None  # installation serial, learned from the Serial topic
        # per-system sets of discovered device instance ids
        self.devices = {}
        for system in System.get_supported_systems():
            self.devices[system] = set()

    def connect(self):
        """Open the MQTT connection."""
        self.gateway.connect()

    def auto_discover(self):
        """Subscribe to DeviceInstance topics for every supported system.

        NOTE(review): assumes ``self.serial`` is already known; calling this
        before the serial message arrives puts ``None`` in the topic --
        confirm callers wait for ``on_connect``.
        """
        _LOGGER.debug("Beginning auto-discovery")
        for system in self.devices:
            self.gateway.subscribe(f"N/{self.serial}/{system}/+/DeviceInstance")

    def subscribe(self, system, id, keys=None):
        """Subscribe to state topics of one device.

        *keys* defaults to the system's standard subscription keys.
        """
        _LOGGER.debug(f"Subscribing to {system}")
        currentSystem = System.factory(system)
        if currentSystem is None:
            # FIX: Logger.warn is a deprecated alias of Logger.warning
            _LOGGER.warning(f"Trying to subscribe to an unsupported system: {system}")
            return
        for k in keys or currentSystem.get_subscription_keys():
            self.gateway.subscribe(f"N/{self.serial}/{system}/{id}/{k}")

    # --- overridable callbacks -------------------------------------------

    def on_connect(self, serial):
        """Called once the installation serial is known."""
        pass

    def on_device(self, system, id):
        """Called when a new device instance is discovered."""
        pass

    def on_state(self, system, id, key, value):
        """Called for every translated state update of a registered device."""
        pass

    # --- internal handlers -----------------------------------------------

    def __on_serial(self, serial):
        # ignore repeated serial announcements
        if self.serial == serial:
            return
        self.serial = serial
        _LOGGER.debug(f"Received system serial: {serial}")
        self.on_connect(serial)

    def __on_device(self, system, id):
        if system not in self.devices:
            _LOGGER.debug(f"Unsupported device system: {system}")
            return
        if id in self.devices[system]:
            _LOGGER.debug(f"Device with ID [{id}] already exists in {system} system")
            return
        self.devices[system].add(id)
        self.on_device(system, id)

    def __on_state(self, system_name, id, key, value):
        if system_name not in self.devices:
            _LOGGER.debug(f"Unsupported device system: {system_name}")
            return
        # topic ids arrive as strings; registered ids are ints
        if int(id) not in self.devices[system_name]:
            _LOGGER.debug(f"Unregistered device {id} in {system_name} system")
            return
        if value is None:
            # early return for empty values
            return
        system = System.factory(system_name)
        if system is None:
            # FIX: Logger.warn is a deprecated alias of Logger.warning
            _LOGGER.warning(f"Received state for an unsupported system: {system_name}")
            return
        k, v = system.translate(key, value)
        if k is not None:
            self.on_state(system_name, id, k, v)

    def recv(self, topic, payload, qos, retain):
        """Gateway callback: decode the JSON payload and dispatch by topic."""
        data = decode(payload)
        value = data["value"]
        _LOGGER.debug(f"Received message on topic: {topic} {data['value']}")
        # parse serial topic
        if re.search("^N/[0-9a-f]+/system/0/Serial$", topic):
            self.__on_serial(value)
        # parse auto discovery topics
        match = re.search(rf"^N/{self.serial}/(.+?)/[0-9]+/DeviceInstance$", topic)
        if match:
            self.__on_device(match.group(1), value)
        # parse state messages
        match = re.search(rf"^N/{self.serial}/(.+?)/([0-9]+)/(.+)$", topic)
        if match:
            self.__on_state(match.group(1), match.group(2), match.group(3), value)
|
<reponame>krakan/plocka<filename>main.py
import kivy
kivy.require('1.11.0') # replace with your current kivy version !
from kivy.app import App
from kivy.base import runTouchApp
from kivy.clock import Clock
from kivy.utils import platform
from kivy.uix.stacklayout import StackLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
from kivy.uix.checkbox import CheckBox
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.rst import RstDocument
import os, sys, json, re, time, shutil
from glob import glob
from datetime import datetime, timedelta
from bookmarks import BookmarkList
from buttons import ToggleImageButton, ImageButton, LongpressButton, LongpressImageButton
__version__ = '1.7.0'
# +----------------------------------+
# | +------------------------------+ |
# | | +---------------+ +--------+ | |
# | | | Title | | Search | | |
# | | +---------------+ +--------+ | |
# | +------------------------------+ |
# | +------------------------------+ |
# | | +----------------------+ ^ | |
# | | | +------------------+ | | | | |
# | | | | Section 1 | | | | | |
# | | | +------------------+ | | | | |
# | | | +-++---------------+ | | | | |
# | | | |x|| Item 1 | | | | | |
# | | | +-++---------------+ | | | | |
# | | | +-++---------------+ | | | | |
# | | | |x|| Item 2 | | | | | |
# | | | +-++---------------+ | | | | |
# | | | +------------------+ | | | | |
# | | | | Section 2 | | | | | |
# | | | +------------------+ | | | | |
# | | | ... | | | | |
# | | +----------------------+ v | |
# | +------------------------------+ |
# | +------------------------------+ |
# | | +------+ +------+ +--------+ | |
# | | | Hide | | Undo | | Bookm. | | |
# | | +------+ +------+ +--------+ | |
# | +------------------------------+ |
# +----------------------------------+
class CheckList(BoxLayout):
def __init__(self, **kwargs):
super(CheckList, self).__init__(**kwargs)
if platform == "android":
from android.storage import primary_external_storage_path
from android.permissions import request_permissions, check_permission, Permission
sdcard = primary_external_storage_path()
dataDir = sdcard + '/plocka'
if not os.path.exists(dataDir):
request_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])
while not check_permission(Permission.WRITE_EXTERNAL_STORAGE):
time.sleep(1)
else:
dataDir = os.environ['HOME'] + '/.config/Plocka'
os.makedirs(dataDir, exist_ok=True)
scriptDir = os.path.dirname(os.path.realpath(__file__))
global shoppingList
try:
with open(dataDir + '/Plocka.json') as fd:
shoppingList = json.load(fd)
except:
shoppingList = [
{"section": "Section 1", "items": [
{"item": "Item 1", "done": False},
{"item": "Item 2", "done": True},
{"item": "Item 3", "done": True}
]},
{"section": "Section 2", "items": [
{"item": "Item 1", "done": True},
{"item": "Item 2", "done": False},
{"item": "Item 3", "done": False},
{"item": "Item 4", "done": True}
]},
{"section": "Section 3", "items": [
{"item": "Item 1", "done": True},
{"item": "Item 2", "done": True},
{"item": "Item 3", "done": False}
]}
]
defaultSettings = {
'headerSize': '40sp',
'sectionSize': '20sp',
'sectionColor': [0.1, 0.2, 0.2, 1],
'sectionTextSize': '10sp',
'itemSize': '30sp',
'itemColor': [0.20, 0.25, 0.29, 1],
'doneColor': [0.24, 0.30, 0.35, 1],
'actionColor': [.2, .7, .9, 1],
'activeColor': [1, 1, 1, 1],
'inactiveColor': [1, 1, 1, 0.5],
'redColor': [1, 0, 0, 0.5],
'greenColor': [0, 1, 0, 0.5],
'backupsToKeep': 10,
'maxBackupAge': 1,
'showSections': 'maybe',
}
try:
with open(dataDir + '/settings.json') as fd:
settings = json.load(fd)
for key in defaultSettings:
if not key in settings:
settings[key] = defaultSettings[key]
except:
settings = defaultSettings
backups = sorted(glob(f'{dataDir}/Plocka-*.json'))
cutoff = (datetime.now() - timedelta(days=settings['maxBackupAge'])).strftime("%Y%m%d%H%M%S")
for backup in backups[:-settings['backupsToKeep']]:
if backup < f'{dataDir}/Plocka-{cutoff}.json':
print('deleting backup file ' + backup)
os.remove(backup)
def hide(widget):
if widget.height:
widget.restore = widget.height
widget.height = 0
widget.opacity = 0
widget.disabled = True
def unhide(widget):
if widget.disabled:
widget.height = widget.restore
widget.opacity = 1
widget.disabled = False
def checkSection(stack, current):
for item in stack.children[::-1]:
if item.type == 'section':
section = item
if item == current:
break
index = stack.children.index(section)
for item in stack.children[index-1::-1]:
if item.type == 'item' and item.check.state == 'normal':
return
if item.type == 'section':
break
hide(section)
self.writeDeferred = False
def writeFile(dt):
if dt and not self.writeDeferred:
return
self.writeDeferred = False
activeList = []
for item in stack.children[::-1]:
if item.type == 'item':
entry = {
"item": item.text,
"done": item.check.state == 'down'
}
section["items"].append(entry)
elif item.type == 'section':
section = {
"section": item.origText,
"items": []
}
activeList.append(section)
shoppingList['lists'][shoppingList['active']]['name'] = title.text
shoppingList['lists'][shoppingList['active']]['list'] = activeList
now = datetime.now().strftime("%Y%m%d%H%M%S")
if os.path.exists(f'{dataDir}/Plocka.json'):
os.rename(f'{dataDir}/Plocka.json', f'{dataDir}/Plocka-{now}.json')
with open(f'{dataDir}/Plocka.json', 'w', encoding='utf8') as fd:
json.dump(shoppingList, fd, indent=2, ensure_ascii=False)
saveBtn.color = settings['greenColor']
def undo(instance):
global shoppingList
try:
last = sorted(glob(f'{dataDir}/Plocka-*.json'))[-1]
os.rename(last, f'{dataDir}/Plocka.json')
with open(dataDir + '/Plocka.json') as fd:
shoppingList = json.load(fd)
populate()
hideUnHide(hideBtn)
except: pass
def toggle(instance):
if instance.check.state == 'down':
instance.background_color = settings['itemColor']
instance.color = settings['activeColor']
instance.check.state = 'normal'
else:
instance.background_color = settings['doneColor']
instance.color = settings['inactiveColor']
instance.check.state = 'down'
if not self.writeDeferred:
self.writeDeferred = True
saveBtn.color = settings['actionColor']
Clock.schedule_once(writeFile, 10)
if hideBtn.state == 'down' and instance.check.state == 'down':
hide(instance.check)
hide(instance)
checkSection(stack, instance)
def hideUnHide(instance):
    """Re-apply visibility rules: hide-done mode plus the search filter.

    Three modes:
    1. No hiding, no search text: show everything.
    2. Search text is non-empty and ALL-UPPERCASE (contains letters):
       treat it as a section-name search and show/hide whole sections.
    3. Otherwise: case-insensitive item search combined with hide-done.
    """
    if hideBtn.state != "down" and not searchInput.text:
        for item in stack.children[:]:
            unhide(item)
    # "x == x.upper() and x != x.lower()" <=> text has letters, all uppercase.
    elif searchInput.text == searchInput.text.upper() and searchInput.text != searchInput.text.lower():
        activeSection = False
        for item in stack.children[::-1]:
            # A matching section header turns visibility on for everything
            # until the next (non-matching) section.
            if item.type == 'section':
                if re.search(searchInput.text, item.text):
                    activeSection = True
                else:
                    activeSection = False
            if activeSection and (
                hideBtn.state != 'down' or
                item.type != 'item' and item.type != 'check' or
                hideBtn.state == 'down' and item.type == 'check' and item.state != 'down' or
                hideBtn.state == 'down' and item.type == 'item' and item.check.state != 'down'
            ):
                unhide(item)
            else:
                hide(item)
    else:
        # Item-level search; '.' matches everything when the box is empty.
        hasChildren = False
        regexp = searchInput.text if searchInput.text else '.'
        for item in stack.children[:]:
            if item.type == 'item':
                if hideBtn.state == "down" and item.check.state == 'down' or not re.search(regexp, item.text, re.IGNORECASE):
                    hide(item.check)
                    hide(item)
                else:
                    unhide(item.check)
                    unhide(item)
                    hasChildren = True
            elif item.type == 'section':
                # children[:] iterates bottom-up, so a section header is seen
                # AFTER its items; hasChildren tells us if any stayed visible.
                if hasChildren and settings['showSections'] == 'always':
                    unhide(item)
                    hasChildren = False
                else:
                    hide(item)
def crossCheck(instance):
    """Checkbox release handler: forward the toggle to the paired item button."""
    toggle(instance.label)
def edit(instance):
    """Open an inline editor row (text input + action buttons) for a widget.

    Replaces the long-pressed item/section with: delete, text entry,
    relative (convert item<->section), before, replace (OK) and after
    buttons.  All buttons funnel into updateItem(entry), which inspects
    which button is pressed.
    """
    entry = TextInput(
        text = instance.text,
        size_hint = (0.5, None),
        height = settings['itemSize'],
        multiline = False,
        on_text_validate = lambda w: updateItem(w),
    )
    # Sections display upper-cased text; edit the original casing.
    if instance.type == 'section':
        entry.text = instance.origText
    relative = ImageButton(
        source = 'data/left.png' if instance.type == 'item' else 'data/right.png',
        color_normal = [1, 1, 1, .7],
        height = settings['itemSize'],
        size_hint = (0.1, None),
        on_release = lambda w: updateItem(entry),
    )
    before = ImageButton(
        source = 'data/up.png',
        color_normal = [1, 1, 1, .7],
        height = settings['itemSize'],
        size_hint = (0.1, None),
        on_release = lambda w: updateItem(entry),
    )
    replace = ImageButton(
        source = 'data/ok.png',
        color_normal = [0, .5, 0, 1],
        height = settings['itemSize'],
        size_hint = (0.1, None),
        on_release = lambda w: updateItem(entry),
    )
    after = ImageButton(
        source = 'data/down.png',
        color_normal = [1, 1, 1, .7],
        height = settings['itemSize'],
        size_hint = (0.1, None),
        on_release = lambda w: updateItem(entry),
    )
    delete = ImageButton(
        source = 'data/delete.png',
        color_normal = [.5, 0, 0, 1],
        height = settings['itemSize'],
        size_hint = (0.1, None),
        on_release = lambda w: updateItem(entry),
    )
    # Link everything onto the entry so updateItem can read button states.
    entry.orig = instance
    entry.relative = relative
    entry.before = before
    entry.replace = replace
    entry.after = after
    entry.delete = delete
    entry.type = 'entry'
    relative.type = 'relative'
    before.type = 'before'
    replace.type = 'replace'
    after.type = 'after'
    delete.type = 'delete'
    # Hide the edited widget; insert the editor row at its position.
    hide(instance)
    if instance.type == 'item':
        hide(instance.check)
    index = stack.children.index(instance)
    stack.add_widget(delete, index)
    stack.add_widget(entry, index)
    stack.add_widget(relative, index)
    stack.add_widget(before, index)
    stack.add_widget(replace, index)
    stack.add_widget(after, index)
    entry.select_all()
    Clock.schedule_once(lambda dt: reselect(entry), 0.5)
def reselect(entry):
    # repeat select_all after half a second to avoid losing focus on release
    entry.focused = True
    entry.select_all()
def updateItem(entry):
    """Commit an inline edit: replace, delete, insert before/after, or
    convert between item and section, depending on which editor button
    was pressed.  Always persists with writeFile(0).
    """
    # Determine the requested action from the pressed button's state.
    todo = 'replace'
    if entry.delete.state == 'down':
        todo = 'delete'
    elif entry.before.state == 'down':
        todo = 'before'
    elif entry.after.state == 'down':
        todo = 'after'
    elif entry.relative.state == 'down':
        # "relative" converts the widget's kind: section -> item, item -> section.
        if entry.orig.type == 'section':
            todo = 'item'
        else:
            todo = 'section'
    orig = entry.orig
    text = entry.text
    # Tear down the editor row.
    stack.remove_widget(entry.relative)
    stack.remove_widget(entry.before)
    stack.remove_widget(entry.replace)
    stack.remove_widget(entry.after)
    stack.remove_widget(entry.delete)
    stack.remove_widget(entry)
    if todo == 'delete':
        stack.remove_widget(orig)
        if orig.type == 'item':
            stack.remove_widget(orig.check)
    else:
        # Any non-delete action restores the edited widget first.
        unhide(orig)
        if orig.type == 'item':
            unhide(orig.check)
        if todo == 'replace':
            if orig.type == 'section':
                orig.origText = text
                orig.text = text.upper()
            else:
                orig.text = text
        else:
            # Insert a new widget relative to `orig`.
            if orig.type == 'section' and todo != 'item' or todo == 'section':
                label = sectionButton(text)
            else:
                label = itemButtonPair(text, False)
            index = stack.children.index(orig)
            # children indices run opposite to display order: a larger index
            # places the new widget visually before `orig`.
            if todo == 'before' or todo == 'section':
                index += 1
            if orig.type == 'item':
                if todo == 'before' or todo == 'section':
                    index += 1
            if label.type == 'item':
                stack.add_widget(label.check, index)
            stack.add_widget(label, index)
            # Immediately open the editor on the freshly inserted widget.
            edit(label)
    writeFile(0)
# Debounce flag: a filter pass is already scheduled.
self.searchDeferred = False
def doSearch(text, undo):
    """TextInput input_filter: debounce re-filtering to once per second.

    Must return the (unmodified) text so the character is accepted.
    """
    if not self.searchDeferred:
        self.searchDeferred = True
        Clock.schedule_once(filterOut, 1)
    return text
def filterOut(dt):
    """Deferred callback from doSearch(): apply the current search filter."""
    self.searchDeferred = False
    hideUnHide(hideBtn)
def setBookmark():
    """Snapshot the current list file into the bookmarks directory."""
    now = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    os.makedirs(f'{dataDir}/bookmarks', exist_ok=True)
    print(f"set bookmark '{dataDir}/bookmarks/{now}.json'")
    shutil.copy(f'{dataDir}/Plocka.json', f'{dataDir}/bookmarks/{now}.json')
# NOTE(review): appears vestigial — useBookmark() reads its own local;
# confirm nothing else depends on this name.
bookmark = ''
def getBookmark():
    """Open a popup listing saved bookmarks; restore on dismiss."""
    popup = Popup(
        title = "Bookmarks",
        content = BookmarkList(
            dataDir = dataDir,
            settings = settings,
            orientation = 'vertical',
        ),
        size_hint = (0.9, 0.9),
    )
    # The chosen bookmark is applied when the popup closes.
    popup.bind(on_pre_dismiss = useBookmark)
    popup.open()
def useBookmark(w):
    """Popup dismiss handler: replace the list file with the chosen bookmark."""
    global shoppingList
    bookmark = w.content.chosen
    if not bookmark:
        print('no bookmark chosen')
        return
    # Persist current state first so it can be recovered via undo().
    writeFile(0)
    shutil.copy(f'{dataDir}/bookmarks/{bookmark}.json', f'{dataDir}/Plocka.json')
    with open(f'{dataDir}/Plocka.json') as fd:
        shoppingList = json.load(fd)
    populate()
def selectList(w):
    """Short-press on the title: drop-down to switch lists or show About."""
    global shoppingList
    dropdown = DropDown(
        on_select = lambda instance, selected: setActive(selected),
    )
    # One button per list, skipping the one already active.
    index = -1
    for item in shoppingList['lists']:
        index += 1
        if index == shoppingList['active']:
            continue
        btn = Button(
            text = item['name'],
            size_hint_y = None,
            background_color = settings['sectionColor'],
            height=settings['itemSize'],
        )
        # Bind the index on the button (not via closure) so each button
        # selects its own list.
        btn.index = index
        btn.bind(on_release=lambda btn: dropdown.select(btn.index))
        dropdown.add_widget(btn)
    # Sentinel index -1 routes setActive() to the About popup.
    about = Button(
        text = "About Plocka",
        size_hint_y = None,
        background_color = settings['sectionColor'],
        height=settings['itemSize'],
    )
    about.bind(on_release=lambda about: dropdown.select(-1))
    dropdown.add_widget(about)
    dropdown.open(w)
def setActive(selected):
    """Switch to list number *selected*; -1 shows the About popup instead."""
    if selected > -1:
        global shoppingList
        shoppingList['active'] = selected
        populate()
        writeFile(0)
    else:
        # About: render README + license as reStructuredText.
        with open(scriptDir + '/ABOUT.rst') as fd:
            about = fd.read()
        with open(scriptDir + '/LICENSE') as fd:
            license = fd.read()
        aboutText = RstDocument(
            text = about + '\n\nLicense\n-------\n\n' + license,
        )
        popup = Popup(
            title = "Plocka " + __version__,
            content = aboutText,
        )
        popup.open()
def editList(w):
    """Long-press on the title: swap in a list editor row.

    The row offers delete, rename (text entry + OK), duplicate and create;
    closeEditor() restores the normal title bar afterwards.
    """
    buttonBox = BoxLayout()
    top.add_widget(buttonBox)
    delete = ImageButton(
        source = 'data/delete.png',
        color_normal = [.5, 0, 0, 1],
        size_hint_x = None,
        width = settings['headerSize'],
        on_release = lambda w: deleteList(w),
    )
    buttonBox.add_widget(delete)
    entry = TextInput(
        text = w.text,
        height = settings['headerSize'],
        multiline = False,
        on_text_validate = lambda w: setListName(w),
    )
    buttonBox.add_widget(entry)
    saveBtn = ImageButton(
        source = "data/ok.png",
        color_normal = settings['greenColor'],
        size_hint_x = None,
        width = settings['headerSize'],
        on_release = lambda x: setListName(entry),
    )
    buttonBox.add_widget(saveBtn)
    copy = ImageButton(
        source = 'data/copy.png',
        color_normal = [1, 1, 1, .7],
        size_hint_x = None,
        width = settings['headerSize'],
        on_release = lambda w: copyList(w),
    )
    buttonBox.add_widget(copy)
    new = ImageButton(
        source = 'data/new.png',
        color_normal = settings['greenColor'],
        size_hint_x = None,
        width = settings['headerSize'],
        on_release = lambda w: createList(entry),
    )
    buttonBox.add_widget(new)
    # Temporarily remove the normal header widgets while editing.
    top.remove_widget(title)
    top.remove_widget(searchBtn)
    entry.focused = True
def closeEditor(w):
    """Restore the normal header bar and remove the list editor row."""
    top.add_widget(title)
    top.add_widget(searchBtn)
    # w is a widget inside the editor's BoxLayout; drop the whole row.
    top.remove_widget(w.parent)
def setListName(w):
    """Apply the edited list name, close the editor and persist."""
    title.text = w.text
    closeEditor(w)
    # writeFile copies title.text into shoppingList before saving.
    writeFile(0)
def createList(w):
    """Insert a fresh single-section list after the active one and switch to it."""
    global shoppingList
    new = {
        'name': 'New list',
        'list': [{
            'section': 'Section',
            'items': []
        }]}
    at = shoppingList['active'] + 1
    shoppingList['lists'].insert(at, new)
    setActive(at)
    closeEditor(w)
def copyList(w):
    """Duplicate the active list (deep copy via JSON round-trip), name it
    "<name> 2", insert it right after the original and switch to it."""
    global shoppingList
    active = shoppingList['active']
    # JSON round-trip gives an independent deep copy of the nested dicts.
    clone = json.loads(json.dumps(shoppingList['lists'][active]))
    clone['name'] = clone['name'] + ' 2'
    target = active + 1
    shoppingList['lists'].insert(target, clone)
    setActive(target)
    closeEditor(w)
def deleteList(w):
    """Delete the active list and activate its predecessor (or new last)."""
    at = shoppingList['active']
    del(shoppingList['lists'][at])
    # Clamp when the last list was removed.
    # NOTE(review): deleting the only list yields at == -1, which routes
    # setActive to the About popup and leaves no active list — confirm
    # whether the UI prevents deleting the final list.
    if at >= len(shoppingList['lists']):
        at = at - 1
    setActive(at)
    closeEditor(w)
# Widgets
def sectionButton(text):
    """Build a section-header button; displays upper-cased text but keeps
    the original casing in .origText for editing/saving."""
    label = LongpressButton(
        text = text.upper(),
        font_size = settings['sectionTextSize'],
        height = settings['sectionSize'],
        background_color = settings['sectionColor'],
        size_hint = (1, None),
        on_long_press = lambda w: edit(w),
    )
    label.origText = text
    # .type drives the dispatch logic throughout (hide/toggle/save).
    label.type = 'section'
    return label
def itemButtonPair(text, done):
    """Build an item button plus its linked checkbox.

    Returns the label; the paired checkbox is reachable as label.check
    (and back via check.label).  *done* pre-sets the checked styling.
    """
    label = LongpressButton(
        text = text,
        height = settings['itemSize'],
        background_color = settings['itemColor'],
        size_hint = (0.95, None),
        on_short_press = lambda w: toggle(w),
        on_long_press = lambda w: edit(w),
    )
    label.type = 'item'
    check = CheckBox(
        height = settings['itemSize'],
        size_hint = (0.05, None),
        on_release = lambda w: crossCheck(w),
    )
    if done:
        label.background_color = settings['doneColor']
        label.color = settings['inactiveColor']
        check.state = 'down'
    check.type = 'check'
    # Cross-link so either widget's handler can reach the other.
    check.label = label
    label.check = check
    return label
def populate():
    """(Re)build the widget stack from the active list in shoppingList.

    Also migrates two legacy file layouts in place: a bare list becomes
    {'lists': [...]}, and a missing 'active' key defaults to 0.
    """
    global shoppingList
    if not 'lists' in shoppingList:
        # Legacy format: the file held a single list directly.
        shoppingList = {
            'lists': [
                {
                    'name': 'Plocka',
                    'list': shoppingList
                }
            ]
        }
    if not 'active' in shoppingList:
        shoppingList['active'] = 0
    title.text = shoppingList['lists'][shoppingList['active']]['name']
    stack.clear_widgets()
    for section in shoppingList['lists'][shoppingList['active']]['list']:
        if settings['showSections'] != 'never':
            sectionLabel = sectionButton(section['section'])
            stack.add_widget(sectionLabel)
        for item in section['items']:
            label = itemButtonPair(item['item'], item['done'])
            # Checkbox first, then label: matches the order writeFile and
            # hideUnHide expect when walking stack.children.
            stack.add_widget(label.check)
            stack.add_widget(label)
def toggleSearch(widget):
    """Show or hide the search input row in the header."""
    if not searchInput.disabled:
        # Currently visible: clear the query, detach the box and re-filter.
        searchInput.text = ''
        searchInput.disabled = True
        top.remove_widget(searchInput)
        hideUnHide(hideBtn)
        return
    # Currently hidden: attach after the title and give it focus.
    top.add_widget(searchInput, 1)
    searchInput.disabled = False
    searchInput.focused = True
# MAIN
# Header bar: title (tap = switch list, long-press = edit list) + search.
top = BoxLayout(
    size_hint=(1, None),
    height = settings['headerSize'],
)
self.add_widget(top)
title = LongpressButton(
    text = 'Unknown',
    background_color = settings['sectionColor'],
    on_short_press = selectList,
    on_long_press = editList,
)
top.add_widget(title)
searchBtn = ImageButton(
    source = 'data/search.png',
    width = settings['headerSize'],
    color_normal = [1, 1, 1, .6],
    on_release = toggleSearch,
    size_hint_x = None,
)
top.add_widget(searchBtn)
# Search box starts detached/disabled; toggleSearch() attaches it.
searchInput = TextInput(
    disabled = True,
    multiline = False,
    input_filter = doSearch,
    on_text_validate = lambda w: hideUnHide(hideBtn),
)
# Scrollable stack holding section headers and item/checkbox pairs.
scrollBox = ScrollView(
    size_hint=(1, .9),
    do_scroll_x=False,
)
self.add_widget(scrollBox)
stack = StackLayout(size_hint=(1, None))
# Let the stack grow with its content so the ScrollView can scroll it.
stack.bind(
    minimum_height=stack.setter('height')
)
scrollBox.add_widget(stack)
populate()
# Bottom button bar: save, hide-done toggle, undo, bookmark.
buttons = BoxLayout(
    size_hint=(1, None),
    height = settings['headerSize'],
)
self.add_widget(buttons)
saveBtn = ImageButton(
    source = "data/ok.png",
    color_normal = settings['greenColor'],
    on_release = lambda x: writeFile(0),
)
buttons.add_widget(saveBtn)
hideBtn = ToggleImageButton(
    image_down = "data/show.png",
    image_normal = "data/hide.png",
    color_down = [1, 1, 1, .9],
    color_normal = [1, 1, 1, .6],
    on_release = hideUnHide,
)
buttons.add_widget(hideBtn)
undoBtn = ImageButton(
    source = 'data/undo.png',
    color_normal = settings['redColor'],
    on_release = undo,
)
buttons.add_widget(undoBtn)
# Short press: save a bookmark; long press: open the bookmark picker.
bookmarkBtn = LongpressImageButton(
    source = 'data/bookmark.png',
    color_normal = settings['greenColor'],
    on_short_press = lambda w: setBookmark(),
    on_long_press = lambda w: getBookmark(),
)
buttons.add_widget(bookmarkBtn)
class Plocka(App):
    """Kivy application wrapper; the root widget is the checklist view."""
    def build(self):
        # Kivy entry point: return the root widget tree.
        return CheckList(orientation = 'vertical')
if __name__ == '__main__':
    # Start the Kivy event loop when run as a script.
    Plocka().run()
|
<gh_stars>1-10
#!/usr/bin/env python
"""Unique Crater Distribution Functions
Functions for extracting craters from model target predictions and filtering
out duplicates.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import h5py
import sys
import utils.template_match_target as tmt
import utils.processing as proc
import utils.transform as trf
from keras.models import load_model
#########################
def get_model_preds(CP):
    """Generate model predictions and cache them to disk.

    Parameters
    ----------
    CP : dict
        Contains directory locations for loading data and storing
        predictions ('dir_data', 'dir_model', 'dir_preds'), the number of
        images 'n_imgs' and the dataset key 'datatype'.

    Returns
    -------
    preds : numpy.ndarray
        Model predictions for the first CP['n_imgs'] input images.
    """
    n_imgs, dtype = CP['n_imgs'], CP['datatype']
    data = h5py.File(CP['dir_data'], 'r')
    Data = {
        dtype: [data['input_images'][:n_imgs].astype('float32'),
                data['target_masks'][:n_imgs].astype('float32')]
    }
    data.close()
    proc.preprocess(Data)
    model = load_model(CP['dir_model'])
    preds = model.predict(Data[dtype][0])
    # Save predictions; close the file so the dataset is flushed to disk
    # (the handle was previously left open — a resource leak).
    h5f = h5py.File(CP['dir_preds'], 'w')
    h5f.create_dataset(dtype, data=preds)
    h5f.close()
    print("Successfully generated and saved model predictions.")
    return preds
#########################
def add_unique_craters(craters, craters_unique, thresh_longlat2, thresh_rad):
    """Generates unique crater distribution by filtering out duplicates.

    Parameters
    ----------
    craters : array
        Crater tuples from a single image in the form (long, lat, radius).
    craters_unique : array
        Master array of unique crater tuples in the form (long, lat, radius)
    thresh_longlat2 : float.
        Hyperparameter that controls the minimum squared longitude/latitude
        difference between craters to be considered unique entries.
    thresh_rad : float
        Hyperparameter that controls the minimum squared radius difference
        between craters to be considered unique entries.

    Returns
    -------
    craters_unique : array
        Modified master array of unique crater tuples with new crater entries.
    """
    k2d = 180. / (np.pi * 1737.4)  # km to deg (lunar radius 1737.4 km)
    # NOTE: these views refer to the master array as passed in, so new
    # craters are compared only against the ORIGINAL master list, not
    # against entries appended within this call (original behavior).
    Long, Lat, Rad = craters_unique.T
    for j in range(len(craters)):
        lo, la, r = craters[j].T
        la_m = (la + Lat) / 2.
        minr = np.minimum(r, Rad)  # be liberal when filtering dupes
        # Duplicate filtering criteria: normalized long/lat distance
        # (longitude scaled by cos(latitude)) and fractional radius change.
        dL = (((Long - lo) / (minr * k2d / np.cos(np.pi * la_m / 180.)))**2
              + ((Lat - la) / (minr * k2d))**2)
        dR = np.abs(Rad - r) / minr
        index = (dR < thresh_rad) & (dL < thresh_longlat2)
        # No existing crater matches both criteria -> this one is unique.
        # (idiomatic replacement for len(np.where(index == True)[0]) == 0)
        if not index.any():
            craters_unique = np.vstack((craters_unique, craters[j]))
    return craters_unique
#########################
def estimate_longlatdiamkm(dim, llbd, distcoeff, coords):
    """First-order estimation of long/lat, and radius (km) from
    (Orthographic) x/y position and radius (pix).

    For images transformed from ~6000 pixel crops of the 30,000 pixel
    LROC-Kaguya DEM, this results in < ~0.4 degree latitude, <~0.2
    longitude offsets (~2% and ~1% of the image, respectively) and ~2% error in
    radius. Larger images thus may require an exact inverse transform,
    depending on the accuracy demanded by the user.

    Parameters
    ----------
    dim : tuple or list
        (width, height) of input images.
    llbd : tuple or list
        Long/lat limits (long_min, long_max, lat_min, lat_max) of image.
    distcoeff : float
        Ratio between the central heights of the transformed image and original
        image.
    coords : numpy.ndarray
        Array of crater x coordinates, y coordinates, and pixel radii.

    Returns
    -------
    craters_longlatdiamkm : numpy.ndarray
        Array of crater longitude, latitude and radii in km.
    """
    # Expand coords.
    long_pix, lat_pix, radii_pix = coords.T
    # Determine radius (km).
    km_per_pix = 1. / trf.km2pix(dim[1], llbd[3] - llbd[2], dc=distcoeff)
    radii_km = radii_pix * km_per_pix
    # Determine long/lat.  180/(pi*1737.4) converts km to degrees of arc
    # on the lunar surface (R_moon = 1737.4 km).
    deg_per_pix = km_per_pix * 180. / (np.pi * 1737.4)
    long_central = 0.5 * (llbd[0] + llbd[1])
    lat_central = 0.5 * (llbd[2] + llbd[3])
    # Iterative method for determining latitude: first-order estimate,
    # then a correction factor x/sin(x) based on the offset from center.
    lat_deg_firstest = lat_central - deg_per_pix * (lat_pix - dim[1] / 2.)
    latdiff = abs(lat_central - lat_deg_firstest)
    # Protect against latdiff = 0 situation (x/sin(x) would be 0/0).
    latdiff[latdiff < 1e-7] = 1e-7
    lat_deg = lat_central - (deg_per_pix * (lat_pix - dim[1] / 2.) *
                             (np.pi * latdiff / 180.) /
                             np.sin(np.pi * latdiff / 180.))
    # Determine longitude using determined latitude (meridians converge
    # by cos(latitude)).
    long_deg = long_central + (deg_per_pix * (long_pix - dim[0] / 2.) /
                               np.cos(np.pi * lat_deg / 180.))
    # Return combined long/lat/radius array.
    return np.column_stack((long_deg, lat_deg, radii_km))
def extract_unique_craters(CP, craters_unique):
    """Top level function that extracts craters from model predictions,
    converts craters from pixel to real (degree, km) coordinates, and filters
    out duplicate detections across images.

    Parameters
    ----------
    CP : dict
        Crater Parameters needed to run the code.
    craters_unique : array
        Empty master array of unique crater tuples in the form
        (long, lat, radius).

    Returns
    -------
    craters_unique : array
        Filled master array of unique crater tuples.
    """
    # Load model preds if cached; regenerate on any load failure.
    # (Narrowed from a bare `except:` that also swallowed e.g. KeyboardInterrupt.)
    try:
        preds = h5py.File(CP['dir_preds'], 'r')[CP['datatype']]
        print("Loaded model predictions successfully")
    except (IOError, OSError, KeyError):
        print("Couldnt load model predictions, generating")
        preds = get_model_preds(CP)
    # need for long/lat bounds
    P = h5py.File(CP['dir_data'], 'r')
    # ('pix_bounds' was unpacked here before but never used.)
    llbd, distcoeff = 'longlat_bounds', 'pix_distortion_coefficient'
    dim = (float(CP['dim']), float(CP['dim']))
    N_matches_tot = 0
    for i in range(CP['n_imgs']):
        img_id = proc.get_id(i)  # renamed from `id` (shadowed the builtin)
        coords = tmt.template_match_t(preds[i])
        # convert, add to master dist
        if len(coords) > 0:
            new_craters_unique = estimate_longlatdiamkm(
                dim, P[llbd][img_id], P[distcoeff][img_id][0], coords)
            N_matches_tot += len(coords)
            # Only add unique (non-duplicate) craters
            if len(craters_unique) > 0:
                craters_unique = add_unique_craters(new_craters_unique,
                                                    craters_unique,
                                                    CP['llt2'], CP['rt'])
            else:
                craters_unique = np.concatenate((craters_unique,
                                                 new_craters_unique))
    np.save(CP['dir_result'], craters_unique)
    return craters_unique
|
<filename>code/ffi_tools.py
import numpy as np
#import fitsio
import matplotlib.pyplot as plt
import sep
from matplotlib.patches import Rectangle
def preprocess_dFFI(raw_data):
    """Trim the driftscan FFI's dark rows and collateral columns.

    The frame is first copied into C (row-major) order, which sep's C
    routines require.

    TODO: the trim margins are hard-coded guesstimates of the edges.

    Args:
        raw_data (numpy.ndarray): 2D numpy of one FFI channel

    Returns:
        trimmed_data (numpy.ndarray): A trimmed version of the input
    """
    frame = raw_data.copy(order='C')
    # Drop 19/28 rows at the bottom/top and 12/20 columns at the left/right.
    return frame[19:-28, 12:-20]
def quick_plot(data):
    """Make a quick plot of the FFI with sane screen stretch

    Args:
        data (numpy.ndarray): 2D image to display.

    Returns:
        None
    """
    # Stretch the display to mean +/- one standard deviation.
    m, s = np.mean(data), np.std(data)
    plt.imshow(data, interpolation='nearest', cmap='gray', vmin=m-s, vmax=m+s, origin='lower')
    plt.colorbar();
def background_subtract(data, plot_bkg=False, return_bkg=False):
    """Background subtract the driftscan FFI

    Performs these steps:
    - Computes a mask based on the top 95th percentile of values
    - Estimate background with sep

    Args:
        data (numpy.ndarray): 2D numpy of one FFI channel
        plot_bkg (bool): Flag for whether to plot the background
            default = False
        return_bkg (bool): Flag for whether to return the background
            default = False

    Returns:
        background-subtracted FFI or optionally
        tuple of background-subtracted FFI, and background estimate
    """
    # sep requires C-contiguous arrays.
    data = data.copy(order='C')
    # Mask the brightest 5% of pixels (sources) so they don't bias the
    # background estimate.
    mask = data > np.percentile(data, 95)
    bkg = sep.Background(data, mask=mask)
    if plot_bkg:
        bkg_image = bkg.back()
        # show the background
        plt.imshow(bkg_image, interpolation='nearest', cmap='gray', origin='lower')
        plt.colorbar();
    # sep.Background supports direct subtraction from an ndarray.
    if return_bkg:
        return (data - bkg, bkg)
    else:
        return data - bkg
def get_kernel(read_saved=True, estimate_from=None, xy=(250, 310, 810, 985)):
    """Get an estimate for the kernel for this channel.

    Defaults to reading a locally saved kernel.

    Args:
        read_saved (bool): If True, return locally saved kernel
        estimate_from (numpy.ndarray): 2D numpy of one FFI channel
            The data to estimate from if read_saved=False
        xy (sequence): Limits (y0, y1, x0, x1) of a window around a robust
            driftscan segment.  (Default changed from a mutable list to an
            equivalent tuple; indexing behavior is identical.)

    Returns:
        numpy.ndarray: 2D kernel image — a line segment traced through the
        window's marginal profiles, blurred with a 2-pixel Gaussian.
    """
    if read_saved:
        return np.load('../data/ch41_FFI1_structured_kernel.npy')
    # Otherwise, compute everything from scratch...
    data = estimate_from
    kernel_raw = data[xy[0]:xy[1], xy[2]:xy[3]]
    kernel_raw = kernel_raw / np.percentile(kernel_raw, 95)
    ny, nx = kernel_raw.shape
    # Marginal (mean) profiles along each axis.
    kernel_x = kernel_raw.mean(axis=0)
    kernel_y = kernel_raw.mean(axis=1)
    # Draw a line defined by the marginals: endpoints are where each
    # normalized marginal first/last exceeds half its maximum.
    x0, x1 = np.where(kernel_x / np.max(kernel_x) > 0.5)[0][[0, -1]]
    y0, y1 = np.where(kernel_y / np.max(kernel_y) > 0.5)[0][[0, -1]]
    line_vec_x, line_vec_y = np.arange(x0, x1), np.linspace(y0, y1, x1 - x0)
    kernel_mask = kernel_raw * 0.0
    for i, j in zip(line_vec_x, line_vec_y):
        # np.int was removed in NumPy 1.24; builtin int() truncates the same way.
        kernel_mask[int(j), int(i)] = 1
    # Convolve the line with a 2 pixel Gaussian blur.
    from scipy.ndimage import gaussian_filter
    # Smooth hotdog-like kernel:
    final_kernel = gaussian_filter(kernel_mask, 2)
    # option for structured, Gaussian-tapered kernel:
    # final_kernel = np.abs(final_kernel * kernel_raw)
    return final_kernel
def get_aperture_mask(kernel, boxcar_size):
    """Build a boolean aperture mask by widening the kernel footprint.

    The kernel is lightly smoothed, normalized to a peak of 1, widened by a
    square boxcar convolution, and thresholded at 1% of the peak response.
    (The previous docstring was copy-pasted from get_kernel and described
    the wrong arguments and return value.)

    Args:
        kernel (numpy.ndarray): 2D driftscan kernel (e.g. from get_kernel)
        boxcar_size (int): Side length in pixels of the square boxcar used
            to widen the kernel footprint

    Returns:
        numpy.ndarray: boolean 2D array, True inside the aperture
    """
    from scipy.ndimage import gaussian_filter
    from scipy.signal import convolve
    aper_mask = gaussian_filter(kernel, 1)
    aper_mask = aper_mask / np.max(aper_mask)
    boxcar = np.ones((boxcar_size, boxcar_size))
    aper_mask = convolve(aper_mask, boxcar, mode='same')
    # Keep any pixel that collected at least 1% of the peak response.
    aper_mask = aper_mask > 0.01
    return aper_mask
def estimate_driftscan_line_trace(data, xc, yc, aper_mask, show_plot=False):
    """Estimate the line trace of a driftscan line segment

    Args:
        data (numpy.ndarray): Background subtracted driftscan FFI
        xc (int): x index position of center of star trail
        yc (int): y index position of center of star trail
        aper_mask (numpy.ndarray): boolean aperture mask of shape (ny, nx)
        show_plot (bool): Whether to show the FFI window-aperture overplot

    Returns:
        line_trace, flag: aperture photometry of line trace, and flag if it fails
    """
    ny, nx = aper_mask.shape
    # np.int was removed in NumPy 1.24; builtin int() truncates identically.
    x0 = int(xc - nx / 2)
    y0 = int(yc - ny / 2)
    ffi_cutout = data[y0:y0 + ny, x0:x0 + nx]
    if show_plot:
        m, s = np.mean(ffi_cutout), np.std(ffi_cutout)
        plt.imshow(aper_mask, interpolation='nearest', cmap='BuGn', vmin=0, vmax=1, origin='lower')
        plt.imshow(ffi_cutout, interpolation='nearest', cmap='gray', vmin=m-s, vmax=m+s, origin='lower', alpha=0.3)
        plt.show()
    try:
        output = np.sum(ffi_cutout * aper_mask, axis=0)
        flag = True
    except ValueError:
        # Cutout clipped by the detector edge: shapes no longer broadcast.
        # (Narrowed from a bare `except:`.)
        output = np.zeros(nx)
        flag = False
    return (output, flag)
def iterate_line_traces(data, xcs, ycs, aper_mask, show_every_n_plots=np.nan):
    """Estimate the line traces for many input x, y coordinates

    Assumes the same aperture mask for all traces

    Args:
        data (numpy.ndarray): Background subtracted driftscan FFI
        xcs (array-like): x indices of center of star trail
        ycs (array-like): y indices of center of star trail
        show_every_n_plots (int): Show every Nth plot.  The NaN default
            disables plotting entirely (i % NaN is never == 0).
            (np.NaN alias was removed in NumPy 2.0; np.nan is the same value.)

    Returns:
        line_traces: array of aperture photometry of line traces
    """
    n_sources = len(xcs)
    traces = []
    flags = []
    for i in range(n_sources):
        show_plot = (i % show_every_n_plots) == 0
        trace, flag = estimate_driftscan_line_trace(data, xcs[i], ycs[i], aper_mask, show_plot=show_plot)
        traces.append(trace)
        flags.append(flag)
    return (np.array(traces), np.array(flags))
def get_delta_x_offsets(traces, template_trace):
    """Get delta x offsets from traces

    Each trace is cross-correlated against a high signal-to-noise template;
    the shift of the correlation peak from the template's autocorrelation
    peak gives the per-trace offset.

    Args:
        traces (numpy.ndarray): 2D array (N_traces x N_x)
            of star trail traces
        template_trace (numpy.array): 1D array (N_x)
            of high signal-to-noise ratio trace

    Returns:
        delta_xs (numpy.ndarray): the offsets in x from traces to target
    """
    from scipy.signal import correlate
    n_traces, _ = traces.shape
    # Only compare the template's non-zero support.
    keep = template_trace > 0
    ref = template_trace[keep]
    # Autocorrelation peak position defines the zero-offset reference.
    zero_point = np.argmax(correlate(ref, ref, 'same'))
    offsets = [
        np.argmax(correlate(ref, traces[i, keep], 'same')) - zero_point
        for i in range(n_traces)
    ]
    return np.array(offsets)
def plot_extractions(data_sub, df_objects):
    """Plot the detected traces with rectangles on the input image

    Args:
        data_sub (numpy.ndarray): background-subtracted image.
        df_objects: table of detections with columns x, y, theta and boolean
            quality flags poor_fit / saturated — presumably a pandas
            DataFrame; verify against the caller.

    Returns:
        None
    """
    # plot background-subtracted image
    fig, ax = plt.subplots()
    m, s = np.mean(data_sub), np.std(data_sub)
    im = ax.imshow(data_sub, interpolation='nearest', cmap='gray',
                   vmin=m-s, vmax=m+s, origin='lower')
    # plot an ellipse for each object
    for i in range(len(df_objects)):
        # Color-code by quality: red = poor fit, yellow = saturated.
        if df_objects.poor_fit[i]:
            color = 'red'
        elif df_objects.saturated[i]:
            color = 'yellow'
        else:
            color = 'blue'
        # Fixed-size 160x10 rectangle centered on the detection
        # (hard-coded trail extent).
        e = Rectangle(xy=(df_objects.x[i]-80, df_objects.y[i]-18),
                      width=160,
                      height=10,
                      angle=df_objects.theta[i] * 180. / np.pi)
        e.set_facecolor('none')
        e.set_edgecolor(color)
        ax.add_artist(e)
    plt.axis('off')
    plt.show()
def plot_kernel(kernel, aper_mask=None):
    """Make a quick plot of the kernel with sane screen stretch

    Args:
        kernel (np.ndarray): 2D kernel image to display.
        aper_mask (np.ndarray): Optionally overplot an aperture mask

    Returns:
        None
    """
    # show the image
    plt.imshow(kernel, interpolation='nearest', cmap='BuGn', vmin=0, vmax=np.max(kernel), origin='lower')
    if aper_mask is not None:
        # Semi-transparent overlay so the kernel stays visible underneath.
        plt.imshow(aper_mask, interpolation='nearest', cmap='BuGn', vmin=0, vmax=1, origin='lower', alpha=0.5)
    plt.colorbar();
|
<filename>insights/parsers/sctp.py
"""
SCTP Socket State Parser
========================
Parsers provided by this module include:
SCTPEps - file ``/proc/net/sctp/eps``
-------------------------------------
SCTPAsc - file ``/proc/net/sctp/assocs``
----------------------------------------
"""
from insights import Parser, parser
from insights.parsers import SkipException, ParseException
from . import keyword_search
from insights.specs import Specs
@parser(Specs.sctp_eps)
class SCTPEps(Parser):
    """
    This parser parses the content of ``/proc/net/sctp/eps`` file.
    It returns a list of dictionaries. The dictionary contains detail
    information of individual SCTP endpoint, which includes Endpoints, Socket,
    Socket type, Socket State, hash bucket, bind port, UID, socket inodes,
    Local IP address.

    Typical contents of ``/proc/net/sctp/eps`` file are::

        ENDPT            SOCK             STY SST HBKT LPORT UID INODE     LADDRS
        ffff88017e0a0200 ffff880300f7fa00 2   10  29   11165 200 299689357 10.0.0.102 10.0.0.70
        ffff880612e81c00 ffff8803c28a1b00 2   10  30   11166 200 273361203 10.0.0.102 10.0.0.70 172.31.1.2

    Output data is stored in the list of dictionaries

    Examples:
        >>> type(sctp_info)
        <class 'insights.parsers.sctp.SCTPEps'>
        >>> sorted(sctp_info.sctp_local_ports) == sorted(['11165', '11166'])
        True
        >>> sorted(sctp_info.sctp_local_ips) == sorted(['10.0.0.102', '10.0.0.70', '172.31.1.2'])
        True
        >>> sorted(sctp_info.search(local_port="11165")) == sorted([{'endpoints': 'ffff88017e0a0200', 'socket': 'ffff880299f7fa00', 'sk_type': '2', 'sk_state': '10', 'hash_bkt': '29', 'local_port': '11165', 'uid': '200', 'inode': '299689357', 'local_addr': ['10.0.0.102', '10.0.0.70']}])
        True
        >>> len(sctp_info.search(local_port="11165")) == 1
        True
        >>> len(sctp_info.search(endpoints="ffff88017e0a0200")) == 1
        True
        >>> sctp_info.sctp_eps_ips
        {'ffff88017e0a0200': ['10.0.0.102', '10.0.0.70'], 'ffff880612e81c00': ['10.0.0.102', '10.0.0.70', '172.31.1.2']}
    """

    # Column names, in file order; the last column (LADDRS) holds a
    # variable-length list of local addresses.
    COLUMN_IDX = [
        'endpoints',
        'socket',
        'sk_type',
        'sk_state',
        'hash_bkt',
        'local_port',
        'uid',
        'inode',
        'local_addr'
    ]

    def parse_content(self, content):
        if (not content) or (not self.file_path):
            raise SkipException("No Contents")

        line = content[0].strip().split()
        keys_cnt = len(self.COLUMN_IDX)
        if "LPORT" not in line or len(line) != keys_cnt:
            # Include the offending header in the message (the previous
            # version called .format() on a string with no placeholder,
            # silently dropping it).
            raise ParseException("Contents are not compatible to this parser: {0}".format(line))

        self.data = []
        for line in content[1:]:
            # Split off the fixed-width leading fields; the remainder is
            # the variable-length local address list.
            line = line.strip().split(None, keys_cnt - 1)
            line[-1] = line[-1].split()
            self.data.append(dict(zip(self.COLUMN_IDX, line)))

        # Build the summary indexes used by the properties below.
        self._sctp_local_ports = set()
        self._sctp_local_ips = set()
        self._sctp_eps_ips = {}
        for line in self.data:
            self._sctp_local_ports.add(line['local_port'])
            local_addr = line['local_addr']
            self._sctp_local_ips.update(local_addr)
            if line['endpoints'] not in self._sctp_eps_ips:
                self._sctp_eps_ips[line['endpoints']] = []
            self._sctp_eps_ips[line['endpoints']].extend(local_addr)

    @property
    def sctp_local_ports(self):
        """
        (list): This function returns a sorted list of SCTP ports if SCTP
                endpoints are created, else `[]`.
        """
        return sorted(self._sctp_local_ports)

    @property
    def sctp_local_ips(self):
        """
        (list): This function returns a sorted list of all local ip addresses
                if SCTP endpoints are created, else `[]`.
        """
        return sorted(self._sctp_local_ips)

    @property
    def sctp_eps_ips(self):
        """
        (dict): This function returns a dict of all endpoints and corresponding
                local ip addresses used by SCTP endpoints if SCTP endpoints are
                created, else `{}`.
        """
        return self._sctp_eps_ips

    def search(self, **args):
        """
        (list): This function return a list of all endpoints when args search matches,
                when args search do not match then it returns `[]`.
        """
        return keyword_search(self.data, **args)
@parser(Specs.sctp_asc)
class SCTPAsc(Parser):
"""
This parser parses the content of ``/proc/net/sctp/assocs`` file.
And returns a list of dictionaries. The dictionary contains details
of individual SCTP endpoint, which includes Association Struct, Socket,
Socket type, Socket State, Association state, hash bucket, association id,
tx queue, rx queue, uid, inode, local port, remote port, 'local addr,
remote addr, heartbeat interval, max in-stream, max out-stream, max
retransmission attempt, number of init chunks send, number of shutdown
chunks send, data chunks retransmitted'
Typical contents of ``/proc/net/sctp/assocs`` file are::
ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT RPORT LADDRS <-> RADDRS HBINT INS OUTS MAXRT T1X T2X RTXC
ffff88045ac7e000 ffff88062077aa00 2 1 4 1205 963 0 0 200 273361167 11567 11166 10.0.0.102 10.0.0.70 <-> *10.0.0.109 10.0.0.77 1000 2 2 10 0 0 0
ffff88061fbf2000 ffff88060ff92500 2 1 4 1460 942 0 0 200 273360669 11566 11167 10.0.0.102 10.0.0.70 <-> *10.0.0.109 10.0.0.77 1000 2 2 10 0 0 0
Output data is stored in the list of dictionaries
Examples:
>>> type(sctp_asc)
<class 'insights.parsers.sctp.SCTPAsc'>
>>> sorted(sctp_asc.sctp_local_ports) == sorted(['11567','11566'])
True
>>> sorted(sctp_asc.sctp_remote_ports) == sorted(['11166','11167'])
True
>>> sorted(sctp_asc.sctp_local_ips) == sorted(['10.0.0.102', '10.0.0.70'])
True
>>> sorted(sctp_asc.sctp_remote_ips) == sorted(['*10.0.0.109', '10.0.0.77'])
True
>>> sorted(sctp_asc.search(local_port='11566')) == sorted([{'init_chunks_send': '0', 'uid': '200', 'shutdown_chunks_send': '0', 'max_outstream': '2', 'tx_que': '0', 'inode': '273360669', 'hrtbt_intrvl': '1000', 'sk_type': '2', 'remote_addr': ['*10.0.0.109', '10.0.0.77'], 'data_chunks_retrans': '0', 'local_addr': ['10.0.0.102', '10.0.0.70'], 'asc_id': '942', 'max_instream': '2', 'remote_port': '11167', 'asc_state': '4', 'max_retrans_atmpt': '10', 'sk_state': '1', 'socket': 'ffff88060ff92500', 'asc_struct': 'ffff88061fbf2000', 'local_port': '11566', 'hash_bkt': '1460', 'rx_que': '0'}])
True
"""
# Column names for /proc/net/sctp/assocs, in file order.  'local_addr'
# and 'remote_addr' are variable-length address lists separated by '<->'
# in the raw line; 'relation' stands in for that separator column and is
# dropped before zipping (see parse_content).
COLUMN_IDX = [
    'asc_struct',
    'socket',
    'sk_type',
    'sk_state',
    'asc_state',
    'hash_bkt',
    'asc_id',
    'tx_que',
    'rx_que',
    'uid',
    'inode',
    'local_port',
    'remote_port',
    'local_addr',
    'remote_addr',
    'hrtbt_intrvl',
    'max_instream',
    'max_outstream',
    'max_retrans_atmpt',
    'init_chunks_send',
    'shutdown_chunks_send',
    'data_chunks_retrans',
    'relation',  # should be ignore
]
def parse_content(self, content):
if (not content) or (not self.file_path):
raise SkipException("No Contents")
line = content[0].strip().split()
keys_cnt = len(self.COLUMN_IDX)
if "LPORT" not in line or len(line) != keys_cnt:
raise ParseException("Contents are not compatible to this parser".format(line))
self.data = []
laddr_idx = line.index('LADDRS')
raddr_ridx = len(line) - line.index('RADDRS')
for line in content[1:]:
line_1 = line.strip().split(None, laddr_idx)
line_end = line_1.pop()
idx = line_end.index('<->')
laddrs = line_end[:idx].strip().split()
line_end = line_end[idx + 3:].strip().rsplit(None, raddr_ridx - 1)
raddrs = line_end.pop(0).split()
line_1.append(laddrs)
line_1.append(raddrs)
line_1.extend(line_end)
self.data.append(dict(zip(self.COLUMN_IDX[:-1], line_1)))
self._sctp_local_ports = set()
self._sctp_remote_ports = set()
self._sctp_local_ips = set()
self._sctp_remote_ips = set()
for line in self.data:
self._sctp_local_ports.add(line['local_port'])
self._sctp_remote_ports.add(line['remote_port'])
self._sctp_local_ips.update(line['local_addr'])
self._sctp_remote_ips.update(line['remote_addr'])
@property
def sctp_local_ports(self):
"""
(list): This function returns a list of SCTP local peer ports
if SCTP endpoints are created, else `[]`.
"""
return sorted(self._sctp_local_ports)
@property
def sctp_remote_ports(self):
"""
(list): This function returns a list of SCTP remote peer ports
if SCTP endpoints are created, else `[]`.
"""
return sorted(self._sctp_remote_ports)
@property
def sctp_local_ips(self):
"""
(list): This function returns a list of all local peer's ip addresses
if SCTP endpoints are created, else `[]`.
"""
return sorted(self._sctp_local_ips)
@property
def sctp_remote_ips(self):
"""
(list): This function returns a list of all remote peer's ip addresses
if SCTP endpoints are created, else `[]`.
"""
return sorted(self._sctp_remote_ips)
def search(self, **args):
"""
(list): This function return a list of all SCTP associations when args search matches,
when args search do not match then it returns `[]`.
"""
return keyword_search(self.data, **args)
|
try:
import ax
except ImportError:
ax = None
import logging
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
class AxSearch(Searcher):
    """Hyperparameter search backed by `Ax <https://ax.dev/>`_.

    Ax is a platform for understanding, managing, deploying, and
    automating adaptive experiments.  It provides an easy to use
    interface with BoTorch, a flexible, modern library for Bayesian
    optimization in PyTorch.  More information can be found in
    https://ax.dev/.

    To use this search algorithm, you must install Ax and sqlalchemy:

    .. code-block:: bash

        $ pip install ax-platform sqlalchemy

    Args:
        ax_client: A pre-configured ``AxClient``.  The metric of its
            experiment's objective is used as the search metric and must be
            present in the dict reported/returned by the Trainable.
        mode (str): One of {min, max}.  Determines whether objective is
            minimizing or maximizing the metric attribute.  Defaults to "max".
        max_concurrent (int): Deprecated.
        use_early_stopped_trials: Deprecated.

    .. code-block:: python

        from ax.service.ax_client import AxClient
        from ray import tune
        from ray.tune.suggest.ax import AxSearch

        parameters = [
            {"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
            {"name": "x2", "type": "range", "bounds": [0.0, 1.0]},
        ]

        def easy_objective(config):
            for i in range(100):
                intermediate_result = config["x1"] + config["x2"] * i
                tune.track.log(score=intermediate_result)

        client = AxClient(enforce_sequential_optimization=False)
        client.create_experiment(parameters=parameters, objective_name="score")
        algo = AxSearch(client)
        tune.run(easy_objective, search_alg=algo)
    """

    def __init__(self,
                 ax_client,
                 mode="max",
                 use_early_stopped_trials=None,
                 max_concurrent=None):
        assert ax is not None, "Ax must be installed!"
        self._ax = ax_client
        experiment = self._ax.experiment
        # The experiment's objective metric doubles as the Searcher metric.
        self._objective_name = experiment.optimization_config.objective.metric.name
        self._parameters = list(experiment.parameters)
        self._live_trial_mapping = {}
        self.max_concurrent = max_concurrent
        super(AxSearch, self).__init__(
            metric=self._objective_name,
            mode=mode,
            max_concurrent=max_concurrent,
            use_early_stopped_trials=use_early_stopped_trials)
        if self._ax._enforce_sequential_optimization:
            logger.warning("Detected sequential enforcement. Be sure to use "
                           "a ConcurrencyLimiter.")

    def suggest(self, trial_id):
        """Ask Ax for the next configuration; None when at max concurrency."""
        if self.max_concurrent and \
                len(self._live_trial_mapping) >= self.max_concurrent:
            return None
        parameters, trial_index = self._ax.get_next_trial()
        self._live_trial_mapping[trial_id] = trial_index
        return parameters

    def on_trial_complete(self, trial_id, result=None, error=False):
        """Notification for the completion of trial.

        Data of form key value dictionary of metric names and values.
        """
        if result:
            self._process_result(trial_id, result)
        self._live_trial_mapping.pop(trial_id)

    def _process_result(self, trial_id, result):
        """Report the trial's objective and constraint metrics back to Ax.

        Each metric is reported with a SEM of 0.0 (treated as noiseless).
        """
        ax_trial_index = self._live_trial_mapping[trial_id]
        metric_dict = {self._objective_name: (result[self._objective_name], 0.0)}
        constraints = self._ax.experiment.optimization_config.outcome_constraints
        for constraint in constraints:
            name = constraint.metric.name
            metric_dict[name] = (result[name], 0.0)
        self._ax.complete_trial(
            trial_index=ax_trial_index, raw_data=metric_dict)
|
<reponame>mverleg/notex_package
from datetime import datetime
from inspect import isclass
from collections import OrderedDict
from copy import copy
from sys import stderr
from json_tricks.nonp import load
from os import listdir, walk, remove
from os.path import join, relpath, exists
from package_versions import VersionRange, VersionRangeMismatch
from shutil import rmtree
from compiler.utils import hash_str, hash_file, import_obj, link_or_copy
from notexp.bases import Configuration
from notexp.utils import PackageNotInstalledError, InvalidPackageConfigError
from frozenobj import frozen
from .license import LICENSES
from .resource import get_resources
from .utils import get_package_dir
# Configuration keys that every package's config.json must provide.
CONFIG_REQUIRED = {'name', 'version', 'license',}
# Optional configuration keys with their default values.
CONFIG_DEFAULTS = {
    'requirements': {},
    'pip_requirements': [],
    'external_requirements': [],
    'conflicts_with': {},
    'command_arguments': [], # todo: possibly merge with config (not all commands are config though)
    'config': None,
    'pre_processors': [],
    'parser': None,
    'tags': {},
    'compilers': [],
    'linkers': [],
    'substitutions': None,
    'post_processors': [],
    'renderer': None,
    'template': None,
    'static': [],
    'styles': [],
    'scripts': [],
    'readme': 'readme.rst',
    'credits': 'credits.txt',
}
# Keys that provide actual functionality; at least one of these must be
# non-empty for a package to be valid (checked in Package.config_add_defaults).
CONFIG_FUNCTIONAL = {'command_arguments', 'pre_processors', 'parser', 'tags', 'compilers', 'linkers', 'substitutions',
	'post_processors', 'renderer', 'template', 'static', 'styles', 'scripts',}
class Package:
    """A single installed notex package: one (name, version) directory.

    The version request is resolved against the installed versions on
    construction.  ``load()`` must be called explicitly before the package's
    resources and actions are available, so that untrusted packages can still
    be inspected (e.g. for their signature) without executing their code.
    """

    def __init__(self, name, version, options, logger, cache, compile_conf, *, packages=None, packages_dir=None):
        self.loaded = False
        self.name = name
        self.logger = logger
        self.cache = cache
        self.compile_conf = compile_conf
        self.packages = packages
        if packages_dir is None:
            packages_dir = get_package_dir()
        self.packages_dir = packages_dir
        if not options:
            options = {}
        self.options = options
        self.version_request = version
        self.path = self.version = None
        self.choose_version()
        # initialize actions (filled in later by load_actions)
        self.config = self.parser = self.renderer = None
        self.pre_processors = self.compilers = self.linkers = self.post_processors = ()
        self.tags = OrderedDict()
        self.substitutions = OrderedDict()

    def __repr__(self):
        return '<{0:}.{1:s}: {2:s} {3:s}>'.format(self.__class__.__module__, self.__class__.__name__, self.name,
            self.version or self.version_request)

    def get_versions(self):
        """Return the sorted list of installed version directories for this package."""
        try:
            vdirs = sorted(listdir(join(self.packages_dir, self.name)))
        except FileNotFoundError:
            raise PackageNotInstalledError('package {0:s} not found (checked "{1:s}" which contains: [{2:s}])' \
                .format(self.name, self.packages_dir, ', '.join(listdir(self.packages_dir))))
        return vdirs

    def choose_version(self):
        """
        Choose from among the available versions, or raise a VersionRangeMismatch if there are no candidates.
        """
        versions = self.get_versions()
        try:
            choice = VersionRange(self.version_request).choose(versions, conflict='error')
        except VersionRangeMismatch:
            raise VersionRangeMismatch('package {0:s} has no installed version that satisfies {1:s} [it has {2:s}]' \
                .format(self.name, self.version_request, ', '.join(versions))) #todo: add note about installing?
        self.version = choice
        self.path = join(self.packages_dir, self.name, self.version)

    def load(self):
        """
        Read and validate config.json, then load resources and actions.

        Loading should not be automatic because it should also work for
        untrusted packages (e.g. to get the signature).
        """
        try:
            with open(join(self.path, 'config.json')) as fh:
                conf = load(fh)
        except FileNotFoundError:
            raise InvalidPackageConfigError('config.json was not found in "{0:s}"'.format(self.path))
        except ValueError as err:
            # Bug fix: the original format string had no placeholder for the
            # json error, so it was silently dropped from the message.
            raise InvalidPackageConfigError('config file for {0:} is not valid json: {1:s}'.format(self, str(err)))
        if not (conf.get('name', None) == self.name and conf.get('version', None) == self.version):
            raise InvalidPackageConfigError(('Package config for {0:} contains mismatching name and/or version '
                '{1:s} {2:s}').format(self, conf.get('name', None), conf.get('version', None)))
        self.load_meta(conf)
        conf = self.config_add_defaults(conf)
        self.config_load_textfiles(conf)
        self.load_resources(conf)
        self.load_actions(conf)
        self.loaded = True
        return self

    def load_resources(self, conf):
        """
        Load non-python files: template, styles, scripts and static files.
        """
        self.template, self.styles, self.scripts, self.static = get_resources(group_name=self.name, path=self.path,
            logger=self.logger, cache=self.cache, compile_conf=self.compile_conf, template_conf=conf['template'],
            style_conf=conf['styles'], script_conf=conf['scripts'], static_conf=conf['static'],
            note='from package {0:s}'.format(self.name)
        )

    def load_meta(self, conf):
        """
        Load meta data file which is added by the package index server.
        """
        self.date = datetime.now() #todo: tmp
        self.author = '??' # todo
        self.signature = self.get_signature() #todo: this should be the one from meta file; can't hash the whole project every load
        self.is_approved = True
        self.approved_on = datetime.now() # todo (None if not approved)

    def _set_up_import_dir(self):
        """Link or copy this package into the compile PACKAGE_DIR, replacing stale versions.

        A sibling '<name>.version' file records which version is present so a
        version change triggers removal and re-copy.
        """
        imp_dir = join(self.compile_conf.PACKAGE_DIR, self.name)
        if exists(imp_dir):
            try:
                with open(join(self.compile_conf.PACKAGE_DIR, '{0:s}.version'.format(self.name)), 'r') as fh:
                    stored_version = fh.read()
            except IOError:
                stored_version = None
            if self.version != stored_version:
                self.logger.info('removing wrong version {2:} of package {0:s} from "{1:s}"'.format(self.name,
                    imp_dir, stored_version), level=3)
                # imp_dir may be a file (symlink/copy) or a directory
                try:
                    remove(imp_dir)
                except IsADirectoryError:
                    rmtree(imp_dir)
        if not exists(imp_dir):
            self.logger.info('copy package {0:} to "{1:}"'.format(self.name, imp_dir), level=3)
            link_or_copy(self.path, join(self.compile_conf.PACKAGE_DIR, self.name), exist_ok=True, allow_linking=True)
            with open(join(self.compile_conf.PACKAGE_DIR, '{0:s}.version'.format(self.name)), 'w+') as fh:
                fh.write(self.version)

    def _import_from_package(self, imp_path):
        """
        First try to import from the package, otherwise fall back to normal pythonpath.
        """
        try:
            return import_obj('{0:s}.{1:s}'.format(self.name, imp_path))
        except ImportError:
            return import_obj(imp_path)

    def load_actions(self, conf):
        """
        Load actions like pretty much everything: pre-processors, parsers, tags, compilers, linkers, substitutions,
        post_processors and renderers).
        """
        def instantiate_action(action, **kwargs):
            # Classes get instantiated with the package config; plain
            # callables are used as-is.  Either way the result must be callable.
            if isclass(action):
                try:
                    action = action(self.config, **kwargs)
                except TypeError as err:
                    raise InvalidPackageConfigError(('action {0:} for package {1:} did not accept the given arguments: '
                        'config and kwargs {2:}; alternatively it might have raised a TypeError {3:}')
                        .format(action, self, kwargs, err))
            if not hasattr(action, '__call__'):
                raise InvalidPackageConfigError(('action {0:} for package {1:} should be a class (and/)or a callable')
                    .format(action, self, kwargs))
            return action
        #todo: better errors, also logging
        self._set_up_import_dir()
        if conf['config']:
            # `true` in the config selects the default Configuration class
            # (a redundant double assignment of Config was removed here).
            if conf['config'] is True:
                Config = Configuration
            else:
                Config = self._import_from_package(conf['config'])
            self.config = Config(self.options, logger=frozen(self.logger), cache=frozen(self.cache),
                compile_conf=frozen(self.compile_conf), parser=frozen(self.packages.get_parser()))
        self.pre_processors = tuple(instantiate_action(self._import_from_package(obj_imp_path))
            for obj_imp_path in conf['pre_processors'])
        if conf['parser']:
            Parser = self._import_from_package(conf['parser'])
            self.parser = Parser(self.config)
        # cache tags which are known under two names, for performance and so that they are identical
        _tag_alias_cache = {}
        for name, obj_imp_path in conf['tags'].items():
            if obj_imp_path not in _tag_alias_cache:
                _tag_alias_cache[obj_imp_path] = instantiate_action(
                    self._import_from_package(obj_imp_path))
            self.tags[name] = _tag_alias_cache[obj_imp_path]
        self.compilers = tuple(instantiate_action(self._import_from_package(obj_imp_path))
            for obj_imp_path in conf['compilers'])
        self.linkers = tuple(instantiate_action(self._import_from_package(obj_imp_path))
            for obj_imp_path in conf['linkers'])
        if conf['substitutions']: #todo (maybe)
            raise NotImplementedError('substitutions')
        self.post_processors = tuple(instantiate_action(self._import_from_package(obj_imp_path))
            for obj_imp_path in conf['post_processors'])
        if conf['renderer']:
            Renderer = self._import_from_package(conf['renderer'])
            self.renderer = Renderer(self.config)

    def config_add_defaults(self, config):
        """
        Add default values for all parameters that have defaults, check that all parameters
        without defaults have some value, and check that there are no unknown parameters.
        """
        unknown_keys = set(config.keys()) - CONFIG_REQUIRED - set(CONFIG_DEFAULTS.keys())
        if unknown_keys:
            raise InvalidPackageConfigError('{0:} has unknown configuration parameter(s): {1:}'.format(
                self, ', '.join(unknown_keys)))
        missing_keys = CONFIG_REQUIRED - set(config.keys())
        if missing_keys:
            raise InvalidPackageConfigError('{0:} is missing a value for configuration parameter(s): {1:}'.format(
                self, ', '.join(missing_keys)))
        conf = copy(CONFIG_DEFAULTS)
        conf.update(config)
        # At least one functional key must be non-empty, otherwise the
        # package provides no behavior at all.
        for func_key in CONFIG_FUNCTIONAL:
            if conf[func_key]:
                break
        else:
            # Bug fix: an unreachable `logger.strict_fail(...)` call that
            # followed this raise (dead code) was removed.
            raise InvalidPackageConfigError('{0:} does not have any functionality ({1:s} are all empty)'.format(
                self, ', '.join(CONFIG_FUNCTIONAL)))
        return conf

    def config_load_textfiles(self, conf):
        """Read readme/credits files (None when missing) and resolve the license text."""
        try:
            with open(join(self.path, conf['readme'])) as fh:
                self.readme = fh.read()
        except FileNotFoundError:
            self.readme = None
        try:
            with open(join(self.path, conf['credits'])) as fh:
                self.credits = fh.read()
        except FileNotFoundError:
            self.credits = None
        if conf['license'] in LICENSES:
            self.license_text = LICENSES[conf['license']].format(name=self.author, year=self.date.year)
        else:
            self.license_text = '??'
            stderr.write('not an approved package (wrong license)')

    def yield_files_list(self):
        """Yield every file path inside the package, relative to the package root."""
        for root, directories, filenames in walk(self.path):
            for filename in filenames:
                yield relpath(join(root, filename), self.path)

    def get_file_signatures(self):
        """Return an OrderedDict mapping each relative file path to its content hash."""
        file_sigs = OrderedDict()
        for file in sorted(self.yield_files_list()):
            file_sigs[file] = hash_file(join(self.path, file))
        return file_sigs

    def get_file_signatures_string(self):
        """Return the file signatures as one "path<TAB>hash" line per file."""
        return '\n'.join('{0:s}\t{1:s}'.format(name, hash) for name, hash in self.get_file_signatures().items())

    def get_signature(self):
        """Hash of all file hashes; should be computed on installing, not all the time."""
        return hash_str(self.get_file_signatures_string())

    def check_filenames(self):
        """Validate file names (on installing, not all the time).

        todo: make sure all filenames are boring: alphanumeric or -_. or space(?)
        """
        pass
|
<filename>apps/user_accounts/migrations/0001_initial.py
# Generated by Django 4.0.3 on 2022-03-31 21:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    # Initial auto-generated schema for the user_accounts app:
    # SubadminTable and ClientsTable, both FK-linked to the auth user model.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # SubadminTable is created first: ClientsTable.my_subadmin below
        # references it.
        migrations.CreateModel(
            name='SubadminTable',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=255, unique=True)),
                # NOTE(review): password stored in a plain CharField — confirm
                # it is hashed before save.
                ('admin_password', models.CharField(blank=True, max_length=255, null=True)),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('country', models.CharField(choices=[('Ghana', 'Ghana'), ('Nigeria', 'Nigeria'), ('Cameroon', 'Cameroon'), ('Ivory Coast', 'Ivory Coast'), ('Togo', 'Togo')], max_length=1000)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ClientsTable',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=255, unique=True)),
                ('first_name', models.CharField(blank=True, max_length=255, null=True)),
                ('last_name', models.CharField(blank=True, max_length=255, null=True)),
                ('gender', models.CharField(blank=True, choices=[('Male', 'Male'), ('Female', 'Female')], max_length=10, null=True)),
                # NOTE(review): password columns stored as plain CharFields —
                # confirm hashing happens at the application layer.
                ('subadmin_password', models.CharField(blank=True, max_length=255, null=True)),
                ('admin_password', models.CharField(blank=True, max_length=255, null=True)),
                ('sponsortID', models.CharField(blank=True, max_length=255, null=True)),
                ('uplinerID', models.CharField(blank=True, max_length=255, null=True)),
                ('user_type', models.CharField(choices=[('IBA', 'IBA'), ('Customer', 'Customer')], default='IBA', max_length=1000)),
                # Binary-tree placement fields (left/right children in the
                # referral structure).
                ('my_position', models.CharField(choices=[('Left', 'Left'), ('right', 'right')], default='Left', max_length=1000)),
                ('my_left_child', models.CharField(choices=[('Left', 'Left'), ('right', 'right')], default='Left', max_length=1000)),
                ('my_right_child', models.CharField(choices=[('Left', 'Left'), ('right', 'right')], default='Left', max_length=1000)),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('phone', models.IntegerField(blank=True, default=0, help_text='Personal Phone', null=True)),
                ('tel_fixe', models.IntegerField(blank=True, default=0, help_text='Fixed Line', null=True)),
                ('address', models.CharField(blank=True, max_length=20, null=True)),
                ('city', models.CharField(blank=True, max_length=20, null=True)),
                ('country', models.CharField(choices=[('Ghana', 'Ghana'), ('Nigeria', 'Nigeria'), ('Cameroon', 'Cameroon'), ('Ivory Coast', 'Ivory Coast'), ('Togo', 'Togo')], max_length=1000)),
                ('bank_name', models.CharField(blank=True, max_length=20, null=True)),
                ('bank_branch_name', models.CharField(blank=True, max_length=20, null=True)),
                ('account_name', models.CharField(blank=True, max_length=20, null=True)),
                ('account_number', models.IntegerField(blank=True, default=0, help_text='Personal Phone', null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_updated', models.DateTimeField(auto_now=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='admin', to=settings.AUTH_USER_MODEL)),
                # NOTE(review): the app label 'apps_user_accounts' in this FK
                # target must match this app's configured label — confirm.
                ('my_subadmin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to='apps_user_accounts.subadmintable')),
            ],
            managers=[
                ('clients', django.db.models.manager.Manager()),
            ],
        ),
    ]
|
<reponame>ShengyuH/Scene-Recognition-in-3D
import torch
import torch.utils.data
import torch.optim as optim
from tensorboardX import SummaryWriter
import os,sys
import MinkowskiEngine as ME
import time
from sklearn.metrics import jaccard_score,accuracy_score
import numpy as np
import pandas as pd
sys.path.append('..')
from libs.solvers import PolyLR
from libs.metrics import get_IoU, get_Recall,get_Acc
from tqdm import tqdm
# Tiny constant added to the confusion matrix to avoid division by zero.
EPSILON=sys.float_info.epsilon
# Scene-type class ids considered valid for evaluation (1-based in the
# source labelling — presumably the ScanNet scene-type labels; confirm),
# then shifted to the 0-based indices used by the network outputs.
TYPE_VALID_CLASS_IDS=[1,2,3,4,8,9,13,14,15,16,18,20,21]
TYPE_VALID_CLASS_IDS=[ele-1 for ele in TYPE_VALID_CLASS_IDS]
def get_IoU_Recall_Acc(gt, pred, num_labels):
    """Compute mean IoU, mean recall and overall accuracy of predictions.

    Only samples whose ground-truth label is < num_labels are scored.  IoU
    and recall are averaged over TYPE_VALID_CLASS_IDS; accuracy is over all
    scored samples.  All three results are rounded to 3 decimals.
    """
    assert pred.shape == gt.shape
    valid = gt < num_labels
    gt_valid = gt[valid]
    pred_valid = pred[valid]
    acc = round((gt_valid == pred_valid).sum() * 1.0 / valid.sum(), 3)
    # Confusion matrix: rows are predictions, columns are ground truth.
    # EPSILON keeps the divisions below away from 0/0.
    cm = np.bincount(pred_valid * num_labels + gt_valid,
                     minlength=num_labels ** 2)
    cm = cm.reshape((num_labels, num_labels)).astype(np.ulonglong) + EPSILON
    diag = cm.diagonal()
    IoU = np.around(diag / (cm.sum(0) + cm.sum(1) - diag), decimals=4)
    recall = np.around(diag / cm.sum(0), decimals=4)
    IoU = IoU[TYPE_VALID_CLASS_IDS]
    recall = recall[TYPE_VALID_CLASS_IDS]
    return round(IoU.mean(), 3), round(recall.mean(), 3), acc
def init_stats():
    """Return a fresh, zeroed dict of the evaluation statistics."""
    return {
        'sem_iou': 0,
        'sem_loss': 0,
        'sem_k_iou': [],
        'sem_acc': 0,
        'clf_correct': 0,
        'clf_loss': 0,
        'num_samples': 0,
    }
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group.

    Only the first group is inspected (this works for one parameter group);
    returns None when the optimizer has no parameter groups.
    """
    groups = optimizer.param_groups
    if not groups:
        return None
    return groups[0]['lr']
def adjust_learning_rate(optimizer):
    """Decay every parameter group's learning rate to 0.1x in place.

    The current rate is read from the first parameter group (via get_lr)
    and the decayed value is written back to all groups.
    """
    current_lr = get_lr(optimizer)
    for group in optimizer.param_groups:
        group['lr'] = current_lr * 0.1
def train(net, loaders, device, logger, config):
    """Train ``net`` on the scene-classification task.

    Runs ``config['max_iter']`` optimization iterations (gradients are
    accumulated over two batches before each step), validating every
    ``config['val_freq']`` iterations and checkpointing the best validation
    accuracy to ``config['dump_dir']/best_model.pth``.

    Args:
        net: Minkowski network; when restoring pretrained weights, all
            modules whose name does not start with 'clf' are frozen.
        loaders: dict with 'train' and 'val' data loaders.
        device: device the sparse input tensors are moved to.
        logger: logger used for progress messages.
        config: experiment configuration dict.
    """
    ######################
    # optimizer
    ######################
    if config['optimizer'] == 'SGD':
        optimizer = optim.SGD(net.parameters(), lr=config['sgd_lr'], momentum=config['sgd_momentum'],
                              dampening=config['sgd_dampening'], weight_decay=config['weight_decay'])
    elif config['optimizer'] == 'Adam':
        optimizer = optim.Adam(net.parameters(), lr=config['adam_lr'],
                               betas=(config['adam_beta1'], config['adam_beta2']), weight_decay=config['weight_decay'])
    else:
        # Robustness fix: an unknown optimizer name previously crashed later
        # with an opaque NameError on `optimizer`.
        raise ValueError('unknown optimizer: {}'.format(config['optimizer']))
    ######################
    # loss
    ######################
    clf_criterion = torch.nn.CrossEntropyLoss()
    ######################
    # restore model
    ######################
    writer = SummaryWriter(log_dir=config['dump_dir'])
    restore_iter = 1
    curr_best_metric = 0
    path_checkpoint = config['pretrained_weights']
    if os.path.exists(path_checkpoint) and config['restore']:
        checkpoint = torch.load(path_checkpoint)  # checkpoint
        pretrained_dict = checkpoint['state_dict']
        # update only the pretrained layers that exist in the current model
        model_dict = net.state_dict()
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        net.load_state_dict(model_dict)
        # freeze everything except the classification head; the module object
        # is used directly (the old code rebuilt it via eval('net.%s' % name))
        for name, layer in net._modules.items():
            if not name.startswith('clf'):
                for param in layer.parameters():
                    param.requires_grad = False
    #######################
    # train and val loader
    #######################
    train_loader = iter(loaders['train'])
    val_loader = loaders['val']
    ###########################
    ## training
    ###########################
    net.train()
    start = time.time()
    stats_train = init_stats()
    optimizer.zero_grad()
    for curr_iter in tqdm(range(restore_iter, config['max_iter'])):
        # Bug fix: the old `train_loader.next()` used the removed Python-2
        # iterator API and, more importantly, raised StopIteration once the
        # first epoch was exhausted; restart the loader at epoch boundaries.
        try:
            data_dict = next(train_loader)
        except StopIteration:
            train_loader = iter(loaders['train'])
            data_dict = next(train_loader)
        coords_batch0, feats_batch0 = ME.utils.sparse_collate(data_dict['coords'], data_dict['feats'])
        sin = ME.SparseTensor(feats_batch0, coords=coords_batch0).to(device)
        clf_out = net(sin)
        writer.add_scalar('lr', get_lr(optimizer), curr_iter)
        ###########################
        ## scene classification part
        ###########################
        loss = clf_criterion(clf_out, data_dict['clf_labels'].to(device))
        loss.backward()
        # accumulate gradients over two batches before stepping
        if curr_iter % 2 == 0:
            optimizer.step()
            optimizer.zero_grad()
        writer.add_scalar('train/iter_loss', loss.item(), curr_iter)
        stats_train['clf_loss'] += loss.item()
        is_correct = data_dict['clf_labels'] == torch.argmax(clf_out, 1).cpu()
        stats_train['clf_correct'] += is_correct.sum().item()
        stats_train['num_samples'] += data_dict['clf_labels'].size()[0]
        ###########################
        ## validation
        ###########################
        if curr_iter % config['val_freq'] == 0:
            end = time.time()
            ### evaluate
            net.eval()
            stats_val = init_stats()
            n_iters = 0
            optimizer.zero_grad()
            with torch.no_grad():  # avoid out of memory problem
                for data_dict in val_loader:
                    coords_batch0, feats_batch0 = ME.utils.sparse_collate(data_dict['coords'], data_dict['feats'])
                    sin = ME.SparseTensor(feats_batch0, coords=coords_batch0).to(device)
                    clf_out = net(sin)
                    ###########################
                    ## scene classification part
                    ###########################
                    loss = clf_criterion(clf_out, data_dict['clf_labels'].to(device))
                    stats_val['clf_loss'] += loss.item()
                    is_correct = data_dict['clf_labels'] == torch.argmax(clf_out, 1).cpu()
                    stats_val['clf_correct'] += is_correct.sum().item()
                    stats_val['num_samples'] += data_dict['clf_labels'].size()[0]
                    n_iters += 1
            ###########################
            ## scene stats
            ###########################
            writer.add_scalar('train/clf_loss', stats_train['clf_loss'] / config['val_freq'], curr_iter)
            writer.add_scalar('train/clf_acc', stats_train['clf_correct'] / stats_train['num_samples'], curr_iter)
            writer.add_scalar('validate/clf_loss', stats_val['clf_loss'] / n_iters, curr_iter)
            writer.add_scalar('validate/clf_acc', stats_val['clf_correct'] / stats_val['num_samples'], curr_iter)
            logger.info('Iter: %d, time: %d s' % (curr_iter, end - start))
            logger.info('Train: clf_acc: %.3f, clf_loss: %.3f' %
                        (stats_train['clf_correct'] / stats_train['num_samples'],
                         stats_train['clf_loss'] / config['val_freq']))
            logger.info('Val : clf_acc: %.3f, clf_loss: %.3f' %
                        (stats_val['clf_correct'] / stats_val['num_samples'],
                         stats_val['clf_loss'] / n_iters))
            ### update checkpoint on improved validation accuracy
            val_acc = stats_val['clf_correct'] / stats_val['num_samples']
            c_metric = val_acc
            if c_metric > curr_best_metric:
                curr_best_metric = c_metric
                torch.save({
                    'state_dict': net.state_dict()
                }, os.path.join(config['dump_dir'], 'best_model.pth'))
                logger.info('---------- model updated, best metric: %.3f ----------' % curr_best_metric)
            stats_train = init_stats()
            start = time.time()
            net.train()
def test(net, loader, device, config):
    """Run inference over ``loader`` and write predicted scene labels to a file.

    Restores the checkpoint ``config['dump_dir']/config['checkpoint']`` when
    present (and ``config['restore']`` is set), predicts a class per scene,
    and writes "scene_name label_id" lines to
    ``config['dump_dir']/prediction.txt``.
    """
    ######################
    # restore model
    ######################
    path_checkpoint = os.path.join(config['dump_dir'], config['checkpoint'])
    if os.path.exists(path_checkpoint) and config['restore']:
        checkpoint = torch.load(path_checkpoint)  # checkpoint
        net.load_state_dict(checkpoint['state_dict'])
    ###########################
    ## test
    ###########################
    predictions, scene_names = [], []
    net.eval()
    with torch.no_grad():  # avoid out of memory problem
        for data_dict in loader:
            sin = ME.SparseTensor(data_dict['feats'], data_dict['coords'].int()).to(device)
            clf_out = net(sin)
            ###########################
            ## scene classification part
            ###########################
            preds = torch.argmax(clf_out.F, 1).cpu().tolist()
            predictions.extend(preds)
            scene_names.extend(data_dict['scene_names'])
            print(preds)
    # map the network's contiguous class indices back to the original label
    # ids; unused slots stay at the sentinel value 100
    remapper = np.ones(14) * (100)
    for i, x in enumerate([1, 2, 3, 4, 8, 9, 13, 14, 15, 16, 18, 20, 21]):
        remapper[i] = x
    # write the prediction file; bug fix: the file is now opened with a
    # context manager so it is closed even if a write fails
    with open(os.path.join(config['dump_dir'], 'prediction.txt'), 'w') as f:
        for scene_name, pred in zip(scene_names, predictions):
            f.write('%s %d\n' % (scene_name, remapper[pred]))
def validate(net,loader,device,config):
    """Evaluate on a labelled loader: print IoU/recall/accuracy, dump a CSV.

    Restores the checkpoint at config['dump_dir']/config['checkpoint'] when
    present (and config['restore'] is set), then writes per-scene
    predictions and ground truth to config['dump_dir']/prediction.csv.
    """
    ######################
    # restore model
    ######################
    path_checkpoint=os.path.join(config['dump_dir'],config['checkpoint'])
    if os.path.exists(path_checkpoint) and config['restore']:
        checkpoint = torch.load(path_checkpoint) # checkpoint
        net.load_state_dict(checkpoint['state_dict'])
    ###########################
    ## validate
    ###########################
    predictions,scene_names=[],[]
    gts=[]
    # raw per-class score arrays; collected but not written anywhere below
    pred_histos=[]
    net.eval()
    with torch.no_grad(): # avoid out of memory problem
        for data_dict in tqdm(loader):
            sin = ME.SparseTensor(data_dict['feats'],data_dict['coords'].int()).to(device)
            clf_out = net(sin)
            ###########################
            ## scene classification part
            ###########################
            pred_histos.append(np.array(clf_out.F.cpu()))
            preds = torch.argmax(clf_out.F, 1).cpu().tolist()
            scene_name=data_dict['scene_names']
            predictions.extend(preds)
            scene_names.extend(scene_name)
            gts.extend(data_dict['clf_labels'].tolist())
    # NOTE(review): unlike test(), the CSV stores the raw 0-based class
    # indices (no remapping back to original label ids) — confirm intended.
    iou,recall,acc=get_IoU_Recall_Acc(np.array(gts),np.array(predictions),21)
    print(iou,recall,acc)
    df=pd.DataFrame(columns=['scene_names','prediction','ground_truth'],
            data=np.array([scene_names,predictions,gts]).T)
    df.to_csv(os.path.join(config['dump_dir'],'prediction.csv'),index=False)
import datetime
import logging
import uuid
import marshmallow as ma
from flask import url_for, g, jsonify
from flask.views import MethodView
from flask_smorest import Blueprint, abort
import http.client as http_client
from drift.core.extensions.jwt import current_user, requires_roles
from drift.core.extensions.urlregistry import Endpoints
from driftbase.config import get_server_heartbeat_config
from driftbase.models.db import (
Machine, Server, Match, ServerDaemonCommand
)
log = logging.getLogger(__name__)
bp = Blueprint("servers", __name__, url_prefix="/servers", description="Battle server processes")
endpoints = Endpoints()
def drift_init_extension(app, api, **kwargs):
    # Register the servers blueprint with the API and wire the endpoint
    # registry into the app.
    api.register_blueprint(bp)
    endpoints.init_app(app)
def utcnow():
    # Current UTC time as a *naive* datetime (no tzinfo) — callers must not
    # compare it against timezone-aware datetimes.
    # NOTE(review): presumably wrapped so tests can monkeypatch time — confirm.
    return datetime.datetime.utcnow()
class ServersGetArgsSchema(ma.Schema):
machine_id = ma.fields.Integer()
rows = ma.fields.Integer()
class ServersPostRequestSchema(ma.Schema):
machine_id = ma.fields.Integer()
version = ma.fields.String()
public_ip = ma.fields.IPv4()
port = ma.fields.Integer()
command_line = ma.fields.String()
command_line_custom = ma.fields.String()
pid = ma.fields.Integer()
status = ma.fields.String()
image_name = ma.fields.String()
instance_name = ma.fields.String()
branch = ma.fields.String()
commit_id = ma.fields.String()
process_info = ma.fields.Dict()
details = ma.fields.Dict()
repository = ma.fields.String()
ref = ma.fields.String()
build = ma.fields.String()
build_number = ma.fields.Integer()
target_platform = ma.fields.String()
build_info = ma.fields.Dict()
placement = ma.fields.String()
class ServersPostResponseSchema(ma.Schema):
server_id = ma.fields.Integer(required=True)
machine_id = ma.fields.Integer(required=True)
url = ma.fields.Url(required=True)
machine_url = ma.fields.Url(required=True)
heartbeat_url = ma.fields.Url(required=True)
commands_url = ma.fields.Url(required=True)
token = ma.fields.String(required=True)
next_heartbeat_seconds = ma.fields.Number(required=True)
heartbeat_timeout = ma.fields.Str(required=True)
class ServerPutRequestSchema(ma.Schema):
status = ma.fields.String(required=True)
machine_id = ma.fields.Integer()
version = ma.fields.String()
public_ip = ma.fields.IPv4()
port = ma.fields.Integer()
command_line = ma.fields.String()
command_line_custom = ma.fields.String()
pid = ma.fields.Integer()
image_name = ma.fields.String()
error = ma.fields.String()
branch = ma.fields.String()
commit_id = ma.fields.String()
process_info = ma.fields.Dict()
details = ma.fields.Dict()
repository = ma.fields.String()
ref = ma.fields.String()
build = ma.fields.String()
build_number = ma.fields.Integer()
target_platform = ma.fields.String()
build_info = ma.fields.Dict()
class ServerPutResponseSchema(ma.Schema):
server_id = ma.fields.Integer(required=True)
machine_id = ma.fields.Integer(required=True)
url = ma.fields.Url(required=True)
machine_url = ma.fields.Url(required=True)
heartbeat_url = ma.fields.Url(required=True)
class ServerHeartbeatPutResponseSchema(ma.Schema):
    """Response body for PUT /servers/<server_id>/heartbeat.

    Reports the previous/current heartbeat timestamps plus when the next
    heartbeat is expected and when the server would time out.
    """
    last_heartbeat = ma.fields.DateTime(metadata=dict(description="Timestamp of the previous heartbeat"))
    this_heartbeat = ma.fields.DateTime(metadata=dict(description="Timestamp of this heartbeat"))
    next_heartbeat = ma.fields.DateTime(metadata=dict(description="Timestamp when the next heartbeat is expected"))
    next_heartbeat_seconds = ma.fields.Integer(metadata=dict(description="Number of seconds until the next heartbeat is expected"))
    heartbeat_timeout = ma.fields.DateTime(
        metadata=dict(description="Timestamp when the server times out if no heartbeat is received"))
    heartbeat_timeout_seconds = ma.fields.Integer(
        metadata=dict(description="Number of seconds until the server times out if no heartbeat is received"))
@bp.route('', endpoint='list')
class ServersAPI(MethodView):
    """Collection endpoint for battle server registrations."""

    @requires_roles("service")
    @bp.arguments(ServersGetArgsSchema, location='query')
    def get(self, args):
        """
        Get a list of the last 100 battle servers that have been registered in
        the system.
        """
        num_rows = args.get("rows") or 100
        machine_id = args.get("machine_id")
        query = g.db.query(Server)
        if machine_id:
            query = query.filter(Server.machine_id == machine_id)
        # Newest registrations first.
        query = query.order_by(-Server.server_id)
        query = query.limit(num_rows)
        ret = []
        for row in query.all():
            record = row.as_dict()
            record["url"] = url_for("servers.entry", server_id=row.server_id, _external=True)
            ret.append(record)
        return jsonify(ret)

    @requires_roles("service")
    @bp.arguments(ServersPostRequestSchema)
    @bp.response(http_client.CREATED, ServersPostResponseSchema)
    def post(self, args):
        """
        The daemon process (and server, for local development) post here
        to register the server instance with the backend. You need to
        register the server before you can register a battle.
        """
        machine_id = args.get("machine_id")
        log.info("registering a server on machine_id %s, realm %s and public_ip %s",
                 machine_id, args.get("realm"), args.get("public_ip"))

        # If we don't already have a machine we make one just-in-time now on the
        # realm "local". This supports local development where an external daemon
        # is not running and the server itself does this registration without a
        # prior registration on the machines endpoint.
        if not machine_id:
            realm = "local"
            instance_name = args.get("instance_name")
            placement = args.get("placement") or "<unknown placement>"
            if not instance_name:
                abort(http_client.BAD_REQUEST, description="You need to supply an instance_name")
            machine = g.db.query(Machine).filter(Machine.realm == realm,
                                                 Machine.instance_name == instance_name,
                                                 Machine.placement == placement).first()
            if machine:
                machine_id = machine.machine_id
                log.info("machine_id %s found for server", machine_id)
            else:
                machine = Machine(realm=realm, instance_name=instance_name,
                                  placement=placement, server_count=0)
                g.db.add(machine)
                g.db.flush()  # flush so machine_id is assigned before we read it
                machine_id = machine.machine_id
                log.info("Created machine_id %s for server instance \"%s\"",
                         machine_id, instance_name)
        else:
            machine = g.db.query(Machine).get(machine_id)
            if not machine:
                abort(http_client.NOT_FOUND, description="Machine %s was not found" % machine_id)

        # Opaque token the server uses to authenticate subsequent calls.
        token = str(uuid.uuid4()).replace("-", "")[:20]

        def get_or_null(ip):
            # Stringify the parsed IP address, or store NULL when absent.
            # (Replaces the error-prone `x and y or z` idiom.)
            return str(ip) if ip else None

        server = Server(machine_id=machine_id,
                        version=args.get("version"),
                        public_ip=get_or_null(args.get("public_ip")),
                        port=args.get("port"),
                        command_line=args.get("command_line"),
                        command_line_custom=args.get("command_line_custom"),
                        pid=args.get("pid"),
                        status=args.get("status"),
                        image_name=args.get("image_name"),
                        branch=args.get("branch"),
                        commit_id=args.get("commit_id"),
                        process_info=args.get("process_info"),
                        details=args.get("details"),
                        repository=args.get("repository"),
                        ref=args.get("ref"),
                        build=args.get("build"),
                        build_number=args.get("build_number"),
                        target_platform=args.get("target_platform"),
                        build_info=args.get("build_info"),
                        token=token
                        )
        g.db.add(server)
        machine.server_count += 1
        machine.server_date = utcnow()
        g.db.commit()

        server_id = server.server_id
        resource_url = url_for("servers.entry", server_id=server_id, _external=True)
        machine_url = url_for("machines.entry", machine_id=machine_id, _external=True)
        heartbeat_url = url_for("servers.heartbeat", server_id=server_id, _external=True)
        commands_url = url_for("servers.commands", server_id=server_id, _external=True)
        response_header = {
            "Location": resource_url,
        }
        log.info("Server %s has been registered on machine_id %s", server_id, machine_id)
        heartbeat_period, heartbeat_timeout = get_server_heartbeat_config()
        return {"server_id": server_id,
                "url": resource_url,
                "machine_id": machine_id,
                "machine_url": machine_url,
                "heartbeat_url": heartbeat_url,
                "commands_url": commands_url,
                "token": token,
                "next_heartbeat_seconds": heartbeat_period,
                "heartbeat_timeout": utcnow() + datetime.timedelta(seconds=heartbeat_timeout),
                }, None, response_header
@bp.route('/<int:server_id>', endpoint='entry')
class ServerAPI(MethodView):
    """
    Interface to battle servers instances. A battle server instance is
    a single run of a battle server executable. The battle server will
    have a single battle on it. You should never have a battle resource
    without an associated battle server resource.
    """

    @requires_roles("service")
    def get(self, server_id):
        """
        Get information about a single battle server instance.
        Returns information from the machine and the associated
        battle if found.
        """
        server = g.db.query(Server).get(server_id)
        if not server:
            log.warning("Requested a non-existant battle server: %s", server_id)
            abort(http_client.NOT_FOUND, description="Server not found")

        info = server.as_dict()
        info["url"] = url_for("servers.entry", server_id=server_id, _external=True)
        info["heartbeat_url"] = url_for("servers.heartbeat", server_id=server_id, _external=True)
        info["commands_url"] = url_for("servers.commands", server_id=server_id, _external=True)

        # Only link the machine when it actually exists in the database.
        info["machine_url"] = None
        machine_id = server.machine_id
        if machine_id and g.db.query(Machine).get(machine_id):
            info["machine_url"] = url_for("machines.entry", machine_id=machine_id,
                                          _external=True)

        match_rows = g.db.query(Match).filter(Match.server_id == server_id).all()
        info["matches"] = [
            {"match_id": m.match_id,
             "url": url_for("matches.entry", match_id=m.match_id, _external=True),
             "num_players": m.num_players,
             }
            for m in match_rows
        ]

        pending_rows = g.db.query(ServerDaemonCommand).filter(
            ServerDaemonCommand.server_id == server_id,
            ServerDaemonCommand.status == "pending").all()
        info["pending_commands"] = [
            {"command_id": c.command_id,
             "command": c.command,
             "arguments": c.arguments,
             "create_date": c.create_date,
             "url": url_for("servers.command", server_id=server_id,
                            command_id=c.command_id, _external=True)
             }
            for c in pending_rows
        ]

        log.debug("Returning info for battle server %s", server_id)
        return jsonify(info)

    @requires_roles("service")
    @bp.arguments(ServerPutRequestSchema)
    @bp.response(http_client.OK, ServerPutResponseSchema)
    def put(self, args, server_id):
        """
        The battle server management (celery) process calls this to update
        the status of running a specific battle server task
        """
        log.info("Updating battle server %s", server_id)
        server = g.db.query(Server).get(server_id)
        if not server:
            abort(http_client.NOT_FOUND)
        if args.get("status"):
            log.info("Changing status of battle server %s from '%s' to '%s'",
                     server_id, server.status, args["status"])

        # public_ip arrives as a parsed IPv4 object; persist it as a string.
        public_ip = args.pop("public_ip", None)
        if public_ip:
            server.public_ip = str(public_ip)
        for field_name, value in args.items():
            setattr(server, field_name, value)
        g.db.commit()

        machine_id = server.machine_id
        machine_url = (url_for("machines.entry", machine_id=machine_id, _external=True)
                       if machine_id else None)
        return {"server_id": server_id,
                "url": url_for("servers.entry", server_id=server_id, _external=True),
                "machine_id": machine_id,
                "machine_url": machine_url,
                "heartbeat_url": url_for("servers.heartbeat", server_id=server_id, _external=True),
                }
@bp.route('/<int:server_id>/heartbeat', endpoint='heartbeat')
class ServerHeartbeatAPI(MethodView):
    """
    Thin heartbeat API
    """
    @requires_roles("service")
    @bp.response(http_client.OK, ServerHeartbeatPutResponseSchema)
    def put(self, server_id):
        """
        Battle server heartbeat

        Bumps the server's heartbeat counter/timestamp and reports when the
        next heartbeat is due. Responds 404 both when the server does not
        exist and when the previous heartbeat is older than the configured
        timeout, so a timed-out daemon re-registers instead of continuing.
        """
        log.debug("%s is heart beating battle server %s",
                  current_user.get("user_name", "unknown"), server_id)
        server = g.db.query(Server).get(server_id)
        if not server:
            abort(http_client.NOT_FOUND, description="Server not found")
        # Period and timeout come from configuration so they can be tuned.
        heartbeat_period, heartbeat_timeout = get_server_heartbeat_config()
        now = utcnow()
        last_heartbeat = server.heartbeat_date
        # NOTE(review): assumes heartbeat_date is always set (e.g. at
        # registration); a None value here would raise TypeError -- confirm
        # the model default.
        if last_heartbeat + datetime.timedelta(seconds=heartbeat_timeout) < now:
            msg = "Heartbeat timeout. Last heartbeat was at {} and now we are at {}" \
                .format(last_heartbeat, now)
            log.info(msg)
            abort(http_client.NOT_FOUND, message=msg)
        server.heartbeat_count += 1
        server.heartbeat_date = now
        g.db.commit()
        return {
            "last_heartbeat": last_heartbeat,
            "this_heartbeat": server.heartbeat_date,
            "next_heartbeat": server.heartbeat_date + datetime.timedelta(seconds=heartbeat_period),
            "next_heartbeat_seconds": heartbeat_period,
            "heartbeat_timeout": now + datetime.timedelta(seconds=heartbeat_timeout),
            "heartbeat_timeout_seconds": heartbeat_timeout,
        }
class ServerCommandsPostSchema(ma.Schema):
    """Request body for queuing a daemon command; only ``command`` is required."""
    command = ma.fields.String(required=True)
    arguments = ma.fields.Dict()
    details = ma.fields.Dict()
@bp.route('/<int:server_id>/commands', endpoint='commands')
class ServerCommandsAPI(MethodView):
    """
    Commands for the battle server daemon
    """

    @requires_roles("service")
    @bp.arguments(ServerCommandsPostSchema)
    def post(self, args, server_id):
        """
        Add a new command for the daemon to execute
        """
        if not g.db.query(Server).get(server_id):
            abort(http_client.NOT_FOUND)
        status = "pending"
        command = ServerDaemonCommand(server_id=server_id,
                                      command=args["command"],
                                      arguments=args.get("arguments"),
                                      details=args.get("details"),
                                      status=status,
                                      )
        g.db.add(command)
        g.db.commit()
        resource_url = url_for("servers.command", server_id=server_id,
                               command_id=command.command_id, _external=True)
        body = {"command_id": command.command_id,
                "url": resource_url,
                "status": status,
                }
        return jsonify(body), http_client.CREATED, None

    @requires_roles("service")
    def get(self, server_id):
        """List every command queued for this server."""
        rows = g.db.query(ServerDaemonCommand) \
            .filter(ServerDaemonCommand.server_id == server_id) \
            .all()

        def _with_url(row):
            # Augment the raw record with its resource link.
            entry = row.as_dict()
            entry["url"] = url_for("servers.command",
                                   server_id=server_id,
                                   command_id=row.command_id,
                                   _external=True)
            return entry

        return jsonify([_with_url(row) for row in rows])
class ServerCommandPatchSchema(ma.Schema):
    """Request body for updating a daemon command's execution status."""
    status = ma.fields.String(required=True)
    details = ma.fields.Dict()
@bp.route('/<int:server_id>/commands/<int:command_id>', endpoint='command')
class ServerCommandAPI(MethodView):
    """Status reporting and inspection for a single daemon command."""

    @requires_roles("service")
    @bp.arguments(ServerCommandPatchSchema)
    def patch(self, args, server_id, command_id):
        return self._patch(args, server_id, command_id)

    @requires_roles("service")
    @bp.arguments(ServerCommandPatchSchema)
    def put(self, args, server_id, command_id):
        # PUT and PATCH are equivalent here; both delegate to _patch.
        return self._patch(args, server_id, command_id)

    def _patch(self, args, server_id, command_id):
        """
        Update the status (and optionally details) of a daemon command.
        """
        server = g.db.query(Server).get(server_id)
        if not server:
            abort(http_client.NOT_FOUND)
        row = g.db.query(ServerDaemonCommand).get(command_id)
        if not row:
            # Previously a missing command crashed with AttributeError (500);
            # return a proper 404 instead.
            abort(http_client.NOT_FOUND)
        row.status = args["status"]
        row.status_date = utcnow()
        if "details" in args:
            row.details = args["details"]
        g.db.commit()
        ret = row.as_dict()
        ret["url"] = url_for("servers.command", server_id=server_id, command_id=row.command_id,
                             _external=True)
        return jsonify(ret)

    @requires_roles("service")
    def get(self, server_id, command_id):
        """Return a single daemon command."""
        row = g.db.query(ServerDaemonCommand).get(command_id)
        if not row:
            # Same 404 guard as _patch: .get() returns None for unknown ids.
            abort(http_client.NOT_FOUND)
        ret = row.as_dict()
        ret["url"] = url_for("servers.command", server_id=server_id, command_id=row.command_id,
                             _external=True)
        return jsonify(ret)
@endpoints.register
def endpoint_info(*args):
    """Advertise the servers listing endpoint for service discovery."""
    return {"servers": url_for("servers.list", _external=True)}
|
class Zoo:
    """A zoo with a fixed budget and capacity limits for animals and workers.

    Animals cost money up front; workers are paid later via pay_workers().
    Budget and capacities are kept private to force use of the public API.
    """

    def __init__(self, name, budget, animal_capacity, workers_capacity):
        self.name = name
        self.animals = []
        self.workers = []
        self.__animal_capacity = animal_capacity
        self.__workers_capacity = workers_capacity
        self.__budget = budget

    def is_capacity_animals(self):
        # True while there is room for at least one more animal.
        return len(self.animals) < self.__animal_capacity

    def is_capacity_workers(self):
        # True while there is room for at least one more worker.
        return len(self.workers) < self.__workers_capacity

    def add_animal(self, animal, price):
        """Buy an animal if both the budget and the capacity allow it."""
        if self.__budget >= price and self.is_capacity_animals():
            self.__budget -= price
            self.animals.append(animal)
            return f"{animal.name} the {animal.Type} added to the zoo"
        elif self.__budget < price and self.is_capacity_animals():
            return "Not enough budget"
        # No capacity left (reported regardless of budget).
        return "Not enough space for animal"

    def hire_worker(self, worker):
        """Hire a worker if capacity allows; salary is only paid via pay_workers()."""
        if self.__budget >= worker.salary and self.is_capacity_workers():
            self.workers.append(worker)
            return f"{worker.name} the {worker.Type} hired successfully"
        return "Not enough space for worker"

    def fire_worker(self, worker_name):
        """Remove the first worker matching *worker_name*, if any."""
        for worker in self.workers:
            if worker.name == worker_name:
                self.workers.remove(worker)
                return f"{worker_name} fired successfully"
        return f"There is no {worker_name} in the zoo"

    def pay_workers(self):
        """Pay all salaries at once, or nothing if the budget is insufficient."""
        budget_needed = sum(worker.salary for worker in self.workers)
        if self.__budget >= budget_needed:
            self.__budget -= budget_needed
            return f"You payed your workers. They are happy. Budget left: {self.__budget}"
        return "You have no budget to pay your workers. They are unhappy"

    def tend_animals(self):
        """Cover all animals' needs at once, or nothing if the budget is insufficient."""
        budget_needed = sum(animal.get_needs() for animal in self.animals)
        if self.__budget >= budget_needed:
            self.__budget -= budget_needed
            return f"You tended all the animals. They are happy. Budget left: {self.__budget}"
        return "You have no budget to tend the animals. They are unhappy."

    def profit(self, amount):
        """Add *amount* to the budget."""
        self.__budget += amount

    def _grouped_status(self, members, groups):
        """Build the shared report tail: one section per non-empty group.

        ``groups`` is an ordered mapping of member Type -> section label;
        section order follows the mapping, member order follows insertion.
        """
        buckets = {label: [] for label in groups.values()}
        for member in members:
            label = groups.get(member.Type)
            if label is not None:
                buckets[label].append(member)
        lines = []
        for label, bucket in buckets.items():
            if bucket:
                lines.append(f"\n----- {len(bucket)} {label}:")
                lines.extend(f"\n{member}" for member in bucket)
        return "".join(lines)

    def animals_status(self):
        """Report animal counts grouped by species."""
        header = f"You have {len(self.animals)} animals"
        return header + self._grouped_status(
            self.animals, {"Lion": "Lions", "Tiger": "Tigers", "Cheetah": "Cheetahs"})

    def workers_status(self):
        """Report worker counts grouped by role."""
        header = f"You have {len(self.workers)} workers"
        return header + self._grouped_status(
            self.workers, {"Keeper": "Keepers", "Caretaker": "Caretakers", "Vet": "Vets"})
|
# ## Tutorial
#
# This guide can help you start working with NetworkX.
#
# ### Creating a graph
#
# Create an empty graph with no nodes and no edges.
import networkx as nx
G = nx.Graph()
# By definition, a `Graph` is a collection of nodes (vertices) along with
# identified pairs of nodes (called edges, links, etc). In NetworkX, nodes can
# be any [hashable](https://docs.python.org/3/glossary.html#term-hashable) object e.g., a text string, an image, an XML object,
# another Graph, a customized node object, etc.
#
# # Nodes
#
# The graph `G` can be grown in several ways. NetworkX includes many graph
# generator functions and facilities to read and write graphs in many formats.
# To get started though we’ll look at simple manipulations. You can add one node
# at a time,
G.add_node(1)
# or add nodes from any [iterable](https://docs.python.org/3/glossary.html#term-iterable) container, such as a list
G.add_nodes_from([2, 3])
# You can also add nodes along with node
# attributes if your container yields 2-tuples of the form
# `(node, node_attribute_dict)`:
#
# ```
# >>> G.add_nodes_from([
# ... (4, {"color": "red"}),
# ... (5, {"color": "green"}),
# ... ])
# ```
#
# Node attributes are discussed further below.
#
# Nodes from one graph can be incorporated into another:
H = nx.path_graph(10)
G.add_nodes_from(H)
# `G` now contains the nodes of `H` as nodes of `G`.
# In contrast, you could use the graph `H` as a node in `G`.
G.add_node(H)
# The graph `G` now contains `H` as a node. This flexibility is very powerful as
# it allows graphs of graphs, graphs of files, graphs of functions and much more.
# It is worth thinking about how to structure your application so that the nodes
# are useful entities. Of course you can always use a unique identifier in `G`
# and have a separate dictionary keyed by identifier to the node information if
# you prefer.
#
# # Edges
#
# `G` can also be grown by adding one edge at a time,
G.add_edge(1, 2)
e = (2, 3)
G.add_edge(*e) # unpack edge tuple*
# by adding a list of edges,
G.add_edges_from([(1, 2), (1, 3)])
# or by adding any ebunch of edges. An *ebunch* is any iterable
# container of edge-tuples. An edge-tuple can be a 2-tuple of nodes or a 3-tuple
# with 2 nodes followed by an edge attribute dictionary, e.g.,
# `(2, 3, {'weight': 3.1415})`. Edge attributes are discussed further
# below.
G.add_edges_from(H.edges)
# There are no complaints when adding existing nodes or edges. For example,
# after removing all nodes and edges,
G.clear()
# we add new nodes/edges and NetworkX quietly ignores any that are
# already present.
G.add_edges_from([(1, 2), (1, 3)])
G.add_node(1)
G.add_edge(1, 2)
G.add_node("spam") # adds node "spam"
G.add_nodes_from("spam") # adds 4 nodes: 's', 'p', 'a', 'm'
G.add_edge(3, 'm')
# At this stage the graph `G` consists of 8 nodes and 3 edges, as can be seen by:
G.number_of_nodes()
G.number_of_edges()
DG = nx.DiGraph()
DG.add_edge(2, 1) # adds the nodes in order 2, 1
DG.add_edge(1, 3)
DG.add_edge(2, 4)
DG.add_edge(1, 2)
assert list(DG.successors(2)) == [1, 4]
assert list(DG.edges) == [(2, 1), (2, 4), (1, 3), (1, 2)]
# # Examining elements of a graph
#
# We can examine the nodes and edges. Four basic graph properties facilitate
# reporting: `G.nodes`, `G.edges`, `G.adj` and `G.degree`. These
# are set-like views of the nodes, edges, neighbors (adjacencies), and degrees
# of nodes in a graph. They offer a continually updated read-only view into
# the graph structure. They are also dict-like in that you can look up node
# and edge data attributes via the views and iterate with data attributes
# using methods `.items()`, `.data('span')`.
# If you want a specific container type instead of a view, you can specify one.
# Here we use lists, though sets, dicts, tuples and other containers may be
# better in other contexts.
list(G.nodes)
list(G.edges)
list(G.adj[1]) # or list(G.neighbors(1))
G.degree[1] # the number of edges incident to 1
# One can specify to report the edges and degree from a subset of all nodes
# using an nbunch. An *nbunch* is any of: `None` (meaning all nodes),
# a node, or an iterable container of nodes that is not itself a node in the
# graph.
G.edges([2, 'm'])
G.degree([2, 3])
# # Removing elements from a graph
#
# One can remove nodes and edges from the graph in a similar fashion to adding.
# Use methods
# `Graph.remove_node()`,
# `Graph.remove_nodes_from()`,
# `Graph.remove_edge()`
# and
# `Graph.remove_edges_from()`, e.g.
G.remove_node(2)
G.remove_nodes_from("spam")
list(G.nodes)
G.remove_edge(1, 3)
# # Using the graph constructors
#
# Graph objects do not have to be built up incrementally - data specifying
# graph structure can be passed directly to the constructors of the various
# graph classes.
# When creating a graph structure by instantiating one of the graph
# classes you can specify data in several formats.
G.add_edge(1, 2)
H = nx.DiGraph(G) # create a DiGraph using the connections from G
list(H.edges())
edgelist = [(0, 1), (1, 2), (2, 3)]
H = nx.Graph(edgelist)
# # What to use as nodes and edges
#
# You might notice that nodes and edges are not specified as NetworkX
# objects. This leaves you free to use meaningful items as nodes and
# edges. The most common choices are numbers or strings, but a node can
# be any hashable object (except `None`), and an edge can be associated
# with any object `x` using `G.add_edge(n1, n2, object=x)`.
#
# As an example, `n1` and `n2` could be protein objects from the RCSB Protein
# Data Bank, and `x` could refer to an XML record of publications detailing
# experimental observations of their interaction.
#
# We have found this power quite useful, but its abuse
# can lead to surprising behavior unless one is familiar with Python.
# If in doubt, consider using `convert_node_labels_to_integers()` to obtain
# a more traditional graph with integer labels.
#
# # Accessing edges and neighbors
#
# In addition to the views `Graph.edges`, and `Graph.adj`,
# access to edges and neighbors is possible using subscript notation.
G = nx.Graph([(1, 2, {"color": "yellow"})])
G[1] # same as G.adj[1]
G[1][2]
G.edges[1, 2]
# You can get/set the attributes of an edge using subscript notation
# if the edge already exists.
G.add_edge(1, 3)
G[1][3]['color'] = "blue"
G.edges[1, 2]['color'] = "red"
G.edges[1, 2]
# Fast examination of all (node, adjacency) pairs is achieved using
# `G.adjacency()`, or `G.adj.items()`.
# Note that for undirected graphs, adjacency iteration sees each edge twice.
FG = nx.Graph()
FG.add_weighted_edges_from([(1, 2, 0.125), (1, 3, 0.75), (2, 4, 1.2), (3, 4, 0.375)])
for n, nbrs in FG.adj.items():
for nbr, eattr in nbrs.items():
wt = eattr['weight']
if wt < 0.5: print(f"({n}, {nbr}, {wt:.3})")
# Convenient access to all edges is achieved with the edges property.
for (u, v, wt) in FG.edges.data('weight'):
if wt < 0.5:
print(f"({u}, {v}, {wt:.3})")
# # Adding attributes to graphs, nodes, and edges
#
# Attributes such as weights, labels, colors, or whatever Python object you like,
# can be attached to graphs, nodes, or edges.
#
# Each graph, node, and edge can hold key/value attribute pairs in an associated
# attribute dictionary (the keys must be hashable). By default these are empty,
# but attributes can be added or changed using `add_edge`, `add_node` or direct
# manipulation of the attribute dictionaries named `G.graph`, `G.nodes`, and
# `G.edges` for a graph `G`.
#
# ## Graph attributes
#
# Assign graph attributes when creating a new graph
G = nx.Graph(day="Friday")
G.graph
# Or you can modify attributes later
G.graph['day'] = "Monday"
G.graph
# # Node attributes
#
# Add node attributes using `add_node()`, `add_nodes_from()`, or `G.nodes`
G.add_node(1, time='5pm')
G.add_nodes_from([3], time='2pm')
G.nodes[1]
G.nodes[1]['room'] = 714
G.nodes.data()
# Note that adding a node to `G.nodes` does not add it to the graph, use
# `G.add_node()` to add new nodes. Similarly for edges.
#
# # Edge Attributes
#
# Add/change edge attributes using `add_edge()`, `add_edges_from()`,
# or subscript notation.
G.add_edge(1, 2, weight=4.7 )
G.add_edges_from([(3, 4), (4, 5)], color='red')
G.add_edges_from([(1, 2, {'color': 'blue'}), (2, 3, {'weight': 8})])
G[1][2]['weight'] = 4.7
G.edges[3, 4]['weight'] = 4.2
# The special attribute `weight` should be numeric as it is used by
# algorithms requiring weighted edges.
#
# # Directed graphs
#
# The `DiGraph` class provides additional methods and properties specific
# to directed edges, e.g.,
# `DiGraph.out_edges`, `DiGraph.in_degree`,
# `DiGraph.predecessors()`, `DiGraph.successors()` etc.
# To allow algorithms to work with both classes easily, the directed version of
# `neighbors()` is equivalent to `successors()` while `degree` reports
# the sum of `in_degree` and `out_degree` even though that may feel
# inconsistent at times.
DG = nx.DiGraph()
DG.add_weighted_edges_from([(1, 2, 0.5), (3, 1, 0.75)])
DG.out_degree(1, weight='weight')
DG.degree(1, weight='weight')
list(DG.successors(1))
list(DG.neighbors(1))
# Some algorithms work only for directed graphs and others are not well
# defined for directed graphs. Indeed the tendency to lump directed
# and undirected graphs together is dangerous. If you want to treat
# a directed graph as undirected for some measurement you should probably
# convert it using `Graph.to_undirected()` or with
H = nx.Graph(G) # create an undirected graph H from a directed graph G
# # Multigraphs
#
# NetworkX provides classes for graphs which allow multiple edges
# between any pair of nodes. The `MultiGraph` and
# `MultiDiGraph`
# classes allow you to add the same edge twice, possibly with different
# edge data. This can be powerful for some applications, but many
# algorithms are not well defined on such graphs.
# Where results are well defined,
# e.g., `MultiGraph.degree()` we provide the function. Otherwise you
# should convert to a standard graph in a way that makes the measurement
# well defined.
MG = nx.MultiGraph()
MG.add_weighted_edges_from([(1, 2, 0.5), (1, 2, 0.75), (2, 3, 0.5)])
dict(MG.degree(weight='weight'))
GG = nx.Graph()
for n, nbrs in MG.adjacency():
for nbr, edict in nbrs.items():
minvalue = min([d['weight'] for d in edict.values()])
GG.add_edge(n, nbr, weight = minvalue)
nx.shortest_path(GG, 1, 3)
# # Graph generators and graph operations
#
# In addition to constructing graphs node-by-node or edge-by-edge, they
# can also be generated by
#
# 1. Applying classic graph operations, such as:
#
# 1. Using a call to one of the classic small graphs, e.g.,
#
# 1. Using a (constructive) generator for a classic graph, e.g.,
#
# like so:
K_5 = nx.complete_graph(5)
K_3_5 = nx.complete_bipartite_graph(3, 5)
barbell = nx.barbell_graph(10, 10)
lollipop = nx.lollipop_graph(10, 20)
# 1. Using a stochastic graph generator, e.g.,
#
# like so:
er = nx.erdos_renyi_graph(100, 0.15)
ws = nx.watts_strogatz_graph(30, 3, 0.1)
ba = nx.barabasi_albert_graph(100, 5)
red = nx.random_lobster(100, 0.9, 0.9)
# 1. Reading a graph stored in a file using common graph formats,
# such as edge lists, adjacency lists, GML, GraphML, pickle, LEDA and others.
nx.write_gml(red, "path.to.file")
mygraph = nx.read_gml("path.to.file")
# For details on graph formats see Reading and writing graphs
# and for graph generator functions see Graph generators
#
# # Analyzing graphs
#
# The structure of `G` can be analyzed using various graph-theoretic
# functions such as:
G = nx.Graph()
G.add_edges_from([(1, 2), (1, 3)])
G.add_node("spam") # adds node "spam"
list(nx.connected_components(G))
sorted(d for n, d in G.degree())
nx.clustering(G)
# Some functions with large output iterate over (node, value) 2-tuples.
# These are easily stored in a [dict](https://docs.python.org/3/library/stdtypes.html#dict) structure if you desire.
sp = dict(nx.all_pairs_shortest_path(G))
sp[3]
# See Algorithms for details on graph algorithms
# supported.
#
# # Drawing graphs
#
# NetworkX is not primarily a graph drawing package but basic drawing with
# Matplotlib as well as an interface to use the open source Graphviz software
# package are included. These are part of the `networkx.drawing` module and will
# be imported if possible.
#
# First import Matplotlib’s plot interface (pylab works too)
import matplotlib.pyplot as plt
# To test if the import of `networkx.drawing` was successful draw `G` using one of
G = nx.petersen_graph()
subax1 = plt.subplot(121)
nx.draw(G, with_labels=True, font_weight='bold')
subax2 = plt.subplot(122)
nx.draw_shell(G, nlist=[range(5, 10), range(5)], with_labels=True, font_weight='bold')
# when drawing to an interactive display. Note that you may need to issue a
# Matplotlib
plt.show()
# command if you are not using matplotlib in interactive mode (see
# [this Matplotlib FAQ](https://matplotlib.org/stable/faq/installing_faq.html)).
options = {
'node_color': 'black',
'node_size': 100,
'width': 3,
}
subax1 = plt.subplot(221)
nx.draw_random(G, **options)
subax2 = plt.subplot(222)
nx.draw_circular(G, **options)
subax3 = plt.subplot(223)
nx.draw_spectral(G, **options)
subax4 = plt.subplot(224)
nx.draw_shell(G, nlist=[range(5,10), range(5)], **options)
# You can find additional options via `draw_networkx()` and
# layouts via `layout`.
# You can use multiple shells with `draw_shell()`.
G = nx.dodecahedral_graph()
shells = [[2, 3, 4, 5, 6], [8, 1, 0, 19, 18, 17, 16, 15, 14, 7], [9, 10, 11, 12, 13]]
nx.draw_shell(G, nlist=shells, **options)
# To save drawings to a file, use, for example
nx.draw(G)
plt.savefig("path.png")
# writes to the file `path.png` in the local directory. If Graphviz and
# PyGraphviz or pydot, are available on your system, you can also use
# `nx_agraph.graphviz_layout(G)` or `nx_pydot.graphviz_layout(G)` to get the
# node positions, or write the graph in dot format for further processing.
from networkx.drawing.nx_pydot import write_dot
pos = nx.nx_agraph.graphviz_layout(G)
nx.draw(G, pos=pos)
write_dot(G, 'file.dot')
# See Drawing for additional details.
|
<gh_stars>1000+
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of multiprocess concurrency with gRPC."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import contextlib
import datetime
import logging
import math
import multiprocessing
import socket
import sys
import time
import grpc
import prime_pb2
import prime_pb2_grpc
_LOGGER = logging.getLogger(__name__)
# Sentinel sleep interval for the serve-forever loop.
_ONE_DAY = datetime.timedelta(days=1)
# One server process per CPU core, each with a matching-size thread pool.
_PROCESS_COUNT = multiprocessing.cpu_count()
_THREAD_CONCURRENCY = _PROCESS_COUNT
def is_prime(n):
    """Return True if *n* is prime, by trial division up to floor(sqrt(n)).

    The original upper bound of ``ceil(sqrt(n))`` excluded sqrt(n) itself,
    so perfect squares such as 4, 9 and 25 were misreported as prime; the
    bound must include the integer square root as a candidate divisor.
    Numbers below 2 are not prime by definition.
    """
    if n < 2:
        return False
    # int(sqrt(n)) is exact for all n that fit in a double's 53-bit mantissa.
    for candidate in range(2, int(math.sqrt(n)) + 1):
        if n % candidate == 0:
            return False
    return True
class PrimeChecker(prime_pb2_grpc.PrimeCheckerServicer):
    """gRPC servicer answering primality queries."""

    def check(self, request, context):
        # request.candidate is the integer to test; reply with a Primality message.
        _LOGGER.info('Determining primality of %s', request.candidate)
        return prime_pb2.Primality(isPrime=is_prime(request.candidate))
def _wait_forever(server):
    """Sleep indefinitely; on Ctrl-C, stop the given gRPC server."""
    interval = _ONE_DAY.total_seconds()
    try:
        while True:
            time.sleep(interval)
    except KeyboardInterrupt:
        server.stop(None)
def _run_server(bind_address):
    """Start a server in a subprocess.

    Blocks forever (via _wait_forever) serving PrimeChecker on *bind_address*.
    """
    _LOGGER.info('Starting new server.')
    # grpc.so_reuseport lets every worker process bind the same address.
    options = (('grpc.so_reuseport', 1),)
    server = grpc.server(futures.ThreadPoolExecutor(
        max_workers=_THREAD_CONCURRENCY,),
        options=options)
    prime_pb2_grpc.add_PrimeCheckerServicer_to_server(PrimeChecker(), server)
    server.add_insecure_port(bind_address)
    server.start()
    _wait_forever(server)
@contextlib.contextmanager
def _reserve_port():
"""Find and reserve a port for all subprocesses to use."""
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 0:
raise RuntimeError("Failed to set SO_REUSEPORT.")
sock.bind(('', 0))
try:
yield sock.getsockname()[1]
finally:
sock.close()
def main():
    """Reserve one shared port, then fork _PROCESS_COUNT worker servers on it."""
    with _reserve_port() as port:
        bind_address = 'localhost:{}'.format(port)
        _LOGGER.info("Binding to '%s'", bind_address)
        sys.stdout.flush()
        workers = []
        for _ in range(_PROCESS_COUNT):
            # NOTE: It is imperative that the worker subprocesses be forked before
            # any gRPC servers start up. See
            # https://github.com/grpc/grpc/issues/16001 for more details.
            worker = multiprocessing.Process(target=_run_server,
                                             args=(bind_address,))
            worker.start()
            workers.append(worker)
        # Block until every worker exits (normally never, absent interrupts).
        for worker in workers:
            worker.join()
if __name__ == '__main__':
    # Route log records to stdout, tagging each with the emitting worker's PID.
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('[PID %(process)d] %(message)s')
    handler.setFormatter(formatter)
    _LOGGER.addHandler(handler)
    _LOGGER.setLevel(logging.INFO)
    main()
|
__author__ = '<NAME>'
import time
import boto3
from flow.core.abstract_cluster import AbstractCluster, ClusterError
from flow.core.s3_filesystem import S3Filesystem
# `http://boto3.readthedocs.io/en/latest/reference/services/emr.html#EMR.Client.describe_cluster`_
# Cluster lifecycle states as reported by describe_cluster / list_clusters.
CLUSTER_STATE_TERMINATED_WITH_ERRORS = 'TERMINATED_WITH_ERRORS'
CLUSTER_STATE_TERMINATED = 'TERMINATED'
CLUSTER_STATE_TERMINATING = 'TERMINATING'
CLUSTER_STATE_WAITING = 'WAITING'  # idle and ready to accept business steps
CLUSTER_STATE_RUNNING = 'RUNNING'
CLUSTER_STATE_BOOTSTRAPPING = 'BOOTSTRAPPING'
CLUSTER_STATE_STARTING = 'STARTING'
# `http://boto3.readthedocs.io/en/latest/reference/services/emr.html#EMR.Client.describe_step`_
# Step lifecycle states as reported by describe_step.
STEP_STATE_PENDING = 'PENDING'
STEP_STATE_CANCEL_PENDING = 'CANCEL_PENDING'
STEP_STATE_RUNNING = 'RUNNING'
STEP_STATE_COMPLETED = 'COMPLETED'
STEP_STATE_CANCELLED = 'CANCELLED'
STEP_STATE_FAILED = 'FAILED'
STEP_STATE_INTERRUPTED = 'INTERRUPTED'
class EmrCluster(AbstractCluster):
    """ implementation of the abstract API for the case of AWS EMR

    Wraps a boto3 EMR client: launches (or reuses) a job flow, submits
    Pig/Spark steps via command-runner.jar, polls them to completion and
    terminates the cluster on request.
    """

    def __init__(self, name, context, **kwargs):
        """Create the cluster wrapper and its boto3 EMR client.

        :param name: logical cluster name used in log and error messages
        :param context: flow context; AWS region and credentials are read
            from ``context.settings``
        """
        super(EmrCluster, self).__init__(name, context, **kwargs)
        self._filesystem = S3Filesystem(self.logger, context, **kwargs)
        self.jobflow_id = None  # it is both ClusterId and the JobflowId
        self.client_b3 = boto3.client(service_name='emr',
                                      region_name=context.settings['aws_cluster_region'],
                                      aws_access_key_id=context.settings['aws_access_key_id'],
                                      aws_secret_access_key=context.settings['aws_secret_access_key'])

    @property
    def filesystem(self):
        """S3-backed filesystem associated with this cluster."""
        return self._filesystem

    def _poll_step(self, step_id):
        """ method polls the state for given step_id and awaits its completion

        :raise ClusterError: if the step ends cancelled/interrupted/failed
        :return: the terminal step state
        """
        def _current_state():
            step = self.client_b3.describe_step(ClusterId=self.jobflow_id, StepId=step_id)
            return step['Step']['Status']['State']

        state = _current_state()
        while state in [STEP_STATE_PENDING, STEP_STATE_RUNNING]:
            # Job flow step is being spawned. Idle and recheck the status.
            time.sleep(20.0)
            state = _current_state()

        if state in [STEP_STATE_CANCELLED, STEP_STATE_INTERRUPTED, STEP_STATE_CANCEL_PENDING, STEP_STATE_FAILED]:
            raise ClusterError('EMR Step {0} failed'.format(step_id))
        elif state == STEP_STATE_COMPLETED:
            self.logger.info('EMR Step {0} has completed'.format(step_id))
        else:
            self.logger.warning('Unknown state {0} during EMR Step {1} execution'.format(state, step_id))
        return state

    def run_pig_step(self, uri_script, **kwargs):
        """
        method starts a Pig step on a cluster and monitors its execution
        :raise ClusterError: in case the cluster is not launched
            (note: the original docstring referenced a non-existent
            EmrLauncherError; the code raises ClusterError)
        :return: step state or None if the step failed
        """
        # `https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-commandrunner.html`_
        # `http://boto3.readthedocs.io/en/latest/reference/services/emr.html#EMR.Client.add_job_flow_steps`_
        if not self.jobflow_id:
            raise ClusterError('EMR Cluster {0} is not launched'.format(self.name))

        self.logger.info('Pig Script Step {')
        try:
            step = {
                'Name': 'SynergyPigStep',
                'ActionOnFailure': 'CONTINUE',
                'HadoopJarStep': {
                    'Jar': 'command-runner.jar',
                    'Args': ['pig-script', '--run-pig-script', '--args', '-f', uri_script]
                }
            }
            if kwargs:
                # forward kwargs both as HadoopJarStep properties and as
                # `-p name=value` pig parameters
                properties = [{'Key': '{}'.format(k), 'Value': '{}'.format(v)} for k, v in kwargs.items()]
                step['HadoopJarStep']['Properties'] = properties

                step_args = []
                for k, v in kwargs.items():
                    step_args.append('-p')
                    step_args.append('{0}={1}'.format(k, v))
                step['HadoopJarStep']['Args'].extend(step_args)

            step_response = self.client_b3.add_job_flow_steps(JobFlowId=self.jobflow_id, Steps=[step])
            step_ids = step_response['StepIds']
            assert len(step_ids) == 1
            return self._poll_step(step_ids[0])
        except ClusterError as e:
            self.logger.error('Pig Script Step Error: {0}'.format(e), exc_info=True)
            return None
        except Exception as e:
            self.logger.error('Pig Script Step Unexpected Exception: {0}'.format(e), exc_info=True)
            return None
        finally:
            self.logger.info('}')

    def run_spark_step(self, uri_script, language, **kwargs):
        """Start a Spark step on the cluster and monitor its execution.

        :raise ClusterError: in case the cluster is not launched
        :return: step state or None if the step failed
        """
        # NOTE(review): the `language` parameter is accepted but never used --
        # confirm whether language-specific submission was intended here.
        # `https://github.com/dev-86/aws-cli/blob/29756ea294aebc7c854b3d9a2b1a56df28637e11/tests/unit/customizations/emr/test_create_cluster_release_label.py`_
        # `https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-commandrunner.html`_
        # `http://boto3.readthedocs.io/en/latest/reference/services/emr.html#EMR.Client.add_job_flow_steps`_
        if not self.jobflow_id:
            raise ClusterError('EMR Cluster {0} is not launched'.format(self.name))

        self.logger.info('Spark Step {')
        try:
            step = {
                'Name': 'SynergyPysparkStep',
                'ActionOnFailure': 'CONTINUE',
                'HadoopJarStep': {
                    'Jar': 'command-runner.jar',
                    'Args': ['spark-submit', '--deploy-mode', 'cluster', uri_script]
                }
            }
            if kwargs:
                properties = [{'Key': '{}'.format(k), 'Value': '{}'.format(v)} for k, v in kwargs.items()]
                step['HadoopJarStep']['Properties'] = properties

            step_response = self.client_b3.add_job_flow_steps(JobFlowId=self.jobflow_id, Steps=[step])
            step_ids = step_response['StepIds']
            assert len(step_ids) == 1
            return self._poll_step(step_ids[0])
        except ClusterError as e:
            self.logger.error('Spark Step Error: {0}'.format(e), exc_info=True)
            return None
        except Exception as e:
            self.logger.error('Spark Step Unexpected Exception: {0}'.format(e), exc_info=True)
            return None
        finally:
            self.logger.info('}')

    def run_hadoop_step(self, uri_script, **kwargs):
        """Not implemented yet."""
        # `https://github.com/dev-86/aws-cli/blob/29756ea294aebc7c854b3d9a2b1a56df28637e11/tests/unit/customizations/emr/test_create_cluster_release_label.py`_
        pass

    def run_shell_command(self, uri_script, **kwargs):
        """Not implemented yet."""
        # `https://github.com/dev-86/aws-cli/blob/29756ea294aebc7c854b3d9a2b1a56df28637e11/tests/unit/customizations/emr/test_create_cluster_release_label.py`_
        pass

    def _launch(self):
        """
        method submits the run_job_flow request and returns the new JobFlowId;
        readiness is awaited separately in _wait_for_cluster
        :see: `http://boto3.readthedocs.io/en/latest/reference/services/emr.html#EMR.Client.run_job_flow`_
        :raise ClusterError: if the launch request fails
        """
        self.logger.info('Launching EMR Cluster {0} {{'.format(self.name))
        try:
            response = self.client_b3.run_job_flow(
                Name=self.context.settings['aws_cluster_name'],
                ReleaseLabel='emr-5.12.0',
                Instances={
                    'MasterInstanceType': 'm3.xlarge',
                    'SlaveInstanceType': 'm3.xlarge',
                    'InstanceCount': 3,
                    'KeepJobFlowAliveWhenNoSteps': True,
                    'TerminationProtected': True,
                    'Ec2KeyName': self.context.settings.get('aws_key_name', ''),
                },
                BootstrapActions=[
                    {
                        'Name': 'Maximize Spark Default Config',
                        'ScriptBootstrapAction': {
                            'Path': 's3://support.elasticmapreduce/spark/maximize-spark-default-config',
                        }
                    },
                ],
                Applications=[
                    {
                        'Name': 'Spark',
                    },
                    {
                        'Name': 'Pig',
                    },
                ],
                VisibleToAllUsers=True,
                JobFlowRole='EMR_EC2_DefaultRole',
                ServiceRole='EMR_DefaultRole'
            )
            self.logger.info('EMR Cluster Initialization Request Successful.')
            return response['JobFlowId']
        except Exception:
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed and re-branded as a
            # launch failure.
            self.logger.error('EMR Cluster failed to launch', exc_info=True)
            raise ClusterError('EMR Cluster {0} launch failed'.format(self.name))
        finally:
            self.logger.info('}')

    def _get_cluster(self):
        """Return the Id of a live cluster matching the configured name, or None."""
        try:
            clusters = self.client_b3.list_clusters(ClusterStates=['STARTING', 'BOOTSTRAPPING', 'RUNNING', 'WAITING'])
            for cluster in clusters['Clusters']:
                if cluster['Name'] != self.context.settings['aws_cluster_name']:
                    continue
                return cluster['Id']
            return None
        except Exception:
            # Fix: narrowed from a bare `except:`; any lookup failure is
            # treated the same as "no reusable cluster found".
            return None

    def _wait_for_cluster(self, cluster_id):
        """ method polls the state for the cluster and awaits until it is ready to start processing

        :return: the first state outside STARTING/BOOTSTRAPPING/RUNNING
        :raise ClusterError: if the cluster terminates during launch
        """
        def _current_state():
            cluster = self.client_b3.describe_cluster(ClusterId=cluster_id)
            return cluster['Cluster']['Status']['State']

        state = _current_state()
        while state in [CLUSTER_STATE_STARTING, CLUSTER_STATE_BOOTSTRAPPING, CLUSTER_STATE_RUNNING]:
            # Cluster is being spawned. Idle and recheck the status.
            time.sleep(20.0)
            state = _current_state()

        if state in [CLUSTER_STATE_TERMINATING, CLUSTER_STATE_TERMINATED, CLUSTER_STATE_TERMINATED_WITH_ERRORS]:
            raise ClusterError('EMR Cluster {0} launch failed'.format(self.name))
        elif state == CLUSTER_STATE_WAITING:
            # state WAITING marks readiness to process business steps
            cluster = self.client_b3.describe_cluster(ClusterId=cluster_id)
            master_dns = cluster['Cluster']['MasterPublicDnsName']
            self.logger.info('EMR Cluster Launched Successfully. Master DNS node is {0}'.format(master_dns))
        else:
            self.logger.warning('Unknown state {0} during EMR Cluster launch'.format(state))
        return state

    def launch(self):
        """Launch a new cluster or reuse a live one, blocking until ready."""
        self.logger.info('Launching EMR Cluster: {0} {{'.format(self.context.settings['aws_cluster_name']))
        # NOTE(review): _wait_for_cluster loops while the cluster is in one of
        # these transient states, so it can never *return* one of them and
        # this guard appears unreachable -- confirm intent.
        if self.jobflow_id \
                and self._wait_for_cluster(self.jobflow_id) in [CLUSTER_STATE_STARTING, CLUSTER_STATE_BOOTSTRAPPING,
                                                                CLUSTER_STATE_RUNNING]:
            raise ClusterError('EMR Cluster {0} has already been launched with id {1}. Use it or dispose it.'
                               .format(self.name, self.jobflow_id))

        cluster_id = self._get_cluster()
        if cluster_id:
            self.logger.info('Reusing existing EMR Cluster: {0} {{'.format(cluster_id))
        else:
            cluster_id = self._launch()
        self._wait_for_cluster(cluster_id)
        self.jobflow_id = cluster_id
        self.logger.info('}')

    def terminate(self):
        """ method terminates the cluster """
        if not self.jobflow_id:
            self.logger.info('No EMR Cluster to stop')
            return

        self.logger.info('Terminating EMR Cluster {')
        try:
            self.logger.info('Initiating termination procedure...')
            # Disable cluster termination protection
            self.client_b3.set_termination_protection(JobFlowIds=[self.jobflow_id], TerminationProtected=False)
            self.client_b3.terminate_job_flows(JobFlowIds=[self.jobflow_id])
            self.jobflow_id = None
            self.logger.info('Termination request successful')
        except Exception as e:
            self.logger.error('Unexpected Exception: {0}'.format(e), exc_info=True)
        finally:
            self.logger.info('}')
|
# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
import uno
import unohelper
import sys
import types
import os
from com.sun.star.uno import Exception,RuntimeException
from com.sun.star.loader import XImplementationLoader
from com.sun.star.lang import XServiceInfo
# URL protocol prefix for components addressed as plain python modules.
MODULE_PROTOCOL = "vnd.openoffice.pymodule:"
# Set to 1 to enable verbose loader tracing on stdout.
DEBUG = 0
g_supportedServices = "com.sun.star.loader.Python", # referenced by the native C++ loader !
g_implementationName = "org.openoffice.comp.pyuno.Loader" # referenced by the native C++ loader !
def splitUrl( url ):
    """Split *url* at its first colon into (protocol, remainder).

    Raises RuntimeException when the url carries no protocol prefix.
    """
    protocol, separator, remainder = url.partition( ":" )
    if not separator:
        raise RuntimeException( "PythonLoader: No protocol in url " + url, None )
    return protocol, remainder
g_loadedComponents = {}  # cache of already-exec'd "file" components, keyed by url
def checkForPythonPathBesideComponent( url ):
    """Extend sys.path with pythonpath payloads found next to a component.

    Looks for the optional "<url>/pythonpath.zip" and "<url>/pythonpath"
    entries and appends each existing one to sys.path exactly once.
    """
    # Fix: the two copy-pasted branches are folded into one loop; the
    # original only emitted the "checking" DEBUG trace for the .zip variant,
    # tracing is now uniform for both candidates.
    for suffix in ("/pythonpath.zip", "/pythonpath"):
        path = unohelper.fileUrlToSystemPath( url + suffix )
        if DEBUG == 1:
            print(b"checking for existence of " + encfile( path ))
        if 1 == os.access( encfile( path ), os.F_OK) and path not in sys.path:
            if DEBUG == 1:
                print(b"adding " + encfile( path ) + b" to sys.path")
            sys.path.append( path )
def encfile(uni):
    """Encode the unicode string *uni* to bytes using the filesystem encoding."""
    fs_encoding = sys.getfilesystemencoding()
    return uni.encode( fs_encoding )
class Loader( XImplementationLoader, XServiceInfo, unohelper.Base ):
    """UNO component loader for python components.

    Implements com.sun.star.loader.Python: resolves component URLs to
    python modules (caching "file" components in g_loadedComponents) and
    hands out component factories to UNO.
    """

    def __init__(self, ctx ):
        if DEBUG:
            print("pythonloader.Loader ctor")
        self.ctx = ctx

    def getModuleFromUrl( self, url ):
        """Load and return the python module addressed by *url*.

        Supported protocols: "vnd.sun.star.expand" (macro-expanded, then
        re-dispatched), "file" (source read and exec'd into a fresh module,
        cached by url) and "vnd.openoffice.pymodule" (imported by name).

        Raises RuntimeException for unknown protocols or load failures.
        """
        if DEBUG:
            print("pythonloader: interpreting url " + url)
        protocol, dependent = splitUrl( url )
        if "vnd.sun.star.expand" == protocol:
            exp = self.ctx.getValueByName( "/singletons/com.sun.star.util.theMacroExpander" )
            url = exp.expandMacros(dependent)
            protocol, dependent = splitUrl( url )
            if DEBUG:
                print("pythonloader: after expansion " + protocol + ":" + dependent)
        try:
            if "file" == protocol:
                # remove \..\ sequence, which may be useful e.g. in the build env
                url = unohelper.absolutize( url, url )
                # did we load the module already ?
                mod = g_loadedComponents.get( url )
                if not mod:
                    mod = types.ModuleType("uno_component")

                    # check for pythonpath.zip beside .py files
                    checkForPythonPathBesideComponent( url[0:url.rfind('/')] )

                    # read the file; the `with` block fixes the original's
                    # leaked (never-closed) file handle
                    filename = unohelper.fileUrlToSystemPath( url )
                    with open( filename, encoding='utf_8' ) as fileHandle:
                        src = fileHandle.read().replace("\r","")
                    if not src.endswith( "\n" ):
                        src = src + "\n"

                    # compile and execute the module
                    codeobject = compile( src, encfile(filename), "exec" )
                    mod.__file__ = filename
                    exec(codeobject, mod.__dict__)
                    g_loadedComponents[url] = mod
                return mod
            elif "vnd.openoffice.pymodule" == protocol:
                nSlash = dependent.rfind('/')
                if -1 != nSlash:
                    # make the directory part importable, then import by name
                    path = unohelper.fileUrlToSystemPath( dependent[0:nSlash] )
                    dependent = dependent[nSlash+1:len(dependent)]
                    if path not in sys.path:
                        sys.path.append( path )
                mod = __import__( dependent )
                # __import__ returns the top-level package; walk down the
                # dotted path to the submodule that was actually named
                path_component, dot, rest = dependent.partition('.')
                while dot == '.':
                    path_component, dot, rest = rest.partition('.')
                    mod = getattr(mod, path_component)
                return mod
            else:
                if DEBUG:
                    print("Unknown protocol '" + protocol + "'")
                raise RuntimeException( "PythonLoader: Unknown protocol " +
                                        protocol + " in url " +url, self )
        except Exception as e:
            if DEBUG:
                print ("Python import exception " + str(type(e)) +
                       " message " + str(e) + " args " + str(e.args))
            raise RuntimeException( "Couldn't load " + url + " for reason " + str(e), None )
        # note: the original ended with an unreachable `return None`; every
        # path above returns or raises, so it has been removed.

    def activate( self, implementationName, dummy, locationUrl, regKey ):
        """XImplementationLoader: return a factory for *implementationName*
        from the component module at *locationUrl*."""
        if DEBUG:
            print("pythonloader.Loader.activate")
        mod = self.getModuleFromUrl( locationUrl )
        implHelper = mod.__dict__.get( "g_ImplementationHelper" , None )
        if DEBUG:
            print ("Fetched ImplHelper as " + str(implHelper))
        if implHelper is None:
            return mod.getComponentFactory( implementationName, self.ctx.ServiceManager, regKey )
        else:
            return implHelper.getComponentFactory( implementationName,regKey,self.ctx.ServiceManager)

    def writeRegistryInfo( self, regKey, dummy, locationUrl ):
        """XImplementationLoader: register the component at *locationUrl*."""
        if DEBUG:
            print( "pythonloader.Loader.writeRegistryInfo" )
        mod = self.getModuleFromUrl( locationUrl )
        implHelper = mod.__dict__.get( "g_ImplementationHelper" , None )
        if implHelper is None:
            return mod.writeRegistryInfo( self.ctx.ServiceManager, regKey )
        else:
            return implHelper.writeRegistryInfo( regKey, self.ctx.ServiceManager )

    def getImplementationName( self ):
        """XServiceInfo: implementation name (also used by the C++ loader)."""
        return g_implementationName

    def supportsService( self, ServiceName ):
        """XServiceInfo: whether *ServiceName* is one of ours."""
        return ServiceName in self.getSupportedServiceNames()

    def getSupportedServiceNames( self ):
        """XServiceInfo: tuple of supported service names."""
        return g_supportedServices
# vim: set shiftwidth=4 softtabstop=4 expandtab:
|
<gh_stars>10-100
"""
Run CellO on a gene expression matrix
Authors: <NAME> <<EMAIL>>
"""
from optparse import OptionParser
import os
from os.path import join
import pandas as pd
from anndata import AnnData
import dill
import subprocess
import sys
from . import cello
from . import ontology_utils as ou
from . import load_expression_matrix
try:
import scanpy as sc
except ImportError:
sys.exit("The 'cello_predict' command line tool requires that scanpy package be installed. To install scanpy, run 'pip install scanpy'.")
# Units keywords -- accepted values for the --units CLI option
COUNTS_UNITS = 'COUNTS'        # raw read counts
CPM_UNITS = 'CPM'              # counts per million
LOG1_CPM_UNITS = 'LOG1_CPM'    # log(CPM+1)
TPM_UNITS = 'TPM'              # transcripts per million
LOG1_TPM_UNITS = 'LOG1_TPM'    # log(TPM+1)
# Assay keywords -- accepted values for the --assay CLI option
FULL_LENGTH_ASSAY = 'FULL_LENGTH'
THREE_PRIMED_ASSAY = '3_PRIME'
def main():
    """CLI entry point: parse options, load the expression matrix, load or
    train a CellO model, classify the cells and write the result tables
    (<prefix>.probability.tsv, <prefix>.binary.tsv, <prefix>.most_specific.tsv).
    """
    usage = "%prog [options] input_file"
    parser = OptionParser(usage=usage)
    parser.add_option("-a", "--algo", help="Hierarchical classification algorithm to apply (default='IR'). Must be one of: 'IR' - Isotonic regression, 'CLR' - cascaded logistic regression")
    parser.add_option("-d", "--data_type", help="Data type (required). Must be one of: 'TSV', 'CSV', '10x', or 'HDF5'. Note: if 'HDF5' is used, then arguments must be provided to the h5_cell_key, h5_gene_key, and h5_expression_key parameters.")
    parser.add_option("-c", "--h5_cell_key", help="The key of the dataset within the input HDF5 file specifying which dataset stores the cell ID's. This argument is only applicable if '-d HDF5' is used")
    parser.add_option("-g", "--h5_gene_key", help="The key of the dataset within the input HDF5 file specifying which dataset stores the gene names/ID's. This argument is only applicable if '-d HDF5' is used")
    parser.add_option("-e", "--h5_expression_key", help="The key of the dataset within the input HDF5 file specifying which dataset stores the expression matrix. This argument is only applicable if '-d HDF5' is used")
    parser.add_option("-r", "--rows_cells", action="store_true", help="Use this flag if expression matrix is organized as CELLS x GENES rather than GENES x CELLS. Not applicable when '-d 10x' is used.")
    parser.add_option("-u", "--units", help="Units of expression. Must be one of: 'COUNTS', 'CPM', 'LOG1_CPM', 'TPM', 'LOG1_TPM'")
    parser.add_option("-s", "--assay", help="Sequencing assay. Must be one of: '3_PRIME', 'FULL_LENGTH'")
    parser.add_option("-t", "--train_model", action="store_true", help="If the genes in the input matrix don't match what is expected by the classifier, then train a classifier on the input genes. The model will be saved to <output_prefix>.model.dill")
    parser.add_option("-f", "--resource_location", help="Path to CellO resources directory, named 'resources', which stores gene mappings, pre-trained models, and training sets. If not supplied, CellO will look for 'resources' in the current directory. If resources do not exist at provided location, they will be downloaded automatically.")
    parser.add_option("-m", "--model", help="Path to pretrained model file.")
    parser.add_option("-l", "--remove_anatomical", help="A comma-separated list of terms ID's from the Uberon Ontology specifying which tissues to use to filter results. All cell types known to be resident to the input tissues will be filtered from the results.")
    parser.add_option("-p", "--pre_clustering", help="A TSV file with pre-clustered cells. The first column stores the cell names/ID's (i.e. the column names of the input expression matrix) and the second column stores integers referring to each cluster. The TSV file should not have column names.")
    parser.add_option("-b", "--ontology_term_ids", action="store_true", help="Use the less readable, but more rigorous Cell Ontology term id's in output")
    parser.add_option("-o", "--output_prefix", help="Prefix for all output files. This prefix may contain a path.")
    (options, args) = parser.parse_args()

    # Fix: a missing positional argument used to crash with IndexError.
    if len(args) != 1:
        parser.error("Expected a single input_file argument.")
    data_loc = args[0]
    out_pref = options.output_prefix

    if options.resource_location:
        rsrc_loc = options.resource_location
    else:
        rsrc_loc = os.getcwd()

    # Input validation
    if options.model is not None and options.train_model is not None:
        print("Warning! Option 'train_model' was used along with the")
        print("option 'model'. These are conflicting arguments. ")
        print("CellO will use the model file provided to 'model' ")
        print("instead of training a new one.")
        options.train_model = False

    if options.data_type is not None and options.data_type == 'HDF5':
        # Fix: was try/assert with a bare `except:`; asserts vanish under
        # `python -O`, so use explicit checks.
        if options.h5_cell_key is None \
                or options.h5_gene_key is None \
                or options.h5_expression_key is None:
            print()
            print("Error. The specified input data is HDF5. The dataset keys within the HDF5 must be provided via the '-c', '-g', and '-e' arguments. Please run 'python cello_predict.py -h' for more details.")
            exit()

    # Parse options
    if options.data_type:
        data_type = options.data_type
    else:
        print("Warning! A data format was not specified with the '-d' option. Assuming that input is a tab-separated-value (TSV) file.")
        data_type = 'TSV'

    if options.algo:
        algo = options.algo
    else:
        algo = 'IR'

    # Parse the pre-clustered cells (cell id -> integer cluster label)
    if options.pre_clustering:
        pre_clustering_f = options.pre_clustering
        cell_to_cluster = {}
        with open(pre_clustering_f, 'r') as f:
            for l in f:
                toks = l.split('\t')
                cell = toks[0].strip()
                clust = int(toks[1].strip())
                cell_to_cluster[cell] = clust
    else:
        cell_to_cluster = None

    # Fix: was try/assert with a bare `except:`.
    if not options.units:
        print("Error. Please specify units using the '-u' ('--units') option.")
        print("For more details, run with '-h' ('--help') option.")
        return
    units = options.units
    assay = options.assay

    # One last argument to parse that relies on the Cell Ontology itself
    remove_anatomical_subterms = None
    if options.remove_anatomical:
        remove_anatomical_subterms = options.remove_anatomical.split(',')
        for term in remove_anatomical_subterms:
            if term not in ou.cell_ontology().id_to_term:
                print()
                print('Error. For argument --remove_anatomical (-l), the term "{}" was not found in the Uberon Ontology.'.format(term))
                exit()

    # Create log directory. Fix: the original shelled out via
    # `subprocess.run('mkdir {}', shell=True)`, which broke on paths with
    # spaces and was shell-injectable through --output_prefix.
    log_dir = '{}.log'.format(out_pref)
    os.makedirs(log_dir, exist_ok=True)

    # Load data
    print('Loading data from {}...'.format(data_loc))
    ad = load_expression_matrix.load_data(
        data_loc,
        data_type,
        hdf5_expr_key=options.h5_expression_key,
        hdf5_cells_key=options.h5_cell_key,
        hdf5_genes_key=options.h5_gene_key
    )
    print("Loaded data matrix with {} cells and {} genes.".format(
        ad.X.shape[0],
        ad.X.shape[1]
    ))

    # Load or train model
    if options.model:
        model_f = options.model
        print('Loading model from {}...'.format(model_f))
        with open(model_f, 'rb') as f:
            model = dill.load(f)
    else:
        # Load or train a model
        model = cello._retrieve_pretrained_model(ad, algo, rsrc_loc)
        if model is None:
            if options.train_model:
                model = cello.train_model(ad, rsrc_loc, algo=algo, log_dir=log_dir)
                out_model_f = '{}.model.dill'.format(out_pref)
                print('Writing trained model to {}'.format(out_model_f))
                with open(out_model_f, 'wb') as f:
                    dill.dump(model, f)
        if model is None:
            print()
            print("Error. The genes present in data matrix do not match those expected by any of the pre-trained classifiers.")
            print("Please train a classifier on this input gene set by either using the cello_train_model.py program or by running cello_classify with the '-t' flag.")
            exit()

    results_df, finalized_binary_results_df, ms_results_df = run_cello(
        ad,
        units,
        model,
        assay=assay,
        algo=algo,
        cluster=True,
        cell_to_clust=cell_to_cluster,
        log_dir=log_dir,
        res=1.0,
        remove_anatomical_subterms=remove_anatomical_subterms,
        rsrc_loc=rsrc_loc
    )

    # Convert to human-readable ontology terms
    if not options.ontology_term_ids:
        results_df.columns = [
            ou.cell_ontology().id_to_term[x].name
            for x in results_df.columns
        ]
        finalized_binary_results_df.columns = [
            ou.cell_ontology().id_to_term[x].name
            for x in finalized_binary_results_df.columns
        ]
        ms_results_df['most_specific_cell_type'] = [
            ou.cell_ontology().id_to_term[x].name
            for x in ms_results_df['most_specific_cell_type']
        ]

    # Write output
    out_f = '{}.probability.tsv'.format(out_pref)
    print("Writing classifier probabilities to {}...".format(out_f))
    results_df.to_csv(out_f, sep='\t')
    out_f = '{}.binary.tsv'.format(out_pref)
    print("Writing binarized classifications to {}...".format(out_f))
    finalized_binary_results_df.to_csv(out_f, sep='\t')
    out_f = '{}.most_specific.tsv'.format(out_pref)
    print("Writing most-specific cell types to {}...".format(out_f))
    ms_results_df.to_csv(out_f, sep='\t')
def run_cello(
        ad,
        units,
        mod,
        assay='3_PRIME',
        algo='IR',
        cluster=True,
        cell_to_clust=None,
        log_dir=None,
        res=1.0,
        remove_anatomical_subterms=None,
        rsrc_loc=None
    ):
    """Normalize *ad* into log(TPM+1) space, group cells into clusters and
    run CellO's classifier.

    Returns the tuple (probability df, binarized df, most-specific df)
    produced by cello.predict.
    """
    # Bring expression values into log(TPM+1). Full-length assays must
    # already be in TPM-based units.
    if assay == FULL_LENGTH_ASSAY and units in {COUNTS_UNITS, CPM_UNITS, LOG1_CPM_UNITS}:
        print('Error. The input units were specified as {}'.format(units),
            'but the assay was specified as {}.'.format(assay),
            'To run classification, please input expression matrix in ',
            'units of either LOG1_TPM or log(TPM+1) for this assay type.')
        exit()
    if units == COUNTS_UNITS:
        print('Normalizing counts...')
        sc.pp.normalize_total(ad, target_sum=1e6)
        sc.pp.log1p(ad)
        print('done.')
    elif units in {CPM_UNITS, TPM_UNITS}:
        sc.pp.log1p(ad)

    # Decide how cells are grouped before classification.
    clust_key = None
    if cluster:
        if cell_to_clust is not None:
            # Clusters are already provided
            ad.obs['cluster'] = [
                cell_to_clust[cell]
                for cell in ad.obs.index
            ]
            clust_key = 'cluster'
        elif ad.X.shape[0] > 50:
            # Run Leiden clustering on a PCA neighborhood graph
            sc.pp.pca(ad)
            sc.pp.neighbors(ad)
            sc.tl.leiden(ad, resolution=res)
            clust_key = 'leiden'

    predictions = cello.predict(
        ad,
        mod,
        algo=algo,
        clust_key=clust_key,
        log_dir=log_dir,
        remove_anatomical_subterms=remove_anatomical_subterms,
        rsrc_loc=rsrc_loc
    )
    return predictions
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Tests for node activation propagation and gate arithmetic
"""
from micropsi_core import runtime as micropsi
def prepare(test_nodenet):
    """Build a nodenet fixture with a self-sustaining source register.

    The source's gen gate is linked onto itself with activation 1, so after
    the initial step it provides a constant driver signal for the tests.
    Returns (nodenet, netapi, source).
    """
    nodenet = micropsi.get_nodenet(test_nodenet)
    netapi = nodenet.netapi
    source = netapi.create_node("Register", None, "Source")
    netapi.link(source, "gen", source, "gen")
    source.activation = 1
    nodenet.step()
    return nodenet, netapi, source
def test_node_logic_loop(test_nodenet):
    """Self-linked gen gate sustains activation; a 0.5 weight halves it."""
    # test gen looping behaviour
    net, netapi, source = prepare(test_nodenet)
    net.step()
    assert source.get_gate("gen").activation == 1
    net.step()
    assert source.get_gate("gen").activation == 1
    netapi.link(source, "gen", source, "gen", 0.5)
    net.step()
    assert source.get_gate("gen").activation == 0.5
def test_node_logic_die(test_nodenet):
    """A gen gate with no incoming link loses its activation after a step."""
    # without the link, activation ought to drop to 0
    net, netapi, source = prepare(test_nodenet)
    netapi.unlink(source, "gen", source, "gen")
    net.step()
    assert source.get_gate("gen").activation == 0
def test_node_logic_sum(test_nodenet):
    """Two 0.5-weighted activations converging on one register sum to 1."""
    # propagate positive activation, expect sum
    net, netapi, source = prepare(test_nodenet)
    reg_a = netapi.create_node("Register", None, "RegA")
    reg_b = netapi.create_node("Register", None, "RegB")
    reg_result = netapi.create_node("Register", None, "RegResult")
    netapi.link(source, "gen", reg_a, "gen", 0.5)
    netapi.link(source, "gen", reg_b, "gen", 0.5)
    netapi.link(reg_a, "gen", reg_result, "gen")
    netapi.link(reg_b, "gen", reg_result, "gen")
    net.step()
    net.step()
    assert reg_result.get_gate("gen").activation == 1
def test_node_logic_cancel(test_nodenet):
    """Equal positive and negative activations converging on one register cancel to 0."""
    # propagate positive and negative activation, expect cancellation
    net, netapi, source = prepare(test_nodenet)
    reg_a = netapi.create_node("Register", None, "RegA")
    reg_b = netapi.create_node("Register", None, "RegB")
    # lower the threshold so reg_b can carry the negative activation
    reg_b.set_gate_parameter("gen", "threshold", -100)
    reg_result = netapi.create_node("Register", None, "RegResult")
    netapi.link(source, "gen", reg_a, "gen", 1)
    netapi.link(source, "gen", reg_b, "gen", -1)
    netapi.link(reg_a, "gen", reg_result, "gen")
    netapi.link(reg_b, "gen", reg_result, "gen")
    net.step()
    net.step()
    assert reg_result.get_gate("gen").activation == 0
def test_node_logic_store_and_forward(test_nodenet):
    """Result node fires only once both dependencies deliver activation."""
    # collect activation in one node, go forward only if both dependencies are met
    net, netapi, source = prepare(test_nodenet)
    reg_a = netapi.create_node("Register", None, "RegA")
    reg_b = netapi.create_node("Register", None, "RegB")
    reg_result = netapi.create_node("Register", None, "RegResult")
    # Fix: the original first set the threshold to -100 (copy-paste from
    # test_node_logic_cancel) and immediately overwrote it with 1; only the
    # final value ever took effect, so the dead call is removed.
    reg_b.set_gate_parameter("gen", "threshold", 1)
    netapi.link(source, "gen", reg_a, "gen")
    netapi.link(reg_a, "gen", reg_result, "gen")
    netapi.link(reg_b, "gen", reg_result, "gen")
    net.step()
    assert reg_result.get_gate("gen").activation == 0
    netapi.link(source, "gen", reg_b, "gen")
    net.step()
    assert reg_result.get_gate("gen").activation == 1
def test_node_logic_activators(test_nodenet):
    """A pipe's sub gate linked with weight 0 stays at activation 0 even
    with a 'sub' activator present."""
    net, netapi, source = prepare(test_nodenet)
    activator = netapi.create_node('Activator', None)
    activator.set_parameter('type', 'sub')
    activator.activation = 1
    testpipe = netapi.create_node("Pipe", None)
    netapi.link(source, "gen", testpipe, "sub", 0)
    net.step()
    net.step()
    assert testpipe.get_gate("sub").activation == 0
def test_node_logic_sensor_modulator(test_nodenet, default_world):
    """A sensor on the emo_activation modulator mirrors the modulator value."""
    net, netapi, source = prepare(test_nodenet)
    register = netapi.create_node("Register", None)
    netapi.link_sensor(register, "emo_activation", "gen")
    micropsi.step_nodenet(test_nodenet)
    micropsi.step_nodenet(test_nodenet)
    micropsi.step_nodenet(test_nodenet)
    assert round(netapi.get_modulator("emo_activation"), 3) == round(register.activation, 3)
def test_node_logic_sensor_datasource(test_nodenet, default_world):
    """A sensor on the 'static_on' datasource delivers the 0.35 link weight."""
    net, netapi, source = prepare(test_nodenet)
    micropsi.set_nodenet_properties(test_nodenet, worldadapter="Default", world_uid=default_world)
    register = netapi.create_node("Register", None)
    netapi.link_sensor(register, "static_on", "gen", weight=0.35)
    micropsi.step_nodenet(test_nodenet)
    micropsi.step_nodenet(test_nodenet)
    assert round(register.get_gate("gen").activation, 3) == 0.35
def test_node_logic_actor_modulator(test_nodenet, default_world):
    """An actor writes the weighted source activation (0.3) into the modulator."""
    net, netapi, source = prepare(test_nodenet)
    netapi.link_actor(source, "base_porret_decay_factor", weight=0.3, gate="gen")
    micropsi.step_nodenet(test_nodenet)
    assert round(netapi.get_modulator("base_porret_decay_factor"), 3) == 0.3
def test_node_logic_actor_datatarget(test_nodenet, default_world):
    """Weighted activation sent to the 'echo' datatarget comes back via its actor node."""
    net, netapi, source = prepare(test_nodenet)
    micropsi.set_nodenet_properties(test_nodenet, worldadapter="Default", world_uid=default_world)
    netapi.link_actor(source, "echo", weight=0.5, gate="gen")
    register = netapi.create_node("Register", None)
    actor = netapi.get_nodes(node_name_prefix="echo")[0]
    netapi.link(actor, "gen", register, "gen")
    micropsi.step_nodenet(test_nodenet)
    micropsi.step_nodenet(test_nodenet)
    micropsi.step_nodenet(test_nodenet)
    assert round(register.get_gate("gen").activation, 1) == 0.5
def test_node_logic_sensor_nomodulators(engine, default_world):
    """Sensor-to-datasource linking works in a nodenet created without modulators."""
    result, nnuid = micropsi.new_nodenet("adf", engine, "Default", world_uid=default_world, use_modulators=False)
    net, netapi, source = prepare(nnuid)
    register = netapi.create_node("Register", None)
    netapi.link_sensor(register, "static_on", "gen", weight=0.4)
    micropsi.step_nodenet(nnuid)
    micropsi.step_nodenet(nnuid)
    assert round(register.get_gate("gen").activation, 1) == 0.4
def test_node_logic_actor_nomodulators(engine, default_world):
    """Actor-to-datatarget round trip works in a nodenet created without modulators."""
    result, nnuid = micropsi.new_nodenet("adf", engine, "Default", world_uid=default_world, use_modulators=False)
    net, netapi, source = prepare(nnuid)
    netapi.link_actor(source, "echo", weight=0.7, gate="gen")
    register = netapi.create_node("Register", None)
    actor = netapi.get_nodes(node_name_prefix="echo")[0]
    netapi.link(actor, "gen", register, "gen")
    micropsi.step_nodenet(nnuid)
    micropsi.step_nodenet(nnuid)
    micropsi.step_nodenet(nnuid)
    assert round(register.get_gate("gen").activation, 1) == 0.7
|
<filename>scripts/M030.py
"""
Created by: <NAME>
Sep 21
IEEE Fraud Detection Model
- FE009
- Adding raddar user level features
- Add first, second, third digit of addr1 and addr2 features
- Drop only DOY features with low importance
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import sys
import matplotlib.pylab as plt
from sklearn.model_selection import KFold
from datetime import datetime
import time
import logging
from sklearn.metrics import roc_auc_score
from catboost import CatBoostClassifier, Pool
from timeit import default_timer as timer
import lightgbm as lgb
start = timer()
##################
# PARAMETERS
###################
run_id = "{:%m%d_%H%M}".format(datetime.now())  # timestamp; used as the tracking-row key
KERNEL_RUN = False
MODEL_NUMBER = os.path.basename(__file__).split('.')[0]
if KERNEL_RUN:
    # NOTE(review): these paths reference the CHAMPS scalar-coupling
    # competition, not IEEE fraud -- looks copy-pasted; confirm before
    # running with KERNEL_RUN = True.
    INPUT_DIR = '../input/champs-scalar-coupling/'
    FE_DIR = '../input/molecule-fe024/'
    FOLDS_DIR = '../input/champs-3fold-ids/'
TARGET = "isFraud"
N_ESTIMATORS = 100000  # upper bound; early stopping decides the real count
N_META_ESTIMATORS = 500000
LEARNING_RATE = 0.005
VERBOSE = 100
EARLY_STOPPING_ROUNDS = 100
RANDOM_STATE = 529
N_THREADS = 58
DEPTH = 14
N_FOLDS = 5
SHUFFLE = False
FE_SET = 'FE009' # Feature Engineering Version
MODEL_TYPE = "lightgbm"
#####################
## SETUP LOGGER
#####################
def get_logger():
    """
    Build the "main" logger that mirrors everything to a per-run log file.

    credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480

    Returns:
        logging.Logger: DEBUG-level logger writing to ../logs/<model>_<run>.log
    """
    # Pin the timezone so %(asctime)s in the log file is US/Eastern.
    os.environ["TZ"] = "US/Eastern"
    time.tzset()
    FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s"
    logging.basicConfig(format=FORMAT)
    logger = logging.getLogger("main")
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(FORMAT)
    # Console handler kept around but deliberately disabled.
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    # logger.addHandler(handler)
    fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log')
    # BUG FIX: the formatter was only attached to the unused StreamHandler,
    # so the file log had no timestamps/levels. Attach it to the handler
    # that is actually registered.
    fhandler.setFormatter(formatter)
    logger.addHandler(fhandler)
    return logger
logger = get_logger()
logger.info(f'Running for Model Number {MODEL_NUMBER}')
##################
# PARAMETERS
###################
# Each library expects the AUC metric name in its own spelling.
if MODEL_TYPE == 'xgboost':
    EVAL_METRIC = "AUC"
elif MODEL_TYPE == 'lightgbm':
    EVAL_METRIC = 'auc'
elif MODEL_TYPE == 'catboost':
    EVAL_METRIC = "AUC"
##################
# TRACKING FUNCTION
###################
def update_tracking(run_id,
                    field,
                    value, csv_file="../tracking/tracking.csv", integer=False, digits=None, drop_incomplete_rows=False):
    """
    Function to update the tracking CSV with information about the model.

    Args:
        run_id: row index identifying this run.
        field: column name to write.
        value: value to store (optionally rounded, see below).
        csv_file: path of the tracking CSV; created on first write.
        integer: round value to the nearest int before storing.
        digits: round value to this many decimal places (ignored if integer).
        drop_incomplete_rows: drop rows of runs that never recorded an AUC.
    """
    try:
        df = pd.read_csv(csv_file, index_col=[0])
    except FileNotFoundError:
        # First run: start an empty tracking table.
        df = pd.DataFrame()
    if integer:
        value = round(value)
    elif digits is not None:
        value = round(value, digits)
    # BUG FIX: guard for a missing 'AUC' column -- on a fresh tracking file
    # the first call with drop_incomplete_rows=True raised KeyError.
    if drop_incomplete_rows and 'AUC' in df.columns:
        df = df.loc[~df['AUC'].isna()]
    df.loc[run_id, field] = value  # Model number is index
    df.to_csv(csv_file)
# Record this run's configuration in the tracking CSV.
update_tracking(run_id, "model_number", MODEL_NUMBER, drop_incomplete_rows=True)
update_tracking(run_id, "n_estimators", N_ESTIMATORS)
update_tracking(run_id, "early_stopping_rounds", EARLY_STOPPING_ROUNDS)
update_tracking(run_id, "random_state", RANDOM_STATE)
update_tracking(run_id, "n_threads", N_THREADS)
update_tracking(run_id, "learning_rate", LEARNING_RATE)
update_tracking(run_id, "n_fold", N_FOLDS)
update_tracking(run_id, "model_type", MODEL_TYPE)
update_tracking(run_id, "eval_metric", EVAL_METRIC)
update_tracking(run_id, "depth", DEPTH)
update_tracking(run_id, "shuffle", SHUFFLE)
update_tracking(run_id, "fe", FE_SET)
#####################
# PREPARE MODEL DATA
#####################
# NOTE(review): with SHUFFLE=False the random_state has no effect on KFold
# (newer sklearn versions even raise on this combination) -- confirm.
folds = KFold(n_splits=N_FOLDS, random_state=RANDOM_STATE, shuffle=SHUFFLE)
logger.info('Loading Data...')
# Pre-built feature-engineered datasets (version FE_SET) in parquet form.
train_df = pd.read_parquet(f'../data/train_{FE_SET}.parquet')
test_df = pd.read_parquet(f'../data/test_{FE_SET}.parquet')
logger.info('Done loading Data...')
###########
# FEATURES
###########
# Identifier/time/uid helper columns plus V-columns excluded from modeling.
REMOVE_FEATURES = ['TransactionID', 'TransactionDT', 'isFraud', 'DT', 'DT_M', 'DT_W',
                   'DT_D', 'DT_hour', 'DT_day_week', 'DT_day_month', 'DT_M_total',
                   'DT_W_total', 'DT_D_total', 'uid', 'uid2', 'uid3', 'uid4', 'uid5',
                   'bank_type', 'year_day', 'V300','V309','V111','C3','V124','V106','V125','V315','V134','V102','V123','V316','V113',
                   'V136','V305','V110','V299','V289','V286','V318','V103','V304','V116','V29','V284','V293',
                   'V137','V295','V301','V104','V311','V115','V109','V119','V321','V114','V133','V122','V319',
                   'V105','V112','V118','V117','V121','V108','V135','V320','V303','V297','V120','Day_of_Year_Year']
# DOY ("day of year") ratio features that scored low importance in earlier
# runs (see module docstring: "Drop only DOY features with low importance").
# NOTE(review): this list contains many duplicate entries; harmless because
# it is only used for membership tests below, but could be deduplicated.
LOW_IMPORTANCE_FEATS = [
    'dist2_div_Mean_D11_DOY', 'dist1_div_Mean_D8_DOY',
    'dist1_div_Mean_D9_DOY_productCD', 'dist1_div_Mean_D8_DOY_productCD',
    'dist2_div_Mean_D11_DOY_productCD', 'dist1_div_Mean_D7_DOY',
    'dist1_div_Mean_D6_DOY_productCD', 'dist1_div_Mean_D6_DOY',
    'dist1_div_Mean_D7_DOY_productCD', 'dist1_div_Mean_D9_DOY',
    'dist1_div_Mean_D14_DOY_productCD', 'dist1_div_Mean_D14_DOY',
    'dist1_div_Mean_D13_DOY_productCD', 'dist1_div_Mean_D13_DOY',
    'dist1_div_Mean_D12_DOY_productCD', 'dist1_div_Mean_D12_DOY',
    'addr2_div_Mean_D7_DOY_productCD', 'addr2_div_Mean_D8_DOY_productCD',
    'addr2_div_Mean_D12_DOY_productCD', 'addr2_div_Mean_D13_DOY_productCD',
    'addr2_div_Mean_D5_DOY_productCD', 'addr1_div_Mean_D12_DOY_productCD',
    'addr2_div_Mean_D8_DOY', 'addr2_div_Mean_D2_DOY_productCD',
    'addr2_div_Mean_D6_DOY_productCD', 'addr2_div_Mean_D12_DOY',
    'addr2_div_Mean_D7_DOY', 'addr1_div_Mean_D7_DOY',
    'addr1_div_Mean_D7_DOY_productCD', 'addr2_div_Mean_D3_DOY_productCD',
    'addr2_div_Mean_D14_DOY_productCD', 'addr1_div_Mean_D12_DOY',
    'addr2_div_Mean_D13_DOY', 'addr1_div_Mean_D13_DOY',
    'dist2_div_Mean_D12_DOY_productCD', 'dist2_div_Mean_D8_DOY_productCD',
    'dist2_div_Mean_D2_DOY_productCD', 'addr1_div_Mean_D13_DOY_productCD',
    'addr2_div_Mean_D9_DOY_productCD', 'dist2_div_Mean_D7_DOY_productCD',
    'addr1_div_Mean_D6_DOY',
    'addr1_div_Mean_D8_DOY_productCD', 'addr1_div_Mean_D6_DOY_productCD',
    'dist2_div_Mean_D5_DOY_productCD',
    'dist2_div_Mean_D3_DOY_productCD', 'dist1_div_Mean_D5_DOY_productCD',
    'dist2_div_Mean_D6_DOY_productCD',
    'addr1_div_Mean_D8_DOY', 'dist1_div_Mean_D2_DOY_productCD',
    'addr2_div_Mean_D11_DOY_productCD', 'addr2_div_Mean_D14_DOY',
    'dist2_div_Mean_D2_DOY', 'dist2_div_Mean_D8_DOY',
    'card5_div_Mean_D12_DOY_productCD',
    'dist2_div_Mean_D4_DOY_productCD', 'dist1_div_Mean_D3_DOY_productCD',
    'card6_div_Mean_D12_DOY_productCD', 'dist2_div_Mean_D3_DOY',
    'card1_div_Mean_D12_DOY_productCD',
    'card1_div_Mean_D7_DOY_productCD', 'card3_div_Mean_D2_DOY_productCD',
    'card3_div_Mean_D8_DOY_productCD','dist1_div_Mean_D1_DOY_productCD',
    'card4_div_Mean_D12_DOY_productCD', 'card4_div_Mean_D7_DOY_productCD',
    'card3_div_Mean_D12_DOY_productCD', 'dist2_div_Mean_D5_DOY','dist1_div_Mean_D4_DOY_productCD',
    'addr2_div_Mean_D2_DOY', 'card6_div_Mean_D7_DOY_productCD',
    'addr1_div_Mean_D14_DOY_productCD', 'card2_div_Mean_D12_DOY_productCD',
    'card2_div_Mean_D7_DOY_productCD', 'card5_div_Mean_D7_DOY_productCD',
    'dist2_div_Mean_D12_DOY',
    'dist1_div_Mean_D11_DOY_productCD',
    'dist1_div_Mean_D10_DOY_productCD',
    'card1_div_Mean_D7_DOY',
    'card3_div_Mean_D7_DOY_productCD',
    'card6_div_Mean_D8_DOY',
    'addr1_div_Mean_D9_DOY', 'card5_div_Mean_D7_DOY',
    'dist2_div_Mean_D4_DOY', 'card2_div_Mean_D7_DOY',
    'dist2_div_Mean_D10_DOY_productCD', 'addr2_div_Mean_D5_DOY',
    'TranAmt_div_Mean_D8_DOY_productCD',
    'dist2_div_Mean_D6_DOY',
    'card4_div_Mean_D7_DOY',
    'addr1_div_Mean_D9_DOY_productCD',
    'addr1_div_Mean_D11_DOY_productCD',
    'dist2_div_Mean_D9_DOY_productCD',
    'TranAmt_div_Mean_D7_DOY_productCD',
    'addr2_div_Mean_D3_DOY',
    'card1_div_Mean_D11_DOY_productCD',
    'card6_div_Mean_D7_DOY',
    'dist2_div_Mean_D11_DOY', 'dist1_div_Mean_D8_DOY', 'dist1_div_Mean_D9_DOY_productCD',
    'dist1_div_Mean_D8_DOY_productCD',
    'dist2_div_Mean_D11_DOY_productCD',
    'dist1_div_Mean_D7_DOY',
    'dist1_div_Mean_D6_DOY_productCD',
    'dist1_div_Mean_D6_DOY',
    'dist1_div_Mean_D7_DOY_productCD',
    'dist1_div_Mean_D9_DOY',
    'dist1_div_Mean_D14_DOY_productCD',
    'dist1_div_Mean_D14_DOY',
    'dist1_div_Mean_D13_DOY_productCD',
    'dist1_div_Mean_D13_DOY',
    'dist1_div_Mean_D12_DOY_productCD',
    'dist1_div_Mean_D12_DOY',
    'addr2_div_Mean_D7_DOY_productCD',
    'addr2_div_Mean_D8_DOY_productCD',
    'addr2_div_Mean_D12_DOY_productCD',
    'addr2_div_Mean_D13_DOY_productCD',
    'addr2_div_Mean_D5_DOY_productCD',
    'addr1_div_Mean_D12_DOY_productCD',
    'addr2_div_Mean_D8_DOY',
    'addr2_div_Mean_D2_DOY_productCD',
    'addr2_div_Mean_D6_DOY_productCD',
    'addr2_div_Mean_D12_DOY',
    'addr2_div_Mean_D7_DOY',
    'addr1_div_Mean_D7_DOY',
    'addr1_div_Mean_D7_DOY_productCD',
    'addr2_div_Mean_D3_DOY_productCD',
    'addr2_div_Mean_D14_DOY_productCD',
    'addr1_div_Mean_D12_DOY',
    'addr2_div_Mean_D13_DOY',
    'addr1_div_Mean_D13_DOY',
    'dist2_div_Mean_D12_DOY_productCD',
    'dist2_div_Mean_D8_DOY_productCD',
    'dist2_div_Mean_D2_DOY_productCD',
    'addr1_div_Mean_D13_DOY_productCD',
    'addr2_div_Mean_D9_DOY_productCD',
    'dist2_div_Mean_D7_DOY_productCD',
    'addr1_div_Mean_D6_DOY',
    'addr1_div_Mean_D8_DOY_productCD',
    'addr1_div_Mean_D6_DOY_productCD',
    'dist2_div_Mean_D5_DOY_productCD',
    'dist2_div_Mean_D3_DOY_productCD',
    'dist1_div_Mean_D5_DOY_productCD',
    'dist2_div_Mean_D6_DOY_productCD',
    'addr1_div_Mean_D8_DOY',
    'addr2_div_Mean_D6_DOY',
    'dist1_div_Mean_D2_DOY_productCD',
    'addr2_div_Mean_D11_DOY_productCD',
    'addr2_div_Mean_D14_DOY',
    'dist2_div_Mean_D2_DOY',
    'dist2_div_Mean_D8_DOY',
    'card5_div_Mean_D12_DOY_productCD',
    'dist2_div_Mean_D4_DOY_productCD',
    'dist1_div_Mean_D3_DOY_productCD',
    'card6_div_Mean_D12_DOY_productCD',
    'dist2_div_Mean_D3_DOY',
    'TranAmt_div_Mean_D11_DOY_productCD',
    'card5_div_Mean_D8_DOY',
    'card5_div_Mean_D8_DOY_productCD',
    'card3_div_Mean_D11_DOY_productCD',
    'card5_div_Mean_D11_DOY_productCD',
    'card4_div_Mean_D11_DOY_productCD',
    'card6_div_Mean_D11_DOY_productCD',
    'dist2_div_Mean_D14_DOY_productCD',
    'card2_div_Mean_D8_DOY_productCD',
    'card1_div_Mean_D8_DOY',
    'TranAmt_div_Mean_D8_DOY',
    'card2_div_Mean_D11_DOY_productCD',
    'card2_div_Mean_D8_DOY',
    'card4_div_Mean_D8_DOY',
    'card3_div_Mean_D6_DOY_productCD',
    'dist1_div_Mean_D2_DOY',
    'card6_div_Mean_D8_DOY_productCD',
    'card1_div_Mean_D6_DOY_productCD',
    'card1_div_Mean_D8_DOY_productCD',
    'dist2_div_Mean_D13_DOY_productCD',
    'dist1_div_Mean_D5_DOY',
    'dist2_div_Mean_D1_DOY_productCD',
    'card3_div_Mean_D7_DOY',
    'card3_div_Mean_D8_DOY',
    'addr1_div_Mean_D5_DOY_productCD',
    'card4_div_Mean_D9_DOY_productCD',
    'card1_div_Mean_D9_DOY',
    'card4_div_Mean_D6_DOY_productCD',
    'addr1_div_Mean_D2_DOY_productCD',
    'dist2_div_Mean_D1_DOY',
    'card2_div_Mean_D9_DOY_productCD',
    'TranAmt_div_Mean_D7_DOY',
    'card3_div_Mean_D3_DOY_productCD',
    'card1_div_Mean_D13_DOY_productCD',
    'TranAmt_div_Mean_D6_DOY_productCD',
    'card6_div_Mean_D6_DOY_productCD',
    'card6_div_Mean_D2_DOY_productCD',
    'dist1_div_Mean_D3_DOY',
    'card5_div_Mean_D6_DOY_productCD',
    'dist2_div_Mean_D9_DOY',
    'dist2_div_Mean_D14_DOY',
    'card2_div_Mean_D9_DOY',
    'addr2_div_Mean_D11_DOY',
    'card4_div_Mean_D5_DOY_productCD',
    'dist2_div_Mean_D10_DOY',
    'card1_div_Mean_D6_DOY',
    'card1_div_Mean_D9_DOY_productCD',
    'addr1_div_Mean_D5_DOY',
    'card4_div_Mean_D9_DOY',
    'addr1_div_Mean_D3_DOY_productCD',
    'dist2_div_Mean_D13_DOY',
    'card3_div_Mean_D9_DOY_productCD',
    'card5_div_Mean_D9_DOY_productCD',
    'addr1_div_Mean_D2_DOY',
    'card3_div_Mean_D5_DOY_productCD',
    'TranAmt_div_Mean_D2_DOY_productCD',
    'card5_div_Mean_D13_DOY_productCD',
    'card1_div_Mean_D12_DOY',
    'card2_div_Mean_D6_DOY_productCD',
    'card5_div_Mean_D9_DOY',
    'card5_div_Mean_D12_DOY',
    'card5_div_Mean_D5_DOY_productCD',
    'card5_div_Mean_D6_DOY',
    'card6_div_Mean_D9_DOY_productCD',
    'TranAmt_div_Mean_D3_DOY_productCD',
    'addr1_div_Mean_D3_DOY',
    'card1_div_Mean_D14_DOY_productCD',
    'TranAmt_div_Mean_D2_DOY',
    'card6_div_Mean_D5_DOY_productCD',
    'addr2_div_Mean_D4_DOY_productCD',
    'TranAmt_div_Mean_D5_DOY_productCD',
    'card2_div_Mean_D13_DOY_productCD',
    'card1_div_Mean_D5_DOY_productCD',
    'card4_div_Mean_D13_DOY_productCD',
    'card1_div_Mean_D13_DOY',
    'card2_div_Mean_D14_DOY_productCD',
    'card5_div_Mean_D2_DOY_productCD',
    'TranAmt_div_Mean_D5_DOY',
    'card5_div_Mean_D13_DOY',
    'card2_div_Mean_D2_DOY_productCD',
    'card6_div_Mean_D12_DOY',
    'TranAmt_div_Mean_D3_DOY',
    'card5_div_Mean_D2_DOY',
    'card6_div_Mean_D9_DOY',
    'card4_div_Mean_D6_DOY',
    'card1_div_Mean_D2_DOY_productCD',
    'card2_div_Mean_D5_DOY_productCD',
    'card5_div_Mean_D5_DOY',
    'card4_div_Mean_D2_DOY_productCD',
    'card3_div_Mean_D3_DOY',
    'card6_div_Mean_D6_DOY',
    'card4_div_Mean_D12_DOY',
    'card4_div_Mean_D5_DOY',
    'card1_div_Mean_D5_DOY',
    'card4_div_Mean_D14_DOY_productCD',
    'card4_div_Mean_D2_DOY',
    'card6_div_Mean_D5_DOY',
    'card6_div_Mean_D3_DOY_productCD',
    'card3_div_Mean_D5_DOY',
    'card1_div_Mean_D14_DOY',
    'card2_div_Mean_D3_DOY_productCD',
    'card2_div_Mean_D2_DOY',
    'card2_div_Mean_D5_DOY',
    'card3_div_Mean_D2_DOY',
    'TranAmt_div_Mean_D13_DOY_productCD',
    'card2_div_Mean_D14_DOY',
    'card6_div_Mean_D13_DOY_productCD',
    'dist1_div_Mean_D1_DOY',
    'card2_div_Mean_D6_DOY',
    'card5_div_Mean_D3_DOY_productCD',
    'dist1_div_Mean_D4_DOY',
    'card4_div_Mean_D13_DOY',
    'dist1_div_Mean_D11_DOY',
    'card6_div_Mean_D14_DOY_productCD',
    'card6_div_Mean_D2_DOY',
    'card6_div_Mean_D14_DOY',
    'card2_div_Mean_D12_DOY',
    'card2_div_Mean_D13_DOY',
    'TranAmt_div_Mean_D9_DOY_productCD',
    'card3_div_Mean_D13_DOY_productCD',
    'card1_div_Mean_D3_DOY_productCD',
    'card4_div_Mean_D3_DOY_productCD',
    'card6_div_Mean_D13_DOY',
    'card4_div_Mean_D14_DOY',
    'dist1_div_Mean_D10_DOY',
    'TranAmt_div_Mean_D13_DOY',
    'TranAmt_div_Mean_D14_DOY_productCD',
    'TranAmt_div_Mean_D12_DOY',
    'TranAmt_div_Mean_D6_DOY',
    'TranAmt_div_Mean_D9_DOY',
    'card5_div_Mean_D3_DOY',
    'card4_div_Mean_D3_DOY',
    'card3_div_Mean_D12_DOY',
    'card5_div_Mean_D14_DOY',
    'card3_div_Mean_D13_DOY',
    'card5_div_Mean_D14_DOY_productCD',
    'addr1_div_Mean_D4_DOY_productCD',
    'card1_div_Mean_D2_DOY',
    'card3_div_Mean_D14_DOY_productCD',
    'card2_div_Mean_D3_DOY',
    'card1_div_Mean_D3_DOY',
    'TranAmt_div_Mean_D11_DOY']
REMOVE_FEATURES = REMOVE_FEATURES + LOW_IMPORTANCE_FEATS
# Model features = every test column that is not explicitly removed.
FEATURES = [c for c in test_df.columns if c not in REMOVE_FEATURES]
# Columns treated as categorical by the model (filtered against the removal
# list below so dropped features never reach the training call).
CAT_FEATURES = ['ProductCD', 'card4', 'card6',
                'id_12', 'id_13', 'id_14',
                'id_15', 'id_16', 'id_17',
                'id_18', 'id_19', 'id_20',
                'id_21',
                'id_22',
                'id_23',
                'id_24',
                'id_25',
                'id_26',
                'id_27',
                'id_28',
                'id_29',
                'id_32',
                'id_34',
                'id_35',
                'id_36', 'id_37', 'id_38',
                'DeviceType', 'DeviceInfo',
                'M4','P_emaildomain',
                'R_emaildomain', 'addr1', 'addr2',
                'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8', 'M9',
                'ProductCD_W_95cents','ProductCD_W_00cents','ProductCD_W_50cents',
                'ProductCD_W_50_95_0_cents','ProductCD_W_NOT_50_95_0_cents']
CAT_FEATURES = [c for c in CAT_FEATURES if c not in REMOVE_FEATURES]
X = train_df[FEATURES]
y = train_df[TARGET]
X_test = test_df[FEATURES]
# Sentinel fill so tree models can split missing values into their own bucket.
X = X.fillna(-9999)
X_test = X_test.fillna(-9999)
logger.info('Running with features...')
logger.info(FEATURES)
logger.info(f'Target is {TARGET}')
update_tracking(run_id, "n_features", len(FEATURES), integer=True)
############################
#### TRAIN MODELS FUNCTIONS
############################
def train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance):
    """
    Train one CatBoost fold on GPU.

    Returns a 4-tuple: (test predictions, validation predictions, the
    feature_importance frame with this fold's rows appended, best iteration).
    """
    train_dataset = Pool(data=X_train, label=y_train, cat_features=CAT_FEATURES)
    valid_dataset = Pool(data=X_valid, label=y_valid, cat_features=CAT_FEATURES)
    test_dataset = Pool(data=X_test, cat_features=CAT_FEATURES)
    model = CatBoostClassifier(
        iterations=N_ESTIMATORS,
        learning_rate=LEARNING_RATE,
        depth=DEPTH,
        eval_metric=EVAL_METRIC,
        verbose=VERBOSE,
        random_state=RANDOM_STATE,
        thread_count=N_THREADS,
        task_type="GPU")  # requires a CUDA-capable runtime
    model.fit(
        train_dataset,
        eval_set=valid_dataset,
        early_stopping_rounds=EARLY_STOPPING_ROUNDS,
    )
    # Probability of the positive (fraud) class.
    y_pred_valid = model.predict_proba(valid_dataset)[:,1]
    y_pred = model.predict_proba(test_dataset)[:,1]
    fold_importance = pd.DataFrame()
    fold_importance["feature"] = model.feature_names_
    fold_importance["importance"] = model.get_feature_importance()
    fold_importance["fold"] = fold_n + 1
    feature_importance = pd.concat([feature_importance, fold_importance],
                                   axis=0)
    best_iteration = model.best_iteration_
    return y_pred, y_pred_valid, feature_importance, best_iteration
# LightGBM hyper-parameters; shares LEARNING_RATE/DEPTH/seed etc. with the
# CatBoost configuration above.
lgb_params = {
    'objective':'binary',
    'boosting_type':'gbdt',
    'metric': EVAL_METRIC,
    'n_jobs':N_THREADS,
    'learning_rate':LEARNING_RATE,
    'num_leaves': 2**8,
    'max_depth':DEPTH,
    'tree_learner':'serial',
    'colsample_bytree': 0.85,
    'subsample_freq':1,
    'subsample':0.85,
    'n_estimators':N_ESTIMATORS,
    'max_bin':255,
    'verbose':-1,
    'seed': RANDOM_STATE,
    #'early_stopping_rounds':EARLY_STOPPING_ROUNDS,
    'reg_alpha':0.3,
    # BUG FIX: key was misspelled 'reg_lamdba', so LightGBM silently ignored
    # the intended L2 regularisation term.
    'reg_lambda':0.243,
    #'categorical_feature': CAT_FEATURES
}
def train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance):
    """
    Train one LightGBM fold.

    Returns a 4-tuple: (test predictions, validation predictions, the
    feature_importance frame with this fold's rows appended, best iteration).
    """
    # Copy so the categorical dtype casts below don't mutate the caller's frames.
    X_train = X_train.copy()
    X_valid = X_valid.copy()
    X_test = X_test.copy()
    # LightGBM handles categoricals natively when the dtype is 'category'.
    X_train[CAT_FEATURES] = X_train[CAT_FEATURES].astype('category')
    X_valid[CAT_FEATURES] = X_valid[CAT_FEATURES].astype('category')
    X_test[CAT_FEATURES] = X_test[CAT_FEATURES].astype('category')
    model = lgb.LGBMClassifier(**lgb_params)
    # NOTE(review): `verbose` and `early_stopping_rounds` as fit() kwargs were
    # removed in lightgbm>=4 -- pin lightgbm<4 or migrate to callbacks.
    model.fit(X_train, y_train,
              eval_set = [(X_train, y_train),
                          (X_valid, y_valid)],
              verbose = VERBOSE,
              early_stopping_rounds=EARLY_STOPPING_ROUNDS)
    # Probability of the positive (fraud) class.
    y_pred_valid = model.predict_proba(X_valid)[:,1]
    y_pred = model.predict_proba(X_test)[:,1]
    fold_importance = pd.DataFrame()
    fold_importance["feature"] = X_train.columns
    fold_importance["importance"] = model.feature_importances_
    fold_importance["fold"] = fold_n + 1
    feature_importance = pd.concat([feature_importance, fold_importance],
                                   axis=0)
    best_iteration = model.best_iteration_
    return y_pred, y_pred_valid, feature_importance, best_iteration
################################
# Dataframes for storing results
#################################
feature_importance = pd.DataFrame()
# NOTE(review): `oof` is never written to below -- oof_df['oof'] holds the
# out-of-fold predictions instead; this array looks vestigial.
oof = np.zeros(len(X))
pred = np.zeros(len(X_test))  # accumulated test predictions, averaged later
oof_df = train_df[['isFraud']].copy()
oof_df['oof'] = np.nan   # out-of-fold prediction per training row
oof_df['fold'] = np.nan  # which fold produced that prediction
scores = []              # per-fold validation AUC
best_iterations = []     # per-fold best boosting iteration
# K-fold cross-validation: train one model per fold, collect OOF predictions
# and accumulate test predictions.
for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X, y)):
    X_train = X.iloc[train_idx]
    y_train = y.iloc[train_idx]
    X_valid = X.iloc[valid_idx]
    y_valid = y.iloc[valid_idx]
    # NOTE(review): if MODEL_TYPE is neither 'catboost' nor 'lightgbm',
    # y_pred below is undefined and this loop raises NameError.
    if MODEL_TYPE == "catboost":
        y_pred, y_pred_valid, feature_importance, best_iteration = train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance)
    if MODEL_TYPE == 'lightgbm':
        y_pred, y_pred_valid, feature_importance, best_iteration = train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance)
    best_iterations.append(best_iteration)
    fold_score = roc_auc_score(y_valid, y_pred_valid)
    scores.append(fold_score)
    update_tracking(run_id, "AUC_f{}".format(fold_n + 1),
                    fold_score,
                    integer=False,)
    logger.info('Fold {} of {} CV mean AUC score: {:.4f}. Best iteration {}'.format(fold_n + 1,
                                                                                    N_FOLDS,
                                                                                    fold_score,
                                                                                    best_iteration))
    # Store this fold's validation predictions in their OOF slots.
    oof_df.iloc[valid_idx, oof_df.columns.get_loc('oof')] = y_pred_valid.reshape(-1)
    oof_df.iloc[valid_idx, oof_df.columns.get_loc('fold')] = fold_n + 1
    pred += y_pred
update_tracking(run_id, 'avg_best_iteration',
                np.mean(best_iterations),
                integer=True)
###############
# Store Results
###############
pred /= N_FOLDS  # average the accumulated per-fold test predictions
score = np.mean(scores)
sub = pd.read_csv('../input/sample_submission.csv')
sub['isFraud'] = pred
# Persist submission, OOF predictions and feature importances, all tagged
# with model number, run id and mean CV score.
sub.to_csv(f'../sub/sub_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv', index=False)
oof_df.to_csv(f'../oof/oof_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv')
logger.info('CV mean AUC score: {:.4f}, std: {:.4f}.'.format(np.mean(scores),
                                                             np.std(scores)))
# Single AUC over all out-of-fold predictions (usually the headline metric).
total_score = roc_auc_score(oof_df['isFraud'], oof_df['oof'])
feature_importance.to_csv(f'../fi/fi_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv')
update_tracking(run_id, "AUC",
                total_score,
                integer=False,)
logger.info('OOF AUC Score: {:.4f}'.format(total_score))
end = timer()
update_tracking(run_id, "training_time", (end - start), integer=True)
logger.info('Done!')
|
<reponame>RetailMeNotSandbox/dart
from abc import abstractmethod
import base64
import json
import logging
from pydoc import locate
import random
from boto.regioninfo import RegionInfo
from boto.sqs.connection import SQSConnection
from boto.sqs.jsonmessage import JSONMessage
from dart.model.message import MessageState
from dart.service.message import MessageService
_logger = logging.getLogger(__name__)
class MessageBroker(object):
    """Abstract broker through which dart components exchange messages."""

    @abstractmethod
    def set_app_context(self, app_context):
        """ :type app_context: dart.context.context.AppContext """
        raise NotImplementedError

    @abstractmethod
    def send_message(self, message):
        """
        :param message: the message to send
        :type message: dict
        """
        raise NotImplementedError

    @abstractmethod
    def receive_message(self, handler):
        """
        :param handler: callback function that handles the message
        :type handler: function[str, dict]
        """
        raise NotImplementedError


class SqsJsonMessageBroker(MessageBroker):
    """MessageBroker backed by an AWS SQS queue carrying JSON messages."""

    def __init__(self, queue_name, aws_access_key_id=None, aws_secret_access_key=None, region='us-east-1',
                 endpoint=None, is_secure=True, port=None, incoming_message_class='boto.sqs.jsonmessage.JSONMessage'):
        # A RegionInfo is only built when both region and endpoint are given;
        # otherwise boto falls back to its own region resolution.
        self._region = RegionInfo(name=region, endpoint=endpoint) if region and endpoint else None
        self._queue_name = queue_name
        self._is_secure = is_secure
        self._port = port
        self._incoming_message_class = incoming_message_class
        self._aws_access_key_id = aws_access_key_id
        self._aws_secret_access_key = aws_secret_access_key
        # Resolve the dotted class path to the actual message class object.
        self._message_class = locate(self._incoming_message_class)
        self._queue = None  # lazily created; see the `queue` property
        self._message_service = None

    def set_app_context(self, app_context):
        """ :type app_context: dart.context.context.AppContext """
        self._message_service = app_context.get(MessageService)

    def send_message(self, message):
        # dart always uses the JSONMessage format
        self.queue.write(JSONMessage(self.queue, message))

    def receive_message(self, handler, wait_time_seconds=20):
        """Long-poll the queue once and dispatch at most one message to handler."""
        # randomly purge old messages
        #if random.randint(0, 100) < 1:
        #    self._message_service.purge_old_messages()
        sqs_message = self.queue.read(wait_time_seconds=wait_time_seconds)
        if not sqs_message:
            _logger.debug("No message in queue {queue_name}, waited {message_wait_time} seconds.".
                          format(queue_name=self._queue_name,
                                 message_wait_time=wait_time_seconds))
            return
        sqs_message_body = self._get_body(sqs_message)
        message = self._message_service.get_message(sqs_message.id, raise_when_missing=False)
        # NOTE(review): this flag is never set to True anywhere visible --
        # confirm the handler contract before removing the parameter.
        previous_handler_failed = False
        result_state = MessageState.COMPLETED
        if not message:
            # First delivery: persist it as RUNNING before handling.
            message = self._message_service.save_message(sqs_message.id, json.dumps(sqs_message_body), MessageState.RUNNING)
        elif message.state in [MessageState.COMPLETED, MessageState.FAILED]:
            # Redelivery of an already-terminal message: drop it.
            # (warn() is a deprecated alias of warning().)
            _logger.warning('bailing on sqs message with id=%s because it was redelivered' % sqs_message.id)
            self.queue.delete_message(sqs_message)
            return
        _logger.info('Begin handling message {}\n{}'.format(sqs_message.id, json.dumps(sqs_message_body, indent=4, separators=(',', ': '))))
        handler(sqs_message.id, sqs_message_body, previous_handler_failed)
        _logger.info('Finish handling message {}'.format(sqs_message.id))
        self._message_service.update_message_state(message, result_state)
        self.queue.delete_message(sqs_message)

    @staticmethod
    def _get_body(message):
        """Decode an incoming SQS message body to a dict, supporting both
        dart's base64-encoded JSONMessage format and raw JSON bodies
        (e.g. S3 event notifications)."""
        if type(message.get_body()) is dict:
            # Already decoded by the configured incoming message class.
            return message.get_body()
        try:
            # dart's messages are decoded like JSONMessage
            value = base64.b64decode(message.get_body().encode('utf-8')).decode('utf-8')
            value = json.loads(value)
        except (AttributeError, TypeError, ValueError):
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. binascii.Error, UnicodeDecodeError
            # and json.JSONDecodeError are all ValueError subclasses.
            # s3 event notifications are raw
            value = json.loads(message.get_body())
        return value

    @property
    def queue(self):
        """Lazily connect to SQS and create/fetch the queue on first access."""
        if self._queue:
            return self._queue
        conn = SQSConnection(self._aws_access_key_id, self._aws_secret_access_key, self._is_secure, self._port, region=self._region)
        self._queue = conn.create_queue(self._queue_name)
        self._queue.set_message_class(self._message_class)
        return self._queue
|
<filename>dtool_lookup_gui/views/log_window.py<gh_stars>0
#
# Copyright 2021-2022 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import logging
import os
from gi.repository import Gdk, GLib, Gio, Gtk, GtkSource
from ..utils.logging import FormattedAppendingGtkTextBufferHandler
logger = logging.getLogger(__name__)
@Gtk.Template(filename=f'{os.path.dirname(__file__)}/log_window.ui')
class LogWindow(Gtk.Window):
    """Window that mirrors the root logger into a GtkSource buffer and offers
    clear/copy/save actions plus a loglevel selector kept in sync with the
    application-level 'set-loglevel' action."""
    __gtype_name__ = 'DtoolLogWindow'

    # Widgets bound from log_window.ui.
    log_text_view = Gtk.Template.Child()
    clear_button = Gtk.Template.Child()
    copy_button = Gtk.Template.Child()
    save_button = Gtk.Template.Child()
    log_switch = Gtk.Template.Child()
    # loglevel_entry = Gtk.Template.Child()
    loglevel_combo_box = Gtk.Template.Child()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        root_logger = logging.getLogger()
        self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
        # populate loglevel selection combo box
        loglevel_store = Gtk.ListStore(int, str)
        configurable_loglevels = [
            (logging.CRITICAL, "CRITICAL"),
            (logging.ERROR, "ERROR"),
            (logging.WARNING, "WARNING"),
            (logging.INFO, "INFO"),
            (logging.DEBUG, "DEBUG"),
        ]
        # Map numeric loglevel -> row index so actions can select rows later.
        self.loglevel_row_index_map = {}
        for row_index, (loglevel_value, loglevel_label) in enumerate(configurable_loglevels):
            loglevel_store.append([loglevel_value, loglevel_label])
            self.loglevel_row_index_map[loglevel_value] = row_index
        self.loglevel_combo_box.set_model(loglevel_store)
        # NOTE(review): raises KeyError if the root logger runs at a level
        # outside the five listed above (e.g. a custom level) -- confirm.
        self.loglevel_combo_box.set_active(self.loglevel_row_index_map[root_logger.level])
        self.loglevel_combo_box.connect("changed", self.on_loglevel_combo_box_changed)
        self.loglevel_combo_box.set_entry_text_column(1)
        # connect log handler to my buffer
        self.log_buffer = self.log_text_view.get_buffer()
        self.log_handler = FormattedAppendingGtkTextBufferHandler(
            text_buffer=self.log_buffer)
        # set up some pseudo highlighting for the log window
        lang_manager = GtkSource.LanguageManager()
        self.log_buffer.set_language(lang_manager.get_language("python"))
        self.log_buffer.set_highlight_syntax(True)
        self.log_buffer.set_highlight_matching_brackets(True)
        logger.debug("Append GtkTextBufferHandler to root logger.")
        root_logger.addHandler(self.log_handler)
        # bind another handler to the application-level action 'set-loglevel'
        set_loglevel_action = self.get_action_group("app").lookup_action('set-loglevel')
        set_loglevel_action.connect("change-state", self.do_loglevel_changed)
        root_logger.debug("Created log window.")

    # action handlers
    def do_loglevel_changed(self, action, value):
        """Keep entry of loglevel combobox in sync with actual action state."""
        new_loglevel = value.get_uint16()
        logger.debug(f"Loglevel changed to {new_loglevel}, update combo box entry if necessary.")
        tree_iter = self.loglevel_combo_box.get_active_iter()
        model = self.loglevel_combo_box.get_model()
        current_loglevel, loglevel_label = model[tree_iter][:2]
        logger.debug(f"Current loglevel is {current_loglevel}.")
        if new_loglevel != current_loglevel:
            self.loglevel_combo_box.set_active(self.loglevel_row_index_map[new_loglevel])
            logger.debug(f"Combo box updated to row {self.loglevel_row_index_map[new_loglevel]}.")

    # signal handlers
    @Gtk.Template.Callback()
    def on_show(self, widget):
        logger.debug("Show.")
        # NOTE(review): root_logger is unused here -- looks like a leftover.
        root_logger = logging.getLogger()

    @Gtk.Template.Callback()
    def on_delete(self, widget, event):
        # Hide instead of destroying so the window can be reopened.
        logger.debug("Hide on delete.")
        return self.hide_on_delete()

    @Gtk.Template.Callback()
    def on_destroy(self, widget):
        # Detach our handler so the root logger doesn't keep a dead buffer.
        logger.debug("Destroy.")
        root_logger = logging.getLogger()
        if self.log_handler in root_logger.handlers:
            root_logger.removeHandler(self.log_handler)

    @Gtk.Template.Callback()
    def on_log_switch_state_set(self, widget, state):
        logger.debug(f"{widget.get_name()} switched state to {state}")
        # Eventually managed to tie switch directly to action via glade
        # by setting up a stateful but parameterless toggle action
        # and specifying app.toggle-logging as action name for switch in xml,
        # hence no need for this handler anymore.

    @Gtk.Template.Callback()
    def on_loglevel_combo_box_changed(self, combo):
        """Selected loglevel from combo box."""
        tree_iter = combo.get_active_iter()
        model = combo.get_model()
        loglevel, loglevel_label = model[tree_iter][:2]
        logger.debug(f"Selected ID: {loglevel}, loglevel: {loglevel_label}")
        # This explicitly evokes the according action when loglevel selected
        # in combo box turned, see
        # https://lazka.github.io/pgi-docs/Gio-2.0/classes/ActionGroup.html#Gio.ActionGroup.list_actions
        # There might be more elegant mechanism to connect a switch with an
        # app-central action, but the Gtk docs are sparse on actions...
        self.get_action_group("app").activate_action('set-loglevel', GLib.Variant.new_uint16(loglevel))

    @Gtk.Template.Callback()
    def on_clear_clicked(self, widget):
        """Clear contents of log window."""
        self.log_buffer.set_text("")

    @Gtk.Template.Callback()
    def on_copy_clicked(self, widget):
        """Copy contents of log window to clipboard."""
        start_iter = self.log_buffer.get_start_iter()
        end_iter = self.log_buffer.get_end_iter()
        self.log_buffer.select_range(start_iter, end_iter)
        self.log_buffer.copy_clipboard(self.clipboard)
        logger.debug("Copied content of log window to clipboard.")

    @Gtk.Template.Callback()
    def on_save_clicked(self, widget):
        """Open file chooser dialog and save contents of log window to file."""
        dialog = Gtk.FileChooserDialog(title=f"Save log", parent=self,
                                       action=Gtk.FileChooserAction.SAVE)
        dialog.add_buttons(Gtk.STOCK_CANCEL,
                           Gtk.ResponseType.CANCEL,
                           Gtk.STOCK_OK,
                           Gtk.ResponseType.OK)
        # Suggest a timestamped, app-specific default file name.
        suggested_file_name = f"{datetime.datetime.now().isoformat()}-{self.get_application().get_application_id()}.log"
        dialog.set_current_name(suggested_file_name)
        dialog.set_do_overwrite_confirmation(True)
        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            start_iter = self.log_buffer.get_start_iter()
            end_iter = self.log_buffer.get_end_iter()
            dest_filename = dialog.get_filename()
            with open(dest_filename, "w") as file:
                file.write(self.log_buffer.get_text(start_iter, end_iter, include_hidden_chars=False))
        elif response == Gtk.ResponseType.CANCEL:
            pass
        dialog.destroy()
|
<reponame>RT-Team/rt-bot
# RT AUtoMod - Mod Utils
from __future__ import annotations
from typing import TYPE_CHECKING, Union, Any
from datetime import timedelta
from re import findall
from time import time
import discord
from difflib import SequenceMatcher
from emoji import emoji_lis
if TYPE_CHECKING:
from .data_manager import GuildData
from .cache import Cache
def similar(before: str, after: str) -> float:
    """Return how similar two strings are, as a percentage in [0.0, 100.0]."""
    matcher = SequenceMatcher(None, before, after)
    return 100 * matcher.ratio()
def join(message: discord.Message) -> list[str]:
    """Collect every piece of text carried by a message: its content, each
    embed's title+description+footer, and each attachment's filename."""
    contents = [message.content or ""]
    for embed in message.embeds:
        contents.append(
            "".join(map(lambda x: getattr(embed, x, None) or "", ("title", "description")))
            # BUG FIX: footer.text can be unset (None/Embed.Empty), which made
            # the string concatenation raise TypeError; both are falsy.
            + (embed.footer.text or "")
        )
    for attachment in message.attachments:
        contents.append(attachment.filename)
    return contents
def emoji_count(text: str) -> int:
    """Count emojis in *text*: custom Discord emojis (``<:name:id>`` /
    animated ``<a:name:id>``) plus Unicode emoji characters."""
    custom = len(findall("<a?:.+:\\d+>", text))
    unicode_emojis = sum(1 for char in text if emoji_lis(char))
    return custom + unicode_emojis
async def log(
    cache: Union["Cache", discord.Member, Any],
    reason: str, subject: str, error: bool = False
) -> discord.Message:
    "Post an AutoMod moderation log embed to the guild's log channel."
    # The log channel is the first text channel whose topic contains
    # "rt>automod". If no such channel exists, this falls through and
    # implicitly returns None despite the annotation.
    for channel in cache.guild.text_channels:
        if channel.topic and "rt>automod" in channel.topic:
            return await channel.send(
                f"<t:{int(time())}>", embed=discord.Embed(
                    title="AutoMod",
                    description=f"{cache.member.mention}を{reason}のため{subject}しました。"
                    + (f"\nですが権限がないので{subject}することができませんでした。" if error else ""),
                    color=cache.cog.COLORS["error" if error else "warn"]
                )
            )
def get(cache: "Cache", data: "GuildData", key: str) -> Any:
    """Look up *key* in the guild data, falling back to the cog's DEFAULTS."""
    fallback = cache.cog.DEFAULTS.get(key)
    return data.get(key, fallback)
async def trial_message(
    self: "Cache", data: "GuildData", message: discord.Message
) -> None:
    """Judge the given user's cached warn count against the guild's mute/ban
    thresholds and apply a timeout or ban when a threshold is reached,
    warning the user one point before each threshold."""
    try:
        # warn count sits exactly at the mute threshold (within +1): timeout.
        if (mute := get(self, data, "mute")) <= self.warn <= mute + 1:
            # TODO: implement Timeout here after the migration to Pycord slash commands.
            self.cog.print("[punishment.mute]", self)
            await self.member.edit(timeout=timedelta(days=1))
            return await log(self, "スパム", "タイムアウト")
        elif mute - 1 <= self.warn <= mute:
            # One point below the mute threshold: warn the user.
            await message.reply(
                {"ja": "これ以上スパムをやめなければタイムアウトします。",
                 "en": "If you don't stop spamming any more, I will timeout you."}
            )
        # Same two-stage scheme for the ban threshold.
        if (ban := get(self, data, "ban")) <= self.warn <= ban + 1:
            self.cog.print("[punishment.ban]", self)
            await self.member.ban(reason=f"[AutoMod] スパムのため")
            return await log(self, "スパム", "BAN")
        elif ban - 1 <= self.warn <= ban:
            await message.reply(
                {"ja": "スパムをやめなければBANをします。",
                 "en": "If you don't stop spamming any more, I will ban you."}
            )
    except discord.Forbidden:
        # Bot lacks permission to punish: log the failure instead.
        await log(self, "スパム", "処罰しようと", True)
def process_check_message(
    self: "Cache", data: "GuildData", message: discord.Message
) -> None:
    """Check whether the given message is spam and schedule punishment tasks."""
    if (message.author.guild_permissions.administrator
            or message.channel.id in data.get("ignore", ())):
        # Administrators and ignored channels are never checked.
        return
    # A message posted within 0.3 seconds of the previous one is treated
    # as spam unconditionally.
    if self.before is not None and time() - self.checked <= 0.3:
        self.suspicious += 50
    elif self.update_cache(message) is not None:
        # Run the spam heuristic: compare with the previously sent message
        # and add the similarity score to the suspicion counter.
        self.suspicious += sum(
            similar(*contents) for contents in zip(
                self.before_content, join(message)
            )
        )
    if self.process_suspicious():
        # Suspicion crossed the threshold: hold the trial asynchronously.
        self.cog.bot.loop.create_task(trial_message(self, data, message))
    # Check the emoji count against the configured limit.
    if get(self, data, "emoji") <= emoji_count(message.content):
        self.suspicious += 50
        self.cog.bot.loop.create_task(discord.utils.async_all(
            (
                message.author.send(
                    f"このサーバーでは一度のメッセージに{data['emoji']}個まで絵文字を送信できます。"
                ), message.delete()
            )
        ))
    # If invite-link deletion is enabled and the message contains an invite
    # link (and no whitelisted word), delete the message.
    if "invite_deleter" in data:
        if findall(
            r"(https?:\/\/)?(www\.)?(discord\.(gg|io|me|li)|discordapp\.com\/invite)\/.+[a-z]",
            message.content
        ) and all(word not in message.content for word in data["invite_deleter"]):
            self.cog.print("[InviteDeleter]", message.author.name)
            self.cog.bot.loop.create_task(discord.utils.async_all(
                (
                    message.author.send(
                        f"その招待リンクを{message.guild.name}に送信することはできません。"
                    ), message.delete()
                )
            ))
async def trial_new_member(self: "Cache", data: "GuildData") -> None:
    """Inspect a newly joined member for join/leave spam ("bolt") and ban if needed."""
    if "bolt" in data and self.before_join is not None:
        # Ban when the member re-joined faster than the configured window.
        if time() - self.before_join <= data["bolt"]:
            self.cog.print("[bolt.ban]", self.member.name)
            await self.member.ban(reason=f"[AutoMod] 即抜けのため")
            await log(self, "即抜け", "BAN")
    self.before_join = time()
async def trial_invite(data: "GuildData", invite: discord.Invite) -> None:
    """Check whether the invite is subject to invite regulation and delete it if necessary."""
    if hasattr(invite.guild, "get_member"):
        # If invite.guild was not a proper discord.Guild, fetch the real one.
        # NOTE(review): this guard looks inverted — a full Guild already has
        # get_member, while a PartialInviteGuild (which lacks it) skips the
        # re-fetch and would fail below. Confirm against the event payloads.
        if (guild := invite._state._get_guild(invite.guild.id)):
            invite.guild = guild
        else:
            # Give up if we cannot tell which guild this invite belongs to.
            return
    # invite.inviter is not a discord.Member, so re-fetch the member object.
    if (member := invite.guild.get_member(invite.inviter.id)):
        # Members with the administrator permission are exempt.
        if member.guild_permissions.administrator:
            return
        if "invites" in data:
            if not any(
                member.get_role(id_) or invite.channel.id == id_
                for id_ in data["invites"]
            ):
                # Not an exempted role/channel: delete the invite and notify.
                await invite.delete(reason=f"[AutoMod] 招待リンク作成不可なため")
                await member.send(
                    f"{member.guild.name}の{invite.channel.name}では招待リンクを作ることができません。"
                )
|
# sbc/loads.py (from zwgraham/Demand-as-Frequency-Response)
import atexit
from serial import Serial
from xbee import XBee
from time import sleep
from ts7250v2 import dio
class LoadBase(object):
    """Abstract parent class shared by every load type."""

    def __init__(self):
        # No shared state yet; exists so subclasses have a common root.
        pass


# Priority scale: 0 is the highest priority, 10 the lowest.
class SheddableLoad(LoadBase):
    """Base class for loads that can be shed (switched off) and restored.

    Every instance registers itself in the class-level ``LoadList`` so the
    priority-based classmethods can act on all known loads at once.
    """

    LoadList = []

    def __init__(self, priority):
        super(SheddableLoad, self).__init__()
        self.priority = priority
        self.shed = False
        SheddableLoad.LoadList.append(self)

    def isShed(self):
        """Return True while the load is shed."""
        return self.shed

    def shedLoad(self):
        raise NotImplementedError

    def restoreLoad(self):
        raise NotImplementedError

    @classmethod
    def shedByPriority(cls, priority):
        """Shed every registered load whose priority number is >= priority."""
        for registered in cls.LoadList:
            if registered.priority >= priority:
                registered.shedLoad()

    @classmethod
    def restoreByPriority(cls, priority):
        """Restore every registered load whose priority number is <= priority."""
        for registered in cls.LoadList:
            if registered.priority <= priority:
                registered.restoreLoad()
class DummySheddableLoad(SheddableLoad):
    """In-memory stand-in for a sheddable load; only flips the shed flag."""

    def __init__(self, priority):
        super(DummySheddableLoad, self).__init__(priority)

    def shedLoad(self):
        """Mark the load as shed; True when the state actually changed."""
        if self.isShed():
            return False
        self.shed = True
        return True

    def restoreLoad(self):
        """Mark the load as restored; True when the state actually changed."""
        if not self.isShed():
            return False
        self.shed = False
        return True
class SBCDIOSheddableLoad(SheddableLoad):
    """Sheddable load driven by a TS-7250-V2 SBC DIO pin (pin high = powered).

    NOTE(review): the `evgpio` path parameter is accepted but unused — the
    DIO is always driven through `dio.DIO()`; confirm whether it can go.
    """

    def __init__(self, priority, dio_pin, evgpio='/usr/local/bin/evgpioctl'):
        # Membership test directly on the mapping — no .keys() needed.
        if dio_pin not in dio.DIO_MAP:
            raise TypeError("dio_pin not a key in dio.DIO_MAP.")
        self.dio_pin = dio_pin
        self.evgpio = dio.DIO()
        self.evgpio.DIO_set_output(self.dio_pin)
        self.evgpio.DIO_set_high(self.dio_pin)
        super(SBCDIOSheddableLoad, self).__init__(priority)
        # the following is a hack to ensure that the gpio is set back to
        # default when the program exits
        atexit.register(self._cleanup)

    def _cleanup(self):
        # Drive the pin low and release it back to an input on exit.
        self.evgpio.DIO_set_low(self.dio_pin)
        self.evgpio.DIO_set_input(self.dio_pin)

    def _evgpioOff(self):
        self.evgpio.DIO_set_low(self.dio_pin)

    def _evgpioOn(self):
        self.evgpio.DIO_set_high(self.dio_pin)

    def shedLoad(self):
        """Cut power to the load; True when the state actually changed."""
        if not self.isShed():
            # run SBC specific command
            self._evgpioOff()
            self.shed = True
            return True
        return False

    def restoreLoad(self):
        """Re-power the load; True when the state actually changed."""
        if self.isShed():
            # run SBC specific command
            self._evgpioOn()
            self.shed = False
            return True
        return False
class DeferrableLoad(LoadBase):
    """Base class for loads whose energy use can be deferred or advanced.

    Instances register themselves in the class-level ``LoadList`` so the
    priority-based classmethods can reach every known load.
    """

    LoadList = []

    def __init__(self, priority, advanceable=False):
        super(DeferrableLoad, self).__init__()
        self.priority = priority
        self.deferred = False
        self.advanced = False
        self.advanceable = advanceable
        DeferrableLoad.LoadList.append(self)

    def isDeferred(self):
        return self.deferred

    def isAdvanced(self):
        return self.advanced

    def defer(self):
        raise NotImplementedError

    def restore(self):
        raise NotImplementedError

    def advance(self):
        raise NotImplementedError

    @classmethod
    def deferByPriority(cls, priority):
        """Defer every registered load whose priority number is >= priority."""
        for registered in cls.LoadList:
            if registered.priority >= priority:
                registered.defer()

    @classmethod
    def restoreByPriority(cls, priority):
        """Restore every registered load whose priority number is <= priority."""
        for registered in cls.LoadList:
            if registered.priority <= priority:
                registered.restore()
# TODO: try accept blocks for timeouts
class ArduinoDeferrableWaterHeater(DeferrableLoad):
    """Deferrable water heater controlled over an XBee radio link.

    Load is deferred by lowering the thermostat set point and advanced
    (pre-heated) by raising it.

    NOTE: this is written with only one device connected in mind — messages
    are sent to the PAN broadcast address instead of to individual water
    heaters at specific addresses. Should be changed later. TODO
    """

    def __init__(self, priority, setpoint, deferOffset, advanceOffset,
                 serial='/dev/ttyUSB0', baud=9600):
        self.serial = Serial(serial, baud)
        self.xbee = XBee(self.serial)
        self.setpoint = None  # last set point acknowledged by the device
        self.nominalsetpoint = setpoint
        self.deferOffset = deferOffset
        self.advanceOffset = advanceOffset
        self.enabled = False
        super(ArduinoDeferrableWaterHeater, self).__init__(
            priority=priority,
            advanceable=True
        )
        sleep(2)  # give the radio link time to come up before the first command
        self._setTemperature(self.nominalsetpoint)

    def _setTemperature(self, temperature):
        """Broadcast a new set point; return True when the device acks it."""
        self.xbee.tx(dest_addr=b'\xFF\xFF',
                     data='SetPoint: {}!'.format(temperature))
        # we should get something back, no dropped packets
        d = self.xbee.wait_read_frame()
        # 'Recieved' is spelled to match the device firmware's reply exactly.
        if self._checkPacket(
                d,
                'Set Point Recieved {:.2f}'.format(temperature)):
            self.setpoint = temperature
            return True
        else:
            return False

    def _checkPacket(self, packet, phrase):
        """Return True when the packet payload matches the expected phrase."""
        if packet['rf_data'].strip() == phrase:
            return True
        else:
            return False

    def enable(self):
        # do a check if setpoint is none and throw an exception
        self.xbee.tx(dest_addr=b'\xFF\xFF', data='ON!')
        d = self.xbee.wait_read_frame()
        # if it times out we need to check the status if it's enabled
        if self._checkPacket(d, 'Water Heater Enabled'):
            self.enabled = True

    def defer(self):
        """Lower the set point (or return to nominal if currently advanced)."""
        if self.isAdvanced():
            # return to nominal to defer
            x = self._setTemperature(self.nominalsetpoint)
            self.advanced = not x
        elif not self.isDeferred():
            # defer
            x = self._setTemperature(self.nominalsetpoint - self.deferOffset)
            self.deferred = x

    def advance(self):
        """Raise the set point (after returning to nominal if deferred)."""
        if self.isDeferred():
            # return to nominal to advance
            x = self._setTemperature(self.nominalsetpoint)
            self.deferred = not x
        if not self.isAdvanced():
            # BUG FIX: the original added deferOffset here; advancing must use
            # the advance offset (advanceOffset was stored but never used).
            x = self._setTemperature(self.nominalsetpoint + self.advanceOffset)
            self.advanced = x

    def restore(self):
        """Return the set point to nominal and clear defer/advance flags."""
        if self.isDeferred():
            x = self._setTemperature(self.nominalsetpoint)
            self.deferred = not x
        elif self.isAdvanced():
            x = self._setTemperature(self.nominalsetpoint)
            # BUG FIX: the original assigned to misspelled 'advnaced', leaving
            # the advanced flag stuck at True forever.
            self.advanced = not x

    def disable(self):
        self.xbee.tx(dest_addr=b'\xFF\xFF', data='OFF!')
        d = self.xbee.wait_read_frame()
        # if it times out we need to check the status if it's enabled
        if self._checkPacket(d, 'Water Heater Disabled'):
            self.enabled = False
|
# thingsdb/model/thing.py
import asyncio
import functools
import logging

from .prop import Prop
def checkevent(f):
    """Decorator for watch-event handlers: drop events older than the last seen.

    Updates ``self._event_id`` before dispatching to ``f`` and flushes the
    collection's pending queue afterwards.
    """
    @functools.wraps(f)
    def wrapper(self, event_id, *args):
        if self._event_id > event_id:
            # Out-of-order delivery: this event is stale, ignore it.
            logging.warning(
                f'ignore event because the current event `{self._event_id}` '
                f'is greater than the received event `{event_id}`')
            return
        self._event_id = event_id
        f(self, event_id, *args)
        self._collection._go_pending()
    return wrapper
class ThingHash:
    """Identity base class: things hash and compare purely by integer id."""

    def __init__(self, id):
        self._id = id

    def __hash__(self):
        return self._id

    def __eq__(self, other):
        # Compare by id; return NotImplemented for foreign types instead of
        # raising AttributeError on the missing `_id` attribute.
        if isinstance(other, ThingHash):
            return self._id == other._id
        return NotImplemented
class Thing(ThingHash):
    """Client-side mirror of a ThingsDB thing.

    Instances are kept in sync with the server by applying watch events
    through the ``_job_*`` handlers listed in ``_UPDMAP``.
    """

    # When __STRICT__ is set to `True`, only properties which are defined in
    # the model class are assigned to a `Thing` instance. If `False`, all
    # properties are set, not only the ones defined by the model class.
    __STRICT__ = False

    # When __SET_ANYWAY__ is set to `True`, values which do not match the
    # specification will be assigned to a `Thing` instance anyway and only
    # a warning will be logged. If `False`, the properties will not be set.
    __SET_ANYWAY__ = False

    # When __AS_TYPE__ is set to `True`, this class will be created in
    # thingsdb as a Type when using the `build(..)` method. If `False`, no type
    # will be created. A Collection instance will have `False` as default.
    __AS_TYPE__ = True

    _ev_handlers = dict()  # event name -> handler, rebuilt per subclass
    _props = dict()  # property name -> Prop, rebuilt per subclass
    _type_name = None  # Only set when __AS_TYPE__ is True
    _visited = 0  # For build, 0=not visited, 1=new_type, 2=set_type, 3=build

    def __init__(self, collection, id: int):
        """Register this thing with its collection; the event id starts at 0."""
        super().__init__(id)
        self._event_id = 0
        self._collection = collection
        collection._register(self)

    def __init_subclass__(cls):
        """Collect Prop specs and event handlers declared on a model subclass."""
        cls._ev_handlers = {}
        cls._props = {}
        items = {
            k: v for k, v in cls.__dict__.items() if not k.startswith('__')}
        for key, val in items.items():
            # A bare string spec is promoted to a one-element tuple.
            if isinstance(val, str):
                val = val,
            if isinstance(val, tuple):
                cls._props[key] = Prop(*val)
                delattr(cls, key)
            elif callable(val) and hasattr(val, '_ev'):
                cls._ev_handlers[val._ev] = val
        if cls.__AS_TYPE__:
            cls._type_name = getattr(cls, '__TYPE_NAME__', cls.__name__)

    def __bool__(self):
        # A thing is truthy once it has received at least one event.
        return bool(self._event_id)

    def __repr__(self):
        return f'#{self._id}'

    def id(self):
        """Return the thing id."""
        return self._id

    def get_collection(self):
        """Return the collection this thing belongs to."""
        return self._collection

    def get_client(self):
        """Return the client of the owning collection."""
        return self._collection._client

    @classmethod
    def _unpack(cls, collection):
        if cls._props:
            for p in cls._props.values():
                p.unpack(collection)
        # unpacking is no longer required
        cls._unpack = lambda *_args: None

    def watch(self):
        """Start watching this thing for events."""
        collection = self._collection
        # when calling watch directly, make sure the props are unpacked
        self._unpack(collection)
        return collection._client.watch(self._id, scope=collection._scope)

    def unwatch(self):
        """Stop watching this thing."""
        collection = self._collection
        return collection._client.unwatch(self._id, scope=collection._scope)

    def emit(self, event, *args):
        """Emit a custom event with arguments on this thing via a query."""
        data = {f'd{i}': v for i, v in enumerate(args)}
        dstr = "".join((f", {k}" for k in data.keys()))
        return self._collection.query(
            f'thing(id).emit(event{dstr});',
            id=self._id,
            event=event,
            **data)

    @checkevent
    def on_init(self, event, data):
        # The initial state arrives as one big `set` job.
        self._job_set(data)

    @checkevent
    def on_update(self, event, jobs):
        # Dispatch each job to the matching handler in _UPDMAP.
        for job_dict in jobs:
            for name, job in job_dict.items():
                jobfun = self._UPDMAP.get(name)
                if jobfun is None:
                    logging.warning(f'unhandled job `{name}` for `{self}`')
                    continue
                jobfun(self, job)

    def on_delete(self):
        # Remove this thing from the collection's registry.
        self._collection._things.pop(self.id())

    def on_event(self, ev, *args):
        """Dispatch a custom event to the handler registered on the subclass."""
        cls = self.__class__
        fun = cls._ev_handlers.get(ev)
        if fun is None:
            logging.debug(f'no event handler for {ev} on {cls.__name__}')
            return
        fun(self, *args)

    def on_stop(self):
        logging.warning(f'stopped watching thing {self}')

    def _job_add(self, pair):
        """Handle an `add` job: add converted items to a set property."""
        cls = self.__class__
        (k, v), = pair.items()
        prop = cls._props.get(k)
        if not prop and cls.__STRICT__:
            return
        try:
            set_ = getattr(self, k)
        except AttributeError:
            if prop:
                logging.warning(
                    f'missing property `{k}` on `{self}` '
                    f'while the property is defined in the '
                    f'model class as `{prop.spec}`')
            return
        if not isinstance(set_, set):
            logging.warning(
                f'got a add job for property `{k}` on `{self}` '
                f'while the property is of type `{type(set_)}`')
            return
        convert = prop.nconv if prop else self._collection._conv_thing
        try:
            set_.update((convert(item) for item in v))
        except Exception as e:
            logging.warning(
                f'got a value for property `{k}` on `{self}` which '
                f'does not match `{prop.spec if prop else "thing"}` ({e})')

    def _job_del(self, k):
        """Handle a `del` job: remove attribute `k` from this thing."""
        prop = self.__class__._props.get(k)
        if prop:
            logging.warning(
                f'property `{k}` on `{self}` will be removed while it '
                f'is defined in the model class as `{prop.spec}`')
        try:
            delattr(self, k)
        except AttributeError:
            pass

    def _job_event(self, data):
        self.on_event(*data)

    def _job_remove(self, pair):
        """Handle a `remove` job: remove things (by id) from a set property."""
        cls = self.__class__
        (k, v), = pair.items()
        prop = cls._props.get(k)
        if not prop and cls.__STRICT__:
            return
        try:
            set_ = getattr(self, k)
        except AttributeError:
            if prop:
                logging.warning(
                    f'missing property `{k}` on `{self}` '
                    f'while the property is defined in the '
                    f'model class as `{prop.spec}`')
            return
        if not isinstance(set_, set):
            logging.warning(
                f'got a remove job for property `{k}` on `{self}` '
                f'while the property is of type `{type(set_)}`')
            return
        # ThingHash hashes/compares by id only, so bare ids suffice here.
        set_.difference_update((ThingHash(id) for id in v))

    def _job_set(self, pairs):
        """Handle a `set` job: assign converted values as attributes."""
        cls = self.__class__
        for k, v in pairs.items():
            if k == '#':
                continue
            prop = cls._props.get(k)
            if prop:
                convert = prop.vconv
            elif cls.__STRICT__:
                continue
            else:
                convert = self._collection._conv_any
            try:
                v = convert(v)
            except Exception as e:
                logging.warning(
                    f'got a value for property `{k}` on `{self}` which does '
                    f'not match `{prop.spec if prop else "any"}` ({repr(e)})')
                if not cls.__SET_ANYWAY__:
                    continue
            setattr(self, k, v)
        self._collection._go_pending()

    def _job_splice(self, pair):
        """Handle a `splice` job: replace a slice of a list property."""
        cls = self.__class__
        (k, v), = pair.items()
        prop = cls._props.get(k)
        if not prop and cls.__STRICT__:
            return
        try:
            arr = getattr(self, k)
        except AttributeError:
            if prop:
                logging.warning(
                    f'missing property `{k}` on `{self}` '
                    f'while the property is defined in the '
                    f'model class as `{prop.spec}`')
            return
        if not isinstance(arr, list):
            logging.warning(
                f'got a splice job for property `{k}` on `{self}` '
                f'while the property is of type `{type(arr)}`')
            return
        index, count, *items = v
        convert = prop.nconv if prop else self._collection._conv_any
        try:
            arr[index:index+count] = (convert(item) for item in items)
        except (TypeError, ValueError) as e:
            logging.warning(
                f'got a value for property `{k}` on `{self}` '
                f'which does not match `{prop.spec if prop else "any"}` ({e})')

    # Collection-level jobs below. Several are intentionally no-ops because
    # the Python model keeps its own (static) definitions.

    def _job_del_procedure(self, data):
        delattr(self._collection, data)

    def _job_del_enum(self, data):
        # keep the enum so simply ignore this event
        pass

    def _job_del_type(self, data):
        # keep the type so simply ignore this event
        pass

    def _job_mod_type_add(self, data):
        self._collection._upd_type_add(data)

    def _job_mod_type_del(self, data):
        self._collection._upd_type_del(data)

    def _job_mod_type_mod(self, data):
        # ignore the specification so simply ignore this event
        pass

    def _job_mod_type_rel(self, data):
        # ignore the specification so simply ignore this event
        pass

    def _job_mod_type_ren(self, data):
        self._collection._upd_type_ren(data)

    def _job_mod_type_wpo(self, data):
        # ignore wrap-only mode so simply ignore this event
        pass

    def _job_mod_enum_add(self, data):
        self._collection._upd_enum_add(data)

    def _job_mod_enum_del(self, data):
        self._collection._upd_enum_del(data)

    def _job_mod_enum_def(self, data):
        self._collection._upd_enum_def(data)

    def _job_mod_enum_mod(self, data):
        self._collection._upd_enum_mod(data)

    def _job_mod_enum_ren(self, data):
        self._collection._upd_enum_ren(data)

    def _job_new_procedure(self, data):
        self._collection._set_procedure(data)

    def _job_new_type(self, data):
        # a new type starts without fields; they arrive via set_type
        data['fields'] = []
        self._collection._update_type(data)

    def _job_rename_enum(self, data):
        # do not rename an enum type in python
        pass

    def _job_rename_procedure(self, data):
        self._collection._rename_procedure(data)

    def _job_rename_type(self, data):
        # do not rename a type in python
        pass

    def _job_set_enum(self, data):
        self._collection._update_enum(data)

    def _job_set_type(self, data):
        self._collection._update_type(data)

    # Maps watch-event job names to their handler methods.
    _UPDMAP = {
        # Thing jobs
        'add': _job_add,
        'del': _job_del,
        'event': _job_event,
        'remove': _job_remove,
        'set': _job_set,
        'splice': _job_splice,
        # Collection jobs
        'del_enum': _job_del_enum,
        'del_procedure': _job_del_procedure,
        'del_type': _job_del_type,
        'mod_type_add': _job_mod_type_add,
        'mod_type_del': _job_mod_type_del,
        'mod_enum_add': _job_mod_enum_add,
        'mod_type_mod': _job_mod_type_mod,
        'mod_type_rel': _job_mod_type_rel,
        'mod_type_ren': _job_mod_type_ren,
        'mod_type_wpo': _job_mod_type_wpo,
        'mod_enum_def': _job_mod_enum_def,
        'mod_enum_del': _job_mod_enum_del,
        'mod_enum_mod': _job_mod_enum_mod,
        'mod_enum_ren': _job_mod_enum_ren,
        'new_procedure': _job_new_procedure,
        'new_type': _job_new_type,
        'rename_enum': _job_rename_enum,
        'rename_procedure': _job_rename_procedure,
        'rename_type': _job_rename_type,
        'set_enum': _job_set_enum,
        'set_type': _job_set_type,
    }

    @classmethod
    async def _new_type(cls, client, collection):
        """Create this model (and its prop dependencies) as a ThingsDB type."""
        if cls._visited > 0:
            return
        cls._visited += 1
        for prop in cls._props.values():
            if prop.model:
                await prop.model._new_type(client, collection)
        if not cls._type_name:
            return
        await client.query(f'''
            new_type('{cls._type_name}');
        ''', scope=collection._scope)

    @classmethod
    async def _set_type(cls, client, collection):
        """Define the fields of this model's ThingsDB type from its props."""
        if cls._visited > 1:
            return
        cls._visited += 1
        for prop in cls._props.values():
            if prop.model:
                await prop.model._set_type(client, collection)
        if not cls._type_name:
            return
        await client.query(f'''
            set_type('{cls._type_name}', {{
                {', '.join(f'{k}: "{p.spec}"' for k, p in cls._props.items())}
            }});
        ''', scope=collection._scope)
class ThingStrict(Thing):
    """Thing variant that only assigns properties declared in the model class."""
    __STRICT__ = True
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('./')
import six
import os
import os.path as osp
import math
import argparse
# Command-line configuration for the text-recognition training/eval scripts.
parser = argparse.ArgumentParser(description="Softmax loss classification")
# data
parser.add_argument('--synthetic_train_data_dir', nargs='+', type=str, metavar='PATH',
                    default=['/share/zhui/reg_dataset/NIPS2014'])
parser.add_argument('--real_train_data_dir', type=str, metavar='PATH',
                    default='/data/zhui/benchmark/cocotext_trainval')
parser.add_argument('--extra_train_data_dir', nargs='+', type=str, metavar='PATH',
                    default=['/share/zhui/reg_dataset/CVPR2016'])
parser.add_argument('--test_data_dir', type=str, metavar='PATH',
                    default='/share/zhui/reg_dataset/IIIT5K_3000')
parser.add_argument('--MULTI_TRAINDATA', action='store_true', default=False,
                    help='whether use the extra_train_data for training.')
parser.add_argument('-b', '--batch_size', type=int, default=128)
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--height', type=int, default=64,
                    help="input height, default: 256 for resnet*, ""64 for inception")
parser.add_argument('--width', type=int, default=256,
                    help="input width, default: 128 for resnet*, ""256 for inception")
parser.add_argument('--keep_ratio', action='store_true', default=False,
                    help='length fixed or lenghth variable.')
parser.add_argument('--voc_type', type=str, default='ALLCASES_SYMBOLS',
                    choices=['LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS'])
parser.add_argument('--mix_data', action='store_true',
                    help="whether combine multi datasets in the training stage.")
parser.add_argument('--num_train', type=int, default=math.inf)
parser.add_argument('--num_test', type=int, default=math.inf)
parser.add_argument('--aug', action='store_true', default=False,
                    help='whether use data augmentation.')
parser.add_argument('--lexicon_type', type=str, default='0', choices=['0', '50', '1k', 'full'],
                    help='which lexicon associated to image is used.')
parser.add_argument('--image_path', type=str, default="data/demo.png",
                    help='the path of single image, used in demo.py.')
parser.add_argument('--tps_inputsize', nargs='+', type=int, default=[32, 64])
parser.add_argument('--tps_outputsize', nargs='+', type=int, default=[32, 100])
# model
parser.add_argument('-a', '--arch', type=str, default='ResNet_ASTER')
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--max_len', type=int, default=100)
parser.add_argument('--n_group', type=int, default=1)
# NOTE(review): store_false makes the default True, so passing --STN_ON
# *disables* the STN head despite the help text — confirm intended.
parser.add_argument('--STN_ON', action='store_false',
                    help='add the stn head.')
parser.add_argument('--tps_margins', nargs='+', type=float, default=[0.05,0.05])
parser.add_argument('--stn_activation', type=str, default='none')
parser.add_argument('--num_control_points', type=int, default=20)
parser.add_argument('--stn_with_dropout', action='store_true', default=False)
## lstm
# NOTE(review): store_false combined with default=False means this flag can
# never enable the LSTM — it sets False when it is already False. Confirm.
parser.add_argument('--with_lstm', action='store_false', default=False,
                    help='whether append lstm after cnn in the encoder part.')
parser.add_argument('--decoder_sdim', type=int, default=512,
                    help="the dim of hidden layer in decoder.")
parser.add_argument('--attDim', type=int, default=512,
                    help="the dim for attention.")
# optimizer
parser.add_argument('--lr', type=float, default=1,
                    help="learning rate of new parameters, for pretrained "
                         "parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=0.0)  # the model maybe under-fitting, 0.0 gives much better results.
parser.add_argument('--grad_clip', type=float, default=1.0)
parser.add_argument('--loss_weights', nargs='+', type=float, default=[1,1,1])
# training configs
parser.add_argument('--resume', type=str, default='/data/mazhenyu/jingxin/my_aster/checkpoints/stretch.pth.tar', metavar='PATH')
parser.add_argument('--evaluate', action='store_true',
                    help="evaluation only")
parser.add_argument('--epochs', type=int, default=6)
parser.add_argument('--start_save', type=int, default=0,
                    help="start saving checkpoints after specific epoch")
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print_freq', type=int, default=100)
parser.add_argument('--cuda', default=True, type=bool,
                    help='whether use cuda support.')
# testing configs
parser.add_argument('--evaluation_metric', type=str, default='accuracy')
parser.add_argument('--evaluate_with_lexicon', action='store_true', default=False)
parser.add_argument('--beam_width', type=int, default=5)
# misc
working_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
parser.add_argument('--logs_dir', type=str, metavar='PATH',
                    default=osp.join(working_dir, 'logs'))
parser.add_argument('--real_logs_dir', type=str, metavar='PATH',
                    default='/media/mkyang/research/recognition/selfattention_rec')
parser.add_argument('--debug', action='store_true',
                    help="if debugging, some steps will be passed.")
parser.add_argument('--vis_dir', type=str, metavar='PATH', default='',
                    help="whether visualize the results while evaluation.")
parser.add_argument('--run_on_remote', action='store_true', default=False,
                    help="run the code on remote or local.")
def get_args(sys_args):
    """Parse the given argument list with the module-level parser."""
    return parser.parse_args(sys_args)
# gh_stars: 1-10
import SimpleHTTPServer
import SocketServer
import os
from threading import Thread
import unittest
from twitter.common.contextutil import pushd, temporary_dir, temporary_file
from twitter.common.dirutil import safe_mkdir
from twitter.pants.base.build_invalidator import CacheKey
from twitter.pants.cache import create_artifact_cache, select_best_url
from twitter.pants.cache.combined_artifact_cache import CombinedArtifactCache
from twitter.pants.cache.file_based_artifact_cache import FileBasedArtifactCache
from twitter.pants.cache.restful_artifact_cache import RESTfulArtifactCache
from twitter.pants.testutils import MockLogger
class MockPinger(object):
    """Fake pinger returning canned latencies; unknown hosts get 9999."""

    def __init__(self, hosts_to_times):
        self._hosts_to_times = hosts_to_times

    def pings(self, hosts):
        """Return (host, latency) pairs lazily, like the real pinger."""
        lookup = self._hosts_to_times.get
        return map(lambda host: (host, lookup(host, 9999)), hosts)
# A very trivial server that serves files under the cwd.
class SimpleRESTHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Minimal REST handler: GET/HEAD from the base class, plus PUT and DELETE.

    NOTE(review): Python 2 APIs (SimpleHTTPServer, headers.getheader); a
    Python 3 port would use http.server and self.headers.get.
    """

    def __init__(self, request, client_address, server):
        # The base class implements GET and HEAD.
        SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)

    def do_HEAD(self):
        return SimpleHTTPServer.SimpleHTTPRequestHandler.do_HEAD(self)

    def do_PUT(self):
        # Write the request body to the translated path, creating parent dirs.
        path = self.translate_path(self.path)
        content_length = int(self.headers.getheader('content-length'))
        content = self.rfile.read(content_length)
        safe_mkdir(os.path.dirname(path))
        with open(path, 'wb') as outfile:
            outfile.write(content)
        self.send_response(200)
        self.end_headers()

    def do_DELETE(self):
        # Remove the file if present; respond 404 otherwise.
        path = self.translate_path(self.path)
        if os.path.exists(path):
            os.unlink(path)
            self.send_response(200)
        else:
            self.send_error(404, 'File not found')
        self.end_headers()
# Payloads used to verify cache round-tripping below.
TEST_CONTENT1 = 'muppet'
TEST_CONTENT2 = 'kermit'
class TestArtifactCache(unittest.TestCase):
    """Exercises file-based, RESTful and combined artifact caches."""

    def test_select_best_url(self):
        # The mock pinger reports host2 as fastest, so its URL must win.
        spec = 'http://host1|https://host2:666/path/to|http://host3/path/'
        best = select_best_url(spec, MockPinger({'host1': 5, 'host2:666': 3, 'host3': 7}), MockLogger())
        self.assertEquals('https://host2:666/path/to', best)

    def test_cache_spec_parsing(self):
        artifact_root = '/bogus/artifact/root'

        def check(expected_type, spec):
            # Each spec shape should map to the matching cache implementation.
            cache = create_artifact_cache(MockLogger(), artifact_root, spec, 'TestTask')
            self.assertTrue(isinstance(cache, expected_type))
            self.assertEquals(cache.artifact_root, artifact_root)

        with temporary_file() as temp:
            path = temp.name  # Must be a real path, since we safe_mkdir it.
            check(FileBasedArtifactCache, path)
            check(RESTfulArtifactCache, 'http://localhost/bar')
            check(CombinedArtifactCache, [path, 'http://localhost/bar'])

    def test_local_cache(self):
        with temporary_dir() as artifact_root:
            with temporary_dir() as cache_root:
                artifact_cache = FileBasedArtifactCache(None, artifact_root, cache_root)
                self.do_test_artifact_cache(artifact_cache)

    def test_restful_cache(self):
        # Spin up a real HTTP server on an ephemeral port, then round-trip.
        httpd = None
        httpd_thread = None
        try:
            with temporary_dir() as cache_root:
                with pushd(cache_root):  # SimpleRESTHandler serves from the cwd.
                    httpd = SocketServer.TCPServer(('localhost', 0), SimpleRESTHandler)
                    port = httpd.server_address[1]
                    httpd_thread = Thread(target=httpd.serve_forever)
                    httpd_thread.start()
                    with temporary_dir() as artifact_root:
                        artifact_cache = RESTfulArtifactCache(MockLogger(), artifact_root,
                                                              'http://localhost:%d' % port)
                        self.do_test_artifact_cache(artifact_cache)
        finally:
            # Always stop the server thread, even when the test fails.
            if httpd:
                httpd.shutdown()
            if httpd_thread:
                httpd_thread.join()

    def do_test_artifact_cache(self, artifact_cache):
        """Round-trip a file through the given cache implementation."""
        key = CacheKey('muppet_key', 'fake_hash', 42, [])
        with temporary_file(artifact_cache.artifact_root) as f:
            # Write the file.
            f.write(TEST_CONTENT1)
            path = f.name
            f.close()
            # Cache it.
            self.assertFalse(artifact_cache.has(key))
            self.assertFalse(artifact_cache.use_cached_files(key))
            artifact_cache.insert(key, [path])
            self.assertTrue(artifact_cache.has(key))
            # Stomp it.
            with open(path, 'w') as outfile:
                outfile.write(TEST_CONTENT2)
            # Recover it from the cache.
            self.assertTrue(artifact_cache.use_cached_files(key))
            # Check that it was recovered correctly.
            with open(path, 'r') as infile:
                content = infile.read()
            self.assertEquals(content, TEST_CONTENT1)
            # Delete it.
            artifact_cache.delete(key)
            self.assertFalse(artifact_cache.has(key))
|
# ffthompy/tensorsLowRank/objects/sparseTensorWrapper.py
import numpy as np
from tt.core.vector import vector
from ffthompy.tensorsLowRank.objects.tucker import Tucker
from ffthompy.tensorsLowRank.objects.canoTensor import CanoTensor
from ffthompy.tensorsLowRank.objects.tensorTrain import TensorTrain
from ffthompy.tensorsLowRank.objects.tensors import fft_form_default
def SparseTensor(kind='tt', val=None, core=None, basis=None, eps=None, rank=None,
                 Fourier=False, name='unnamed', vectorObj=None, fft_form=fft_form_default):
    """
    A uniform wrapper of the different tensorsLowRank tensor formats.

    :param kind: type of tensorsLowRank tensor, can be 'cano', 'tucker' or 'tt', or more variants (see the code).
    :type kind: string
    :param val: a full tensor to be approximated
    :type val: n-D array
    :param core: core for canonical, tucker or TT tensorsLowRank tensor
    :type core: 1-D array for canonical tensor, n-D array for tucker, list of arrays for TT.
    :param basis: basis for canonical or tucker tensorsLowRank tensor.
    :type basis: list of arrays.
    :param eps: approximation accuracy.
    :type eps: float.
    :param rank: rank of the cano and tucker tensorsLowRank tensor, maximum rank of TT tensorsLowRank tensor.
    :type rank: int for cano and TT, list of int for tucker.
    :param vectorObj: a TTPY vector class object, to be cast into a tensorTrain object.
    :type vectorObj: TTPY vector
    :returns: object of tensorsLowRank tensor.
    :raises ValueError: when `kind` is not a recognised format name.
    """
    # Tucker accepts a per-mode rank list; cano and TT need a single scalar,
    # so reduce list/array ranks to their extremes.
    # isinstance (instead of `type(..) is ..`) also accepts sequence/ndarray
    # subclasses, matching what the docstring promises.
    if isinstance(rank, (list, tuple, np.ndarray)):
        rmax = max(rank)
        r = min(rank)
    else:
        rmax = r = rank
    kind_lower = kind.lower()  # normalise once instead of per branch
    if kind_lower in ('cano', 'canotensor'):
        return CanoTensor(name=name, val=val, core=core, basis=basis,
                          Fourier=Fourier, fft_form=fft_form).truncate(rank=r, tol=eps)
    elif kind_lower in ('tucker',):
        return Tucker(name=name, val=val, core=core, basis=basis,
                      Fourier=Fourier, fft_form=fft_form).truncate(rank=rank, tol=eps)
    elif kind_lower in ('tt', 'tensortrain'):
        return TensorTrain(val=val, core=core, eps=eps, rmax=rmax, name=name,
                           Fourier=Fourier, vectorObj=vectorObj, fft_form=fft_form)
    else:
        raise ValueError("Unexpected argument value: '" + kind + "'")
if __name__=='__main__':
    # Manual smoke test: exercises TT repeat() and each wrapper branch.
    print()
    print('----testing "Repeat" function ----')
    print()
    v1 = np.random.rand(3, 3,3)
    tt = SparseTensor(kind='tt', val=v1)
    # tt.fourier()
    print((tt.full()))
    tt.repeat(6)
    print((tt.full()))
    print('\n----testing wrapper function ----\n')
    # Canonical format: default, truncated, and built from core+basis.
    v1=np.random.rand(20, 30)
    cano=SparseTensor(kind='cano', val=v1)
    print(cano)
    cano2=SparseTensor(kind='cano', val=v1, rank=10)
    print(cano2)
    cano3=SparseTensor(kind='cano', core=np.array([1.]), basis=[np.atleast_2d(np.ones(5)) for ii in range(2)], Fourier=False)
    print(cano3)
    # Tucker format: default, per-mode rank list, and core+basis.
    v1=np.random.rand(20, 30, 40)
    tucker1=SparseTensor(kind='tucker', val=v1)
    print(tucker1)
    tucker2=SparseTensor(kind='tucker', val=v1, rank=[10, 20, 35])
    print(tucker2)
    tucker3=SparseTensor(kind='tucker', core=np.array([1.]), basis=[np.atleast_2d(np.ones(5)) for ii in range(3)])
    print(tucker3)
    # Tensor-train format: default, eps-truncated, and from a TTPY vector.
    tt1=SparseTensor(kind='tt', val=v1)
    print(tt1)
    tt2=SparseTensor(kind='tt', val=v1, eps=2e-1)
    print(tt2)
    tt_vec=vector(v1)
    tt3=SparseTensor(kind='TT', vectorObj=tt_vec)
    print(tt3)
    # Default call (empty TT) and case-insensitive kind matching.
    tt4=SparseTensor()
    print (tt4)
    v1=np.random.rand(20, 30)
    cano=SparseTensor(kind='CAno', val=v1)
    print(cano)
    print('END')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 15 14:12:02 2019
@author: itamar
"""
import bs4 as bs
import pickle
import datetime as dt
import pandas as pd
import os
import matplotlib.pyplot as plt
from matplotlib import style
import pandas_datareader as web
import requests
# First of all you need to find a site with the table listing the B3 companies.
style.use('ggplot')
def save_B3_tickers():
    """Scrape B3 (Bovespa) ticker symbols from advfn.com and pickle them.

    Writes the list to bovtickers.pickle and also returns it.

    NOTE(review): range(24) only covers index pages 'A'..'X'; tickers
    starting with 'Y' or 'Z' are never scraped — confirm whether the site
    actually has those pages before widening the range.
    """
    tickers = []
    for i in range (24):
        # One index page per starting letter.
        site = 'https://br.advfn.com/bolsa-de-valores/bovespa/'
        site = site + chr(ord('A')+i)
        resp = requests.get(site)
        soup = bs.BeautifulSoup(resp.text,'lxml')
        table = soup.find('table',{'class':'atoz-link-bov'})
        for row in table.findAll('tr')[1:]:
            ticker = row.findAll('td')[1].text
            # Skip 'L'/'B'-suffixed listings and over-long symbols.
            if not ticker.endswith('L') and not ticker.endswith('B') and not len(ticker) > 6:
                print(ticker)
                tickers.append(ticker)
    with open ("bovtickers.pickle","wb") as f:
        pickle.dump(tickers,f)
    print(tickers)
    return tickers
def get_data_from_yahoo(reload_b3 = False):
    """Download daily price history for every B3 ticker from Yahoo Finance.

    Tickers are read from bovtickers.pickle (or re-scraped when `reload_b3`
    is True). Each ticker's data is written to stock_dfs/<ticker>.csv, and
    the tickers that actually returned data are re-pickled.

    NOTE(review): tickers whose CSV already exists are not added back to the
    pickle, so a re-run shrinks the saved list — confirm intended.
    """
    real_tickers = []
    if reload_b3:
        tickers = save_B3_tickers()
    else:
        with open("bovtickers.pickle", "rb") as f:
            tickers = pickle.load(f)
    if not os.path.exists('stock_dfs'):
        os.makedirs('stock_dfs')
    start = dt.datetime(2000,1,1)
    end = dt.datetime(2019,12,22)
    for ticker in tickers:
        print(ticker)
        if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):
            try:
                print('tentativas ', ticker)
                df = web.DataReader(ticker + '.SA','yahoo',start,end)
            except Exception:
                # BUG FIX: the original still wrote df.to_csv here, saving an
                # empty CSV for every failed ticker (as its own TODO noted).
                print('No data for {}'.format(ticker))
                continue
            real_tickers.append(ticker)
            df.to_csv('stock_dfs/{}.csv'.format(ticker))
        else:
            print('Already have {}'.format(ticker))
    with open ("bovtickers.pickle","wb") as f:
        pickle.dump(real_tickers,f)
def compile_data():
    """Merge every per-ticker CSV's 'Adj Close' column into one bovespa.csv.

    Reads the ticker list from "bovtickers.pickle"; tickers whose CSV is
    missing, empty, or lacks a 'Date' column are skipped with a message.
    """
    with open("bovtickers.pickle","rb") as f:
        tickers = pickle.load(f)
    main_df = pd.DataFrame()
    for count, ticker in enumerate(tickers):
        try:
            df = pd.read_csv('stock_dfs/{}.csv'.format(ticker))
            df.set_index('Date', inplace = True)
        except (OSError, pd.errors.EmptyDataError, KeyError):
            # Narrowed from a bare except: only skip on a missing/empty
            # file or a missing 'Date' column.
            print('no data for {}'.format(ticker))
            continue
        # Keep only the adjusted close, renamed to the ticker symbol.
        df.rename(columns = {'Adj Close':ticker}, inplace = True)
        # axis is keyword-only in pandas 2.x; the old positional `1` breaks.
        df.drop(['Open','High','Low','Close','Volume'], axis=1, inplace = True)
        if main_df.empty:
            main_df = df
        else:
            main_df = main_df.join(df, how = 'outer')
        if count % 10 == 0:
            print(count)
    print(main_df.head())
    main_df.to_csv('bovespa.csv')
# Script driver: refresh the ticker list and price histories, build the
# combined CSV, then load it into memory for downstream use.
get_data_from_yahoo(reload_b3=True)
compile_data()
dataset = pd.read_csv('bovespa.csv',index_col = 0)
|
import os
import ast
import math
import torch
import stable_nalu
import argparse
import stable_nalu.functional.regualizer as Regualizer
from decimal import Decimal
import numpy as np
import misc.utils as utils
import random
# Parse arguments
parser = argparse.ArgumentParser(description='Runs the simple function static task')
parser.add_argument('--layer-type',
action='store',
default='NALU',
choices=list(stable_nalu.network.SimpleFunctionStaticNetwork.UNIT_NAMES),
type=str,
help='Specify the layer type, e.g. Tanh, ReLU, NAC, NALU')
parser.add_argument('--operation',
action='store',
default='add',
choices=[
'add', 'sub', 'mul', 'div'
],
type=str,
help='Specify the operation to use, e.g. add, mul, squared')
parser.add_argument('--num-subsets',
action='store',
default=2,
type=int,
help='Specify the number of subsets to use')
parser.add_argument('--regualizer',
action='store',
default=10,
type=float,
help='Specify the regualization lambda to be used')
parser.add_argument('--regualizer-z',
action='store',
default=0,
type=float,
help='Specify the z-regualization lambda to be used')
parser.add_argument('--regualizer-oob',
action='store',
default=1,
type=float,
help='Specify the oob-regualization lambda to be used')
parser.add_argument('--first-layer',
action='store',
default=None,
help='Set the first layer to be a different type')
parser.add_argument('--max-iterations',
action='store',
default=100000,
type=int,
help='Specify the max number of iterations to use')
parser.add_argument('--batch-size',
action='store',
default=128,
type=int,
help='Specify the batch-size to be used for training')
parser.add_argument('--seed',
action='store',
default=0,
type=int,
help='Specify the seed to use')
parser.add_argument('--interpolation-range',
action='store',
default=[1,2],
type=ast.literal_eval,
help='Specify the interpolation range that is sampled uniformly from')
parser.add_argument('--extrapolation-range',
action='store',
default=[2,6],
type=ast.literal_eval,
help='Specify the extrapolation range that is sampled uniformly from')
parser.add_argument('--input-size',
action='store',
default=2,
type=int,
help='Specify the input size')
parser.add_argument('--output-size',
action='store',
default=1,
type=int,
help='Specify the output size')
parser.add_argument('--subset-ratio',
action='store',
default=0.5,
type=float,
help='Specify the subset-size as a fraction of the input-size')
parser.add_argument('--overlap-ratio',
action='store',
default=0.0,
type=float,
help='Specify the overlap-size as a fraction of the input-size')
parser.add_argument('--simple',
action='store_true',
default=False,
help='Use a very simple dataset with t = sum(v[0:2]) + sum(v[4:6])')
parser.add_argument('--hidden-size',
action='store',
default=2,
type=int,
help='Specify the vector size of the hidden layer.')
parser.add_argument('--nac-mul',
action='store',
default='none',
choices=['none', 'normal', 'safe', 'max-safe', 'mnac', 'npu', 'real-npu'],
type=str,
help='Make the second NAC a multiplicative NAC, used in case of a just NAC network.')
parser.add_argument('--oob-mode',
action='store',
default='clip',
choices=['regualized', 'clip'],
type=str,
help='Choose of out-of-bound should be handled by clipping or regualization.')
parser.add_argument('--regualizer-scaling',
action='store',
default='linear',
choices=['exp', 'linear'],
type=str,
help='Use an expoentational scaling from 0 to 1, or a linear scaling.')
parser.add_argument('--regualizer-scaling-start',
action='store',
default=1000000,
type=int,
help='Start linear scaling at this global step.')
parser.add_argument('--regualizer-scaling-end',
action='store',
default=2000000,
type=int,
help='Stop linear scaling at this global step.')
parser.add_argument('--regualizer-shape',
action='store',
default='linear',
choices=['squared', 'linear', 'none'],
type=str,
help='Use either a squared or linear shape for the bias and oob regualizer. Use none so W reg in tensorboard is logged at 0')
parser.add_argument('--mnac-epsilon',
action='store',
default=0,
type=float,
help='Set the idendity epsilon for MNAC.')
parser.add_argument('--nalu-bias',
action='store_true',
default=False,
help='Enables bias in the NALU gate')
parser.add_argument('--nalu-two-nac',
action='store_true',
default=False,
help='Uses two independent NACs in the NALU Layer')
parser.add_argument('--nalu-two-gate',
action='store_true',
default=False,
help='Uses two independent gates in the NALU Layer')
parser.add_argument('--nalu-mul',
action='store',
default='normal',
choices=['normal', 'safe', 'trig', 'max-safe', 'mnac', 'golden-ratio'],
help='Multplication unit, can be normal, safe, trig')
parser.add_argument('--nalu-gate',
action='store',
default='normal',
choices=['normal', 'regualized', 'obs-gumbel', 'gumbel', 'golden-ratio'],
type=str,
help='Can be normal, regualized, obs-gumbel, or gumbel')
parser.add_argument('--nac-weight',
action='store',
default='normal',
choices=['normal', 'golden-ratio'],
type=str,
help='Way to calculate the NAC+.')
parser.add_argument('--optimizer',
action='store',
default='adam',
choices=['adam', 'sgd'],
type=str,
help='The optimization algorithm to use, Adam or SGD')
parser.add_argument('--learning-rate',
action='store',
default=1e-3,
type=float,
help='Specify the learning-rate')
parser.add_argument('--momentum',
action='store',
default=0.0,
type=float,
help='Specify the nestrov momentum, only used with SGD')
parser.add_argument('--no-cuda',
action='store_true',
default=False,
help=f'Force no CUDA (cuda usage is detected automatically as {torch.cuda.is_available()})')
parser.add_argument('--name-prefix',
action='store',
default='simple_function_static',
type=str,
help='Where the data should be stored')
parser.add_argument('--remove-existing-data',
action='store_true',
default=False,
help='Should old results be removed')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Should network measures (e.g. gates) and gradients be shown')
parser.add_argument('--reg-scale-type',
action='store',
default='heim',
choices=['heim', 'madsen'],
type=str,
help='Type of npu regularisation scaling to use. Matches respective author\'s papers')
parser.add_argument('--regualizer-beta-start',
action='store',
default=1e-5,
type=float,
help='Starting value of the beta scale factor.')
parser.add_argument('--regualizer-beta-end',
action='store',
default=1e-4,
type=float,
help='Final value of the beta scale factor.')
parser.add_argument('--regualizer-beta-step',
action='store',
default=10000,
type=int,
help='Update the regualizer-beta-start value every x steps.')
parser.add_argument('--regualizer-beta-growth',
action='store',
default=10,
type=int,
help='Scale factor to grow the regualizer-beta-start value by.')
parser.add_argument('--regualizer-l1',
action='store_true',
default=False,
help='Add L1 regularization loss term. Be sure the regualizer-scaling is set')
parser.add_argument('--regualizer-npu-w',
action='store',
default=0,
type=int,
help='Use sparisty reg on npu weights. Int represents the amount to scale reg by. 0 means off')
parser.add_argument('--regualizer-gate',
type=int,
default=0,
help='Use sparisty reg on npu gate. Int represents the amount to scale reg by. 0 means off')
parser.add_argument('--npu-clip',
action='store',
default='none',
choices=['none', 'w', 'g', 'wg', 'wig'],
help='Type of parameters (if any) to clip in a NPU/RealNPU module')
parser.add_argument('--npu-Wr-init',
action='store',
default='xavier-uniform',
choices=['xavier-uniform', 'xavier-uniform-constrained'],
help='Init method to use for the W_real of the NPU. xavier-uniform= NPU paper init method,'
'xavier-uniform-constrained= NAU init method')
parser.add_argument('--pytorch-precision',
type=int,
default=32,
help='Precision for pytorch to work in')
parser.add_argument('--nmu-noise',
action='store_true',
default=False,
help='Applies/ unapplies multiplicative noise from a ~U[1,5] during training. Aids with failure ranges on a vinilla NMU.')
parser.add_argument('--nau-noise',
action='store_true',
default=False,
help='Applies/ unapplies additive noise from a ~U[1,5] during training.')
parser.add_argument('--no-save',
action='store_true',
default=False,
help='Do not save model at the end of training')
parser.add_argument('--load-checkpoint',
action='store_true',
default=False,
help='Loads a saved checkpoint and resumes training')
parser.add_argument('--log-interval',
action='store',
default=1000,
type=int,
help='Log to tensorboard every X epochs.')
parser.add_argument('--clip-grad-norm',
action='store',
default=None,
type=float,
help='Norm clip value for gradients.')
parser.add_argument('--nru-div-mode',
action='store',
default='div',
choices=['div', 'div-sepSign'],
help='Division type for NRU. div calcs mag and sign in one go. div-sepSign calcs sign separately')
parser.add_argument('--realnpu-reg-type',
action='store',
default='W',
choices=['W', 'bias'],
help='W penalises {-1,1}. bias penalises {-1,0,1}.')
parser.add_argument('--clip-grad-value',
action='store',
default=None,
type=float,
help='Clip value for gradients i.e. [-value, value].')
parser.add_argument('--reinit',
action='store_true',
default=False,
help='Enables iNALU\'s reinitialization scheme')
parser.add_argument('--reinit-epoch-interval',
action='store',
default=10,
type=int,
help='Check after this many epochs if reinitialization can occur.')
parser.add_argument('--reinit-max-stored-losses',
action='store',
default=5000,
type=int,
help='Number of losses that need to be collected before reinitialization can occur.')
parser.add_argument('--reinit-loss-thr',
action='store',
default=1.,
type=float,
help='Reinitialization only occurs if the avg accumulated loss is greater than this threshold.')
args = parser.parse_args()
utils.set_pytorch_precision(args.pytorch_precision)
setattr(args, 'cuda', torch.cuda.is_available() and not args.no_cuda)
# Print configuration
print(f'running')
print(f' - layer_type: {args.layer_type}')
print(f' - first_layer: {args.first_layer}')
print(f' - operation: {args.operation}')
print(f' - num_subsets: {args.num_subsets}')
print(f' - regualizer: {args.regualizer}')
print(f' - regualizer_z: {args.regualizer_z}')
print(f' - regualizer_oob: {args.regualizer_oob}')
print(f' -')
print(f' - max_iterations: {args.max_iterations}')
print(f' - batch_size: {args.batch_size}')
print(f' - seed: {args.seed}')
print(f' -')
print(f' - interpolation_range: {args.interpolation_range}')
print(f' - extrapolation_range: {args.extrapolation_range}')
print(f' - input_size: {args.input_size}')
print(f' - output_size: {args.output_size}')
print(f' - subset_ratio: {args.subset_ratio}')
print(f' - overlap_ratio: {args.overlap_ratio}')
print(f' - simple: {args.simple}')
print(f' -')
print(f' - hidden_size: {args.hidden_size}')
print(f' - nac_mul: {args.nac_mul}')
print(f' - oob_mode: {args.oob_mode}')
print(f' - regualizer_scaling: {args.regualizer_scaling}')
print(f' - regualizer_scaling_start: {args.regualizer_scaling_start}')
print(f' - regualizer_scaling_end: {args.regualizer_scaling_end}')
print(f' - regualizer_shape: {args.regualizer_shape}')
print(f' - mnac_epsilon: {args.mnac_epsilon}')
print(f' - nalu_bias: {args.nalu_bias}')
print(f' - nalu_two_nac: {args.nalu_two_nac}')
print(f' - nalu_two_gate: {args.nalu_two_gate}')
print(f' - nalu_mul: {args.nalu_mul}')
print(f' - nalu_gate: {args.nalu_gate}')
print(f' - nac_weight: {args.nac_weight}')
print(f' -')
print(f' - optimizer: {args.optimizer}')
print(f' - learning_rate: {args.learning_rate}')
print(f' - momentum: {args.momentum}')
print(f' -')
print(f' - cuda: {args.cuda}')
print(f' - name_prefix: {args.name_prefix}')
print(f' - remove_existing_data: {args.remove_existing_data}')
print(f' - verbose: {args.verbose}')
print(f' -')
print(f' - reg_scale_type: {args.reg_scale_type}')
print(f' - regualizer_beta_start: {args.regualizer_beta_start}')
print(f' - regualizer_beta_end: {args.regualizer_beta_end}')
print(f' - regualizer_beta_step: {args.regualizer_beta_step}')
print(f' - regualizer_beta_growth: {args.regualizer_beta_growth}')
print(f' - regualizer_l1: {args.regualizer_l1}')
print(f' - regualizer-npu-w: {args.regualizer_npu_w}')
print(f' - regualizer-gate: {args.regualizer_gate}')
print(f' - npu-clip: {args.npu_clip}')
print(f' - npu-Wr-init: {args.npu_Wr_init}')
print(f' -')
print(f' - pytorch-precision: {torch.get_default_dtype()}')
print(f' -')
print(f' - no-save: {args.no_save}')
print(f' - load-checkpoint: {args.load_checkpoint}')
print(f' - log-interval: {args.log_interval}')
print(f' -')
print(f' - clip-grad-norm: {args.clip_grad_norm}')
print(f' - nru_div_mode: {args.nru_div_mode}')
print(f' - realnpu_reg_type: {args.realnpu_reg_type}')
print(f' -')
print(f' - reinit: {args.reinit}')
print(f' - reinit_epoch_interval: {args.reinit_epoch_interval}')
print(f' - reinit_max_stored_losses: {args.reinit_max_stored_losses}')
print(f' - reinit_loss_thr: {args.reinit_loss_thr}')
print(f' -')
def get_npu_Wr_init_writer_value():
    """Map the --npu-Wr-init choice to the short tag used in the writer name."""
    abbreviations = {
        'xavier-uniform': 'xu',
        'xavier-uniform-constrained': 'xuc',
    }
    tag = abbreviations.get(args.npu_Wr_init)
    if tag is None:
        raise ValueError(f'Invalid arg ({args.npu_Wr_init}) given for npu_Wr_init')
    return tag
# Prepear logging
# summary_writer = stable_nalu.writer.DummySummaryWriter()
summary_writer = stable_nalu.writer.SummaryWriter(
f'{args.name_prefix}/{args.layer_type.lower()}'
# f'{"-nac-" if args.nac_mul != "none" else ""}'
# f'{"n" if args.nac_mul == "normal" else ""}'
# f'{"s" if args.nac_mul == "safe" else ""}'
# f'{"s" if args.nac_mul == "max-safe" else ""}'
# f'{"t" if args.nac_mul == "trig" else ""}'
# f'{"m" if args.nac_mul == "mnac" else ""}'
# f'{"npu" if args.nac_mul == "npu" else ""}'
# f'{"npur" if args.nac_mul == "real-npu" else ""}'
# f'{"-nalu-" if (args.nalu_bias or args.nalu_two_nac or args.nalu_two_gate or args.nalu_mul != "normal" or args.nalu_gate != "normal") else ""}'
f'{"-gr" if args.nac_weight == "golden-ratio" and (args.layer_type == "NALU" or args.layer_type == "NAC") else ""}'
f'{"-b" if args.nalu_bias and args.layer_type == "NALU" else ""}'
f'{"-2n" if args.nalu_two_nac and args.layer_type == "NALU" else ""}'
f'{"-2g" if args.nalu_two_gate and args.layer_type == "NALU" else ""}'
f'{"-s" if args.nalu_mul == "safe" and args.layer_type == "NALU" else ""}'
f'{"-s" if args.nalu_mul == "max-safe" and args.layer_type == "NALU" else ""}'
f'{"-t" if args.nalu_mul == "trig" and args.layer_type == "NALU" else ""}'
f'{"-m" if args.nalu_mul == "mnac" and args.layer_type == "NALU" else ""}'
f'{"-r" if args.nalu_gate == "regualized" and args.layer_type == "NALU" else ""}'
f'{"-u" if args.nalu_gate == "gumbel" and args.layer_type == "NALU" else ""}'
f'{"-uu" if args.nalu_gate == "obs-gumbel" and args.layer_type == "NALU" else ""}'
f'{"-sS" if args.nru_div_mode == "div-sepSign" and args.layer_type == "NRU" else ""}'
f'_op-{args.operation.lower()}'
f'_oob-{"c" if args.oob_mode == "clip" else "r"}'
f'_rs-{args.regualizer_scaling}-{args.regualizer_shape}'
f'_eps-{args.mnac_epsilon}'
f'_rl-{args.regualizer_scaling_start}-{args.regualizer_scaling_end}'
f'_r-{args.regualizer}-{args.regualizer_z}-{args.regualizer_oob}'
f'_i-{args.interpolation_range[0]}-{args.interpolation_range[1]}'
f'_e-{args.extrapolation_range[0]}-{args.extrapolation_range[1]}'
f'_z-{"simple" if args.simple else f"{args.input_size}-{args.subset_ratio}-{args.overlap_ratio}"}'
f'_b{args.batch_size}'
f'_s{args.seed}'
f'_h{args.hidden_size}'
f'_z{args.num_subsets}'
f'_lr-{args.optimizer}-{"%.5f" % args.learning_rate}-{args.momentum}'
f'_L1{"T" if args.regualizer_l1 else f"F"}'
f'_rb-{args.regualizer_beta_start}-{args.regualizer_beta_end}-{args.regualizer_beta_step}-{args.regualizer_beta_growth}'
f'_rWnpu-{args.regualizer_npu_w}-{args.realnpu_reg_type[0]}'
f'_rg-{args.regualizer_gate}'
f'_r{"H" if args.reg_scale_type == "heim" else f"M"}'
f'_clip-{args.npu_clip if args.npu_clip != "none" else args.npu_clip[0]}'
f'_WrI-{get_npu_Wr_init_writer_value()}'
#f'_p-{args.pytorch_precision}'
f'_gn-{args.clip_grad_norm if args.clip_grad_norm != None else f"F"}'
f'_gv-{args.clip_grad_value if args.clip_grad_value != None else f"F"}'
f'_r{str(args.reinit)[0]}-{args.reinit_epoch_interval}-{args.reinit_max_stored_losses}',
# f'_TB-{args.log_interval}',
remove_existing_data=args.remove_existing_data
)
# Set threads
if 'LSB_DJOB_NUMPROC' in os.environ:
torch.set_num_threads(int(os.environ['LSB_DJOB_NUMPROC']))
# Set seed
def seed_torch(seed):
    """Seed every RNG in play (hash, python, numpy, torch CPU and CUDA) and
    force deterministic cudnn kernels, for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers the multi-GPU case
    # Trade cudnn auto-tuning for bitwise-reproducible kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
seed_torch(args.seed)
# set epsilon for numerical stability
eps = torch.finfo().eps
# Setup datasets
dataset = stable_nalu.dataset.SimpleFunctionStaticDataset(
operation=args.operation,
input_size=args.input_size,
subset_ratio=args.subset_ratio,
overlap_ratio=args.overlap_ratio,
num_subsets=args.num_subsets,
simple=args.simple,
use_cuda=args.cuda,
seed=args.seed,
)
print(f' -')
print(f' - dataset: {dataset.print_operation()}')
# Interpolation and extrapolation seeds are from random.org
dataset_train = iter(dataset.fork(sample_range=args.interpolation_range).dataloader(batch_size=args.batch_size))
dataset_valid_interpolation_data = next(iter(dataset.fork(sample_range=args.interpolation_range, seed=43953907).dataloader(batch_size=10000)))
dataset_test_extrapolation_data = next(iter(dataset.fork(sample_range=args.extrapolation_range, seed=8689336).dataloader(batch_size=10000)))
# setup model
model = stable_nalu.network.SingleLayerNetwork(
args.layer_type,
input_size=dataset.get_input_size(),
output_size=args.output_size,
writer=summary_writer.every(args.log_interval).verbose(args.verbose),
first_layer=args.first_layer,
hidden_size=args.hidden_size,
nac_oob=args.oob_mode,
regualizer_shape=args.regualizer_shape,
regualizer_z=args.regualizer_z,
mnac_epsilon=args.mnac_epsilon,
nac_mul=args.nac_mul,
nalu_bias=args.nalu_bias,
nalu_two_nac=args.nalu_two_nac,
nalu_two_gate=args.nalu_two_gate,
nalu_mul=args.nalu_mul,
nalu_gate=args.nalu_gate,
nac_weight=args.nac_weight,
regualizer_gate=args.regualizer_gate,
regualizer_npu_w=args.regualizer_npu_w,
npu_clip=args.npu_clip,
npu_Wr_init=args.npu_Wr_init,
nru_div_mode=args.nru_div_mode,
realnpu_reg_type=args.realnpu_reg_type
)
model.reset_parameters()
if args.cuda:
model.cuda()
criterion = torch.nn.MSELoss()
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum)
else:
raise ValueError(f'{args.optimizer} is not a valid optimizer algorithm')
def test_model(data):
    """Return the criterion loss of the module-level `model` on `data`
    (an (inputs, targets) pair), evaluated without gradients, internal
    logging or stochastic layers; the model is restored to train mode."""
    with torch.no_grad(), model.no_internal_logging(), model.no_random():
        model.eval()
        inputs, targets = data
        loss = criterion(model(inputs), targets)
        model.train()
        return loss
# Train model
print(model)
print('')
print(summary_writer.name)
print('')
# only print inits of small models
utils.print_model_params(model) if args.input_size <= 10 else None
print()
use_npu_scaling = args.regualizer_l1 or (args.regualizer_npu_w and args.reg_scale_type == 'heim') \
or (args.regualizer_gate and args.reg_scale_type == 'heim')
if use_npu_scaling:
# Decimal type required to avoid accumulation of fp precision errors when multiplying by growth factor
args.regualizer_beta_start = Decimal(str(args.regualizer_beta_start))
# Decimal and fp arithmetic don't mix so beta end must also be a decimal
args.regualizer_beta_end = Decimal(str(args.regualizer_beta_end))
r_l1_scale = args.regualizer_beta_start
'''Resuming previous training'''
resume_epoch = 0
if args.load_checkpoint:
resume_epoch = stable_nalu.writer.load_model(summary_writer.name, model, optimizer)
if resume_epoch > args.max_iterations:
raise ValueError(
f'{args.max_iterations} must be larger than or equal to the loaded models resume epoch {resume_epoch}')
if resume_epoch != 0:
for i, j in zip(range(resume_epoch), dataset_train):
(x_train, t_train) = j
print("Checkpoint loaded")
print('train %d: %.5f, inter: %.5f, extra: %.5f' % (resume_epoch, test_model((x_train, t_train)), test_model(dataset_valid_interpolation_data), test_model(dataset_test_extrapolation_data)))
'''------------------'''
if args.reinit:
epoch_losses = []
reinit_counter = 0
# Main training loop: one batch per iteration, from resume_epoch up to and
# including max_iterations (dataset_train is an endless iterator, so zip()
# is bounded by the range()).
for epoch_i, (x_train, t_train) in zip(range(resume_epoch, args.max_iterations + 1), dataset_train):
    summary_writer.set_iteration(epoch_i)
    # Prepear model
    # Anneal 'tau' from 1.0 down towards a floor of 0.5 — presumably the
    # gumbel-gate temperature; TODO confirm against set_parameter's consumer.
    model.set_parameter('tau', max(0.5, math.exp(-1e-5 * epoch_i)))
    optimizer.zero_grad()
    # Log validation
    # NOTE(review): interpolation_error/extrapolation_error refresh only
    # every log_interval epochs; the reads further down reuse stale values.
    if epoch_i % args.log_interval == 0:
        interpolation_error = test_model(dataset_valid_interpolation_data)
        extrapolation_error = test_model(dataset_test_extrapolation_data)
        summary_writer.add_scalar('metric/valid/interpolation', interpolation_error)
        summary_writer.add_scalar('metric/test/extrapolation', extrapolation_error)
    # forward
    y_train = model(x_train)
    regualizers = model.regualizer() # logs 3 reg metrics to tensorbord if verbose
    # Warm-up schedule for the W regularizer weight: ramps 0 -> 1 either
    # linearly between scaling_start/scaling_end, or exponentially.
    if (args.regualizer_scaling == 'linear'):
        r_w_scale = max(0, min(1, (
            (epoch_i - args.regualizer_scaling_start) /
            (args.regualizer_scaling_end - args.regualizer_scaling_start)
        )))
    elif (args.regualizer_scaling == 'exp'):
        r_w_scale = 1 - math.exp(-1e-5 * epoch_i)
    l1_loss = 0
    if args.regualizer_l1:
        l1_loss = Regualizer.l1(model.parameters())
        if args.verbose:
            summary_writer.add_scalar('L1/train/L1-loss', l1_loss)
    if use_npu_scaling:
        # the beta_start value will be updated accordingly to be the correct beta value for the epoch.
        # It is done this way to avoid having initialise another variable outside the epoch loop
        # Grow (or shrink) beta by regualizer_beta_growth every
        # regualizer_beta_step epochs until it reaches beta_end; the values
        # are Decimal (set above) to avoid fp drift under repeated scaling.
        if args.regualizer_beta_start <= args.regualizer_beta_end:
            if epoch_i % args.regualizer_beta_step == 0 and epoch_i != 0:
                if args.regualizer_beta_start < args.regualizer_beta_end:
                    args.regualizer_beta_start *= args.regualizer_beta_growth
        else:
            if epoch_i % args.regualizer_beta_step == 0 and epoch_i != 0:
                if args.regualizer_beta_start > args.regualizer_beta_end:
                    args.regualizer_beta_start /= args.regualizer_beta_growth
        r_l1_scale = float(args.regualizer_beta_start) # Decimal doesn't work for tensorboard or mixed fp arithmetic
        summary_writer.add_scalar('L1/train/beta', r_l1_scale)
    # mse loss
    loss_train_criterion = criterion(y_train, t_train)
    # Total regularization: each term is gated by its lambda (0 disables it).
    # The last term is iNALU's extra penalty, active only once interpolation
    # error is below 1 and training is past 10k epochs.
    loss_train_regualizer = args.regualizer * r_w_scale * regualizers['W'] + \
        regualizers['g'] + \
        args.regualizer_z * regualizers['z'] + \
        args.regualizer_oob * regualizers['W-OOB'] + \
        args.regualizer_l1 * r_l1_scale * l1_loss + \
        args.regualizer_npu_w * (r_l1_scale if args.reg_scale_type == 'heim' else r_w_scale) * regualizers['W-NPU'] + \
        args.regualizer_gate * (r_l1_scale if args.reg_scale_type == 'heim' else r_w_scale) * regualizers['g-NPU'] + \
        ((0.05 * regualizers['inalu']) if (interpolation_error < 1 and epoch_i > 10000) else 0)
    loss_train = loss_train_criterion + loss_train_regualizer
    # Log the loss
    if args.verbose or epoch_i % args.log_interval == 0:
        summary_writer.add_scalar('loss/train/critation', loss_train_criterion)
        summary_writer.add_scalar('loss/train/regualizer', loss_train_regualizer)
        summary_writer.add_scalar('loss/train/total', loss_train)
    if epoch_i % args.log_interval == 0:
        print('train %d: %.5f, inter: %.5f, extra: %.5f' % (epoch_i, loss_train_criterion, interpolation_error, extrapolation_error))
    # Optimize model
    # Backward/step only when the loss actually carries a gradient graph.
    if loss_train.requires_grad:
        loss_train.backward()
        if args.clip_grad_norm != None:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
        if args.clip_grad_value != None:
            torch.nn.utils.clip_grad_value_(model.parameters(), args.clip_grad_value)
        optimizer.step()
    model.optimize(loss_train_criterion)
    # Log gradients if in verbose mode
    if args.verbose and epoch_i % args.log_interval == 0:
        model.log_gradients()
        # model.log_gradient_elems()
    '''
    inalu reinit conditions:
    - every 10th epoch (and not the first epoch) where the number of stored errors is over 5,000.
    - if the average err value of the first half of the errors is less than the 2nd half + sdev and the avg loss of the
    latter half is larger than 1
    '''
    if args.reinit:
        epoch_losses.append(interpolation_error)
        if epoch_i % args.reinit_epoch_interval == 0 and epoch_i > 0 and len(epoch_losses) > args.reinit_max_stored_losses:
            losses_last_half = epoch_losses[len(epoch_losses) // 2:]
            # Reinitialize when loss has stagnated (first half not better
            # than second half + one stddev) and is still above threshold.
            if np.mean(epoch_losses[0:len(epoch_losses) // 2]) <= (np.mean(losses_last_half) + np.std(losses_last_half)) \
                    and (np.mean(losses_last_half) > args.reinit_loss_thr):
                model.reset_parameters()
                print(f"reinit number {reinit_counter}")
                summary_writer._root.writer.add_text(f'reinit', str(reinit_counter), epoch_i)
                epoch_losses = []
                reinit_counter += 1
# Compute validation loss
loss_valid_inter = test_model(dataset_valid_interpolation_data)
loss_valid_extra = test_model(dataset_test_extrapolation_data)
# Write results for this training
print(f'finished:')
if args.reinit:
print(f'Reinitialized {reinit_counter} times')
print(f' - loss_train: {loss_train}')
print(f' - loss_valid_inter: {loss_valid_inter}')
print(f' - loss_valid_extra: {loss_valid_extra}')
print()
utils.print_model_params(model)
if not args.no_save:
model.writer._root.close() # fix - close summary writer before saving model to avoid thread locking issues
# Use saved weights to visualize the intermediate values.
stable_nalu.writer.save_model_checkpoint(summary_writer.name, epoch_i + 1, model, optimizer,
{'torch': torch.get_rng_state(),
'numpy': np.random.get_state()}
)
|
<reponame>rr-/termi
import argparse
import json
import sys
import time
from PIL import Image
from termi import term
from termi import term_settings
from termi import renderer
def positive_int(value):
    """argparse type: parse *value* as an int strictly greater than zero."""
    number = int(value)
    if number >= 1:
        return number
    raise argparse.ArgumentTypeError('Only positive integers allowed')
def parse_args():
    """Build and parse the termi command line.

    Argument order is preserved (it is visible in --help output).
    --loop implies --animate.
    """
    cli = argparse.ArgumentParser(
        prog='termi', description='Convert images to ASCII')
    cli.add_argument(
        '--glyph-ar', dest='glyph_ar', metavar='NUM', type=float,
        default=2, help='character aspect ratio (default: 2)')
    cli.add_argument(
        '--width', metavar='NUM', type=int, default=None,
        help='target width in characters (default: terminal width)')
    cli.add_argument(
        '--height', metavar='NUM', type=int, default=None,
        help='target height in characters (default: terminal height)')
    cli.add_argument(
        metavar='PATH', dest='input_path',
        help='where to get the input image from')
    cli.add_argument(
        '--depth', metavar='NUM', dest='depth', type=int,
        default=8, choices=(4, 8, 24),
        help='color bit resolution (default: 8)')
    cli.add_argument(
        '--palette', metavar='PATH', dest='palette_path',
        help='custom palette (JSON); for --depth=4 can be also "dark" or "light"')
    cli.add_argument(
        '--scale', default='lanczos',
        choices=('lanczos', 'bicubic', 'nearest'),
        help='how to scale the image')
    cli.add_argument(
        '--animate', action='store_true', help='animate GIF images')
    cli.add_argument(
        '--loop', action='store_true', help='loop the animation until ^C')
    args = cli.parse_args()
    # Looping only makes sense for an animation, so --loop forces --animate.
    args.animate = args.animate or args.loop
    return args
def _get_palette(depth, path):
if depth == 4:
return term_settings.PALETTE_16_DARK
if depth == 8:
return term_settings.PALETTE_256
if path:
if depth == 24:
raise RuntimeError('Palette doesn\'t make sense with --depth=24')
if path == 'dark':
if depth != 4:
raise RuntimeError('Dark palette can be only used with --depth=4')
return term_settings.PALETTE_16_DARK
if path == 'light':
if depth != 4:
raise RuntimeError('Light palette can be only used with --depth=4')
return term_settings.PALETTE_16_LIGHT
with open(path, 'r') as handle:
return json.load(handle)
return None
def main():
    """Entry point: render an image (or animated GIF) as colored terminal text."""
    args = parse_args()
    # Fill any missing target dimension from the terminal size, minus one
    # so the output doesn't force a scroll.
    target_size = [args.width, args.height]
    for i in range(2):
        if not target_size[i]:
            terminal_size = term_settings.get_term_size()
            target_size[i] = terminal_size[i] - 1
    palette = _get_palette(args.depth, args.palette_path)
    image = Image.open(args.input_path)
    if palette:
        palette_image = renderer.create_palette_image(palette)
    else:
        palette_image = None
    # Pick the terminal color mixer matching the requested bit depth.
    output_strategy = {
        24: term.mix_true_color,
        8: term.mix_256,
        4: term.mix_16,
    }[args.depth]
    scale_strategy = {
        'lanczos': Image.LANCZOS,
        'bicubic': Image.BICUBIC,
        'nearest': Image.NEAREST,
    }[args.scale]
    # Render and print the first (or only) frame.
    frame = renderer.render_image(
        image, target_size, args.glyph_ar, palette_image,
        output_strategy, scale_strategy)
    print(frame, end='')
    height = frame.count('\n')
    if args.animate and getattr(image, 'is_animated', False):
        # Pre-render every frame so playback isn't decode-bound.
        frames = []
        while image.tell() + 1 < image.n_frames:
            print(
                'decoding frame {0} / {1}'.format(image.tell(), image.n_frames),
                file=sys.stderr,
                end='\r')
            frame = renderer.render_image(
                image, target_size, args.glyph_ar, palette_image,
                output_strategy, scale_strategy)
            frames.append(frame)
            image.seek(image.tell() + 1)
        print(term.clear_current_line(), end='')
        while True:
            for frame in frames:
                try:
                    # Move the cursor back over the previous frame and redraw.
                    print(term.move_cursor_up(height) + frame, end='')
                    if term.is_interactive():
                        # Honor the GIF's per-frame duration (milliseconds).
                        time.sleep(image.info['duration'] / 1000)
                except (KeyboardInterrupt, SystemExit):
                    return
            if not args.loop:
                return
if __name__ == '__main__':
    main()
|
import hashlib
import hmac
from io import BytesIO
from struct import unpack, pack
from zlib import crc32
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
import scapy.modules.six as six
from scapy.modules.six.moves import range
from scapy.compat import hex_bytes, orb
from scapy.packet import Raw
# ARC4
def ARC4_encrypt(key, data, skip=0):
    """Encrypt data @data with key @key, skipping @skip first bytes of the
    keystream.

    @key and @data are bytes; returns the ciphertext bytes.
    """
    algorithm = algorithms.ARC4(key)
    cipher = Cipher(algorithm, mode=None, backend=default_backend())
    encryptor = cipher.encryptor()
    if skip:
        # Burn the first @skip keystream bytes. Must feed bytes: the
        # cryptography backend rejects str on Python 3, so the previous
        # "\x00" * skip raised TypeError whenever skip was nonzero.
        encryptor.update(b"\x00" * skip)
    return encryptor.update(data)
def ARC4_decrypt(key, data, skip=0):
    """Decrypt data @data with key @key, skipping @skip first bytes of the
    keystream. RC4 is symmetric, so decryption is just encryption again."""
    return ARC4_encrypt(key, data, skip=skip)
# Custom WPA PseudoRandomFunction
def customPRF512(key, amac, smac, anonce, snonce):
    """WPA pseudo-random function (PRF-512).

    Derives a 64-byte key block from the PMK @key, the two MAC addresses
    (@amac, @smac) and the two nonces (@anonce, @snonce), by iterating
    HMAC-SHA1 over "Pairwise key expansion" || 0x00 || B || counter.

    NOTE(review): relies on Python 2 byte-string semantics (str
    concatenation with chr(), hmac over str); will not run unmodified
    on Python 3 — confirm the target interpreter.

    Source https://stackoverflow.com/questions/12018920/
    """
    A = "Pairwise key expansion"
    # B = min(MAC)||max(MAC)||min(nonce)||max(nonce)
    B = "".join(sorted([amac, smac]) + sorted([anonce, snonce]))
    blen = 64
    i = 0
    R = ''
    # Enough 20-byte HMAC-SHA1 rounds to cover blen bytes; result trimmed below
    while i<=((blen*8+159)/160):
        hmacsha1 = hmac.new(key,A+chr(0x00)+B+chr(i), hashlib.sha1)
        i+=1
        R = R+hmacsha1.digest()
    return R[:blen]
# TKIP - WEPSeed generation
# Tested against pyDot11: tkip.py
# 802.11i p.53-54
_SBOXS = [
[
0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A
],
[
0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491,
0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC,
0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB,
0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B,
0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83,
0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A,
0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F,
0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA,
0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B,
0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713,
0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6,
0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85,
0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411,
0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B,
0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1,
0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF,
0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E,
0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6,
0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B,
0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD,
0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8,
0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2,
0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049,
0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810,
0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197,
0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F,
0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C,
0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927,
0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733,
0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5,
0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 0x31D7, 0xC684, 0xB8D0,
0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B, 0xFCA8, 0xD66D, 0x3A2C
]
]
# 802.11i Annex H
# Number of S-box mixing iterations in TKIP key-mixing phase 1
PHASE1_LOOP_CNT = 8
def _MK16(b1, b2):
return (b1 << 8) | b2
def _SBOX16(index):
    """16-bit TKIP S-box: low byte through table 0 XOR high byte through table 1."""
    low = index & 0xff
    high = index >> 8
    return _SBOXS[0][low] ^ _SBOXS[1][high]
def _CAST16(value):
return value & 0xffff
def _RotR1(value):
return ((value >> 1) & 0x7fff) | (value << 15)
def gen_TKIP_RC4_key(TSC, TA, TK):
    """Implement TKIP WEPSeed generation (802.11i Annex H)

    TSC: packet IV, list of 6 ints (TSC0..TSC5)
    TA: transmitter address, list of 6 ints
    TK: temporal key, list of 16 ints
    Returns the 16-byte per-packet RC4 key.

    NOTE(review): result is built with chr()/str.join, i.e. Python 2
    byte-string semantics — confirm the target interpreter.
    """
    assert len(TSC) == 6
    assert len(TA) == 6
    assert len(TK) == 16
    assert all(isinstance(x, six.integer_types) for x in TSC + TA + TK)

    # Phase 1: mix TA and the upper 32 bits of TSC into TTAK
    # 802.11i p.54
    # Phase 1 - Step 1
    TTAK = []
    TTAK.append(_MK16(TSC[3], TSC[2]))
    TTAK.append(_MK16(TSC[5], TSC[4]))
    TTAK.append(_MK16(TA[1], TA[0]))
    TTAK.append(_MK16(TA[3], TA[2]))
    TTAK.append(_MK16(TA[5], TA[4]))

    # Phase 1 - Step 2: PHASE1_LOOP_CNT S-box mixing rounds keyed by TK
    for i in range(PHASE1_LOOP_CNT):
        # j alternates 0/2, selecting different TK byte pairs each round
        j = 2 * (i & 1)
        TTAK[0] = _CAST16(TTAK[0] + _SBOX16(TTAK[4] ^ _MK16(TK[1 + j], TK[0 + j])))
        TTAK[1] = _CAST16(TTAK[1] + _SBOX16(TTAK[0] ^ _MK16(TK[5 + j], TK[4 + j])))
        TTAK[2] = _CAST16(TTAK[2] + _SBOX16(TTAK[1] ^ _MK16(TK[9 + j], TK[8 + j])))
        TTAK[3] = _CAST16(TTAK[3] + _SBOX16(TTAK[2] ^ _MK16(TK[13 + j], TK[12 + j])))
        TTAK[4] = _CAST16(TTAK[4] + _SBOX16(TTAK[3] ^ _MK16(TK[1 + j], TK[0 + j])) + i)

    # Phase 2: mix TTAK, TK and the low 16 bits of TSC into PPK
    # 802.11i p.56
    # Phase 2 - Step 1
    PPK = list(TTAK)
    PPK.append(_CAST16(TTAK[4] + _MK16(TSC[1], TSC[0])))

    # Phase 2 - Step 2: one S-box round, then a rotate-right diffusion pass
    PPK[0] = _CAST16(PPK[0] + _SBOX16(PPK[5] ^ _MK16(TK[1], TK[0])))
    PPK[1] = _CAST16(PPK[1] + _SBOX16(PPK[0] ^ _MK16(TK[3], TK[2])))
    PPK[2] = _CAST16(PPK[2] + _SBOX16(PPK[1] ^ _MK16(TK[5], TK[4])))
    PPK[3] = _CAST16(PPK[3] + _SBOX16(PPK[2] ^ _MK16(TK[7], TK[6])))
    PPK[4] = _CAST16(PPK[4] + _SBOX16(PPK[3] ^ _MK16(TK[9], TK[8])))
    PPK[5] = _CAST16(PPK[5] + _SBOX16(PPK[4] ^ _MK16(TK[11], TK[10])))

    PPK[0] = _CAST16(PPK[0] + _RotR1(PPK[5] ^ _MK16(TK[13], TK[12])))
    PPK[1] = _CAST16(PPK[1] + _RotR1(PPK[0] ^ _MK16(TK[15], TK[14])))
    PPK[2] = _CAST16(PPK[2] + _RotR1(PPK[1]))
    PPK[3] = _CAST16(PPK[3] + _RotR1(PPK[2]))
    PPK[4] = _CAST16(PPK[4] + _RotR1(PPK[3]))
    PPK[5] = _CAST16(PPK[5] + _RotR1(PPK[4]))

    # Phase 2 - Step 3: assemble the 16-byte RC4 key; byte 1 is the
    # WEPSeed byte derived from TSC1 (avoids weak RC4 key classes)
    WEPSeed = []
    WEPSeed.append(TSC[1])
    WEPSeed.append((TSC[1] | 0x20) & 0x7f)
    WEPSeed.append(TSC[0])
    WEPSeed.append(((PPK[5] ^ _MK16(TK[1], TK[0])) >> 1) & 0xFF)
    for i in range(6):
        WEPSeed.append(PPK[i] & 0xFF)
        WEPSeed.append(PPK[i] >> 8)
    assert len(WEPSeed) == 16
    return "".join([chr(x) for x in WEPSeed])
# TKIP - Michael
# Tested against cryptopy (crypto.keyedHash.michael: Michael)
def _rotate_right32(value, shift):
return (value >> (shift % 32) | value << ((32 - shift) % 32)) & 0xFFFFFFFF
def _rotate_left32(value, shift):
return (value << (shift % 32) | value >> ((32 - shift) % 32)) & 0xFFFFFFFF
def _XSWAP(value):
"""Swap 2 least significant bytes of @value"""
return ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8)
def _michael_b(l, r):
    """One round of the Michael block function (802.11i p.49)."""
    left, right = l, r
    mask = 0xFFFFFFFF
    right ^= _rotate_left32(left, 17)
    left = (left + right) & mask
    right ^= _XSWAP(left)
    left = (left + right) & mask
    right ^= _rotate_left32(left, 3)
    left = (left + right) & mask
    right ^= _rotate_right32(left, 2)
    left = (left + right) & mask
    return left, right
def michael(key, to_hash):
    """Michael message integrity code, defined in 802.11i p.48.

    key: 8-byte MIC key (two little-endian 32-bit words)
    to_hash: message to authenticate
    Returns the 8-byte MIC.

    NOTE(review): relies on Python 2 byte-string semantics (chr()
    concatenation, struct.unpack on str) — confirm target interpreter.
    """
    # Block size: 4
    nb_block, nb_extra_bytes = divmod(len(to_hash), 4)

    # Add padding: one 0x5a byte then zeros, so nb_block + 2 full
    # 4-byte blocks are available below
    data = to_hash + chr(0x5a) + "\x00" * (7 - nb_extra_bytes)

    # Hash
    l, r = unpack('<II', key)
    for i in range(nb_block + 2):
        # Convert i-th block to int
        block_i = unpack('<I', data[i*4:i*4 + 4])[0]
        l ^= block_i
        l, r = _michael_b(l, r)
    return pack('<II', l, r)
# TKIP packet utils
def parse_TKIP_hdr(pkt):
    """Extract TSCs, TA and encoded-data from a packet @pkt.

    Returns (TSC, TA, payload) where TSC is the 6-byte TKIP sequence
    counter as a list of ints [TSC0..TSC5], TA the transmitter address
    bytes, and payload the remaining (still encrypted) data.

    Raises ValueError when the Extended IV bit is absent (it is
    mandatory for TKIP, 802.11i p.46).
    """
    # Note: FCS bit is not handled
    assert pkt.FCfield.wep

    # 802.11i - 8.3.2.2: IV/KeyID header is TSC1, WEPSeed, TSC0, flags
    payload = BytesIO(pkt[Raw].load)
    TSC1, WEPseed, TSC0, bitfield = (orb(x) for x in payload.read(4))
    if bitfield & (1 << 5):
        # Extended IV present: next 4 bytes carry TSC2..TSC5
        TSC2, TSC3, TSC4, TSC5 = (orb(x) for x in payload.read(4))
    else:
        # 802.11i p.46: Extended IV is mandatory for TKIP.
        # (The former dead TSC2..TSC5 = None assignments before this
        # raise have been removed.)
        raise ValueError("Extended IV must be set for TKIP")

    # 802.11i p. 46: the WEPSeed byte is derived from TSC1
    assert (TSC1 | 0x20) & 0x7f == WEPseed

    TA = [orb(e) for e in hex_bytes(pkt.addr2.replace(':', ''))]
    TSC = [TSC0, TSC1, TSC2, TSC3, TSC4, TSC5]

    return TSC, TA, payload.read()
def build_TKIP_payload(data, iv, mac, tk):
    """Build a TKIP header for IV @iv and mac @mac, and encrypt @data
    based on temporal key @tk

    data: plaintext (expected to already carry MIC and ICV)
    iv: 48-bit TKIP sequence counter as an int
    mac: transmitter MAC as "aa:bb:cc:dd:ee:ff"
    tk: 16-byte temporal key

    NOTE(review): header is built with chr() concatenation, i.e.
    Python 2 byte-string semantics — confirm target interpreter.
    """
    # Split the 48-bit IV into its 6 TSC bytes (TSC5 most significant)
    TSC5, TSC4, TSC3, TSC2, TSC1, TSC0 = (
        (iv >> 40) & 0xFF,
        (iv >> 32) & 0xFF,
        (iv >> 24) & 0xFF,
        (iv >> 16) & 0xFF,
        (iv >> 8) & 0xFF,
        iv & 0xFF
    )
    bitfield = 1 << 5 # Extended IV flag (mandatory for TKIP)
    # Header layout (802.11i 8.3.2.2): TSC1, WEPSeed, TSC0, flags, TSC2..TSC5
    TKIP_hdr = chr(TSC1) + chr((TSC1 | 0x20) & 0x7f) + chr(TSC0) + chr(bitfield)
    TKIP_hdr += chr(TSC2) + chr(TSC3) + chr(TSC4) + chr(TSC5)

    TA = [orb(e) for e in hex_bytes(mac.replace(':', ''))]
    TSC = [TSC0, TSC1, TSC2, TSC3, TSC4, TSC5]
    TK = [orb(x) for x in tk]

    # Per-packet RC4 key from the TKIP key-mixing function
    rc4_key = gen_TKIP_RC4_key(TSC, TA, TK)

    return TKIP_hdr + ARC4_encrypt(rc4_key, data)
def parse_data_pkt(pkt, tk):
    """Extract data from a WPA packet @pkt with temporal key @tk

    Returns the decrypted payload (MIC and ICV still appended);
    use check_MIC_ICV() to verify and strip them.
    """
    TSC, TA, data = parse_TKIP_hdr(pkt)
    TK = [orb(x) for x in tk]

    # Per-packet RC4 key from the TKIP key-mixing function
    rc4_key = gen_TKIP_RC4_key(TSC, TA, TK)

    return ARC4_decrypt(rc4_key, data)
class ICVError(Exception):
    """Raised when the computed ICV differs from the expected one."""
class MICError(Exception):
    """Raised when the computed MIC differs from the expected one."""
def check_MIC_ICV(data, mic_key, source, dest):
    """Check MIC, ICV & return the data from a decrypted TKIP packet

    data: decrypted payload laid out as DATA | MIC (8 bytes) | ICV (4 bytes)
    mic_key: 8-byte Michael key
    source, dest: MAC addresses as "aa:bb:cc:dd:ee:ff"
    Raises ICVError / MICError when the respective check fails.

    NOTE(review): concatenates hex_bytes() output with "\x00" str
    literals, i.e. Python 2 byte-string semantics.
    """
    assert len(data) > 12

    # DATA - MIC(DA - SA - Priority=0 - 0 - 0 - 0 - DATA) - ICV
    # 802.11i p.47
    ICV = data[-4:]
    MIC = data[-12:-4]
    data_clear = data[:-12]

    # ICV is a little-endian CRC32 over DATA|MIC
    expected_ICV = pack("<I", crc32(data_clear + MIC) & 0xFFFFFFFF)
    if expected_ICV != ICV:
        raise ICVError()

    sa = hex_bytes(source.replace(":", ""))  # Source MAC
    da = hex_bytes(dest.replace(":", ""))  # Dest MAC

    # MIC covers DA | SA | priority(0) | 3 zero bytes | DATA
    expected_MIC = michael(mic_key, da + sa + "\x00" + "\x00" * 3 + data_clear)
    if expected_MIC != MIC:
        raise MICError()

    return data_clear
def build_MIC_ICV(data, mic_key, source, dest):
    """Compute and return the data with its MIC and ICV

    Inverse of check_MIC_ICV: appends the 8-byte Michael MIC and the
    4-byte CRC32 ICV to @data.
    """
    # DATA - MIC(DA - SA - Priority=0 - 0 - 0 - 0 - DATA) - ICV
    # 802.11i p.47
    sa = hex_bytes(source.replace(":", ""))  # Source MAC
    da = hex_bytes(dest.replace(":", ""))  # Dest MAC

    # MIC covers DA | SA | priority(0) | 3 zero bytes | DATA
    MIC = michael(mic_key, da + sa + "\x00" + "\x00" * 3 + data)
    # ICV is a little-endian CRC32 over DATA|MIC
    ICV = pack("<I", crc32(data + MIC) & 0xFFFFFFFF)

    return data + MIC + ICV
|
# batchflow/models/tf/inception_v1.py
""" <NAME>. et al "`Going Deeper with Convolutions
<https://arxiv.org/abs/1409.4842>`_"
"""
import tensorflow as tf
from .inception_base import Inception
from .layers import conv_block
# Default Inception v1 architecture:
#   'b' : per-block filter counts for each of the 9 inception blocks, in the
#         order (1x1, 3x3-reduce, 3x3, 5x5-reduce, 5x5, pool-projection)
#         as consumed by Inception_v1.block
#   'r' : reduction block - 3x3 max pooling with stride 2
_DEFAULT_V1_ARCH = {
    'b': {'filters': [
        [64, 96, 128, 16, 32, 32],
        [128, 128, 192, 32, 96, 64],
        [192, 96, 208, 16, 48, 64],
        [160, 112, 224, 24, 64, 64],
        [128, 128, 256, 24, 64, 64],
        [112, 144, 288, 32, 64, 64],
        [256, 160, 320, 32, 128, 128],
        [256, 160, 320, 32, 128, 128],
        [384, 192, 384, 48, 128, 128]]},
    'r': {'layout': 'p', 'pool_size': 3, 'pool_strides': 2}
}
class Inception_v1(Inception):
    """ Inception network, version 1

    **Configuration**

    inputs : dict
        dict with 'images' and 'labels' (see :meth:`~.TFModel._make_inputs`)

    body/arch : dict
        architecture: network layout, block layout, number of filters in each block, pooling parameters
    """
    @classmethod
    def default_config(cls):
        """ Define model defaults. See :meth: `~.TFModel.default_config` """
        config = Inception.default_config()

        config['common']['layout'] = 'cn'
        config['initial_block'] += dict(layout='cnp cn cn p', filters=[64, 64, 192],
                                        kernel_size=[7, 3, 3], strides=[2, 1, 1],
                                        pool_size=3, pool_strides=2)
        config['body']['arch'] = _DEFAULT_V1_ARCH
        config['body']['layout'] = 'bbrbbbbbrbb'
        config['head'].update(layout='Vdf', dropout_rate=.4)
        config['loss'] = 'ce'
        return config

    @classmethod
    def block(cls, inputs, filters, layout='cn', name=None, **kwargs):
        """ Inception building block

        Parameters
        ----------
        inputs : tf.Tensor
            input tensor
        filters : list with 6 items:

            - number of filters in 1x1 conv
            - number of filters in 1x1 conv going before conv 3x3
            - number of filters in 3x3 conv
            - number of filters in 1x1 conv going before conv 5x5,
            - number of filters in 5x5 conv,
            - number of filters in 1x1 conv going before max-pooling
        layout : str
            a sequence of layers in the block. Default is 'cn'.
        name : str
            scope name

        Returns
        -------
        tf.Tensor
        """
        with tf.variable_scope(name):
            # The pooling branch must keep the spatial size: force stride 1
            pool_kwargs = dict(kwargs)
            pool_kwargs['pool_strides'] = 1

            branches = [
                conv_block(inputs, layout=layout, filters=filters[0], kernel_size=1,
                           name='conv_1', **kwargs),
                conv_block(inputs, layout=layout * 2, filters=[filters[1], filters[2]],
                           kernel_size=[1, 3], name='conv_3', **kwargs),
                conv_block(inputs, layout=layout * 2, filters=[filters[3], filters[4]],
                           kernel_size=[1, 5], name='conv_5', **kwargs),
                conv_block(inputs, layout='p' + layout, filters=filters[5], kernel_size=1,
                           name='conv_pool', **pool_kwargs),
            ]

            # Concatenate the four branches along the channel axis
            axis = cls.channels_axis(kwargs['data_format'])
            return tf.concat(branches, axis, name='output')

    @classmethod
    def reduction_block(cls, inputs, layout='p', filters=None, name='reduction_block', **kwargs):
        """ Reduction block.

        Just a max pooling in 3x3 with strides=2

        Parameters
        ----------
        inputs : tf.Tensor
            input tensor
        name : str
            scope name

        Returns
        -------
        tf.Tensor
        """
        return conv_block(inputs, layout=layout, filters=filters, name=name, **kwargs)
|
import sys
from collections import Counter
from io import StringIO
import numpy as np
import gym
import gym.spaces
class TimeTable (gym.Env):
    """
    Gym environment that schedules lessons into a weekly timetable.

    Cell values:
      -1  slot unavailable for scheduling
       0  empty slot
       1  modern Japanese (taught daily)
       2  mathematics (taught daily)
       3  English (taught daily)
    """
    # number of classes (student groups)
    CLASS_SIZE = 2
    # days per week
    WEEK_SIZE = 5
    # periods per day
    DAY_SIZE = 3
    # initial (all-empty) timetable: one row per class, one column per period
    TIME_TABLE = np.zeros((CLASS_SIZE, WEEK_SIZE * DAY_SIZE))
    # lessons to place: ten of each subject
    LESSON_LIST = [1] * 10 + [2] * 10 + [3] * 10
    # consecutive failed placements allowed before the episode ends
    MAX_DAMAGE = 100

    def __init__(self):
        super().__init__()
        self.action_space = gym.spaces.Discrete(2 * 5 * 3)
        self.observation_space = gym.spaces.Box(
            low = -1,
            high = max(self.LESSON_LIST),
            shape = self.TIME_TABLE.shape
        )
        self.reward_range = [0, len(self.LESSON_LIST)]
        self.reset()

    def reset(self):
        """
        Reset the state and return the initial observation.
        """
        self.damage = 0
        self.total = 0
        self.table = self.TIME_TABLE.copy()
        self.lesson = list(self.LESSON_LIST)
        return self.table.copy()

    def step(self, action):
        """
        Advance one step. Returns observation, reward, done (whether the
        episode finished) and an info dict.
        """
        # Peek at the next lesson to place (popping a copy leaves self.lesson intact)
        lesson = self.lesson.copy().pop()
        # Check whether the lesson can be placed at this slot
        if self._is_bookable(action, lesson):
            table = self.table.copy()
            shape = table.shape
            table = table.reshape(1, shape[0] * shape[1])
            table[0][action] = lesson
            self.table = table.reshape(shape)
            self.damage = 0
            self.lesson.pop()
            moved = True
        else:
            moved = False

        # current state
        observation = self.table.copy()
        # damage incurred by this move
        self.damage += self._get_damage(moved)
        # reward earned by this move
        reward = self._get_reward(moved)
        self.total += reward
        # decide whether to finish or continue
        self.done = self._is_done()
        return observation, reward, self.done, {}

    def _close(self):
        """
        [Optional] Close the environment and clean up.
        """
        pass

    def _seed(self, seed=None):
        """
        [Optional] Fix the random seed.
        """
        pass

    def _get_reward(self, moved):
        # Reward: number of lessons placed so far. Ideally spreading lessons
        # evenly would score higher, but no efficient check was found, so
        # that idea is parked below as commented-out code.
        if moved:
            # table = self.table.copy()
            # shape = self.table.shape
            # tableT = table.T

            # # collect the indices of each lesson
            # lesson_index = dict([(lesson, []) for lesson in list(set(self.LESSON_LIST))])
            # for i in range(shape[1]):
            #     for j in range(tableT[i])
            #         cell = tableT[i][j]
            #         lesson_index[cell].append(i)

            return len(self.LESSON_LIST) - len(self.lesson)
        else:
            # no reward when trying to place a lesson on an invalid slot
            return 0

    def _get_damage(self, moved):
        # placing a lesson on an invalid slot incurs one point of damage
        if moved:
            return 0
        else:
            return 1

    def _is_bookable(self, action, lesson):
        """
        Return whether the lesson can be placed at the given slot.
        """
        table = self.table.copy()
        shape = self.table.shape
        table = table.reshape(1, shape[0] * shape[1])

        # cannot place on a forbidden slot
        if table[0][action] == -1:
            return False

        # cannot place where a lesson is already scheduled
        if table[0][action] > 0:
            return False

        # tentatively apply the placement
        table[0][action] = lesson
        table = table.reshape(shape)
        tableT = table.T

        # no class may have the same lesson duplicated in one period column
        for i in range(shape[1]):
            row = tableT[i]
            count = Counter(row)
            if len([num for num in count.items() if num[0] > 0 and num[1] > 1]) > 0:
                return False

        # the same lesson cannot be held twice on one day
        table = table.reshape(self.CLASS_SIZE, self.WEEK_SIZE, self.DAY_SIZE)
        for i in range(self.CLASS_SIZE):
            for j in range(self.WEEK_SIZE):
                row = table[i][j]
                count = Counter(row)
                if len([num for num in count.items() if num[0] > 0 and num[1] > 1]) > 0:
                    return False
        return True

    def _is_done(self):
        """
        Finish when every lesson is placed, or when placements keep failing
        MAX_DAMAGE times in a row.
        """
        if self.damage >= self.MAX_DAMAGE:
            return True
        elif len(self.lesson) == 0:
            # NOTE(review): renders and hard-exits the process on success
            self.render()
            sys.exit(0)
            return True
        return False

    def render(self, mode='human', close=False):
        """
        Visualize the environment.
        """
        # 'human' writes to the console; 'ansi' returns a StringIO
        outfile = StringIO() if mode == 'ansi' else sys.stdout
        outfile.write('\n'.join(' '.join(str(elem) for elem in row) for row in self.table) + '\n' + 'total:' + str(self.total) + '\n' + 'damage:' + str(self.damage) + '\n' + 'lesson:' + str(len(self.lesson)) + '\n')
        return outfile
import os, sys, math
sys.path.insert(0, '../../..')
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.stats
class Attention(nn.Module):
    def __init__(self, encoder_size, decoder_size, device, type="additive"):
        """ Attention module.

        Args:
            encoder_size (int): Size of the encoder's output (as input for the decoder).
            decoder_size (int): Size of the decoder's output.
            device (torch.device): Device (eg. torch.device("cpu"))
            type (string): One of several types of attention score functions.
                See: https://arxiv.org/pdf/1902.02181.pdf

        Raises:
            Exception: if ``type`` is not one of the recognized values.
        """
        super(Attention, self).__init__()
        self.encoder_size = encoder_size
        self.decoder_size = decoder_size
        self.type = type

        # transforms encoder states into keys
        self.key_annotation_function = nn.Linear(self.encoder_size, self.encoder_size, bias=False)
        # transforms encoder states into values
        self.value_annotation_function = nn.Linear(self.encoder_size, self.encoder_size, bias=False)
        # transforms the hidden state into query (NOTE: projects q to K's size)
        self.query_annotation_function = nn.Linear(self.decoder_size, self.encoder_size, bias=False)

        if type == "additive":
            # f(q, K) = V*tanh(W1K + W2q + b) , Bahdanau et al., 2015
            self.V = nn.Linear(self.encoder_size, 1, bias=False)
            self.W1 = nn.Linear(self.encoder_size, self.encoder_size, bias=False)
            self.W2 = nn.Linear(self.encoder_size, self.encoder_size, bias=False)  # encoder size because q is projected to K's size
            self.b = nn.Parameter(torch.zeros(self.encoder_size))
        elif type == "coverage":  # https://arxiv.org/pdf/1601.04811.pdf
            # additive scoring extended with a coverage term W3*C
            self.V = nn.Linear(self.encoder_size, 1, bias=False)
            self.W1 = nn.Linear(self.encoder_size, self.encoder_size, bias=False)
            self.W2 = nn.Linear(self.encoder_size, self.encoder_size, bias=False)
            self.b = nn.Parameter(torch.zeros(self.encoder_size))
            self.coverage_dim = 10
            # GRU input: [prev coverage, attention weight, encoder state, decoder state]
            self.coverage_input_size = self.coverage_dim + 1 + self.encoder_size + self.encoder_size
            self.cov_gru = nn.GRU(self.coverage_input_size, self.coverage_dim, batch_first=True)
            self.W3 = nn.Linear(self.coverage_dim, self.encoder_size)
        elif (type == "multiplicative" or type == "dot"):
            # f(q, K) = q^t K , Luong et al., 2015 — direct dot product, no parameters
            pass
        elif type == "scaled multiplicative" or type == "scaled dot":
            # f(q, K) = multiplicative / sqrt(dk) , Vaswani et al., 2017
            self.scale = math.sqrt(self.encoder_size)
        elif type == "general" or type == "bilinear":
            # f(q, K) = q^t WK , Luong et al., 2015
            self.W = nn.Linear(self.encoder_size, self.encoder_size, bias=False)
        elif type == "biased general":
            # f(q, K) = K|(W q + b) Sordoni et al., 2016 — not implemented yet
            pass
        elif type == "activated general":
            # f(q, K) = act(q|WK + b) Ma et al., 2017 — not implemented yet
            pass
        elif type == "concat":
            # f(q, K) = act(W[K;q] + b) , Luong et al., 2015 — not implemented yet
            pass
        elif type == "p":
            # https://arxiv.org/pdf/1702.04521.pdf p.3 — add the prediction
            # into the attention (KVP and Q); not implemented yet
            pass
        else:
            raise Exception("Attention type not properly defined! (got type={})".format(self.type))

        self.device = device
        self.to(self.device)

    def _reshape_state_h(self, state_h):
        """
        Reshapes the hidden state to desired shape
        Input: [num_layers * 1, batch_size, decoder_hidden_size]
        Output: [batch_size, 1, decoder_hidden_state]

        Args:
            state_h (tensor): Hidden state of the decoder.
                [num_layers * 1, batch_size, decoder_hidden_size]

        Returns:
            The reshaped hidden state.
                [batch_size, 1, decoder_hidden_state]
        """
        num_layers, batch_size, hidden_size = state_h.size()
        # in case the decoder has more than 1 layer, take only the last one
        if num_layers > 1:
            state_h = state_h[num_layers-1:num_layers,:,:]
        # [1, batch_size, decoder_hidden_size] -> [batch_size, 1, decoder_hidden_size]
        return state_h.permute(1, 0, 2)

    def reset_coverage(self, batch_size, enc_seq_len, force_weight):
        """Reset per-batch state; must be called on every new batch when
        using the "coverage" type."""
        self.force_weight = force_weight
        if self.type == "coverage":
            self.C = torch.zeros(batch_size, enc_seq_len, self.coverage_dim, device=self.device)
            self.gru_input = torch.zeros(batch_size, 1, self.coverage_input_size, device=self.device)

    def _coverage_compute_next_C(self, attention_weights, enc_output, state_h):
        # attention_weights: [batch_size, seq_len, 1]
        # enc_output : [batch_size, seq_len, encoder_size]
        # state_h (after reshape): [batch_size, 1, encoder_size]
        # self.C at prev timestep: [batch_size, seq_len, coverage_dim]
        # FIX: size is a method — size(1), not size[1] (subscripting raised TypeError)
        seq_len = enc_output.size(1)
        for i in range(seq_len):
            self.gru_input[:,:,0:self.coverage_dim] = self.C[:,i:i+1,:]  # prev coverage for source position i
            self.gru_input[:,:,self.coverage_dim:self.coverage_dim+1] = attention_weights[:,i:i+1,:]  # attention weight
            self.gru_input[:,:,self.coverage_dim+1:self.coverage_dim+1+self.encoder_size] = enc_output[:,i:i+1,:]  # encoder output
            self.gru_input[:,:,self.coverage_dim+1+self.encoder_size:] = state_h[:,0:1,:]  # decoder state
            # FIX: nn.GRU returns (output, h_n); take the output sequence,
            # shape [batch_size, 1, coverage_dim]
            self.C[:,i:i+1,:] = self.cov_gru(self.gru_input)[0]

    def _energy(self, K, Q):
        """
        Calculates the compatibility function f(query, keys)

        Args:
            K (tensor): Keys tensor of size [batch_size, seq_len, encoder_size]
            Q (tensor): Query tensor of size [batch_size, 1, encoder_size]
                (decoder state already projected to encoder size)

        Returns:
            energy tensor of size [batch_size, seq_len, 1]

        Raises:
            Exception: for types without a score implementation.
        """
        if self.type == "additive":
            return self.V(torch.tanh(self.W1(K) + self.W2(Q) + self.b))
        elif self.type == "coverage":
            return self.V(torch.tanh(self.W1(K) + self.W2(Q) + self.W3(self.C) + self.b))
        elif self.type == "multiplicative" or self.type == "dot":
            # q^t K means batch matrix multiplying K with q transposed:
            # bmm( [batch_size, seq_len, enc_size] , [batch_size, enc_size, 1] ) -> [batch_size, seq_len, 1]
            return torch.bmm(K, Q.transpose(1,2))
        elif self.type == "scaled multiplicative" or self.type == "scaled dot":
            # same as multiplicative but scaled
            return torch.bmm(K, Q.transpose(1,2))/self.scale
        elif self.type == "general" or self.type == "bilinear":
            # f(q, K) = q^t WK , Luong et al., 2015
            return torch.bmm(self.W(K), Q.transpose(1,2))
        else:
            # FIX: previously fell through and returned None, producing a
            # confusing TypeError in softmax; fail loudly instead
            raise Exception("Attention type {} has no energy implementation!".format(self.type))

    def forward(self, enc_output, state_h, decoder_step=0, dec_seq_len=1, mask=None):
        """
        Calculates the context vector of the attention layer, given the
        decoder hidden state and the encoder's last LSTM-layer output.

        Args:
            enc_output (tensor): The output of the last LSTM encoder layer.
                Shape: [batch_size, seq_len, encoder_size].
            state_h (tensor): The raw hidden state of the decoder's LSTM
                Shape: [num_layers * 1, batch_size, decoder_size].
            decoder_step (int): Current decoding timestep (only used by the
                disabled forced-position code below). Default 0.
            dec_seq_len (int): Expected decoded length (same note). Default 1.
            mask (tensor): 1 and 0 as for encoder input
                Shape: [batch_size, seq_len].

        Returns:
            context (tensor): The context vector. Shape: [batch_size, encoder_size]
            attention_weights (tensor): Attention weights. Shape: [batch_size, seq_len, 1]
        """
        batch_size = enc_output.shape[0]
        seq_len = enc_output.shape[1]
        state_h = self._reshape_state_h(state_h)  # [batch_size, 1, decoder_size]

        # get K, V, Q
        K = self.key_annotation_function(enc_output)    # [batch_size, seq_len, encoder_size]
        V = self.value_annotation_function(enc_output)  # [batch_size, seq_len, encoder_size]
        Q = self.query_annotation_function(state_h)     # [batch_size, 1, encoder_size]

        # calculate energy
        energy = self._energy(K, Q)  # [batch_size, seq_len, 1]

        # mask padded positions with -inf so softmax gives them zero weight
        if mask is not None:
            energy.masked_fill_(mask.unsqueeze(-1) == 0, -np.inf)

        # transform energy into a probability distribution over source positions
        attention_weights = torch.softmax(energy, dim=1)  # [batch_size, seq_len, 1]

        # (disabled) forced-position attention: mix the learned weights with a
        # gaussian centered at the approximate source position, assuming the
        # decoded length roughly matches the encoder length
        """
        if self.force_weight > 0.:
            approx_position = decoder_step*seq_len/dec_seq_len
            x = np.linspace(0, seq_len, seq_len)
            y = scipy.stats.norm.pdf(x, approx_position, 3)
            y = y / np.sum(y) # rescale to make it a PDF
            gaussian_dist = torch.tensor(y, dtype = attention_weights.dtype, device = self.device)
            gaussian_dist = gaussian_dist.repeat(batch_size, 1) # [batch_size, seq_len]
            attention_weights = self.force_weight * gaussian_dist + (1-self.force_weight) * attention_weights.squeeze(2)
            attention_weights = attention_weights.unsqueeze(2) # recreate [batch_size, seq_len, 1]
        """

        # for coverage only, calculate the next C
        # FIX: original compared the BUILTIN `type` ("if type=='coverage'"),
        # which is always False, so coverage was never updated
        if self.type == "coverage":
            self._coverage_compute_next_C(attention_weights, enc_output, state_h)

        # weighted values z: element-wise multiplication of weights * values
        # attention_weights is [batch_size, seq_len, 1], V is [batch_size, seq_len, encoder_size]
        z = attention_weights*V

        # context is the sum of z over the sequence dimension:
        # [batch_size, seq_len, encoder_size] -> [batch_size, encoder_size]
        context = torch.sum(z, dim=1)

        return context, attention_weights  # [batch_size, encoder_size], [batch_size, seq_len, 1]
if __name__ == "__main__":
import numpy as np
# debug stuff:
q = torch.tensor([ [ [2.,2.,2.] ] ])
K = torch.tensor([ [ [1.,1.,1.] , [5.,5.,5.] ] ])
#result would be ([ [ [2.,2.,2.] , [2.5,2.5,2.5] ] ])
print(K.size())
print(q)
print(q.size())
#qt = q.expand(1,3,3)#q.transpose(1,2)
qt = q.transpose(1,2)
print(qt)
print(qt.size())
print()
r = torch.bmm(K,qt)
print(r)
print(r.size())
print()
#print(e1.size())
#print(v1.size())
#qq = e1*v1
#print(qq)
# prep inputs
batch_size = 2
seq_len = 10
enc_size = 4
dec_layers = 5
dec_size = 3
encoder_outputs = torch.tensor(np.random.rand(batch_size, seq_len, enc_size), dtype=torch.float)
decoder_hidden_state = torch.tensor(np.random.rand(dec_layers*1, batch_size, dec_size), dtype=torch.float) # 1 for unidirectional
# prep layer
device = torch.device("cpu")
#type = "additive"
type = "general"
att = Attention(enc_size, dec_size, device, type)
# run
context, attention_weights = att(encoder_outputs, decoder_hidden_state)
print("Output is:")
print(context)
print("Attention weights size:" + str(attention_weights.size()))
# debug stuff:
#e1 = torch.tensor([[[2],[0.5]]])
#v1 = torch.tensor([ [ [1.,1.,1.] , [5.,5.,5.] ] ])
#result would be ([ [ [2.,2.,2.] , [2.5,2.5,2.5] ] ])
#print(e1.size())
#print(v1.size())
#qq = e1*v1
#print(qq)
|
# repository: gigforks/python-prompt-toolkit
"""
Highlighters for usage in a BufferControl.
Highlighters are very similar to processors, but they are applied after the
BufferControl created a screen instance. (Instead of right before creating the screen.)
Highlighters can't change the content of the screen, but they can mark regions
(start_pos, end_pos) as highlighted, using a certain Token.
When possible, it's advised to use a Highlighter instead of a Processor,
because most of the highlighting code is applied only to the visible region of
the screen. (The Window class will apply the highlighting to the visible region.)
"""
from __future__ import unicode_literals
from pygments.token import Token
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from prompt_toolkit.document import Document
from prompt_toolkit.enums import SEARCH_BUFFER
from prompt_toolkit.filters import to_cli_filter
__all__ = (
'Fragment',
'SelectionHighlighter',
'SearchHighlighter',
'MatchingBracketHighlighter',
'ConditionalHighlighter',
)
class Fragment(object):
    """
    A single highlighted region of the screen.

    :param start: (int) Cursor start position.
    :param end: (int) Cursor end position.
    :param token: Pygments Token.
    """
    def __init__(self, start, end, token):
        self.start = start
        self.end = end
        self.token = token

    def __repr__(self):
        return 'Fragment({0!r}, {1!r}, {2!r})'.format(self.start, self.end, self.token)
# Abstract base class for highlighters. six.with_metaclass keeps the
# ABCMeta metaclass working on both Python 2 and 3.
class Highlighter(with_metaclass(ABCMeta, object)):
    @abstractmethod
    def get_fragments(self, cli, document):
        """
        Return a list of :class:`.Fragment` instances.
        (This can be a generator as well.)

        :param cli: CommandLineInterface instance.
        :param document: The Document to highlight.
        """
        return []
class SelectionHighlighter(Highlighter):
    """
    Highlight the current selection.
    """
    def get_fragments(self, cli, document):
        # One fragment per selected range. The range's `to` is inclusive
        # while Fragment's end is exclusive, hence the +1.
        return [Fragment(start, stop + 1, Token.SelectedText)
                for start, stop in document.selection_ranges()]

    def invalidation_hash(self, cli, document):
        # When the selection changes, highlighting will be different.
        if not document.selection:
            return document.selection
        return (document.cursor_position,
                document.selection.original_cursor_position,
                document.selection.type)
class SearchHighlighter(Highlighter):
    """
    Highlight search matches in the document.

    :param preview_search: A Filter; when active it indicates that we take
        the search text in real time while the user is typing, instead of the
        last active search state.
    :param get_search_state: (Optional) Callable that takes a
        CommandLineInterface and returns the SearchState to be used for the highlighting.
    """
    def __init__(self, preview_search=False, search_buffer_name=SEARCH_BUFFER,
                 get_search_state=None):
        self.preview_search = to_cli_filter(preview_search)
        self.search_buffer_name = search_buffer_name
        self.get_search_state = get_search_state

    def _get_search_text(self, cli):
        """
        Return the text we are searching for.
        """
        # While the search buffer has the focus, use its live content.
        search_buffer = cli.buffers[self.search_buffer_name]
        if self.preview_search(cli) and search_buffer.text:
            return search_buffer.text
        # Otherwise, take the text of the last active search.
        if self.get_search_state:
            return self.get_search_state(cli).text
        return cli.search_state.text

    def get_fragments(self, cli, document):
        needle = self._get_search_text(cli)
        if not needle or cli.is_returning:
            return

        ignore_case = cli.is_ignoring_case
        needle_len = len(needle)
        for index in document.find_all(needle, ignore_case=ignore_case):
            # The match containing the cursor gets a distinct token.
            is_current = index <= document.cursor_position < index + needle_len
            token = Token.SearchMatch.Current if is_current else Token.SearchMatch
            yield Fragment(index, index + needle_len, token)

    def invalidation_hash(self, cli, document):
        needle = self._get_search_text(cli)

        # When the search state changes, highlighting will be different.
        return (
            needle,
            cli.is_returning,

            # When we search for text, and the cursor position changes. The
            # highlighter has to be applied every time again, because the
            # current match is highlighted in another color.
            (needle and document.cursor_position),
        )
class ConditionalHighlighter(Highlighter):
    """
    Wrap another highlighter and apply it only while a filter is active.

    :param highlighter: :class:`.Highlighter` instance to delegate to.
    :param filter: :class:`~prompt_toolkit.filters.CLIFilter` instance.
    """
    def __init__(self, highlighter, filter):
        assert isinstance(highlighter, Highlighter)
        self.highlighter = highlighter
        self.filter = to_cli_filter(filter)

    def get_fragments(self, cli, document):
        # Delegate only while the filter is active; otherwise highlight nothing.
        if not self.filter(cli):
            return []
        return self.highlighter.get_fragments(cli, document)

    def invalidation_hash(self, cli, document):
        # Active: combine with the wrapped highlighter's hash so its changes
        # invalidate us as well.  Inactive: a constant hash (nothing rendered).
        if not self.filter(cli):
            return False
        return (True, self.highlighter.invalidation_hash(cli, document))
class MatchingBracketHighlighter(Highlighter):
    """
    When the cursor is on or right after a bracket, it highlights the matching
    bracket.
    """
    _closing_braces = '])}>'

    def __init__(self, chars='[](){}<>'):
        self.chars = chars

    def _fragments_for(self, document):
        """
        Return the fragments for *document*, or None when its current
        character is not a bracket at all.  A bracket without a matching
        counterpart yields an empty list (handled, but nothing highlighted).
        """
        if document.current_char not in self.chars:
            return None
        offset = document.matching_bracket_position
        if not offset:
            return []
        cursor = document.cursor_position
        return [
            Fragment(cursor, cursor + 1, Token.MatchingBracket),
            Fragment(cursor + offset, cursor + offset + 1, Token.MatchingBracket),
        ]

    def get_fragments(self, cli, document):
        # First look at the character below the cursor.
        fragments = self._fragments_for(document)
        if fragments is not None:
            return fragments
        # Otherwise, look at the character just before the cursor, but only
        # when that character is a closing brace.
        if document.cursor_position > 0 and \
                document.char_before_cursor in self._closing_braces:
            shifted = Document(document.text, document.cursor_position - 1)
            fragments = self._fragments_for(shifted)
            if fragments is not None:
                return fragments
        return []

    def invalidation_hash(self, cli, document):
        if document.current_char in self.chars:
            return (True, document.cursor_position)
        if (document.char_before_cursor in self.chars and
                document.char_before_cursor in self._closing_braces):
            return (True, document.cursor_position - 1)
        # Don't include the cursor position in the hash if we are not *on*
        # a brace. We don't have to rerender the output, because it will be
        # the same anyway.
        return False
|
from eclcli.common import command
from eclcli.common import utils
from ..networkclient.common import utils as to_obj
class ListStaticRoute(command.Lister):
    """List static routes, optionally filtered by attribute values."""

    # (attribute, help text) pairs.  Each entry becomes an optional CLI
    # argument in get_parser() and, when supplied by the user, a search
    # filter forwarded to the API in take_action().
    _FILTER_ATTRS = (
        ('name', "filter by name"),
        ('id', "filter by id"),
        ('status', "filter by status"),
        ('nexthop', "filter by next hop"),
        ('destination', "filter by destination"),
        ('service_type', "filter by service_type"),
        ('internet_gw_id', "filter by internet gateway id"),
        ('aws_gw_id', "filter by aws gateway id"),
        ('interdc_gw_id', "filter by interdc gateway id"),
        ('vpn_gw_id', "filter by vpn gateway id"),
    )

    def get_parser(self, prog_name):
        parser = super(ListStaticRoute, self).get_parser(prog_name)
        # One optional '--<attr>' per filterable attribute.
        for attr, help_text in self._FILTER_ATTRS:
            parser.add_argument(
                '--' + attr,
                metavar=attr,
                help=help_text)
        return parser

    def take_action(self, parsed_args):
        network_client = self.app.client_manager.network

        columns = (
            'id',
            'name',
            'service_type',
            'destination',
            'nexthop',
            'status',
        )
        column_headers = (
            'ID',
            'Name',
            'Service Type',
            'Destination',
            'Nexthop',
            'Status',
        )

        # Forward only the filters the user actually provided.
        search_opts = {}
        for attr, _ in self._FILTER_ATTRS:
            value = getattr(parsed_args, attr)
            if value:
                search_opts[attr] = value

        data = [to_obj.StaticRoute(static_route)
                for static_route in network_client.list_static_routes(
                    **search_opts).get('static_routes')]

        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                ) for s in data))
class ShowStaticRoute(command.ShowOne):
    """Show the details of a single static route."""

    def get_parser(self, prog_name):
        parser = super(ShowStaticRoute, self).get_parser(prog_name)
        parser.add_argument(
            'static_route_id',
            metavar="STATIC_ROUTE_ID",
            help="ID of Static Route to show."
        )
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        # Fetch the route and render its attributes as a two-column table.
        route = client.show_static_route(
            parsed_args.static_route_id).get('static_route')
        columns = utils.get_columns(route)
        data = utils.get_item_properties(to_obj.StaticRoute(route), columns,)
        return columns, data
class CreateStaticRoute(command.ShowOne):
    """Create a static route and show the created resource."""

    def get_parser(self, prog_name):
        parser = super(CreateStaticRoute, self).get_parser(prog_name)
        # Plain attributes of the route to create.
        for flag, kwargs in (
                ('--name', {'metavar': '<string>',
                            'help': 'Name of Static Route to create.'}),
                ('--description', {'metavar': '<string>',
                                   'help': 'Description of Static Route to create.'}),
                ('--destination', {'metavar': 'CIDR', 'required': True,
                                   'help': 'Destination of Static Route to create.'}),
                ('--nexthop', {'metavar': '<ipv4>', 'required': True,
                               'help': 'Nexthop of Static Route to create.'}),
                ('--service_type', {'metavar': '{vpn|internet|interdc}',
                                    'choices': ["vpn", "internet", "interdc"],
                                    'required': True,
                                    'help': 'SERVICE_TYPE of Static Route to create.'})):
            parser.add_argument(flag, **kwargs)
        # At most one gateway may be attached to a static route.
        gw_group = parser.add_mutually_exclusive_group()
        for flag, metavar, help_text in (
                ('--internet_gw_id', 'INTERNET_GATEWAY_ID',
                 'Internet gateway id of Static Route to create.'),
                ('--interdc_gw_id', 'INTERDC_GW_ID',
                 'InterDC gateway id of Static Route to create.'),
                ('--vpn_gw_id', 'VPN_GATEWAY_ID',
                 'VPN gateway id of Static Route to create.')):
            gw_group.add_argument(flag, metavar=metavar, help=help_text)
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        body = {'static_route': {}}
        # Copy only the attributes the user actually supplied.
        utils.update_dict(
            parsed_args,
            body['static_route'],
            ['name', 'description', 'destination',
             'nexthop', 'service_type', 'internet_gw_id',
             'interdc_gw_id', 'vpn_gw_id'])
        route = client.create_static_route(body).get('static_route')
        columns = utils.get_columns(route)
        data = utils.get_item_properties(to_obj.StaticRoute(route), columns,)
        return (columns, data)
class SetStaticRoute(command.ShowOne):
    """Update name and/or description of an existing static route."""

    def get_parser(self, prog_name):
        parser = super(SetStaticRoute, self).get_parser(prog_name)
        parser.add_argument(
            'static_route_id',
            metavar='STATIC_ROUTE_ID',
            help='ID of Static Route to update.')
        for flag, help_text in (
                ('--name', 'Name of Static Route to update.'),
                ('--description', 'Description of Static Route to update.')):
            parser.add_argument(flag, metavar='<string>', help=help_text)
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        body = {'static_route': {}}
        # Only name/description are updatable; copy the ones supplied.
        utils.update_dict(parsed_args, body['static_route'],
                          ['name', 'description'])
        route = client.update_static_route(
            parsed_args.static_route_id, body).get('static_route')
        columns = utils.get_columns(route)
        data = utils.get_item_properties(to_obj.StaticRoute(route), columns,)
        return columns, data
class DeleteStaticRoute(command.Command):
    """Delete one or more static routes by ID."""

    def get_parser(self, prog_name):
        parser = super(DeleteStaticRoute, self).get_parser(prog_name)
        parser.add_argument(
            'static_route_id',
            metavar="STATIC_ROUTE_ID",
            nargs="+",
            help="ID(s) of Static Route to delete."
        )
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        # Delete each requested route in turn.
        for route_id in parsed_args.static_route_id:
            client.delete_static_route(route_id)
|
<filename>Day 1/system.py
from SIRmodel import SIR
import matplotlib.pyplot as plt
# STD: April 22, 2021
# Initial conditions for the SIR model (see date note above this block).
N = 674635  # total population
I = 687  # currently infected
R = 660 + 25018 + 1 # recovered + vaccination + dead
S = N - I  # everyone not infected starts out susceptible
def simulateWithNoQuarantine():
    """Run the SIR model for 360 days with beta=1 (no quarantine) and plot it."""
    model = SIR(S, I, R, 1, 1./30)
    days, susceptible, infected, removed = [], [], [], []
    for day in range(360):
        model.update()
        days.append(day)
        susceptible.append(model.S)
        infected.append(model.I)
        removed.append(model.R)
    plt.title("Simulation with no quarantine")
    plt.ylabel("Number of People")
    plt.xlabel("Date")
    plt.plot(days, susceptible, label="Susceptible")
    plt.plot(days, infected, label="Infectible")
    plt.plot(days, removed, label="Recovery/Removed")
    plt.legend()
    plt.show()
def simulateWithLooseQuarantine():
    """Run the SIR model for 360 days with beta=0.5 (loose quarantine) and plot it."""
    model = SIR(S, I, R, 0.5, 1./30)
    days, susceptible, infected, removed = [], [], [], []
    for day in range(360):
        model.update()
        days.append(day)
        susceptible.append(model.S)
        infected.append(model.I)
        removed.append(model.R)
    plt.title("Simulation with loose quarantine")
    plt.ylabel("Number of People")
    plt.xlabel("Date")
    plt.plot(days, susceptible, label="Susceptible")
    plt.plot(days, infected, label="Infectible")
    plt.plot(days, removed, label="Recovery/Removed")
    plt.legend()
    plt.show()
def simulateWithNormalQuarantine():
    """Run the SIR model for 360 days with beta=0.2, gamma=1/20 and plot it."""
    model = SIR(S, I, R, 0.2, 1./20)
    days, susceptible, infected, removed = [], [], [], []
    for day in range(360):
        model.update()
        days.append(day)
        susceptible.append(model.S)
        infected.append(model.I)
        removed.append(model.R)
    plt.title("Simulation with normal quarantine")
    plt.ylabel("Number of People")
    plt.xlabel("Date")
    plt.plot(days, susceptible, label="Susceptible")
    plt.plot(days, infected, label="Infectible")
    plt.plot(days, removed, label="Recovery/Removed")
    plt.legend()
    plt.show()
def simulateWithKoreanQuarantine():
    """Run the SIR model for 360 days with beta=0.1 (hard quarantine) and plot it."""
    model = SIR(S, I, R, 0.1, 1./30)
    days, susceptible, infected, removed = [], [], [], []
    for day in range(360):
        model.update()
        days.append(day)
        susceptible.append(model.S)
        infected.append(model.I)
        removed.append(model.R)
    plt.title("Simulation with hard quarantine")
    plt.ylabel("Number of People")
    plt.xlabel("Date")
    plt.plot(days, susceptible, label="Susceptible")
    plt.plot(days, infected, label="Infectible")
    plt.plot(days, removed, label="Recovery/Removed")
    plt.legend()
    plt.show()
def simulateWithLockdown():
    """Run the SIR model for 360 days with beta=0.05 (lockdown) and plot it."""
    model = SIR(S, I, R, 0.05, 1./30)  # assume everyone stay at home.
    days, susceptible, infected, removed = [], [], [], []
    for day in range(360):
        model.update()
        days.append(day)
        susceptible.append(model.S)
        infected.append(model.I)
        removed.append(model.R)
    plt.title("Simulation with lockdown")
    plt.ylabel("Number of People")
    plt.xlabel("Date")
    plt.plot(days, susceptible, label="Susceptible")
    plt.plot(days, infected, label="Infectible")
    plt.plot(days, removed, label="Recovery/Removed")
    plt.legend()
    plt.show()
|
<gh_stars>1-10
# ===== IMPORTS =====
# === Standard library ===
from collections import defaultdict, Counter
import logging
import pathlib
from pathlib import Path
import json
# === Thirdparty ===
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import torch
import torch.utils.data as tdata
import torch.nn.functional as tfunctional
import torch.nn.utils.rnn as tutilsrnn
from sklearn.metrics import precision_recall_fscore_support
# === Local ===
import ml4logs
from ml4logs.features.utils import load_features_as_dict
from ml4logs.data.hdfs import load_labels
from ml4logs.models.utils import classify, get_metrics, get_threshold_metrics, find_optimal_threshold
# ===== GLOBALS =====
# Module-level logger; configuration is left to the application.
logger = logging.getLogger(__name__)
# Ground-truth block labels used throughout this module.
NORMAL_LABEL = 0
ABNORMAL_LABEL = 1
# ===== CLASSES =====
class Seq2LabelModelTrainer:
    """Trainer for a recurrent model that assigns binary labels to log sequences.

    Wraps an ``ml4logs`` SeqModel together with a BCE-with-logits loss, an
    Adam optimizer and an exponential learning-rate scheduler.  Two
    labelling modes are supported:

    * many-to-one  -- one label per sequence; the prediction is taken from
      the last valid (unpadded) time step of each sequence.
    * many-to-many -- one label per time step; targets arrive packed and
      predictions past each true sequence length are zeroed so that padding
      does not contribute to the loss.
    """

    def __init__(self, device, f_dim, many_to_one, model_kwargs,
                 optim_kwargs, lr_scheduler_kwargs):
        # f_dim: dimensionality of one input feature vector.
        self._model = ml4logs.models.baselines.SeqModel(
            f_dim, **model_kwargs
        ).to(device)
        # The model has a single output neuron producing raw logits, hence
        # BCEWithLogitsLoss (no sigmoid inside the model).
        self._criterion = torch.nn.BCEWithLogitsLoss()
        self._optimizer = torch.optim.Adam(
            self._model.parameters(), **optim_kwargs)
        self._scheduler = torch.optim.lr_scheduler.ExponentialLR(
            self._optimizer, **lr_scheduler_kwargs)
        self._device = device
        self._many_to_one = many_to_one

    def train(self, dataloader):
        """Run one training epoch; return the mean per-batch loss."""
        self._model.train()
        train_loss = 0.0
        for inputs, labels, lengths in dataloader:
            results, labels, _ = self._forward(inputs, labels, lengths)
            loss = self._criterion(results, labels)
            train_loss += loss.item()
            self._optimizer.zero_grad()
            loss.backward()
            self._optimizer.step()
            # NOTE(review): the LR scheduler is stepped once per *batch*,
            # not per epoch -- confirm the decay rate accounts for this.
            self._scheduler.step()
        return train_loss / len(dataloader)

    def predict_flatten(self, dataloader):
        # makes predictions and returns them and targets both flattened as 1D numpy arrays
        self._model.eval()
        Ys = []
        Ts = []
        with torch.no_grad():
            for X, T, L in dataloader:
                Y, T, _ = self._forward(X, T, L)
                Ys.append(Y.data.to(device='cpu').numpy().reshape(-1))
                Ts.append(T.data.to(device='cpu').numpy().reshape(-1))
        Ys = np.concatenate(Ys)
        Ts = np.concatenate(Ts)
        # logger.info(f"{Ys.shape}, {Ts.shape}")
        return Ys, Ts

    def evaluate(self, dataloader):
        """Return the mean per-batch loss over *dataloader* (no grad updates)."""
        self._model.eval()
        total_loss = 0.0
        with torch.no_grad():
            for inputs, labels, lengths in dataloader:
                results, labels, _ = self._forward(inputs, labels, lengths)
                loss = self._criterion(results, labels)
                total_loss += loss.item()
        return total_loss / len(dataloader)

    def find_optimal_threshold(self, dataloader):
        """Return (threshold, f1) tuned on *dataloader* (see caller usage)."""
        Y, T = self.predict_flatten(dataloader)
        return find_optimal_threshold(T, Y)

    # def evaluate_validation_metrics(self, dataloader):
    #     threshold = self.find_optimal_threshold(dataloader)
    #     Y = self.score(dataloader)[:, 1]
    #     C = classify(Y, threshold)
    #     T = self.targets(dataloader)[:, 1]
    #     metrics = get_metrics(T, C)
    #     metrics.update(get_threshold_metrics(T, Y))
    #     return metrics

    def _forward(self, X, T, L):
        """Dispatch to the forward pass matching the labelling mode."""
        if self._many_to_one:
            return self._forward_many_to_one(X, T, L)
        else:
            # lengths can be infered from targets T
            return self._forward_many_to_many(X, T)

    def _forward_many_to_one(self, X, T, L):
        # gets data as created by `pad_collate()`
        X = X.to(self._device)
        T = T.to(self._device)
        # self._model output has shape: (batch_size, max_sequence_length, 1) - there is a single output neuron
        Y = self._model(X)
        Y = Y.reshape(Y.shape[0], Y.shape[1]) # reshape to (batch_size, max_sequence_length)
        Y = Y[range(Y.shape[0]), L - 1] # L - 1: indices of last elements of each output sequence
        return Y, T, L

    def _forward_many_to_many(self, X, T):
        # gets data as created by `pad_collate()`
        X = X.to(self._device)
        T = T.to(self._device)
        # T (labels) shape will be (batch_size, max_sequence_length)
        # lengths will be (batch_size, ) tensor with actual sequence lengths
        T, lengths = tutilsrnn.pad_packed_sequence(
            T,
            batch_first=True)
        # results will be (batch_size, max_sequence_length, 1) - there is a single output neuron
        Y = self._model(X)
        # the network predicts even after actual sequence_length
        # the following pack_padded and pad_packed will set all these predictions to 0
        # so they do not mess with loss (targets are also padded by zeros)
        Y = tutilsrnn.pack_padded_sequence(
            Y,
            lengths,
            batch_first=True,
            enforce_sorted=False
        )
        Y, _ = tutilsrnn.pad_packed_sequence(
            Y,
            batch_first=True
        )
        # squeeze removes the last dimension so we get (batch_size, max_sequence_length)
        return torch.squeeze(Y), T, lengths
# ===== FUNCTIONS =====
def train_test_seq2label(args):
    """Train and evaluate an LSTM baseline that labels log-block sequences.

    *args* is a step-config dict with data-path keys ('train_path',
    'val_path', 'test_path', the matching '*_label_path' keys and
    'stats_path'), plus 'seed', 'device', 'epochs', 'batch_size',
    'model_kwargs', 'optim_kwargs', 'lr_scheduler_kwargs' and the optional
    'many_to_one' flag (default True).  Per-epoch losses and the final test
    metrics are written as JSON to args['stats_path'].
    """
    np.random.seed(args['seed'])
    if args['device'] == 'cuda' and not torch.cuda.is_available():
        logger.warning("CUDA not available, falling back to CPU!")
        args['device'] = 'cpu'
    torch.manual_seed(args['seed'])
    # NOTE(review): train/val/test Path objects below are unused;
    # load_split() reads the raw args entries instead.
    train_path = Path(args['train_path'])
    val_path = Path(args['val_path'])
    test_path = Path(args['test_path'])
    stats_path = Path(args['stats_path'])
    ml4logs.utils.mkdirs(files=[stats_path])

    def load_split(input_path, label_path):
        # input_path / label_path are *keys into args*, not actual paths.
        logger.info(
            f'Loading split:\n\t"{args[input_path]}"\n\t"{args[label_path]}"')
        labels = load_labels(args[label_path])
        # NOTE(review): the label path is passed first here -- confirm this
        # matches the load_features_as_dict() parameter order.
        inputs = load_features_as_dict(args[label_path], args[input_path])
        logger.info(
            f" # input blocks: {len(inputs)}, # labels: {len(labels)}")
        return inputs, labels

    train_blocks, train_labels = load_split("train_path", "train_label_path")
    val_blocks, val_labels = load_split("val_path", "val_label_path")
    test_blocks, test_labels = load_split("test_path", "test_label_path")
    # originally implemented splits:
    # train - only normal blocks
    # val - only normal blocks
    # test - rest of normal blocks and all anomalous
    many_to_one = args.get("many_to_one", True)
    # Dataset construction and collate function must match the labelling mode.
    create_sequence = create_sequence_dataset_many_to_one if many_to_one else create_sequence_dataset_many_to_many
    train_dataset = create_sequence(train_blocks, train_labels)
    validation_dataset = create_sequence(val_blocks, val_labels)
    test_dataset = create_sequence(test_blocks, test_labels)
    logger.info('Creating Torch DataLoaders')
    loaders_kwargs = {
        'batch_size': args['batch_size'],
        'collate_fn': pad_collate_many_to_one if args.get("many_to_one", True) else pad_collate_many_to_many,
        'shuffle': True,
        'pin_memory': True
    }
    train_l = tdata.DataLoader(train_dataset, **loaders_kwargs)
    validation_l = tdata.DataLoader(validation_dataset, **loaders_kwargs)
    test_l = tdata.DataLoader(test_dataset, **loaders_kwargs)
    logger.info('Creating model, optimizer, lr_scheduler and trainer')
    device = torch.device(args['device'])
    # Feature dimension taken from the last axis of an arbitrary block.
    f_dim = train_blocks[list(train_blocks.keys())[0]].shape[-1]
    trainer = Seq2LabelModelTrainer(
        device,
        f_dim,
        many_to_one,
        args['model_kwargs'],
        args['optim_kwargs'],
        args['lr_scheduler_kwargs'],
    )
    method_label = "lstm_classifier_m2o" if many_to_one else "lstm_classifier_m2m"
    stats = {
        'step': args,
        'training': {method_label: []},
        'metrics': {}
    }
    logger.info('Starting training')
    # Record the untrained (epoch 0) validation loss as a baseline.
    validation_loss = trainer.evaluate(validation_l)
    stats['training'][method_label].append(
        {'epoch': 0, 'validation_loss': validation_loss})
    logger.info('Epoch: %3d | Validation loss: %.2f', 0, validation_loss)
    for epoch in range(1, args['epochs'] + 1):
        train_loss = trainer.train(train_l)
        validation_loss = trainer.evaluate(validation_l)
        stats['training'][method_label].append(
            {'epoch': epoch,
             'train_loss': train_loss,
             'validation_loss': validation_loss}
        )
        logger.info('Epoch: %3d | Train loss: %.2f | Validation loss: %.2f',
                    epoch, train_loss, validation_loss)
    # The classification threshold is tuned on the validation split only,
    # then applied unchanged to the test split.
    logger.info(f'Computing threshold on validation set')
    threshold, f1 = trainer.find_optimal_threshold(validation_l)
    logger.info(f'Threshold = {threshold}, F1 = {f1}')
    y_test, t_test = trainer.predict_flatten(test_l)
    c_test = classify(y_test, threshold)
    logger.info(f"y_test = {y_test.shape}, c_test = {c_test.shape}, t_test = {t_test.shape}")
    metrics = get_metrics(t_test, c_test)
    metrics.update(get_threshold_metrics(t_test, y_test))
    logger.info(f'Precision = {metrics["precision"]:.2f}, Recall = {metrics["recall"]:.2f}, F1-score = {metrics["f1"]:.2f}')
    logger.info(f'MCC = {metrics["mcc"]:.2f}')
    logger.info(f'AUC = {metrics["auc"]:.2f}, AP = {metrics["ap"]:.2f}')
    stats['metrics'][method_label] = metrics
    logger.info('Saving metrics into \'%s\'', stats_path)
    stats_path.write_text(json.dumps(stats, indent=4))
def create_sequence_dataset_many_to_one(blocks, labels_):
    """Build a SequenceDataset with one scalar label per block."""
    inputs = [block.astype(np.float32, copy=False) for block in blocks.values()]
    labels = torch.FloatTensor(labels_.Label.values)
    return ml4logs.models.baselines.SequenceDataset(inputs, labels)
def create_sequence_dataset_many_to_many(blocks, labels_):
    """Build a SequenceDataset with one label per time step.

    An anomalous block gets an all-ones target of its own length; a normal
    block gets an all-zeros target.
    """
    inputs, labels = [], []
    for block, label in zip(blocks.values(), labels_.Label.values):
        inputs.append(block.astype(np.float32, copy=False))
        fill = torch.ones if label else torch.zeros
        labels.append(fill(block.shape[0], dtype=torch.float32))
    return ml4logs.models.baselines.SequenceDataset(inputs, labels)
def pad_collate_many_to_one(samples):
    """Collate (features, block_label) samples into a packed batch.

    Each sample pairs a ``(block_size, feature_dim)`` numpy array with a
    scalar block label.  Returns the packed input sequences, a FloatTensor
    of labels and a numpy array of the original sequence lengths.
    """
    inputs, labels = zip(*samples)
    lengths = np.array([seq.shape[0] for seq in inputs])
    # Convert to torch tensors and pack (sequences may be unsorted).
    packed = tutilsrnn.pack_sequence(
        [torch.from_numpy(seq) for seq in inputs],
        enforce_sorted=False)
    return packed, torch.FloatTensor(labels), lengths
def pad_collate_many_to_many(samples):
    """Collate (features, per-step labels) samples into packed batches.

    Each sample pairs a ``(block_size, feature_dim)`` numpy array with a
    ``(block_size,)`` torch tensor of per-time-step labels.  Both inputs
    and labels are packed; the original sequence lengths are returned too.
    """
    inputs, labels = zip(*samples)
    lengths = np.array([seq.shape[0] for seq in inputs])
    # Convert the numpy inputs to torch tensors, then pack both streams
    # (sequences may arrive unsorted by length).
    packed_inputs = tutilsrnn.pack_sequence(
        [torch.from_numpy(seq) for seq in inputs],
        enforce_sorted=False)
    packed_labels = tutilsrnn.pack_sequence(list(labels), enforce_sorted=False)
    return packed_inputs, packed_labels, lengths
|
<reponame>kanokkorn/watering_robot
from gps3 import gps3
import serial
import math
import time
import csv
import os
# setup gps socket
# Serial link to the motor/pump controller; single-letter commands are
# written to it ('M' move, 'S' stop, 'O'/'P' during watering -- see track()).
# NOTE(review): device path and baud rate are hard-coded.
ser = serial.Serial('/dev/ttyUSB0', 9600)
gps_socket = gps3.GPSDSocket()
data_stream = gps3.DataStream()
gps_socket.connect()  # connect to the local gpsd daemon
gps_socket.watch()    # start streaming position reports
#read csv files
def track():
    """Drive the robot through the waypoints listed in ./lat_lon.csv.

    For each CSV row (a lat,lon waypoint) the robot is commanded over the
    serial link to move ('M') until the haversine distance between the
    current GPS fix and the waypoint drops below 5 m; it is then stopped
    ('S') and the watering routine ('O'/'P') runs.  Uses the module-level
    ``ser``, ``gps_socket`` and ``data_stream`` objects.
    """
    # prefix parameter
    distance = 10          # sentinel > 5 m so the "move" branch is taken first
    earth_radius = 6371e3  # metres, for the haversine computation below
    in_lat = 10.725450     # fallback current position until a fix arrives
    in_lon = 99.375350
    k = 1                  # checkpoint counter (1-based, used in messages only)
    with open('./lat_lon.csv', newline='') as f:
        read = csv.reader(f)
        for gps_row in read:
            #print(gps_row) # check if gps read properly
            try:
                lat_b = float(gps_row[0]) #unpack list to float
                lon_b = float(gps_row[1])
            except IndexError:
                os.system('cls||clear')
                raise Exception('Indexing error...Program terminated.')
                # NOTE(review): the two lines below are unreachable after the
                # raise above -- the stop command is never sent on this path.
                ser.write(str.encode('S'))
                break
            # main function
            for new_data in gps_socket:
                if (new_data and distance > 5):
                    data_stream.unpack(new_data)
                    #print('Altitude = ', data_stream.TPV['lat'], 'Latitude = ', data_stream.TPV['lon'])
                    # NOTE(review): the second comparison uses '!=' -- probably a
                    # typo for "== 'n/a'"; harmless because the branch is a no-op.
                    if (data_stream.TPV['lat'] == 'n/a') or (data_stream.TPV['lon'] != 'n/a'):
                        pass
                    if (data_stream.TPV['lat'] != 'n/a') or (data_stream.TPV['lon'] != 'n/a'):
                        # Parse the fix; on 'n/a' fall back to a fixed position.
                        try:
                            in_lat = float(data_stream.TPV['lat'])
                        except ValueError:
                            print("lat N/A value")
                            in_lat = (10.712709)
                        try:
                            in_lon = float(data_stream.TPV['lon'])
                        except ValueError:
                            print("lon N/A value")
                            in_lon = (99.378788)
                        # Haversine great-circle distance between the current
                        # fix (in_lat, in_lon) and the waypoint (lat_b, lon_b).
                        lat_A = math.radians(in_lat)
                        lat_B = math.radians(lat_b)
                        del_lat = math.radians(lat_b-(in_lat))
                        del_lon = math.radians(lon_b-(in_lon))
                        a = (math.sin(del_lat/2)*math.sin(del_lat/2))+math.cos(lat_A)*math.cos(lat_B)*(math.sin(del_lon/2)*math.sin(del_lon/2))
                        # check if equal zero
                        try:
                            c = 2*math.atan2(math.sqrt(a), math.sqrt((1-a)))
                        except ValueError as identifier:
                            # NOTE(review): if this fires (a > 1 from rounding),
                            # 'c' keeps its previous value -- or is unbound on
                            # the very first pass, raising NameError below.
                            print("No Value")
                        distance = earth_radius*c
                        os.system('cls||clear')
                        print("Distance: ", distance, " Status : Running")
                        ser.write(str.encode('M'))  # keep moving toward waypoint
                elif (new_data and distance < 5 ):
                    data_stream.unpack(new_data)
                    #print('Altitude = ', data_stream.TPV['lat'], 'Latitude = ', data_stream.TPV['lon'])
                    if (data_stream.TPV['lat'] == 'n/a') or (data_stream.TPV['lon'] != 'n/a'):
                        pass
                    if (data_stream.TPV['lat'] != 'n/a') or (data_stream.TPV['lon'] != 'n/a'):
                        try:
                            in_lat = float(data_stream.TPV['lat'])
                        except ValueError:
                            print("lat N/A value")
                            in_lat = (10.712709)
                        try:
                            in_lon = float(data_stream.TPV['lon'])
                        except ValueError:
                            print("lon N/A value")
                            in_lon = (99.378788)
                        # Arrived at the waypoint: stop and run the watering cycle.
                        ser.write(str.encode('S'))
                        os.system('cls||clear')
                        print('\n==== Checkpoint ', k," start ====")
                        time.sleep(0.3)
                        print("\nDistance: ", distance, " Status : Stop")
                        time.sleep(0.3)
                        print("Serial_STOP")
                        time.sleep(0.3)
                        for target in range(10):
                            ser.write(str.encode('O'))
                            print("watering"+"."*target, end="\r")
                            ser.write(str.encode('P'))
                            time.sleep(0.8)
                        time.sleep(0.3)
                        print("\nClassification palm Tree :"+ str(k))
                        time.sleep(0.3)
                        #classify_edit.main()
                        for target in range(10):
                            print("writing csv files"+"."*target, end="\r")
                            time.sleep(0.8)
                        # Reset for the next waypoint; treat the reached
                        # waypoint as the new current position.
                        distance = 10
                        in_lat = lat_b
                        in_lon = lon_b
                        distance = 10
                        print("\n==== Checkpoint", k, " done ====\n")
                        k += 1
                        time.sleep(1)
                        print("Start Moving to next checkpoint\n")
                        time.sleep(1)
                        break  # advance to the next CSV waypoint
        else:
            # for/else on the CSV loop: all waypoints processed without break.
            ser.write(str.encode('S'))
            os.system('cls||clear')
            print('\n==== End of lines ====')
            time.sleep(1)
            print('\nFinished\n')
if __name__ == '__main__':
    try:
        track()
    except KeyboardInterrupt:
        # Ctrl-C: command the rover to stop before terminating.
        print('Serial_STOP')
        ser.write(str.encode('S'))
        raise Exception('Interrupt...Program terminated.')
|
<gh_stars>0
import argparse
import os
import numpy as np
import scipy.spatial.qhull as qhull
import pandas as pd
from mpi4py import MPI
import stk
import utilities
from scipy.interpolate import griddata
if __name__ == "__main__":
    # Post-process Exodus/STK results in parallel: time-average tau_wall on
    # the wall and velocity in the interior, then reduce to 1-D profiles.
    # Parse arguments
    parser = argparse.ArgumentParser(description="A simple post-processing tool")
    parser.add_argument(
        "-m",
        "--mfile",
        help="Root name of files to postprocess",
        required=True,
        type=str,
    )
    parser.add_argument("--auto_decomp", help="Auto-decomposition", action="store_true")
    parser.add_argument(
        "-v",
        "--vel_name",
        help="Name of the velocity field",
        default="velocity",
        type=str,
    )
    parser.add_argument(
        "--navg", help="Number of times to average", default=10, type=int
    )
    parser.add_argument(
        "--flowthrough",
        help="Flowthrough time (L/u)",
        default=8 * np.pi / 22.5,
        type=float,
    )
    parser.add_argument(
        "--factor",
        help="Factor of flowthrough time between time steps used in average",
        type=float,
        default=1.2,
    )
    args = parser.parse_args()
    fdir = os.path.dirname(args.mfile)
    pfx = os.path.splitext(os.path.basename(args.mfile))[0]

    # MPI / STK setup; each rank loads its share of the mesh.
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    par = stk.Parallel.initialize()
    # NOTE(review): presumably prints from rank 0 only -- confirm in utilities.
    printer = utilities.p0_printer(par)

    mesh = stk.StkMesh(par)
    printer("Reading meta data for mesh: ", args.mfile)
    mesh.read_mesh_meta_data(args.mfile, auto_decomp=args.auto_decomp)
    printer("Done reading meta data")
    printer("Loading bulk data for mesh: ", args.mfile)
    mesh.populate_bulk_data()
    printer("Done reading bulk data")
    num_time_steps = mesh.stkio.num_time_steps
    max_time = mesh.stkio.max_time
    tsteps = np.array(mesh.stkio.time_steps)
    printer(f"""Num. time steps = {num_time_steps}\nMax. time step = {max_time}""")

    # Figure out the times over which to average
    if args.factor > 0:
        # Pick navg snapshots spaced ~(factor * flowthrough) apart, counted
        # back from the final step, snapped to the nearest stored steps.
        tmp_tavg = np.sort(
            tsteps[-1] - args.flowthrough * args.factor * np.arange(args.navg)
        )
        dist = np.abs(np.array(tsteps)[:, np.newaxis] - tmp_tavg)
        idx = dist.argmin(axis=0)
    else:
        # Non-positive factor: simply use the last navg stored steps.
        idx = np.arange(len(tsteps) - args.navg, len(tsteps))
    tavg = tsteps[idx]
    # All stored steps from the earliest averaging step onward.
    tavg_instantaneous = tsteps[idx[0] :]
    printer("Averaging the following steps:")
    printer(tavg)

    # Extract time and spanwise average tau_wall on wall
    tw_data = None
    for tstep in tavg_instantaneous:
        ftime, missing = mesh.stkio.read_defined_input_fields(tstep)
        printer(f"Loading tau_wall fields for time: {ftime}")

        coords = mesh.meta.coordinate_field
        wall = mesh.meta.get_part("wall")
        sel = wall & mesh.meta.locally_owned_part
        tauw = mesh.meta.get_field("tau_wall")
        names = ["x", "y", "z", "tauw"]
        nnodes = sum(bkt.size for bkt in mesh.iter_buckets(sel, stk.StkRank.NODE_RANK))

        cnt = 0
        data = np.zeros((nnodes, len(names)))
        for bkt in mesh.iter_buckets(sel, stk.StkRank.NODE_RANK):
            xyz = coords.bkt_view(bkt)
            tw = tauw.bkt_view(bkt)
            data[cnt : cnt + bkt.size, :] = np.hstack((xyz, tw.reshape(-1, 1)))
            cnt += bkt.size

        if tw_data is None:
            tw_data = np.zeros(data.shape)
        # Running time average over all instantaneous steps.
        tw_data += data / len(tavg_instantaneous)

    lst = comm.gather(tw_data, root=0)
    comm.Barrier()
    if rank == 0:
        # Spanwise average: collapse all nodes sharing the same x.
        df = pd.DataFrame(np.vstack(lst), columns=names)
        tw = df.groupby("x", as_index=False).mean().sort_values(by=["x"])
        twname = os.path.join(fdir, f"{pfx}-tw.dat")
        tw.to_csv(twname, index=False)

    # Extract (average) velocity data
    vel_data = None
    for tstep in tavg:
        ftime, missing = mesh.stkio.read_defined_input_fields(tstep)
        printer(f"Loading {args.vel_name} fields for time: {ftime}")

        interior = mesh.meta.get_part("unspecified-2-hex")
        sel = interior & mesh.meta.locally_owned_part
        velocity = mesh.meta.get_field(args.vel_name)
        names = ["x", "y", "z", "u", "v", "w"]
        nnodes = sum(bkt.size for bkt in mesh.iter_buckets(sel, stk.StkRank.NODE_RANK))

        cnt = 0
        data = np.zeros((nnodes, len(names)))
        for bkt in mesh.iter_buckets(sel, stk.StkRank.NODE_RANK):
            # NOTE(review): 'coords' is reused from the tau_wall loop above;
            # this only works because that loop ran at least once -- confirm.
            xyz = coords.bkt_view(bkt)
            vel = velocity.bkt_view(bkt)
            data[cnt : cnt + bkt.size, :] = np.hstack((xyz, vel))
            cnt += bkt.size

        if vel_data is None:
            vel_data = np.zeros(data.shape)
        vel_data += data / len(tavg)

    lst = comm.gather(vel_data, root=0)
    comm.Barrier()
    if rank == 0:
        df = pd.DataFrame(np.vstack(lst), columns=names)
        # Fold y > 1 onto the lower half -- presumably symmetry about channel
        # mid-height y = 1; confirm against the mesh geometry.
        df.loc[df.y > 1, "y"] = 2 - df.loc[df.y > 1, "y"]
        by = utilities.groupby_isclose(df.y, atol=1e-10)
        df = df.groupby(by=by, as_index=False).mean().sort_values(by=["y"])
        df.to_csv(os.path.join(fdir, f"{pfx}-profiles.dat"), index=False)
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import datetime
from django.utils.functional import cached_property
from corehq.apps.hqwebapp.decorators import use_nvd3
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.graph_models import MultiBarChart, Axis, PieChart
from corehq.apps.reports.standard import ProjectReportParametersMixin, CustomProjectReport, DatespanMixin
from custom.intrahealth.filters import DateRangeFilter, ProgramsAndProductsFilter, YeksiNaaLocationFilter
from custom.intrahealth.sqldata import SatisfactionRateAfterDeliveryPerProductData
from dimagi.utils.dates import force_to_date
class TauxDeSatisfactionReport(CustomProjectReport, DatespanMixin, ProjectReportParametersMixin):
    """Satisfaction-rate report: per location, the percentage of each
    product actually delivered versus its ideal top-up amount, one column
    per product, plus a national-level pie chart.
    """
    slug = 'taux_de_satisfaction_report'
    comment = 'produits proposés sur produits livrés'
    name = 'Taux de Satisfaction'
    default_rows = 10
    report_template_path = 'yeksi_naa/tabular_report.html'

    @use_nvd3
    def decorator_dispatcher(self, request, *args, **kwargs):
        super(TauxDeSatisfactionReport, self).decorator_dispatcher(request, *args, **kwargs)

    @property
    def fields(self):
        return [DateRangeFilter, ProgramsAndProductsFilter, YeksiNaaLocationFilter]

    @cached_property
    def rendered_report_title(self):
        return self.name

    @property
    def report_context(self):
        context = {
            'report': self.get_report_context(),
            'title': self.name,
            'charts': self.charts
        }
        return context

    @property
    def selected_location(self):
        """Return the SQLLocation picked in the filter, or None."""
        try:
            return SQLLocation.objects.get(location_id=self.request.GET.get('location_id'))
        except SQLLocation.DoesNotExist:
            return None

    @property
    def selected_location_type(self):
        """Location type of the rows shown: one level below the selection."""
        if self.selected_location:
            location_type = self.selected_location.location_type.code
            if location_type == 'region':
                return 'District'
            else:
                return 'PPS'
        else:
            return 'Region'

    @property
    def headers(self):
        """Location column followed by one column per product.

        Product columns are sorted alphabetically so they line up with
        the per-row cells built by calculate_rows, which also sorts by
        product name.  (Previously discovery order was used here, which
        could misalign header columns and row cells.)
        """
        def get_products():
            products_names = []
            for row in self.clean_rows:
                for product_info in row['products']:
                    product_name = product_info['product_name']
                    if product_name not in products_names:
                        products_names.append(product_name)
            return products_names

        headers = DataTablesHeader(
            DataTablesColumn(self.selected_location_type),
        )
        for product in sorted(get_products()):
            headers.add_column(DataTablesColumn(product))
        return headers

    def get_report_context(self):
        """Assemble the report_table dict; empty until filters are set."""
        if self.needs_filters:
            headers = []
            rows = []
        else:
            rows = self.calculate_rows()
            headers = self.headers
        context = dict(
            report_table=dict(
                title=self.name,
                slug=self.slug,
                comment=self.comment,
                headers=headers,
                rows=rows,
                default_rows=self.default_rows,
            )
        )
        return context

    @property
    def clean_rows(self):
        """Aggregate the raw per-product rows into one entry per location.

        Each entry is ``{'location_name', 'location_id', 'products': [...]}``
        where duplicate (location, product) rows have their
        ``amt_delivered_convenience`` and ``ideal_topup`` amounts summed.

        Uses a location_id -> list-index map so every row is merged into a
        single entry regardless of row order.  (The previous linear scan
        only merged into the *last* entry reliably and could append a
        duplicate entry for a location seen earlier.)
        """
        quantities = SatisfactionRateAfterDeliveryPerProductData(config=self.config).rows
        loc_type = self.selected_location_type.lower()
        quantities_list = []
        location_positions = {}
        for quantity in quantities:
            location_id = quantity['{}_id'.format(loc_type)]
            product_dict = {
                'product_name': quantity['product_name'],
                'product_id': quantity['product_id'],
                'amt_delivered_convenience': quantity['amt_delivered_convenience'],
                'ideal_topup': quantity['ideal_topup'],
            }
            if location_id not in location_positions:
                # first row for this location: create its entry
                location_positions[location_id] = len(quantities_list)
                quantities_list.append({
                    'location_name': quantity['{}_name'.format(loc_type)],
                    'location_id': location_id,
                    'products': [product_dict],
                })
                continue
            products = quantities_list[location_positions[location_id]]['products']
            for product in products:
                if product['product_id'] == product_dict['product_id']:
                    # same product reported again for this location: sum it
                    product['amt_delivered_convenience'] += \
                        product_dict['amt_delivered_convenience']
                    product['ideal_topup'] += product_dict['ideal_topup']
                    break
            else:
                products.append(product_dict)
        return quantities_list

    def calculate_rows(self):
        """Build table rows: location name plus one percentage per product."""
        def data_to_rows(quantities_list):
            quantities_to_return = []
            product_names = []
            product_ids = []
            # collect the full product universe across all locations
            for quantity in quantities_list:
                for product in quantity['products']:
                    if product['product_id'] not in product_ids:
                        product_ids.append(product['product_id'])
                        product_names.append(product['product_name'])
            for quantity in quantities_list:
                products_list = list(quantity['products'])
                products_names_from_list = [x['product_name'] for x in quantity['products']]
                # pad missing products with zeros so every row has a cell
                # for every header column
                for product_name in product_names:
                    if product_name not in products_names_from_list:
                        products_list.append({
                            'product_name': product_name,
                            'amt_delivered_convenience': 0,
                            'ideal_topup': 0,
                        })
                quantities_to_return.append([
                    quantity['location_name'],
                ])
                # alphabetical order must match the column order in headers
                products_list = sorted(products_list, key=lambda x: x['product_name'])
                for product_info in products_list:
                    amt_delivered_convenience = product_info['amt_delivered_convenience']
                    ideal_topup = product_info['ideal_topup']
                    # plain != comparison; the old code compared a string
                    # with 'is not', which relies on CPython interning
                    if ideal_topup > 0:
                        percent_formatted = '{:.2f} %'.format(
                            (amt_delivered_convenience / float(ideal_topup)) * 100)
                    else:
                        # no top-up target recorded for this product here
                        percent_formatted = 'pas de données'
                    quantities_to_return[-1].append({
                        'html': '{}'.format(percent_formatted),
                        'sort_key': percent_formatted
                    })
            return quantities_to_return

        rows = data_to_rows(self.clean_rows)
        return rows

    @property
    def charts(self):
        """National pie chart: average satisfaction percentage per product."""
        chart = PieChart('Taux de Satisfaction des produits au niveau national',
                         'produits proposes sur produits livres', [])

        def data_to_chart(quantities_list):
            products_names_list = []
            products_occurences = {}
            products_data = {}
            product_names = []
            for quantity in quantities_list:
                products_list = []
                for product in quantity['products']:
                    products_list.append(product)
                    if product['product_name'] not in product_names:
                        product_names.append(product['product_name'])
                products_names_from_list = [x['product_name'] for x in quantity['products']]
                # pad missing products with zeros so every location counts
                # towards every product's average
                for product_name in product_names:
                    if product_name not in products_names_from_list:
                        products_list.append({
                            'product_name': product_name,
                            'amt_delivered_convenience': 0,
                            'ideal_topup': 0,
                        })
                for product in products_list:
                    product_name = product['product_name']
                    amt_delivered_convenience = product['amt_delivered_convenience']
                    ideal_topup = product['ideal_topup']
                    if product_name not in products_names_list:
                        products_names_list.append(product_name)
                        products_occurences[product_name] = 1
                        products_data[product_name] = [amt_delivered_convenience, ideal_topup]
                    else:
                        products_occurences[product_name] += 1
                        products_data[product_name][0] += amt_delivered_convenience
                        products_data[product_name][1] += ideal_topup
            chart_percents = []
            for product in product_names:
                amt_delivered_convenience = products_data[product][0]
                ideal_topup = products_data[product][1]
                product_occurences = products_occurences[product]
                if ideal_topup:
                    percent = ((amt_delivered_convenience / float(ideal_topup)) * 100) \
                        / product_occurences
                else:
                    # no top-up target recorded anywhere for this product;
                    # previously this raised ZeroDivisionError
                    percent = 0
                chart_percents.append([product, percent])
            return chart_percents

        def get_data_for_graph():
            chart_percents = data_to_chart(self.clean_rows)
            return [
                {'label': x[0], 'value': x[1]} for x in chart_percents
            ]

        chart.data = get_data_for_graph()
        return [chart]

    @property
    def config(self):
        """Filter values (dates, program, product, location) from GET."""
        config = dict(
            domain=self.domain,
        )
        if self.request.GET.get('startdate'):
            startdate = force_to_date(self.request.GET.get('startdate'))
        else:
            # default both ends of the range to "now" when not supplied
            startdate = datetime.datetime.now()
        if self.request.GET.get('enddate'):
            enddate = force_to_date(self.request.GET.get('enddate'))
        else:
            enddate = datetime.datetime.now()
        config['startdate'] = startdate
        config['enddate'] = enddate
        config['product_program'] = self.request.GET.get('product_program')
        config['product_product'] = self.request.GET.get('product_product')
        config['selected_location'] = self.request.GET.get('location_id')
        return config
|
"""Find/replace widget."""
# FIXME: finding 'as' or 'asa' from 'asasasasa' is broken
import re
import sys
import tkinter as tk
from tkinter import ttk
import weakref
from porcupine import actions, get_tab_manager, images, tabs
# keys are tabs, values are Finder widgets; weak keys mean a closed tab
# (and its Finder) can be garbage-collected without explicit cleanup
finders = weakref.WeakKeyDictionary()
class Finder(ttk.Frame):
    """A widget for finding and replacing text.

    Use the pack geometry manager with this widget.
    """

    def __init__(self, parent, textwidget, **kwargs):
        super().__init__(parent, **kwargs)
        self._textwidget = textwidget

        # grid layout:
        #         column 0        column 1     column 2         column 3
        #     ,---------------------------------------------------------------.
        # row0|   Find:       | text entry   |       | [x] Full words only    |
        #     |---------------|---------------|-------|-----------------------|
        # row1| Replace with: | text entry   |       | [x] Ignore case        |
        #     |---------------------------------------------------------------|
        # row2| button frame, this thing contains a bunch of buttons          |
        #     |---------------------------------------------------------------|
        # row3| status label with useful-ish text                             |
        #     |---------------------------------------------------------------|
        # row4| separator                                                     |
        #     `---------------------------------------------------------------'
        #
        # note that column 2 is used just for spacing, the separator helps
        # distinguish this from e.g. status bar below this
        self.grid_columnconfigure(2, minsize=30)
        self.grid_columnconfigure(3, weight=1)

        # TODO: use the pygments theme somehow?
        textwidget.tag_config('find_highlight',
                              foreground='black', background='yellow')

        self.find_entry = self._add_entry(0, "Find:")
        find_var = self.find_entry['textvariable'] = tk.StringVar()
        find_var.trace('w', self.highlight_all_matches)
        self.find_entry.lol = find_var     # because cpython gc

        self.replace_entry = self._add_entry(1, "Replace with:")

        self.find_entry.bind('<Shift-Return>', self._go_to_previous_match)
        self.find_entry.bind('<Return>', self._go_to_next_match)

        # commented out because pressing tab in self.find_entry unselects the
        # text in textwidget for some reason
        #self.replace_entry.bind('<Return>', self._replace_this)

        buttonframe = ttk.Frame(self)
        buttonframe.grid(row=2, column=0, columnspan=4, sticky='we')

        self.previous_button = ttk.Button(buttonframe, text="Previous match",
                                          command=self._go_to_previous_match)
        self.next_button = ttk.Button(buttonframe, text="Next match",
                                      command=self._go_to_next_match)
        self.replace_this_button = ttk.Button(
            buttonframe, text="Replace this match",
            command=self._replace_this)
        self.replace_all_button = ttk.Button(
            buttonframe, text="Replace all",
            command=self._replace_all)
        self.previous_button.pack(side='left')
        self.next_button.pack(side='left')
        self.replace_this_button.pack(side='left')
        self.replace_all_button.pack(side='left')
        self._update_buttons()

        self.full_words_var = tk.BooleanVar()
        self.full_words_var.trace('w', self.highlight_all_matches)
        self.ignore_case_var = tk.BooleanVar()
        self.ignore_case_var.trace('w', self.highlight_all_matches)

        ttk.Checkbutton(
            self, text="Full words only", variable=self.full_words_var).grid(
                row=0, column=3, sticky='w')
        ttk.Checkbutton(
            self, text="Ignore case", variable=self.ignore_case_var).grid(
                row=1, column=3, sticky='w')

        self.statuslabel = ttk.Label(self)
        self.statuslabel.grid(row=3, column=0, columnspan=4, sticky='we')

        ttk.Separator(self, orient='horizontal').grid(
            row=4, column=0, columnspan=4, sticky='we')

        closebutton = ttk.Label(self, cursor='hand2')
        closebutton.place(relx=1, rely=0, anchor='ne')
        closebutton.bind('<Button-1>', self.hide)

        # TODO: figure out why images don't work in tests
        if 'pytest' not in sys.modules:    # pragma: no cover
            closebutton['image'] = images.get('closebutton')

        # explained in test_find_plugin.py
        textwidget.bind('<<Selection>>', self._update_buttons, add=True)

    def _add_entry(self, row, text):
        """Create a labeled entry on the given grid row and return it."""
        ttk.Label(self, text=text).grid(row=row, column=0, sticky='w')
        entry = ttk.Entry(self, width=35, font='TkFixedFont')
        entry.bind('<Escape>', self.hide)
        entry.grid(row=row, column=1, sticky='we')
        return entry

    def show(self):
        """Pack the finder below the text widget and focus the find entry."""
        self.pack(fill='x')
        self.find_entry.focus_set()
        self.highlight_all_matches()

    def hide(self, junk_event=None):
        """Unpack the finder and return focus to the text widget."""
        # remove previous highlights from highlight_all_matches
        self._textwidget.tag_remove('find_highlight', '1.0', 'end')
        self.pack_forget()
        self._textwidget.focus_set()

    # tag_ranges returns (start1, end1, start2, end2, ...), and this thing
    # gives a list of (start, end) pairs
    # NOTE(review): adjacent matches (e.g. 'as' repeated back to back) still
    # merge into a single tag range here -- TODO: track ranges separately
    def get_match_ranges(self):
        starts_and_ends = list(
            map(str, self._textwidget.tag_ranges('find_highlight')))
        assert len(starts_and_ends) % 2 == 0
        pairs = list(zip(starts_and_ends[0::2], starts_and_ends[1::2]))
        return pairs

    # must be called when going to another match or replacing becomes possible
    # or impossible, i.e. when find_highlight areas or the selection changes
    def _update_buttons(self, junk_event=None):
        matches_something_state = (
            'normal' if self.get_match_ranges() else 'disabled')

        try:
            start, end = map(str, self._textwidget.tag_ranges('sel'))
        except ValueError:
            replace_this_state = 'disabled'
        else:       # no, elif doesn't work here
            if (start, end) in self.get_match_ranges():
                replace_this_state = 'normal'
            else:
                replace_this_state = 'disabled'

        self.previous_button['state'] = matches_something_state
        self.next_button['state'] = matches_something_state
        self.replace_this_button['state'] = replace_this_state
        self.replace_all_button['state'] = matches_something_state

    def _get_matches_to_highlight(self, looking4):
        """Yield the start index of every non-overlapping match."""
        search_opts = {'nocase': self.ignore_case_var.get()}
        if self.full_words_var.get():
            # tk doesn't have python-style \b, but it has \m and \M that match
            # the beginning and end of word, see re_syntax(3tcl)
            search_arg = r'\m' + looking4 + r'\M'
            search_opts['regexp'] = True
        else:
            search_arg = looking4

        # Every match is exactly len(looking4) characters long (\m and \M
        # are zero-width), so resuming the search right after the end of
        # the previous match skips overlapping matches.  The old code
        # resumed at '+ 1 char', which yielded overlapping matches (e.g.
        # 'asa' twice inside 'asasa') whose highlight tags then merged and
        # confused get_match_ranges().
        start_index = '1.0'
        first_time = True
        while True:
            if first_time:
                start_index_for_search = start_index
                first_time = False
            else:
                start_index_for_search = '%s + %d chars' % (
                    start_index, len(looking4))

            start_index = self._textwidget.search(
                search_arg, start_index_for_search, 'end', **search_opts)
            if not start_index:
                # no more matches
                break
            yield start_index

    def highlight_all_matches(self, *junk):
        """Re-highlight every match of the current search string."""
        # clear previous highlights
        self._textwidget.tag_remove('find_highlight', '1.0', 'end')

        looking4 = self.find_entry.get()
        if not looking4:     # don't search for empty string
            self._update_buttons()
            self.statuslabel['text'] = "Type something to find."
            return
        if self.full_words_var.get():
            # check for non-wordy characters
            match = re.search(r'\W', looking4)
            if match is not None:
                self._update_buttons()
                self.statuslabel['text'] = ('The search string can\'t contain '
                                            '"%s" when "Full words only" is '
                                            'checked.' % match.group(0))
                return

        count = 0
        for start_index in self._get_matches_to_highlight(looking4):
            self._textwidget.tag_add(
                'find_highlight', start_index,
                '%s + %d chars' % (start_index, len(looking4)))
            count += 1

        self._update_buttons()
        if count == 0:
            self.statuslabel['text'] = "Found no matches :("
        elif count == 1:
            self.statuslabel['text'] = "Found 1 match."
        else:
            self.statuslabel['text'] = "Found %d matches." % count

    def _select_range(self, start, end):
        """Select start..end in the text widget and scroll it into view."""
        # the tag_lower makes sure sel shows up, hiding find_highlight under it
        self._textwidget.tag_lower('find_highlight', 'sel')
        self._textwidget.tag_remove('sel', '1.0', 'end')
        self._textwidget.tag_add('sel', start, end)
        self._textwidget.mark_set('insert', start)
        self._textwidget.see(start)

    # TODO: adjust scrolling accordingly
    def _go_to_next_match(self, junk_event=None):
        pairs = self.get_match_ranges()
        if not pairs:
            # the "Next match" button is disabled in this case, but the key
            # binding of the find entry is not
            self.statuslabel['text'] = "No matches found!"
            return

        # find first pair that starts after the cursor
        for start, end in pairs:
            if self._textwidget.compare(start, '>', 'insert'):
                self._select_range(start, end)
                break
        else:
            # reached end of file, use the first match
            self._select_range(*pairs[0])

        self.statuslabel['text'] = ""
        self._update_buttons()

    # see _go_to_next_match for comments
    def _go_to_previous_match(self, junk_event=None):
        pairs = self.get_match_ranges()
        if not pairs:
            self.statuslabel['text'] = "No matches found!"
            return

        for start, end in reversed(pairs):
            if self._textwidget.compare(start, '<', 'insert'):
                self._select_range(start, end)
                break
        else:
            self._select_range(*pairs[-1])

        self.statuslabel['text'] = ""
        self._update_buttons()
        return

    def _replace_this(self, junk_event=None):
        """Replace the currently selected match and move to the next one."""
        if str(self.replace_this_button['state']) == 'disabled':
            self.statuslabel['text'] = (
                'Click "Previous match" or "Next match" first.')
            return

        # highlighted areas must not be moved after .replace, think about what
        # happens when you replace 'asd' with 'asd'
        start, end = self._textwidget.tag_ranges('sel')
        self._textwidget.tag_remove('find_highlight', start, end)
        self._update_buttons()

        self._textwidget.replace(start, end, self.replace_entry.get())
        self._textwidget.mark_set('insert', start)
        self._go_to_next_match()

        left = len(self.get_match_ranges())
        if left == 0:
            self.statuslabel['text'] = "Replaced the last match."
        elif left == 1:
            self.statuslabel['text'] = (
                "Replaced a match. There is 1 more match.")
        else:
            self.statuslabel['text'] = (
                "Replaced a match. There are %d more matches." % left)

    def _replace_all(self):
        """Replace every highlighted match with the replacement text."""
        match_ranges = self.get_match_ranges()

        # must do this backwards because replacing may screw up indexes AFTER
        # the replaced place
        for start, end in reversed(match_ranges):
            self._textwidget.replace(start, end, self.replace_entry.get())

        self._textwidget.tag_remove('find_highlight', '1.0', 'end')
        self._update_buttons()

        if len(match_ranges) == 1:
            self.statuslabel['text'] = "Replaced 1 match."
        else:
            self.statuslabel['text'] = ("Replaced %d matches." %
                                        len(match_ranges))
def find():
    """Show the Finder of the selected tab, creating it on first use."""
    tab = get_tab_manager().select()
    assert isinstance(tab, tabs.FileTab)
    try:
        finder = finders[tab]
    except KeyError:
        finder = finders[tab] = Finder(tab.bottom_frame, tab.textwidget)
    finder.show()
def setup():
    """Register the find/replace action (Ctrl+F) for file tabs."""
    actions.add_command("Edit/Find and Replace", find, '<Control-f>',
                        tabtypes=[tabs.FileTab])
|
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
# <NAME>
# <NAME>
# <NAME>
#####################################################################
#
# Parameter description classes
#
# The _params dictionary in each class maps parameter names to either
# a Param or a VectorParam object. These objects contain the
# parameter description string, the parameter type, and the default
# value (if any). The convert() method on these objects is used to
# force whatever value is assigned to the parameter to the appropriate
# type.
#
# Note that the default values are loaded into the class's attribute
# space when the parameter dictionary is initialized (in
# MetaSimObject._new_param()); after that point they aren't used.
#
#####################################################################
import copy
import datetime
import re
import sys
import time
import math
import proxy
import ticks
from util import *
def isSimObject(*args, **kwargs):
    """Delegate to SimObject.isSimObject."""
    return SimObject.isSimObject(*args, **kwargs)
def isSimObjectSequence(*args, **kwargs):
    """Delegate to SimObject.isSimObjectSequence."""
    return SimObject.isSimObjectSequence(*args, **kwargs)
def isSimObjectClass(*args, **kwargs):
    """Delegate to SimObject.isSimObjectClass."""
    return SimObject.isSimObjectClass(*args, **kwargs)
allParams = {}
class MetaParamValue(type):
    """Metaclass that registers each new param type in allParams."""
    def __new__(mcls, name, bases, dct):
        new_class = type.__new__(mcls, name, bases, dct)
        # a param type name may only be registered once
        assert name not in allParams
        allParams[name] = new_class
        return new_class
# Dummy base class to identify types that are legitimate for SimObject
# parameters.
class ParamValue(object):
    """Dummy base class identifying legitimate SimObject parameter types."""
    __metaclass__ = MetaParamValue

    # Generate the code needed as a prerequisite for declaring a C++
    # object of this type.  Typically generates one or more #include
    # statements.  Used when declaring parameters of this type.
    @classmethod
    def cxx_predecls(cls, code):
        pass

    # Generate the code needed as a prerequisite for including a
    # reference to a C++ object of this type in a SWIG .i file.
    # Typically generates one or more %import or %include statements.
    @classmethod
    def swig_predecls(cls, code):
        pass

    # default for printing to .ini file is regular string conversion.
    # will be overridden in some cases
    def ini_str(self):
        return str(self)

    # allows us to blithely call unproxy() on things without checking
    # if they're really proxies or not
    def unproxy(self, base):
        return self
# Regular parameter description.
class ParamDesc(object):
    """Describes one SimObject parameter: its type, description string,
    and optional default value.

    Accepts either positional (desc) / (default, desc) arguments or the
    'desc'/'default' keywords.  Raises TypeError on malformed arguments
    or when no description is given.
    """
    def __init__(self, ptype_str, ptype, *args, **kwargs):
        self.ptype_str = ptype_str
        # remember ptype only if it is provided; otherwise it is resolved
        # lazily in __getattr__ (forward reference to a SimObject class)
        if ptype is not None:
            self.ptype = ptype

        if args:
            if len(args) == 1:
                self.desc = args[0]
            elif len(args) == 2:
                self.default = args[0]
                self.desc = args[1]
            else:
                # raise E(msg) is valid on both Python 2 and 3, unlike
                # the old 'raise E, msg' statement syntax
                raise TypeError('too many arguments')

        # 'in' replaces dict.has_key(), which Python 3 removed
        if 'desc' in kwargs:
            assert(not hasattr(self, 'desc'))
            self.desc = kwargs['desc']
            del kwargs['desc']

        if 'default' in kwargs:
            assert(not hasattr(self, 'default'))
            self.default = kwargs['default']
            del kwargs['default']

        if kwargs:
            raise TypeError('extra unknown kwargs %s' % kwargs)

        if not hasattr(self, 'desc'):
            raise TypeError('desc attribute missing')

    def __getattr__(self, attr):
        # lazily resolve a forward-referenced SimObject class by name
        if attr == 'ptype':
            ptype = SimObject.allClasses[self.ptype_str]
            assert isSimObjectClass(ptype)
            self.ptype = ptype
            return ptype

        raise AttributeError("'%s' object has no attribute '%s'" %
                             (type(self).__name__, attr))

    def convert(self, value):
        """Coerce an assigned value to this parameter's type."""
        if isinstance(value, proxy.BaseProxy):
            value.set_param_desc(self)
            return value
        if not hasattr(self, 'ptype') and isNullPointer(value):
            # deferred evaluation of SimObject; continue to defer if
            # we're just assigning a null pointer
            return value
        if isinstance(value, self.ptype):
            return value
        if isNullPointer(value) and isSimObjectClass(self.ptype):
            return value
        return self.ptype(value)

    def cxx_predecls(self, code):
        code('#include <cstddef>')
        self.ptype.cxx_predecls(code)

    def swig_predecls(self, code):
        self.ptype.swig_predecls(code)

    def cxx_decl(self, code):
        code('${{self.ptype.cxx_type}} ${{self.name}};')
# Vector-valued parameter description. Just like ParamDesc, except
# that the value is a vector (list) of the specified type instead of a
# single value.
class VectorParamValue(list):
__metaclass__ = MetaParamValue
def __setattr__(self, attr, value):
raise AttributeError, \
"Not allowed to set %s on '%s'" % (attr, type(self).__name__)
def ini_str(self):
return ' '.join([v.ini_str() for v in self])
def getValue(self):
return [ v.getValue() for v in self ]
def unproxy(self, base):
if len(self) == 1 and isinstance(self[0], proxy.AllProxy):
return self[0].unproxy(base)
else:
return [v.unproxy(base) for v in self]
class SimObjectVector(VectorParamValue):
    """Vector of SimObject instances; supports cloning and parenting."""

    # support clone operation
    def __call__(self, **kwargs):
        return SimObjectVector([v(**kwargs) for v in self])

    def clear_parent(self, old_parent):
        for v in self:
            v.clear_parent(old_parent)

    def set_parent(self, parent, name):
        if len(self) == 1:
            # a single element keeps the plain name, no index suffix
            self[0].set_parent(parent, name)
        else:
            # zero-pad the index so element names have uniform width
            width = int(math.ceil(math.log(len(self))/math.log(10)))
            for i,v in enumerate(self):
                v.set_parent(parent, "%s%0*d" % (name, width, i))

    def has_parent(self):
        # all() replaces reduce(lambda x,y: x and y, ...): clearer, safe
        # on an empty vector, and needs no functools.reduce on Python 3
        return all(v.has_parent() for v in self)

    # return 'cpu0 cpu1' etc. for print_ini()
    def get_name(self):
        return ' '.join([v._name for v in self])

    # By iterating through the constituent members of the vector here
    # we can nicely handle iterating over all a SimObject's children
    # without having to provide lots of special functions on
    # SimObjectVector directly.
    def descendants(self):
        for v in self:
            for obj in v.descendants():
                yield obj

    def get_config_as_dict(self):
        # list comprehension replaces the manual append loop
        return [v.get_config_as_dict() for v in self]

    # If we are replacing an item in the vector, make sure to set the
    # parent reference of the new SimObject to be the same as the parent
    # of the SimObject being replaced. Useful to have if we created
    # a SimObjectVector of temporary objects that will be modified later in
    # configuration scripts.
    def __setitem__(self, key, value):
        val = self[key]
        if value.has_parent():
            warn("SimObject %s already has a parent" % value.get_name() +\
                 " that is being overwritten by a SimObjectVector")
        value.set_parent(val.get_parent(), val._name)
        super(SimObjectVector, self).__setitem__(key, value)
class VectorParamDesc(ParamDesc):
    """Vector-valued parameter description.  Just like ParamDesc, except
    that the value is a vector (list) of the specified type instead of a
    single value."""

    # Convert assigned value to appropriate type.  If the RHS is not a
    # list or tuple, it generates a single-element list.
    def convert(self, value):
        if isinstance(value, (list, tuple)):
            # list: coerce each element into new list
            tmp_list = [ ParamDesc.convert(self, v) for v in value ]
        else:
            # singleton: coerce to a single-element list
            tmp_list = [ ParamDesc.convert(self, value) ]

        # SimObject elements get the richer SimObjectVector wrapper
        if isSimObjectSequence(tmp_list):
            return SimObjectVector(tmp_list)
        else:
            return VectorParamValue(tmp_list)

    def swig_module_name(self):
        # name of the generated per-type SWIG vector module
        return "%s_vector" % self.ptype_str

    def swig_predecls(self, code):
        code('%import "${{self.swig_module_name()}}.i"')

    def swig_decl(self, code):
        # emit the SWIG .i module wrapping std::vector<cxx_type>
        code('%module(package="m5.internal") ${{self.swig_module_name()}}')
        code('%{')
        self.ptype.cxx_predecls(code)
        code('%}')
        code()
        # Make sure the SWIGPY_SLICE_ARG is defined through this inclusion
        code('%include "std_container.i"')
        code()
        self.ptype.swig_predecls(code)
        code()
        code('%include "std_vector.i"')
        code()

        ptype = self.ptype_str
        cxx_type = self.ptype.cxx_type

        code('''\
%typemap(in) std::vector< $cxx_type >::value_type {
    if (SWIG_ConvertPtr($$input, (void **)&$$1, $$1_descriptor, 0) == -1) {
        if (SWIG_ConvertPtr($$input, (void **)&$$1,
                            $$descriptor($cxx_type), 0) == -1) {
            return NULL;
        }
    }
}

%typemap(in) std::vector< $cxx_type >::value_type * {
    if (SWIG_ConvertPtr($$input, (void **)&$$1, $$1_descriptor, 0) == -1) {
        if (SWIG_ConvertPtr($$input, (void **)&$$1,
                            $$descriptor($cxx_type *), 0) == -1) {
            return NULL;
        }
    }
}
''')

        code('%template(vector_$ptype) std::vector< $cxx_type >;')

    def cxx_predecls(self, code):
        code('#include <vector>')
        self.ptype.cxx_predecls(code)

    def cxx_decl(self, code):
        code('std::vector< ${{self.ptype.cxx_type}} > ${{self.name}};')
class ParamFactory(object):
    """Builds parameter descriptions via attribute access, so that e.g.
    Param.Int(5, "desc") creates a ParamDesc for type name 'Int'."""

    def __init__(self, param_desc_class, ptype_str = None):
        self.param_desc_class = param_desc_class
        self.ptype_str = ptype_str

    # attribute access extends the dotted type name: Param.Int.Foo -> 'Int.Foo'
    def __getattr__(self, attr):
        full_name = ('%s.%s' % (self.ptype_str, attr)
                     if self.ptype_str else attr)
        return ParamFactory(self.param_desc_class, full_name)

    # E.g., Param.Int(5, "number of widgets")
    def __call__(self, *args, **kwargs):
        try:
            ptype = allParams[self.ptype_str]
        except KeyError:
            # if name isn't defined yet, assume it's a SimObject, and
            # try to resolve it later
            ptype = None
        return self.param_desc_class(self.ptype_str, ptype, *args, **kwargs)
# Param builds scalar parameter descriptions, VectorParam list-valued ones.
Param = ParamFactory(ParamDesc)
VectorParam = ParamFactory(VectorParamDesc)
#####################################################################
#
# Parameter Types
#
# Though native Python types could be used to specify parameter types
# (the 'ptype' field of the Param and VectorParam classes), it's more
# flexible to define our own set of types. This gives us more control
# over how Python expressions are converted to values (via the
# __init__() constructor) and how these values are printed out (via
# the __str__() conversion method).
#
#####################################################################
# String-valued parameter. Just mixin the ParamValue class with the
# built-in str class.
class String(ParamValue,str):
    """String-valued parameter; mixes ParamValue into the built-in str."""
    cxx_type = 'std::string'

    # first arg renamed self -> cls: this is a classmethod, and the
    # sibling swig_predecls already follows that convention
    @classmethod
    def cxx_predecls(cls, code):
        code('#include <string>')

    @classmethod
    def swig_predecls(cls, code):
        code('%include "std_string.i"')

    def getValue(self):
        return self
# superclass for "numeric" parameter values, to emulate math
# operations in a type-safe way. e.g., a Latency times an int returns
# a new Latency object.
class NumericParamValue(ParamValue):
    """Superclass for "numeric" parameter values: emulates math operations
    in a type-safe way, e.g. a Latency times an int yields a new Latency."""

    def __str__(self):
        return str(self.value)

    def __float__(self):
        return float(self.value)

    def __long__(self):
        return long(self.value)

    def __int__(self):
        return int(self.value)

    # hook for bounds checking; subclasses with bounds override this
    def _check(self):
        return

    def __mul__(self, other):
        result = self.__class__(self)
        result.value *= other
        result._check()
        return result

    __rmul__ = __mul__

    def __div__(self, other):
        result = self.__class__(self)
        result.value /= other
        result._check()
        return result

    def __sub__(self, other):
        result = self.__class__(self)
        result.value -= other
        result._check()
        return result
# Metaclass for bounds-checked integer parameters. See CheckedInt.
class CheckedIntType(MetaParamValue):
    """Metaclass that derives 'min'/'max' bounds for CheckedInt subclasses
    from their 'size' and 'unsigned' attributes when not given directly."""
    def __init__(cls, name, bases, dict):
        super(CheckedIntType, cls).__init__(name, bases, dict)

        # CheckedInt is an abstract base class, so we actually don't
        # want to do any processing on it... the rest of this code is
        # just for classes that derive from CheckedInt.
        if name == 'CheckedInt':
            return

        if not (hasattr(cls, 'min') and hasattr(cls, 'max')):
            if not (hasattr(cls, 'size') and hasattr(cls, 'unsigned')):
                panic("CheckedInt subclass %s must define either\n" \
                      " 'min' and 'max' or 'size' and 'unsigned'\n",
                      name);
            # derive two's-complement bounds from the bit width
            if cls.unsigned:
                cls.min = 0
                cls.max = 2 ** cls.size - 1
            else:
                cls.min = -(2 ** (cls.size - 1))
                cls.max = (2 ** (cls.size - 1)) - 1
# Abstract superclass for bounds-checked integer parameters. This
# class is subclassed to generate parameter classes with specific
# bounds. Initialization of the min and max bounds is done in the
# metaclass CheckedIntType.__init__.
class CheckedInt(NumericParamValue):
    """Abstract bounds-checked integer parameter.

    Subclasses define either 'min'/'max' directly or 'size'/'unsigned',
    from which CheckedIntType derives the bounds.
    """
    __metaclass__ = CheckedIntType

    def _check(self):
        if not self.min <= self.value <= self.max:
            # raise E(msg) is valid on both Python 2 and 3, unlike the
            # old 'raise E, msg' statement syntax
            raise TypeError('Integer param out of bounds %d < %d < %d' %
                            (self.min, self.value, self.max))

    def __init__(self, value):
        if isinstance(value, str):
            # delegate string parsing (unit suffixes etc.) to convert
            self.value = convert.toInteger(value)
        elif isinstance(value, (int, long, float, NumericParamValue)):
            self.value = long(value)
        else:
            raise TypeError("Can't convert object of type %s to CheckedInt"
                            % type(value).__name__)
        self._check()

    @classmethod
    def cxx_predecls(cls, code):
        # most derived types require this, so we just do it here once
        code('#include "base/types.hh"')

    @classmethod
    def swig_predecls(cls, code):
        # most derived types require this, so we just do it here once
        code('%import "stdint.i"')
        code('%import "base/types.hh"')

    def getValue(self):
        return long(self.value)
# Concrete bounds-checked integer parameter types.  Each one fixes the
# C++ type plus either size/unsigned (bounds derived by CheckedIntType)
# or explicit min/max (Percent).
class Int(CheckedInt):      cxx_type = 'int';      size = 32; unsigned = False
class Unsigned(CheckedInt): cxx_type = 'unsigned'; size = 32; unsigned = True

class Int8(CheckedInt):     cxx_type =   'int8_t'; size =  8; unsigned = False
class UInt8(CheckedInt):    cxx_type =  'uint8_t'; size =  8; unsigned = True
class Int16(CheckedInt):    cxx_type =  'int16_t'; size = 16; unsigned = False
class UInt16(CheckedInt):   cxx_type = 'uint16_t'; size = 16; unsigned = True
class Int32(CheckedInt):    cxx_type =  'int32_t'; size = 32; unsigned = False
class UInt32(CheckedInt):   cxx_type = 'uint32_t'; size = 32; unsigned = True
class Int64(CheckedInt):    cxx_type =  'int64_t'; size = 64; unsigned = False
class UInt64(CheckedInt):   cxx_type = 'uint64_t'; size = 64; unsigned = True

class Counter(CheckedInt):  cxx_type = 'Counter';  size = 64; unsigned = True
class Tick(CheckedInt):     cxx_type = 'Tick';     size = 64; unsigned = True
class TcpPort(CheckedInt):  cxx_type = 'uint16_t'; size = 16; unsigned = True
class UdpPort(CheckedInt):  cxx_type = 'uint16_t'; size = 16; unsigned = True

class Percent(CheckedInt):  cxx_type = 'int'; min = 0; max = 100
class Cycles(CheckedInt):
    """64-bit unsigned parameter measured in clock cycles."""
    cxx_type = 'Cycles'
    size = 64
    unsigned = True

    def getValue(self):
        # wrap in the internal C++ Cycles type; imported here rather than
        # at module level -- presumably to avoid an import cycle (TODO confirm)
        from m5.internal.core import Cycles
        return Cycles(self.value)
class Float(ParamValue, float):
    """Floating-point parameter (C++ double)."""
    cxx_type = 'double'

    def __init__(self, value):
        if isinstance(value, (int, long, float, NumericParamValue, Float)):
            self.value = float(value)
        else:
            # raise E(msg) is valid on both Python 2 and 3, unlike the
            # old 'raise E, msg' statement syntax
            raise TypeError("Can't convert object of type %s to Float"
                            % type(value).__name__)

    def getValue(self):
        return float(self.value)
class MemorySize(CheckedInt):
    """64-bit memory size; accepts strings like '512MB' via convert."""
    cxx_type = 'uint64_t'
    size = 64
    unsigned = True
    def __init__(self, value):
        if isinstance(value, MemorySize):
            self.value = value.value
        else:
            # Parses human-readable sizes ('4GB', '512kB', ...).
            self.value = convert.toMemorySize(value)
        self._check()
class MemorySize32(CheckedInt):
    """32-bit variant of MemorySize; range-checked to 32 bits."""
    cxx_type = 'uint32_t'
    size = 32
    unsigned = True
    def __init__(self, value):
        # NOTE(review): checks against MemorySize (the 64-bit class), not
        # MemorySize32 — presumably intentional so either type converts,
        # with _check() enforcing the 32-bit range; confirm upstream.
        if isinstance(value, MemorySize):
            self.value = value.value
        else:
            self.value = convert.toMemorySize(value)
        self._check()
class Addr(CheckedInt):
    """64-bit address parameter; accepts ints, other Addrs, or size strings."""
    cxx_type = 'Addr'
    size = 64
    unsigned = True
    def __init__(self, value):
        if isinstance(value, Addr):
            self.value = value.value
        else:
            # Try a memory-size string first ('1GB'); fall back to
            # treating the value as a plain integer.
            try:
                self.value = convert.toMemorySize(value)
            except TypeError:
                self.value = long(value)
        self._check()
    def __add__(self, other):
        # Addr + Addr and Addr + int both yield a plain integer.
        if isinstance(other, Addr):
            return self.value + other.value
        else:
            return self.value + other
class AddrRange(ParamValue):
    """Half-open-looking but actually inclusive [start, end] address range,
    with optional interleaving parameters.

    Accepted constructor forms:
      AddrRange(start=..., end=...)  or  AddrRange(start=..., size=...)
      AddrRange(end_or_size)         -> [0, value-1]
      AddrRange((start, end))        -> tuple/list form
      AddrRange(start, end)
    plus optional intlvHighBit / intlvBits / intlvMatch keywords.
    """
    cxx_type = 'AddrRange'
    def __init__(self, *args, **kwargs):
        # Disable interleaving by default
        self.intlvHighBit = 0
        self.intlvBits = 0
        self.intlvMatch = 0
        def handle_kwargs(self, kwargs):
            # An address range needs to have an upper limit, specified
            # either explicitly with an end, or as an offset using the
            # size keyword.
            if 'end' in kwargs:
                self.end = Addr(kwargs.pop('end'))
            elif 'size' in kwargs:
                self.end = self.start + Addr(kwargs.pop('size')) - 1
            else:
                raise TypeError, "Either end or size must be specified"
            # Now on to the optional bit
            if 'intlvHighBit' in kwargs:
                self.intlvHighBit = int(kwargs.pop('intlvHighBit'))
            if 'intlvBits' in kwargs:
                self.intlvBits = int(kwargs.pop('intlvBits'))
            if 'intlvMatch' in kwargs:
                self.intlvMatch = int(kwargs.pop('intlvMatch'))
        if len(args) == 0:
            self.start = Addr(kwargs.pop('start'))
            handle_kwargs(self, kwargs)
        elif len(args) == 1:
            if kwargs:
                # Single positional arg is the start when keywords follow.
                self.start = Addr(args[0])
                handle_kwargs(self, kwargs)
            elif isinstance(args[0], (list, tuple)):
                self.start = Addr(args[0][0])
                self.end = Addr(args[0][1])
            else:
                # Bare value means a range of that size starting at 0.
                self.start = Addr(0)
                self.end = Addr(args[0]) - 1
        elif len(args) == 2:
            self.start = Addr(args[0])
            self.end = Addr(args[1])
        else:
            raise TypeError, "Too many arguments specified"
        # Anything left in kwargs at this point was unrecognized.
        if kwargs:
            raise TypeError, "Too many keywords: %s" % kwargs.keys()
    def __str__(self):
        return '%s:%s' % (self.start, self.end)
    def size(self):
        # Divide the size by the size of the interleaving slice
        return (long(self.end) - long(self.start) + 1) >> self.intlvBits
    @classmethod
    def cxx_predecls(cls, code):
        Addr.cxx_predecls(code)
        code('#include "base/addr_range.hh"')
    @classmethod
    def swig_predecls(cls, code):
        Addr.swig_predecls(code)
    def getValue(self):
        # Go from the Python class to the wrapped C++ class generated
        # by swig
        from m5.internal.range import AddrRange
        return AddrRange(long(self.start), long(self.end),
                         int(self.intlvHighBit), int(self.intlvBits),
                         int(self.intlvMatch))
# Boolean parameter type. Python doesn't let you subclass bool, since
# it doesn't want to let you create multiple instances of True and
# False. Thus this is a little more complicated than String.
class Bool(ParamValue):
    """Boolean parameter (can't subclass bool, so wraps a value instead)."""
    cxx_type = 'bool'
    def __init__(self, value):
        # Accept 'true'/'false'-style strings via convert; anything
        # convert rejects falls back to plain truthiness.
        try:
            self.value = convert.toBool(value)
        except TypeError:
            self.value = bool(value)
    def getValue(self):
        return bool(self.value)
    def __str__(self):
        return str(self.value)
    # implement truth value testing for Bool parameters so that these params
    # evaluate correctly during the python configuration phase
    def __nonzero__(self):
        return bool(self.value)
    def ini_str(self):
        # .ini files expect lowercase 'true'/'false'.
        if self.value:
            return 'true'
        return 'false'
def IncEthernetAddr(addr, val = 1):
    """Return `addr` ('aa:bb:cc:dd:ee:ff') incremented by `val`, carrying
    overflow into higher bytes.  Asserts if the top byte overflows."""
    # Python 2: map() returns a list, so in-place mutation below is fine.
    bytes = map(lambda x: int(x, 16), addr.split(':'))
    bytes[5] += val
    # Ripple the carry from the least-significant byte upward.
    for i in (5, 4, 3, 2, 1):
        val,rem = divmod(bytes[i], 256)
        bytes[i] = rem
        if val == 0:
            break
        bytes[i - 1] += val
    assert(bytes[0] <= 255)
    return ':'.join(map(lambda x: '%02x' % x, bytes))
# Module-level cursor for auto-assigned MAC addresses.
_NextEthernetAddr = "00:90:00:00:00:01"
def NextEthernetAddr():
    """Return the next unused auto-assigned MAC address and advance the
    global counter (each call yields a distinct address)."""
    global _NextEthernetAddr
    value = _NextEthernetAddr
    _NextEthernetAddr = IncEthernetAddr(_NextEthernetAddr, 1)
    return value
class EthernetAddr(ParamValue):
    """MAC address parameter; accepts 'aa:bb:cc:dd:ee:ff' strings or the
    NextEthernetAddr function itself for deferred auto-assignment."""
    cxx_type = 'Net::EthAddr'
    @classmethod
    def cxx_predecls(cls, code):
        code('#include "base/inet.hh"')
    @classmethod
    def swig_predecls(cls, code):
        code('%include "python/swig/inet.i"')
    def __init__(self, value):
        # Storing the NextEthernetAddr *function* defers address
        # assignment until unproxy() time.
        if value == NextEthernetAddr:
            self.value = value
            return
        if not isinstance(value, str):
            raise TypeError, "expected an ethernet address and didn't get one"
        # Validate the aa:bb:cc:dd:ee:ff format byte by byte.
        bytes = value.split(':')
        if len(bytes) != 6:
            raise TypeError, 'invalid ethernet address %s' % value
        for byte in bytes:
            if not 0 <= int(byte, base=16) <= 0xff:
                raise TypeError, 'invalid ethernet address %s' % value
        self.value = value
    def unproxy(self, base):
        # Resolve a deferred auto-assignment into a concrete address.
        if self.value == NextEthernetAddr:
            return EthernetAddr(self.value())
        return self
    def getValue(self):
        from m5.internal.params import EthAddr
        return EthAddr(self.value)
    def ini_str(self):
        return self.value
# When initializing an IpAddress, pass in an existing IpAddress, a string of
# the form "a.b.c.d", or an integer representing an IP.
class IpAddress(ParamValue):
    """IPv4 address parameter; accepts an IpAddress, 'a.b.c.d' string,
    or a raw integer.  Stored as an int in self.ip."""
    cxx_type = 'Net::IpAddress'
    @classmethod
    def cxx_predecls(cls, code):
        code('#include "base/inet.hh"')
    @classmethod
    def swig_predecls(cls, code):
        code('%include "python/swig/inet.i"')
    def __init__(self, value):
        if isinstance(value, IpAddress):
            self.ip = value.ip
        else:
            # Try dotted-quad parsing first, then fall back to an int.
            try:
                self.ip = convert.toIpAddress(value)
            except TypeError:
                self.ip = long(value)
        self.verifyIp()
    def __str__(self):
        # Render most-significant octet first.
        tup = [(self.ip >> i) & 0xff for i in (24, 16, 8, 0)]
        return '%d.%d.%d.%d' % tuple(tup)
    def __eq__(self, other):
        if isinstance(other, IpAddress):
            return self.ip == other.ip
        elif isinstance(other, str):
            # An unparseable string simply compares unequal.
            try:
                return self.ip == convert.toIpAddress(other)
            except:
                return False
        else:
            return self.ip == other
    def __ne__(self, other):
        return not (self == other)
    def verifyIp(self):
        # Must fit in 32 bits.
        if self.ip < 0 or self.ip >= (1 << 32):
            raise TypeError, "invalid ip address %#08x" % self.ip
    def getValue(self):
        from m5.internal.params import IpAddress
        return IpAddress(self.ip)
# When initializing an IpNetmask, pass in an existing IpNetmask, a string of
# the form "a.b.c.d/n" or "a.b.c.d/e.f.g.h", or an ip and netmask as
# positional or keyword arguments.
class IpNetmask(IpAddress):
cxx_type = 'Net::IpNetmask'
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, *args, **kwargs):
def handle_kwarg(self, kwargs, key, elseVal = None):
if key in kwargs:
setattr(self, key, kwargs.pop(key))
elif elseVal:
setattr(self, key, elseVal)
else:
raise TypeError, "No value set for %s" % key
if len(args) == 0:
handle_kwarg(self, kwargs, 'ip')
handle_kwarg(self, kwargs, 'netmask')
elif len(args) == 1:
if kwargs:
if not 'ip' in kwargs and not 'netmask' in kwargs:
raise TypeError, "Invalid arguments"
handle_kwarg(self, kwargs, 'ip', args[0])
handle_kwarg(self, kwargs, 'netmask', args[0])
elif isinstance(args[0], IpNetmask):
self.ip = args[0].ip
self.netmask = args[0].netmask
else:
(self.ip, self.netmask) = convert.toIpNetmask(args[0])
elif len(args) == 2:
self.ip = args[0]
self.netmask = args[1]
else:
raise TypeError, "Too many arguments specified"
if kwargs:
raise TypeError, "Too many keywords: %s" % kwargs.keys()
self.verify()
def __str__(self):
return "%s/%d" % (super(IpNetmask, self).__str__(), self.netmask)
def __eq__(self, other):
if isinstance(other, IpNetmask):
return self.ip == other.ip and self.netmask == other.netmask
elif isinstance(other, str):
try:
return (self.ip, self.netmask) == convert.toIpNetmask(other)
except:
return False
else:
return False
def verify(self):
self.verifyIp()
if self.netmask < 0 or self.netmask > 32:
raise TypeError, "invalid netmask %d" % netmask
def getValue(self):
from m5.internal.params import IpNetmask
return IpNetmask(self.ip, self.netmask)
# When initializing an IpWithPort, pass in an existing IpWithPort, a string of
# the form "a.b.c.d:p", or an ip and port as positional or keyword arguments.
class IpWithPort(IpAddress):
    """IPv4 address plus TCP/UDP port.  Accepts an existing IpWithPort,
    a string 'a.b.c.d:p', or ip and port as positional/keyword args."""
    cxx_type = 'Net::IpWithPort'
    @classmethod
    def cxx_predecls(cls, code):
        code('#include "base/inet.hh"')
    @classmethod
    def swig_predecls(cls, code):
        code('%include "python/swig/inet.i"')
    def __init__(self, *args, **kwargs):
        # Pop `key` from kwargs, falling back to elseVal when provided.
        def handle_kwarg(self, kwargs, key, elseVal = None):
            if key in kwargs:
                setattr(self, key, kwargs.pop(key))
            elif elseVal:
                setattr(self, key, elseVal)
            else:
                raise TypeError, "No value set for %s" % key
        if len(args) == 0:
            handle_kwarg(self, kwargs, 'ip')
            handle_kwarg(self, kwargs, 'port')
        elif len(args) == 1:
            if kwargs:
                # Positional fills whichever of ip/port wasn't keyworded.
                if not 'ip' in kwargs and not 'port' in kwargs:
                    raise TypeError, "Invalid arguments"
                handle_kwarg(self, kwargs, 'ip', args[0])
                handle_kwarg(self, kwargs, 'port', args[0])
            elif isinstance(args[0], IpWithPort):
                self.ip = args[0].ip
                self.port = args[0].port
            else:
                (self.ip, self.port) = convert.toIpWithPort(args[0])
        elif len(args) == 2:
            self.ip = args[0]
            self.port = args[1]
        else:
            raise TypeError, "Too many arguments specified"
        if kwargs:
            raise TypeError, "Too many keywords: %s" % kwargs.keys()
        self.verify()
    def __str__(self):
        return "%s:%d" % (super(IpWithPort, self).__str__(), self.port)
    def __eq__(self, other):
        if isinstance(other, IpWithPort):
            return self.ip == other.ip and self.port == other.port
        elif isinstance(other, str):
            # Unparseable strings compare unequal rather than raising.
            try:
                return (self.ip, self.port) == convert.toIpWithPort(other)
            except:
                return False
        else:
            return False
    def verify(self):
        self.verifyIp()
        # Port must fit in 16 bits.
        if self.port < 0 or self.port > 0xffff:
            raise TypeError, "invalid port %d" % self.port
    def getValue(self):
        from m5.internal.params import IpWithPort
        return IpWithPort(self.ip, self.port)
# Formats tried (in order) by parse_time() when given a string.
# BUG FIX: the first entry was listed twice; the duplicate only cost a
# redundant strptime attempt, but it has been removed.
time_formats = [ "%a %b %d %H:%M:%S %Z %Y",
                 "%Y/%m/%d %H:%M:%S",
                 "%Y/%m/%d %H:%M",
                 "%Y/%m/%d",
                 "%m/%d/%Y %H:%M:%S",
                 "%m/%d/%Y %H:%M",
                 "%m/%d/%Y",
                 "%m/%d/%y %H:%M:%S",
                 "%m/%d/%y %H:%M",
                 "%m/%d/%y"]
def parse_time(value):
from time import gmtime, strptime, struct_time, time
from datetime import datetime, date
if isinstance(value, struct_time):
return value
if isinstance(value, (int, long)):
return gmtime(value)
if isinstance(value, (datetime, date)):
return value.timetuple()
if isinstance(value, str):
if value in ('Now', 'Today'):
return time.gmtime(time.time())
for format in time_formats:
try:
return strptime(value, format)
except ValueError:
pass
raise ValueError, "Could not parse '%s' as a time" % value
class Time(ParamValue):
    """Wall-clock time parameter, marshalled to a C `struct tm`."""
    cxx_type = 'tm'
    @classmethod
    def cxx_predecls(cls, code):
        code('#include <time.h>')
    @classmethod
    def swig_predecls(cls, code):
        code('%include "python/swig/time.i"')
    def __init__(self, value):
        # Stored as a time.struct_time; parse_time handles many formats.
        self.value = parse_time(value)
    def getValue(self):
        # Translate Python's struct_time conventions into C's struct tm.
        from m5.internal.params import tm
        c_time = tm()
        py_time = self.value
        # UNIX is years since 1900
        c_time.tm_year = py_time.tm_year - 1900;
        # Python starts at 1, UNIX starts at 0
        c_time.tm_mon =  py_time.tm_mon - 1;
        c_time.tm_mday = py_time.tm_mday;
        c_time.tm_hour = py_time.tm_hour;
        c_time.tm_min = py_time.tm_min;
        c_time.tm_sec = py_time.tm_sec;
        # Python has 0 as Monday, UNIX is 0 as sunday
        c_time.tm_wday = py_time.tm_wday + 1
        if c_time.tm_wday > 6:
            c_time.tm_wday -= 7;
        # Python starts at 1, Unix starts at 0
        c_time.tm_yday = py_time.tm_yday - 1;
        return c_time
    def __str__(self):
        return time.asctime(self.value)
    def ini_str(self):
        return str(self)
    def get_config_as_dict(self):
        return str(self)
# Enumerated types are a little more complex. The user specifies the
# type as Enum(foo) where foo is either a list or dictionary of
# alternatives (typically strings, but not necessarily so). (In the
# long run, the integer value of the parameter will be the list index
# or the corresponding dictionary value. For now, since we only check
# that the alternative is valid and then spit it into a .ini file,
# there's not much point in using the dictionary.)
# What Enum() must do is generate a new type encapsulating the
# provided list/dictionary so that specific values of the parameter
# can be instances of that type. We define two hidden internal
# classes (_ListEnum and _DictEnum) to serve as base classes, then
# derive the new type from the appropriate base class on the fly.
allEnums = {}
# Metaclass for Enum types
# Metaclass for Enum types
class MetaEnum(MetaParamValue):
    """Registers each Enum subclass in allEnums and normalizes its
    'map'/'vals' attributes so both are always available; also knows how
    to emit the matching C++ enum declaration/definition and SWIG stub."""
    def __new__(mcls, name, bases, dict):
        # Enum type names must be globally unique (they become C++ types).
        assert name not in allEnums
        cls = super(MetaEnum, mcls).__new__(mcls, name, bases, dict)
        allEnums[name] = cls
        return cls
    def __init__(cls, name, bases, init_dict):
        # A subclass supplies either 'map' (str -> int) or 'vals' (list);
        # whichever is given, derive the other.
        if init_dict.has_key('map'):
            if not isinstance(cls.map, dict):
                raise TypeError, "Enum-derived class attribute 'map' " \
                      "must be of type dict"
            # build list of value strings from map
            cls.vals = cls.map.keys()
            cls.vals.sort()
        elif init_dict.has_key('vals'):
            if not isinstance(cls.vals, list):
                raise TypeError, "Enum-derived class attribute 'vals' " \
                      "must be of type list"
            # build string->value map from vals sequence
            cls.map = {}
            for idx,val in enumerate(cls.vals):
                cls.map[val] = idx
        else:
            raise TypeError, "Enum-derived class must define "\
                  "attribute 'map' or 'vals'"
        cls.cxx_type = 'Enums::%s' % name
        super(MetaEnum, cls).__init__(name, bases, init_dict)
    # Generate C++ class declaration for this enum type.
    # Note that we wrap the enum in a class/struct to act as a namespace,
    # so that the enum strings can be brief w/o worrying about collisions.
    def cxx_decl(cls, code):
        name = cls.__name__
        code('''\
#ifndef __ENUM__${name}__
#define __ENUM__${name}__
namespace Enums {
enum $name {
''')
        code.indent(2)
        for val in cls.vals:
            code('$val = ${{cls.map[val]}},')
        code('Num_$name = ${{len(cls.vals)}}')
        code.dedent(2)
        code('''\
};
extern const char *${name}Strings[Num_${name}];
}
#endif // __ENUM__${name}__
''')
    # Generate the C++ definition: the string table for enum names.
    def cxx_def(cls, code):
        name = cls.__name__
        code('''\
#include "enums/$name.hh"
namespace Enums {
const char *${name}Strings[Num_${name}] =
{
''')
        code.indent(2)
        for val in cls.vals:
            code('"$val",')
        code.dedent(2)
        code('''
};
} // namespace Enums
''')
    # Generate the SWIG interface module wrapping the C++ enum header.
    def swig_decl(cls, code):
        name = cls.__name__
        code('''\
%module(package="m5.internal") enum_$name
%{
#include "enums/$name.hh"
%}
%include "enums/$name.hh"
''')
# Base class for enum types.
# Base class for enum types.
class Enum(ParamValue):
    """Enumerated parameter; MetaEnum fills in 'map' from 'vals' (or
    vice versa) when a subclass is defined."""
    __metaclass__ = MetaEnum
    vals = []
    def __init__(self, value):
        # Value must be one of the declared alternatives.
        if value not in self.map:
            raise TypeError, "Enum param got bad value '%s' (not in %s)" \
                  % (value, self.vals)
        self.value = value
    @classmethod
    def cxx_predecls(cls, code):
        code('#include "enums/$0.hh"', cls.__name__)
    @classmethod
    def swig_predecls(cls, code):
        code('%import "python/m5/internal/enum_$0.i"', cls.__name__)
    def getValue(self):
        # C++ side sees the integer index, not the string.
        return int(self.map[self.value])
    def __str__(self):
        return self.value
# how big does a rounding error need to be before we warn about it?
frequency_tolerance = 0.001 # 0.1%
class TickParamValue(NumericParamValue):
    """Common base for tick-denominated params (Latency/Frequency/Clock)."""
    cxx_type = 'Tick'
    @classmethod
    def cxx_predecls(cls, code):
        code('#include "base/types.hh"')
    @classmethod
    def swig_predecls(cls, code):
        code('%import "stdint.i"')
        code('%import "base/types.hh"')
    def getValue(self):
        return long(self.value)
class Latency(TickParamValue):
    """A time duration.  self.ticks is True when the value is already in
    ticks (e.g. '100t'); otherwise self.value is seconds."""
    def __init__(self, value):
        if isinstance(value, (Latency, Clock)):
            self.ticks = value.ticks
            self.value = value.value
        elif isinstance(value, Frequency):
            # A frequency converts to its reciprocal period.
            self.ticks = value.ticks
            self.value = 1.0 / value.value
        elif value.endswith('t'):
            # Trailing 't' means the value is a raw tick count.
            self.ticks = True
            self.value = int(value[:-1])
        else:
            self.ticks = False
            self.value = convert.toLatency(value)
    def __getattr__(self, attr):
        # Allow param.latency / param.period / param.frequency views.
        if attr in ('latency', 'period'):
            return self
        if attr == 'frequency':
            return Frequency(self)
        raise AttributeError, "Latency object has no attribute '%s'" % attr
    def getValue(self):
        if self.ticks or self.value == 0:
            value = self.value
        else:
            value = ticks.fromSeconds(self.value)
        return long(value)
    # convert latency to ticks
    def ini_str(self):
        return '%d' % self.getValue()
class Frequency(TickParamValue):
    """A rate (Hz); stored as a frequency but convertible to Latency."""
    def __init__(self, value):
        if isinstance(value, (Latency, Clock)):
            # Reciprocal of a period; guard against a zero latency.
            if value.value == 0:
                self.value = 0
            else:
                self.value = 1.0 / value.value
            self.ticks = value.ticks
        elif isinstance(value, Frequency):
            self.value = value.value
            self.ticks = value.ticks
        else:
            self.ticks = False
            self.value = convert.toFrequency(value)
    def __getattr__(self, attr):
        # Allow param.frequency / param.latency / param.period views.
        if attr == 'frequency':
            return self
        if attr in ('latency', 'period'):
            return Latency(self)
        raise AttributeError, "Frequency object has no attribute '%s'" % attr
    # convert latency to ticks
    def getValue(self):
        if self.ticks or self.value == 0:
            value = self.value
        else:
            value = ticks.fromSeconds(1.0 / self.value)
        return long(value)
    def ini_str(self):
        return '%d' % self.getValue()
# A generic Frequency and/or Latency value. Value is stored as a
# latency, just like Latency and Frequency.
class Clock(TickParamValue):
def __init__(self, value):
if isinstance(value, (Latency, Clock)):
self.ticks = value.ticks
self.value = value.value
elif isinstance(value, Frequency):
self.ticks = value.ticks
self.value = 1.0 / value.value
elif value.endswith('t'):
self.ticks = True
self.value = int(value[:-1])
else:
self.ticks = False
self.value = convert.anyToLatency(value)
def __getattr__(self, attr):
if attr == 'frequency':
return Frequency(self)
if attr in ('latency', 'period'):
return Latency(self)
raise AttributeError, "Frequency object has no attribute '%s'" % attr
def getValue(self):
return self.period.getValue()
def ini_str(self):
return self.period.ini_str()
class Voltage(float,ParamValue):
    """Voltage parameter; the float value IS the voltage in volts."""
    cxx_type = 'double'
    def __new__(cls, value):
        # convert to voltage
        val = convert.toVoltage(value)
        return super(cls, Voltage).__new__(cls, val)
    def __str__(self):
        # BUG FIX: there is no `self.val` attribute (the instance itself
        # is the float), so the old str(self.val) raised AttributeError.
        return str(float(self))
    def getValue(self):
        value = float(self)
        return value
    def ini_str(self):
        return '%f' % self.getValue()
class NetworkBandwidth(float,ParamValue):
    """Network bandwidth; stored as bits per second (the float value)."""
    cxx_type = 'float'
    def __new__(cls, value):
        # convert to bits per second
        val = convert.toNetworkBandwidth(value)
        return super(cls, NetworkBandwidth).__new__(cls, val)
    def __str__(self):
        # BUG FIX: there is no `self.val` attribute (the instance itself
        # is the float), so the old str(self.val) raised AttributeError.
        return str(float(self))
    def getValue(self):
        # convert to seconds per byte
        value = 8.0 / float(self)
        # convert to ticks per byte
        value = ticks.fromSeconds(value)
        return float(value)
    def ini_str(self):
        return '%f' % self.getValue()
class MemoryBandwidth(float,ParamValue):
    """Memory bandwidth; stored as bytes per second (the float value)."""
    cxx_type = 'float'
    def __new__(cls, value):
        # convert to bytes per second
        val = convert.toMemoryBandwidth(value)
        return super(cls, MemoryBandwidth).__new__(cls, val)
    def __str__(self):
        # BUG FIX: there is no `self.val` attribute (the instance itself
        # is the float), so the old str(self.val) raised AttributeError.
        return str(float(self))
    def getValue(self):
        # convert to seconds per byte
        value = float(self)
        if value:
            value = 1.0 / float(self)
        # convert to ticks per byte
        value = ticks.fromSeconds(value)
        return float(value)
    def ini_str(self):
        return '%f' % self.getValue()
#
# "Constants"... handy aliases for various values.
#
# Special class for NULL pointers. Note the special check in
# make_param_value() above that lets these be assigned where a
# SimObject is required.
# only one copy of a particular node
class NullSimObject(object):
    """Singleton stand-in for a NULL SimObject pointer; every SimObject
    hook it implements is a no-op."""
    __metaclass__ = Singleton
    # NOTE(review): `cls` here is really the instance (this is an
    # instance method); calling NULL() returns NULL itself — confirm
    # that is the intended behavior.
    def __call__(cls):
        return cls
    def _instantiate(self, parent = None, path = ''):
        pass
    def ini_str(self):
        return 'Null'
    def unproxy(self, base):
        return self
    def set_path(self, parent, name):
        pass
    def __str__(self):
        return 'Null'
    def getValue(self):
        return None
# The only instance you'll ever need...
NULL = NullSimObject()
def isNullPointer(value):
    # True when `value` is the NULL SimObject placeholder.
    return isinstance(value, NullSimObject)
# Some memory range specifications use this as a default upper bound.
MaxAddr = Addr.max
MaxTick = Tick.max
AllMemory = AddrRange(0, MaxAddr)
#####################################################################
#
# Port objects
#
# Ports are used to interconnect objects in the memory system.
#
#####################################################################
# Port reference: encapsulates a reference to a particular port on a
# particular SimObject.
class PortRef(object):
    """A reference to one (scalar) port on one SimObject instance; pairs
    of PortRefs are connected symmetrically and later realized in C++."""
    def __init__(self, simobj, name, role):
        assert(isSimObject(simobj) or isSimObjectClass(simobj))
        self.simobj = simobj
        self.name = name
        self.role = role          # 'MASTER' or 'SLAVE'
        self.peer = None   # not associated with another port yet
        self.ccConnected = False  # C++ port connection done?
        self.index = -1  # always -1 for non-vector ports
    def __str__(self):
        return '%s.%s' % (self.simobj, self.name)
    def __len__(self):
        # Return the number of connected ports, i.e. 0 is we have no
        # peer and 1 if we do.
        return int(self.peer != None)
    # for config.ini, print peer's name (not ours)
    def ini_str(self):
        return str(self.peer)
    # for config.json
    def get_config_as_dict(self):
        return {'role' : self.role, 'peer' : str(self.peer)}
    def __getattr__(self, attr):
        if attr == 'peerObj':
            # shorthand for proxies
            return self.peer.simobj
        raise AttributeError, "'%s' object has no attribute '%s'" % \
              (self.__class__.__name__, attr)
    # Full connection is symmetric (both ways).  Called via
    # SimObject.__setattr__ as a result of a port assignment, e.g.,
    # "obj1.portA = obj2.portB", or via VectorPortElementRef.__setitem__,
    # e.g., "obj1.portA[3] = obj2.portB".
    def connect(self, other):
        if isinstance(other, VectorPortRef):
            # reference to plain VectorPort is implicit append
            other = other._get_next()
        # Refuse to silently re-wire an already-connected port.
        if self.peer and not proxy.isproxy(self.peer):
            fatal("Port %s is already connected to %s, cannot connect %s\n",
                  self, self.peer, other);
        self.peer = other
        if proxy.isproxy(other):
            other.set_param_desc(PortParamDesc())
        elif isinstance(other, PortRef):
            # Mirror the connection on the peer (symmetric linkage).
            if other.peer is not self:
                other.connect(self)
        else:
            raise TypeError, \
                  "assigning non-port reference '%s' to port '%s'" \
                  % (other, self)
    def clone(self, simobj, memo):
        # memo keeps clones shared so a pair is only cloned once.
        if memo.has_key(self):
            return memo[self]
        newRef = copy.copy(self)
        memo[self] = newRef
        newRef.simobj = simobj
        assert(isSimObject(newRef.simobj))
        if self.peer and not proxy.isproxy(self.peer):
            peerObj = self.peer.simobj(_memo=memo)
            newRef.peer = self.peer.clone(peerObj, memo)
            assert(not isinstance(newRef.peer, VectorPortRef))
        return newRef
    def unproxy(self, simobj):
        # Resolve a proxy peer into a concrete PortRef and connect it.
        assert(simobj is self.simobj)
        if proxy.isproxy(self.peer):
            try:
                realPeer = self.peer.unproxy(self.simobj)
            except:
                print "Error in unproxying port '%s' of %s" % \
                      (self.name, self.simobj.path())
                raise
            self.connect(realPeer)
    # Call C++ to create corresponding port connection between C++ objects
    def ccConnect(self):
        from m5.internal.pyobject import connectPorts
        if self.role == 'SLAVE':
            # do nothing and let the master take care of it
            return
        if self.ccConnected: # already done this
            return
        peer = self.peer
        if not self.peer: # nothing to connect to
            return
        # check that we connect a master to a slave
        if self.role == peer.role:
            raise TypeError, \
                "cannot connect '%s' and '%s' due to identical role '%s'" \
                % (peer, self, self.role)
        try:
            # self is always the master and peer the slave
            connectPorts(self.simobj.getCCObject(), self.name, self.index,
                         peer.simobj.getCCObject(), peer.name, peer.index)
        except:
            print "Error connecting port %s.%s to %s.%s" % \
                  (self.simobj.path(), self.name,
                   peer.simobj.path(), peer.name)
            raise
        # Mark both ends so neither side retries the C++ connection.
        self.ccConnected = True
        peer.ccConnected = True
# A reference to an individual element of a VectorPort... much like a
# PortRef, but has an index.
# A reference to an individual element of a VectorPort... much like a
# PortRef, but has an index.
class VectorPortElementRef(PortRef):
    def __init__(self, simobj, name, role, index):
        PortRef.__init__(self, simobj, name, role)
        # Position of this element within the owning vector port.
        self.index = index
    def __str__(self):
        return '%s.%s[%d]' % (self.simobj, self.name, self.index)
# A reference to a complete vector-valued port (not just a single element).
# Can be indexed to retrieve individual VectorPortElementRef instances.
# A reference to a complete vector-valued port (not just a single element).
# Can be indexed to retrieve individual VectorPortElementRef instances.
class VectorPortRef(object):
    def __init__(self, simobj, name, role):
        assert(isSimObject(simobj) or isSimObjectClass(simobj))
        self.simobj = simobj
        self.name = name
        self.role = role
        # Lazily-grown list of VectorPortElementRef children.
        self.elements = []
    def __str__(self):
        return '%s.%s[:]' % (self.simobj, self.name)
    def __len__(self):
        # Return the number of connected peers, corresponding the the
        # length of the elements.
        return len(self.elements)
    # for config.ini, print peer's name (not ours)
    def ini_str(self):
        return ' '.join([el.ini_str() for el in self.elements])
    # for config.json
    def get_config_as_dict(self):
        return {'role' : self.role,
                'peer' : [el.ini_str() for el in self.elements]}
    def __getitem__(self, key):
        if not isinstance(key, int):
            raise TypeError, "VectorPort index must be integer"
        if key >= len(self.elements):
            # need to extend list
            ext = [VectorPortElementRef(self.simobj, self.name, self.role, i)
                   for i in range(len(self.elements), key+1)]
            self.elements.extend(ext)
        return self.elements[key]
    def _get_next(self):
        # Indexing one past the end auto-creates the next element.
        return self[len(self.elements)]
    def __setitem__(self, key, value):
        if not isinstance(key, int):
            raise TypeError, "VectorPort index must be integer"
        self[key].connect(value)
    def connect(self, other):
        if isinstance(other, (list, tuple)):
            # Assign list of port refs to vector port.
            # For now, append them... not sure if that's the right semantics
            # or if it should replace the current vector.
            for ref in other:
                self._get_next().connect(ref)
        else:
            # scalar assignment to plain VectorPort is implicit append
            self._get_next().connect(other)
    def clone(self, simobj, memo):
        # memo keeps clones shared across the object graph.
        if memo.has_key(self):
            return memo[self]
        newRef = copy.copy(self)
        memo[self] = newRef
        newRef.simobj = simobj
        assert(isSimObject(newRef.simobj))
        newRef.elements = [el.clone(simobj, memo) for el in self.elements]
        return newRef
    def unproxy(self, simobj):
        # Delegate to each element.
        [el.unproxy(simobj) for el in self.elements]
    def ccConnect(self):
        [el.ccConnect() for el in self.elements]
# Port description object. Like a ParamDesc object, this represents a
# logical port in the SimObject class, not a particular port on a
# SimObject instance. The latter are represented by PortRef objects.
# Port description object.  Like a ParamDesc object, this represents a
# logical port in the SimObject class, not a particular port on a
# SimObject instance.  The latter are represented by PortRef objects.
class Port(object):
    # Generate a PortRef for this port on the given SimObject with the
    # given name
    def makeRef(self, simobj):
        return PortRef(simobj, self.name, self.role)
    # Connect an instance of this port (on the given SimObject with
    # the given name) with the port described by the supplied PortRef
    def connect(self, simobj, ref):
        self.makeRef(simobj).connect(ref)
    # No need for any pre-declarations at the moment as we merely rely
    # on an unsigned int.
    def cxx_predecls(self, code):
        pass
    # Declare an unsigned int with the same name as the port, that
    # will eventually hold the number of connected ports (and thus the
    # number of elements for a VectorPort).
    def cxx_decl(self, code):
        code('unsigned int port_${{self.name}}_connection_count;')
class MasterPort(Port):
    # MasterPort("description") — scalar port with the MASTER role.
    def __init__(self, *args):
        if len(args) == 1:
            self.desc = args[0]
            self.role = 'MASTER'
        else:
            raise TypeError, 'wrong number of arguments'
class SlavePort(Port):
    # SlavePort("description") — scalar port with the SLAVE role.
    def __init__(self, *args):
        if len(args) == 1:
            self.desc = args[0]
            self.role = 'SLAVE'
        else:
            raise TypeError, 'wrong number of arguments'
# VectorPort description object. Like Port, but represents a vector
# of connections (e.g., as on a Bus).
# VectorPort description object.  Like Port, but represents a vector
# of connections (e.g., as on a Bus).
class VectorPort(Port):
    def __init__(self, *args):
        # Subclasses set desc/role before chaining here; this only marks
        # the port as vector-valued.
        self.isVec = True
    def makeRef(self, simobj):
        return VectorPortRef(simobj, self.name, self.role)
class VectorMasterPort(VectorPort):
    # VectorMasterPort("description") — vector port with the MASTER role.
    def __init__(self, *args):
        if len(args) == 1:
            self.desc = args[0]
            self.role = 'MASTER'
            VectorPort.__init__(self, *args)
        else:
            raise TypeError, 'wrong number of arguments'
class VectorSlavePort(VectorPort):
    # VectorSlavePort("description") — vector port with the SLAVE role.
    def __init__(self, *args):
        if len(args) == 1:
            self.desc = args[0]
            self.role = 'SLAVE'
            VectorPort.__init__(self, *args)
        else:
            raise TypeError, 'wrong number of arguments'
# 'Fake' ParamDesc for Port references to assign to the _pdesc slot of
# proxy objects (via set_param_desc()) so that proxy error messages
# make sense.
# 'Fake' ParamDesc for Port references to assign to the _pdesc slot of
# proxy objects (via set_param_desc()) so that proxy error messages
# make sense.
class PortParamDesc(object):
    __metaclass__ = Singleton
    ptype_str = 'Port'
    ptype = Port
baseEnums = allEnums.copy()
baseParams = allParams.copy()
def clear():
    # Reset the enum/param registries to the snapshots taken at import
    # time, discarding anything registered since.
    global allEnums, allParams
    allEnums = baseEnums.copy()
    allParams = baseParams.copy()
__all__ = ['Param', 'VectorParam',
'Enum', 'Bool', 'String', 'Float',
'Int', 'Unsigned', 'Int8', 'UInt8', 'Int16', 'UInt16',
'Int32', 'UInt32', 'Int64', 'UInt64',
'Counter', 'Addr', 'Tick', 'Percent',
'TcpPort', 'UdpPort', 'EthernetAddr',
'IpAddress', 'IpNetmask', 'IpWithPort',
'MemorySize', 'MemorySize32',
'Latency', 'Frequency', 'Clock', 'Voltage',
'NetworkBandwidth', 'MemoryBandwidth',
'AddrRange',
'MaxAddr', 'MaxTick', 'AllMemory',
'Time',
'NextEthernetAddr', 'NULL',
'MasterPort', 'SlavePort',
'VectorMasterPort', 'VectorSlavePort']
import SimObject
# (stray '|' removed — artifact of concatenating two source files)
#!/usr/bin/env python3
import gi
import os
import sys
import uuid
import json
import signal
import socket
import warnings
import threading
from threading import Thread
gi.require_version("Notify", "0.7")
gi.require_version('Gtk', '3.0')
from gi.repository import Notify, GObject
def exit():
    """Stop the background listener/discovery threads (if they were
    started) and terminate the process.

    Note: intentionally shadows the builtin exit(); callers rely on the
    thread cleanup happening before sys.exit().
    """
    # The thread globals are only bound after startup; a plain reference
    # here raised NameError when exit() ran early (e.g. from a socket
    # bind failure during construction).  globals().get() is safe both
    # before and after they exist.
    listener = globals().get("listenerThread")
    if listener:
        listener.stop()
    recv = globals().get("discoveryRecv")
    if recv:
        recv.stop()
    send = globals().get("discoverySend")
    if send:
        send.stop()
    sys.exit()
def clearValidDevices():
    """Reset devices.json to an empty JSON object.

    Creates the data directory first, so this also works on a first run
    (the original raised FileNotFoundError when
    ~/.local/share/LinuxNotifier did not exist yet).
    """
    directory = os.path.expanduser("~/.local/share/LinuxNotifier")
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, "devices.json"), "w+") as deviceFile:
        deviceFile.write("{}")
def readValidDevices():
    """Load the paired-device list from devices.json.

    Returns a list of device objects, or None when the file is missing
    (an empty file is created) or does not contain the expected
    parallel "name"/"address"/"pin" arrays.
    """
    path = os.path.expanduser("~/.local/share/LinuxNotifier/devices.json")
    try:
        with open(path, "r") as deviceFile:
            jsonObject = json.load(deviceFile)
        try:
            # The file stores three parallel arrays; zip pairs them up
            # (the original bare `except` hid real errors — catch only
            # the lookup failures a malformed/empty file can produce).
            return [device(n, a, p)
                    for n, a, p in zip(jsonObject["name"],
                                       jsonObject["address"],
                                       jsonObject["pin"])]
        except (KeyError, TypeError):
            print("Error opening devices file (maybe it doesn't exist?).")
            return
    except FileNotFoundError:
        print("file not found error")
        os.makedirs(os.path.expanduser("~/.local/share/LinuxNotifier"))
        with open(path, "w+") as deviceFile:
            deviceFile.write("{}")
        return
def writeValidDevices(deviceList):
    """Persist the paired-device list to devices.json as three parallel
    arrays keyed "name", "address" and "pin"."""
    # Build the parallel arrays in one pass per field; key insertion
    # order matches the on-disk format produced previously.
    jsonObject = {
        "name": [entry.name for entry in deviceList],
        "address": [entry.address for entry in deviceList],
        "pin": [entry.pin for entry in deviceList],
    }
    output = json.dumps(jsonObject)
    with open(os.path.expanduser("~/.local/share/LinuxNotifier/devices.json"), "w+") as outputFile:
        outputFile.write(output)
class configFile:
    """Access to the notification-format template in config.conf; creates
    the file with a default template when missing."""
    def __init__(self):
        # Snapshot of the file's mtime, used by callers to detect edits.
        self.modificationDate = self.getModificationDate()
        # Template written when no config exists yet.
        self.defaultConfig = "[Device]@[App] app: [NewLine][Title][NewLine][Data]"
    def createConfig(self):
        # Write the default template; on failure shut the whole daemon
        # down via the module-level exit() helper.
        try:
            configFile = open(os.path.expanduser("~/.local/share/LinuxNotifier/config.conf"), "w+")
            configFile.write(self.defaultConfig)
            configFile.close()
        except OSError:
            exit()
    def getConfig(self):
        # Return the template, creating the file with defaults if absent.
        try:
            configFile = open(os.path.expanduser("~/.local/share/LinuxNotifier/config.conf"), "r")
            returnString = configFile.read()
            configFile.close()
            return returnString
        except OSError:
            self.createConfig()
            return self.defaultConfig
    def getModificationDate(self):
        # mtime of the config file; creates it first when missing, and
        # exits the daemon when it cannot be created at all.
        try:
            return os.path.getmtime(os.path.expanduser("~/.local/share/LinuxNotifier/config.conf"))
        except OSError:
            try:
                self.createConfig()
                return os.path.getmtime(os.path.expanduser("~/.local/share/LinuxNotifier/config.conf"))
            except OSError:
                exit()
class device():
    """Record of a paired device: display name, network address and PIN.

    Serialized to/from devices.json by writeValidDevices() /
    readValidDevices().
    """

    def __init__(self, name, address, pin):
        # Stored verbatim; no validation is performed here.
        self.name, self.address, self.pin = name, address, pin
class authThread(threading.Thread):
    """Shows a pairing-request notification and reports the user's
    accept/deny decision back to the requesting device over
    `connection` as an {"reason": "authresponse", "response": "0"|"1"}
    payload."""
    def __init__(self, name, address, pin, deviceList, connection):
        Thread.__init__(self)
        self.name = name
        self.address = address
        self.pin = pin
        self.deviceList = deviceList
        self.connection = connection
    def run(self):
        # Runs its own GLib main loop so the notification's action
        # callbacks fire on this thread.
        self.loop = GObject.MainLoop()
        self.authNotification = Notify.Notification.new("Auth request",
            ''.join(("From ", self.name, " (", self.address, "), with PIN: ", self.pin, ".")))
        self.authNotification.set_timeout(Notify.EXPIRES_NEVER)
        self.authNotification.add_action("accept", "Accept", self.acceptAuth, None)
        self.authNotification.add_action("deny", "Deny", self.denyAuth, None)
        self.authNotification.connect("closed", self.denyAuthNoClick, self)
        self.authNotification.show()
        # Auto-deny if the user ignores the request for 10 seconds.
        GObject.timeout_add(10000, self.denyAuthNoClick)
        self.loop.run()
    def acceptAuth(self, notification, action, data):
        print("accepted")
        newDevice = device(self.name,
                           self.address,
                           self.pin)
        # Only store the device if its address is not already paired.
        self.shouldAdd = True
        for currentDevice in self.deviceList:
            if(newDevice.address == currentDevice.address):
                self.shouldAdd = False
        if(self.shouldAdd):
            self.deviceList.append(newDevice)
            writeValidDevices(self.deviceList)
        self._respond("1")
        notification.close()
        self.loop.quit()
    def denyAuth(self, notification, action=None, data=None):
        # BUG FIX: Notification.add_action invokes its callback as
        # (notification, action, user_data); the old two-argument
        # signature raised TypeError when "Deny" was clicked.
        self._respond("0")
        notification.close()
        self.loop.quit()
    def denyAuthNoClick(self, *args):
        # BUG FIX: this is used both as a GLib timeout callback (called
        # with no extra args) and as the "closed" signal handler (called
        # with the notification and the connect() user_data); *args
        # accepts either calling convention.
        self._respond("0")
        self.authNotification.close()
        self.loop.quit()
    def denyAuthTimeout(self, param):
        self.denyAuthNoClick()
    def _respond(self, response):
        # Send the verdict ("1" accept / "0" deny) to the peer.  The
        # payload is the str() of a dict, matching the wire format the
        # Android client expects.
        dataToSend = {
            "reason": "authresponse",
            "response": response
        }
        self.connection.send(str.encode(str(dataToSend)))
class UDPSender():
    """Sends discovery datagrams (UDP port 5005) announcing this desktop.

    Fix: removed the stray Thread.__init__(self) call — UDPSender does not
    inherit from threading.Thread, so the call only polluted the instance
    with unused Thread state.
    """

    def __init__(self):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # TTL 1 keeps the multicast announcement on the local network segment
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)

    def sendData(self, address):
        """Send a discovery datagram describing this host to address:5005."""
        dataToSend = {
            "reason": "linux notifier discovery",
            "from": "desktop",
            "name": socket.gethostname(),
            "mac": listenerThread.getMacAddress()
        }
        print(' '.join(("Sending to", address, "message", str(dataToSend))))
        self.socket.sendto(str.encode(str(dataToSend)), (address, 5005))

    def stop(self):
        """Release the UDP socket."""
        self.socket.close()
class UDPReceiver(Thread):
    """Daemon thread answering LinuxNotifier discovery broadcasts (UDP 5005)."""

    def __init__(self):
        Thread.__init__(self)
        self.daemon = True
        self.mustContinue = True
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.socket.bind(('', 5005))
        except socket.error:
            print("Can't create UDP socket on port 5005.")
            exit()

    def run(self):
        # receive loop: reply to every discovery datagram coming from Android
        while self.mustContinue:
            data, address = self.socket.recvfrom(1024)
            if not data:
                continue
            message = json.loads(data.decode("utf-8"))
            print(message)
            came_from_android = (message["reason"] == "linux notifier discovery"
                                 and message["from"] == "android")
            if came_from_android:
                discoverySend.sendData(str(address[0]))

    def stop(self):
        """Stop the receive loop and release the socket."""
        self.mustContinue = False
        self.socket.close()
class TCPReceiver(Thread):
    """Daemon thread accepting TCP connections on port 5005.

    Handles three message kinds from phones: "authentificate" (spawns an
    authThread), "notification" (shows a desktop notification for known
    devices) and "revoke authentification".

    Fixes over the original:
    - the revoke branch removed the device from the module-level
      `validDevices` list instead of `self.validDevices`, so revocation
      never affected the list actually consulted here;
    - getMacAddress produced malformed output when hex(uuid.getnode())
      had fewer than 12 digits (hex() drops leading zeros) — zfill(12)
      keeps the AA:BB:CC:DD:EE:FF grouping correct;
    - buildNotification now records the new mtime after reloading the
      template, so the config is re-read once per change instead of on
      every call after the first change.
    """

    def __init__(self):
        Thread.__init__(self)
        self.daemon = True
        self.mustContinue = True
        self.validDevices = []
        # notification template, re-read whenever the config file changes
        self.notificationConfig = configFile()
        self.notificationStringOriginal = self.notificationConfig.getConfig()
        self.notificationConfigModDate = self.notificationConfig.getModificationDate()
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.bind(('', 5005))
            self.socket.listen(1)
            self.socket.setblocking(True)
        except socket.error:
            print("Can't create TCP socket on port 5005.")
            exit()

    def run(self):
        while self.mustContinue:
            print("Listening...")
            connection, address = self.socket.accept()
            data = connection.recv(1024)
            if data:
                print("Got data, message: " + data.decode("utf-8") + ".")
                message = json.loads(data.decode("utf-8"))
                if message["reason"] == "authentificate":
                    print("auth")
                    # authThread owns the connection: it sends the response
                    newAuthThread = authThread(message["name"], str(address[0]),
                                               message["pin"], self.validDevices,
                                               connection)
                    newAuthThread.start()
                elif message["reason"] == "notification":
                    print("Notification from " + str(address[0]))
                    for currentDevice in self.validDevices:
                        if currentDevice.address == str(address[0]):
                            self.buildNotification(currentDevice.name,
                                                   message["app name"],
                                                   message["title"],
                                                   message["data"])
                            break
                    connection.close()
                elif message["reason"] == "revoke authentification":
                    print("Revoking auth for " + str(address[0]))
                    for currentDevice in self.validDevices:
                        if currentDevice.address == str(address[0]):
                            # fix: remove from this thread's own list, not the
                            # module-level snapshot
                            self.validDevices.remove(currentDevice)
                            break
                    connection.close()
                else:
                    connection.close()

    def stop(self):
        """Stop the accept loop and release the listening socket."""
        self.mustContinue = False
        self.socket.close()

    def addValidDevice(self, newDevice):
        """Append newDevice unless a device with the same address is known."""
        self.shouldAdd = True
        for currentDevice in self.validDevices:
            if newDevice.address == currentDevice.address:
                self.shouldAdd = False
        if self.shouldAdd:
            print("New valid device: " + newDevice.name)
            self.validDevices.append(newDevice)

    def getMacAddress(self):
        """Return this host's MAC address formatted AA:BB:CC:DD:EE:FF."""
        # zfill(12) restores leading zeros that hex() strips
        macNum = hex(uuid.getnode()).replace("0x", "").zfill(12).upper()
        mac = ":".join(macNum[i: i + 2] for i in range(0, 12, 2))
        return mac

    def buildNotification(self, deviceName, appName, title, data):
        """Expand the user's template and show a desktop notification."""
        currentModDate = self.notificationConfig.getModificationDate()
        if self.notificationConfigModDate != currentModDate:
            self.notificationStringOriginal = self.notificationConfig.getConfig()
            self.notificationConfigModDate = currentModDate
        notificationString = self.notificationStringOriginal
        notificationString = notificationString.replace("[NewLine]", os.linesep)
        notificationString = notificationString.replace("[Device]", deviceName)
        notificationString = notificationString.replace("[App]", appName)
        notificationString = notificationString.replace("[Title]", title)
        notificationString = notificationString.replace("[Data]", data)
        newNotification = Notify.Notification.new("LinuxNotifier", notificationString, "dialog-information")
        newNotification.show()
# Entry point: "clear" wipes the paired-device list, otherwise the TCP
# listener and UDP discovery sender/receiver are started.
if(__name__ == "__main__"):
    warnings.simplefilter("ignore")
    if(len(sys.argv) > 1 and sys.argv[1] == "clear"):
        clearValidDevices()
    else:
        try:
            Notify.init("LinuxNotifier")
            # the TCP listener must exist before discovery replies run:
            # UDPSender.sendData reads listenerThread.getMacAddress()
            listenerThread = TCPReceiver()
            discoverySend = UDPSender()
            discoverySend.sendData("224.0.0.1")  # announce on all-hosts multicast
            discoveryRecv = UDPReceiver()
            discoveryRecv.start()
            # re-load previously paired devices, if any were persisted
            validDevices = readValidDevices()
            if(validDevices):
                for validDevice in validDevices:
                    listenerThread.addValidDevice(validDevice)
            listenerThread.start()
            signal.pause()  # block the main thread until a signal arrives
        except socket.error:
            print("Network error: cannot bind port.")
            exit()
        except threading.ThreadError:
            print("Threading error: can't create thread.")
            exit()
        except KeyboardInterrupt:
            print("Keyboard interrupt detected.")
            exit()
|
<gh_stars>0
class ShareCalendar:
    """Share a calendar either by e-mail or via a public .ics URL.

    Fixes over the original:
    - `password=<PASSWORD>` in __save_shared_calendar was an invalid
      (redacted) token — a syntax error; the intended value is p_password;
    - the .ics copy in __save_shared_calendar leaked two file handles;
      both files are now opened with `with`.
    """

    def __init__(self):
        # SECURITY: credentials are hard-coded in source; move them to
        # configuration/secret storage and never commit real passwords.
        try:
            self.__server = smtplib.SMTP('smtp.gmail.com', 587)
            self.__server.ehlo()
            self.__server.starttls()  # upgrade to TLS before logging in
            self.__server.ehlo()
            self.__server.login("<EMAIL>", "alessioaytugjef")
        except SMTPAuthenticationError:
            print("Could not connect to mailserver")

    def share_calendar(self, p_cal_id, p_email=None, p_password=""):
        """Share calendar p_cal_id.

        With p_email: export, persist and mail the link (returns None).
        Without: return the (possibly pre-existing) public .ics URL.
        """
        if p_email is not None:
            new_url = self.__save_shared_calendar(p_cal_id, p_email, p_password)
            self.__share_url_by_mail(p_email, new_url)
            return None
        else:
            calendar = Calendars.objects.filter(cal_id=p_cal_id)[0]
            # a public share is the row whose mail column is NULL
            shared_cal_url = SharedCalendars.objects.filter(cal_id=calendar, mail__isnull=True)
            if shared_cal_url.count() == 0:
                return self.__save_shared_calendar(p_cal_id, p_email, p_password)
            else:
                return shared_cal_url.values()[0]["url"]+".ics"

    def share_emails(self, p_cal_id):
        """Return the list of e-mail addresses the calendar is shared with."""
        email_list = []
        calendar = Calendars.objects.filter(cal_id=p_cal_id)[0]
        shared_cal_url = SharedCalendars.objects.filter(cal_id=calendar, mail__isnull=False)
        for i in range(0, shared_cal_url.count()):
            email_list.append(shared_cal_url.values()[i]["mail"])
        return email_list

    def __save_shared_calendar(self, p_cal_id, p_email, p_password):
        """Export the calendar, copy it to saved storage and record the share.

        Returns the saved .ics URL (existing one if this email already has a
        share for the calendar).
        """
        cal = calendar()
        cal.load_from_database(int(p_cal_id))
        new_url = cal.export()
        # copy the exported file from tmp storage into saved storage
        path = new_url[len(ICS_TMP_STORE):]
        saved_url = ICS_SAVED_STORE + path
        with open(new_url, "rb") as exported_file:
            exported_bytes = exported_file.read()
        with open(saved_url + ".ics", "wb") as saved_file:
            saved_file.write(exported_bytes)
        calendar_db = Calendars.objects.filter(cal_id=p_cal_id)[0]
        if SharedCalendars.objects.filter(cal_id=calendar_db, mail=p_email).exists():
            return SharedCalendars.objects.filter(cal_id=calendar_db, mail=p_email)[0].url + ".ics"
        else:
            # fix: the original had the unparseable placeholder `<PASSWORD>`
            db_shared_cal = SharedCalendars(cal_id=calendar_db, mail=p_email,
                                            url=saved_url, password=p_password)
            db_shared_cal.save()
            return saved_url+".ics"

    def delete_shared_email(self, p_cal_id, p_email):
        """Delete the share entry for (p_cal_id, p_email)."""
        calendar = Calendars.objects.filter(cal_id=p_cal_id)[0]
        entry_to_delete = SharedCalendars.objects.filter(cal_id=calendar, mail=p_email)[0]
        entry_to_delete.delete()

    def __share_url_by_mail(self, p_adress, p_url):
        """Mail the share link p_url to p_adress."""
        message = "There is a calendar shared with you, here is the link:\n\n" + p_url
        mail = 'Subject: {}\n\n{}'.format("There is a calendar shared with you...", message)
        self.__server.sendmail("<EMAIL>", p_adress, mail)
import smtplib
from cal_tool.models import SharedCalendars, Calendars
from cal_tool.calendar.calendar import calendar
from Project.settings import ICS_SAVED_STORE, ICS_TMP_STORE
from smtplib import SMTPAuthenticationError |
<filename>equivalence/actions/get.py<gh_stars>0
# -*- coding: utf-8 -*-
from typing import Optional

from equivalence.AstraRastr import RASTR
from equivalence.tools.tool import changing_number_of_semicolons
class GettingParameter:
    """Accessors for table cells in RastrWin3.

    1. get_cell_row - cell value addressed by row index;
    2. get_cell_SetSel - cell value located via table.SetSel("Num=...");
    3. get_cell_index - index of the first row matching a selection;
    4. get_count_table_starting_zero - row count minus one (0 .. max-1);
    5. get_count_table - row count (1 .. max).

    Fixes over the original:
    - the f-string "docstrings" were plain expressions (f""...""), not real
      docstrings, and never reached __doc__;
    - `type(v) is (float or int or str)` evaluates its parenthesised part
      to `float`, so rounding was silently applied to floats only; replaced
      with isinstance() against the intended type tuple;
    - the annotation `-> (int or None)` evaluates to `int`; replaced with
      Optional[int];
    - the RASTR default is now resolved lazily, so the class can be
      constructed with an injected test double without the COM object.
    """

    def __init__(self, rastr_win=None):
        """:param rastr_win: Rastr.Astra COM object (win32com); defaults to
        the module-level RASTR instance."""
        self.rastr_win = RASTR if rastr_win is None else rastr_win

    def get_cell_row(self,
                     table: str,
                     column: str,
                     row: int,
                     rounding_to: int = None):
        """Return the value of a cell addressed by its row index.

        :param table: RastrWin3 table name (e.g. "Generator");
        :param column: column name (e.g. "Num");
        :param row: row index (0 .. table.Count - 1);
        :param rounding_to: digits after the decimal point, or None;
        :return: the cell value, rounded when rounding_to is given.
        """
        table_ = self.rastr_win.Tables(table)
        value = table_.Cols(column).Z(row)
        if rounding_to is not None and isinstance(value, (float, int, str)):
            return changing_number_of_semicolons(number=value, digits=rounding_to)
        return value

    def get_cell_SetSel(self,
                        table: str,
                        column: str,
                        viborka: str,
                        rounding_to: int = None):
        """Return the value of a cell found via table.SetSel(viborka).

        :param table: RastrWin3 table name;
        :param column: column name;
        :param viborka: selection formula, e.g. "Num=5170004";
        :param rounding_to: digits after the decimal point, or None;
        :return: the cell value, or None when nothing matches the selection.
        """
        table_ = self.rastr_win.Tables(table)
        table_.SetSel(viborka)
        row = table_.FindNextSel(-1)
        if row == -1:
            return None
        value = table_.Cols(column).Z(row)
        if rounding_to is not None and isinstance(value, (float, int)):
            return changing_number_of_semicolons(number=value, digits=rounding_to)
        return value

    def get_cell_index(self,
                       table: str,
                       viborka: str) -> Optional[int]:
        """Return the index of the first row matching viborka, or None.

        :param table: RastrWin3 table name;
        :param viborka: selection formula.
        """
        table_ = self.rastr_win.Tables(table)
        table_.SetSel(viborka)
        row = table_.FindNextSel(-1)
        return row if row != -1 else None

    def get_count_table_starting_zero(self, table: str) -> int:
        """Return the last valid row index of the table (Count - 1)."""
        return self.rastr_win.Tables(table).Count - 1

    def get_count_table(self, table: str) -> int:
        """Return the number of rows in the table."""
        return self.rastr_win.Tables(table).Count
|
import bpy
import math
from . import selection_sets
from bpy.types import Scene
from bpy.props import (
FloatProperty,
BoolProperty
)
"""
**********************************************************************
* def section *
**********************************************************************
"""
def meshes_names_to_clipboard():
    """Send object names to clipboard using the "name", "name" pattern.

    Fix: the original detected the last element with `name is list[-1]`
    — an identity comparison on strings, which misfires when duplicate
    (or interned) names are present. str.join produces the same output
    and is duplicate-safe.

    Returns {'FINISHED'} so operators can forward the result directly.
    """
    selected_only = bpy.context.scene.retico_mesh_check_only_selected
    objects_selected = selection_sets.meshes_in_selection(
    ) if selected_only else selection_sets.meshes_selectable()
    # sort case-insensitively, then quote and join
    names = sorted((obj.name for obj in objects_selected), key=str.lower)
    bpy.context.window_manager.clipboard = ", ".join(
        '"{}"'.format(name) for name in names)
    return {'FINISHED'}
def transfer_names():
    """Copy each selected object's name onto its mesh datablock."""
    user_active = bpy.context.view_layer.objects.active
    selected_only = bpy.context.scene.retico_mesh_check_only_selected
    objects_selected = selection_sets.meshes_in_selection(
    ) if selected_only else selection_sets.meshes_selectable()
    # leave Edit mode while renaming; restored at the end
    is_user_in_edit_mode = bool(user_active and user_active.mode == 'EDIT')
    if is_user_in_edit_mode:
        bpy.ops.object.mode_set(mode='OBJECT')
    # two passes: temporary names first, so renaming cannot collide with a
    # mesh name that is about to be assigned
    for obj in objects_selected:
        obj.data.name = "tmp"
    for obj in objects_selected:
        obj.data.name = obj.name
    # restore the user's active object and mode
    bpy.context.view_layer.objects.active = user_active
    if is_user_in_edit_mode:
        bpy.ops.object.mode_set(mode='EDIT')
    return {'FINISHED'}
def set_autosmooth(user_angle=85):
    """Enable Auto Smooth (angle in degrees) and shade smooth on selection.

    Fix: guard against a missing active object — the original dereferenced
    bpy.context.view_layer.objects.active.mode and crashed when active was
    None (transfer_names already guarded this case).

    :param user_angle: auto-smooth angle in degrees.
    """
    user_active = bpy.context.view_layer.objects.active
    is_user_in_edit_mode = False
    selected_only = bpy.context.scene.retico_mesh_check_only_selected
    objects_selected = selection_sets.meshes_in_selection(
    ) if selected_only else selection_sets.meshes_selectable()
    # leave Edit mode while operating; restored at the end
    if user_active and user_active.mode == 'EDIT':
        is_user_in_edit_mode = True
        bpy.ops.object.mode_set(mode='OBJECT')
    for obj in objects_selected:
        # shade_smooth acts on the active object, so activate each in turn
        bpy.context.view_layer.objects.active = obj
        mesh = obj.data
        mesh.use_auto_smooth = True
        mesh.auto_smooth_angle = math.radians(user_angle)
        bpy.ops.object.shade_smooth()
    bpy.context.view_layer.objects.active = user_active
    if is_user_in_edit_mode:
        bpy.ops.object.mode_set(mode='EDIT')
    return {'FINISHED'}
def set_custom_normals(apply=True):
    """Add (apply=True) or clear (apply=False) custom split normals.

    Fix: guard against a missing active object — the original dereferenced
    bpy.context.view_layer.objects.active.mode and crashed when active was
    None (transfer_names already guarded this case). Also dropped the
    unused `mesh` local.
    """
    user_active = bpy.context.view_layer.objects.active
    is_user_in_edit_mode = False
    selected_only = bpy.context.scene.retico_mesh_check_only_selected
    objects_selected = selection_sets.meshes_in_selection(
    ) if selected_only else selection_sets.meshes_selectable()
    # leave Edit mode while operating; restored at the end
    if user_active and user_active.mode == 'EDIT':
        is_user_in_edit_mode = True
        bpy.ops.object.mode_set(mode='OBJECT')
    for obj in objects_selected:
        # the customdata operators act on the active object
        bpy.context.view_layer.objects.active = obj
        if apply:
            bpy.ops.mesh.customdata_custom_splitnormals_add()
        else:
            bpy.ops.mesh.customdata_custom_splitnormals_clear()
    bpy.context.view_layer.objects.active = user_active
    if is_user_in_edit_mode:
        bpy.ops.object.mode_set(mode='EDIT')
    return {'FINISHED'}
def report_instances():
    """Report meshes shared by several objects (instances).

    Fix: the original computed `mesh_used_id` for an already-known mesh but
    then appended the stale loop variable `mesh_inst_id`, so every object
    reusing an earlier mesh was attributed to the LAST entry of
    meshes_instanced.

    :return: list of "mesh used by: obj, obj" strings, or False when no
             instanced mesh was found. Optionally updates the selection.
    """
    obj_using_instance = []   # pairs [object, index into meshes_instanced]
    meshes_instanced = []
    update_selection = bpy.context.scene.retico_mesh_reports_update_selection
    selected_only = bpy.context.scene.retico_mesh_check_only_selected
    objects_selected = selection_sets.meshes_in_selection(
    ) if selected_only else selection_sets.meshes_selectable()
    report_message = []
    if update_selection:
        # start from an empty selection, then select instance users only
        for obj in bpy.context.selected_objects:
            obj.select_set(False)
    for obj in objects_selected:
        mesh = obj.data
        # single-user meshes are not instanced
        if mesh.users <= 1:
            continue
        if update_selection:
            obj.select_set(True)
        if mesh in meshes_instanced:
            mesh_used_id = meshes_instanced.index(mesh)
        else:
            meshes_instanced.append(mesh)
            mesh_used_id = len(meshes_instanced) - 1
        obj_using_instance.append([obj, mesh_used_id])
    # for each instanced mesh, list the objects using it
    for mesh_inst_id in range(len(meshes_instanced)):
        users = [info[0] for info in obj_using_instance if info[1] == mesh_inst_id]
        user_names = ", ".join(obj.name for obj in users)
        report_message.append("{} used by: {}".format(
            meshes_instanced[mesh_inst_id].name, user_names))
    if update_selection and len(meshes_instanced) > 0:
        bpy.context.view_layer.objects.active = obj_using_instance[0][0]
    if len(meshes_instanced) == 0:
        return False
    return report_message
"""
**********************************************************************
* Panel class section *
**********************************************************************
"""
class RETICO_PT_mesh_3dviewPanel(bpy.types.Panel):
    # Common 3D-View sidebar placement shared by every ReTiCo mesh panel:
    # shown in the "ReTiCo" tab of the N-panel, collapsed by default.
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "ReTiCo"
    bl_options = {'DEFAULT_CLOSED'}
class RETICO_PT_mesh(RETICO_PT_mesh_3dviewPanel):
    """Root "Meshes" panel: exposes the only-on-selection toggle."""
    bl_label = "Meshes"
    bl_idname = "RETICO_PT_mesh"

    def draw(self, context):
        toggle_row = self.layout.box().row()
        toggle_row.prop(context.scene, "retico_mesh_check_only_selected",
                        text="only on selection")
class RETICO_PT_mesh_misc(RETICO_PT_mesh_3dviewPanel):
    """"Misc" sub-panel: name transfer and clipboard export buttons."""
    bl_parent_id = "RETICO_PT_mesh"
    bl_idname = "RETICO_PT_mesh_misc"
    bl_label = "Misc"

    def draw(self, context):
        layout = self.layout
        only_selected = bpy.context.scene.retico_mesh_check_only_selected
        # guard clause: selection-only mode with nothing selected
        if only_selected and len(bpy.context.selected_objects) == 0:
            row = layout.row(align=True)
            row.label(text="No object in selection.")
            return
        # transfer object name to mesh name
        row = layout.row()
        row.operator("retico.mesh_transfer_names",
                     text="Transfer names", icon='SORTALPHA')
        # copy names to clipboard
        row = layout.row()
        row.operator("retico.mesh_name_to_clipboard",
                     text="Copy names to clipboard", icon='COPYDOWN')
class RETICO_PT_mesh_normals(RETICO_PT_mesh_3dviewPanel):
    """"Normals" sub-panel: autosmooth and custom split normals tools."""
    bl_parent_id = "RETICO_PT_mesh"
    bl_idname = "RETICO_PT_mesh_normals"
    bl_label = "Normals"

    def draw(self, context):
        layout = self.layout
        only_selected = bpy.context.scene.retico_mesh_check_only_selected
        # guard clause: selection-only mode with nothing selected
        if only_selected and len(bpy.context.selected_objects) == 0:
            row = layout.row(align=True)
            row.label(text="No object in selection.")
            return
        # overwrite autosmooth
        row = layout.row(align=True)
        row.operator("retico.mesh_set_autosmooth", text="Set autosmooth")
        row.prop(context.scene, "retico_mesh_autosmooth_angle",
                 text="", slider=True)
        row = layout.row(align=True)
        row.label(text="Custom Normals:")
        row.operator("retico.mesh_set_custom_normals",
                     text="Add").apply = True
        row.operator("retico.mesh_set_custom_normals",
                     text="Del").apply = False
class RETICO_PT_mesh_report(RETICO_PT_mesh_3dviewPanel):
    """"Report" sub-panel: instance report plus its output options."""
    bl_parent_id = "RETICO_PT_mesh"
    bl_idname = "RETICO_PT_mesh_report"
    bl_label = "Report"

    def draw(self, context):
        layout = self.layout
        only_selected = bpy.context.scene.retico_mesh_check_only_selected
        # guard clause: selection-only mode with nothing selected
        if only_selected and len(bpy.context.selected_objects) == 0:
            row = layout.row(align=True)
            row.label(text="No object in selection.")
            return
        # report output options
        box = layout.box()
        row = box.row()
        row.prop(context.scene, "retico_mesh_reports_update_selection",
                 text="update selection")
        row.prop(context.scene, "retico_mesh_reports_to_clipboard",
                 text="to clipboard")
        grid = layout.grid_flow(
            row_major=True, columns=2, even_columns=True, even_rows=True, align=True)
        row = grid.row(align=True)
        row.operator("retico.mesh_report_instances", text="Instances")
"""
**********************************************************************
* Operator class section *
**********************************************************************
"""
class RETICO_OT_mesh_name_to_clipboard(bpy.types.Operator):
    """Operator wrapper around meshes_names_to_clipboard()."""
    bl_idname = "retico.mesh_name_to_clipboard"
    bl_label = "Copy Object name to clipboard"
    bl_description = "Copy Object name to clipboard"

    @classmethod
    def poll(cls, context):
        # only meaningful when the view layer contains objects
        return len(context.view_layer.objects) > 0

    def execute(self, context):
        meshes_names_to_clipboard()
        self.report({'INFO'}, "---[ Copied to clipboard ]---")
        return {'FINISHED'}
class RETICO_OT_mesh_transfer_names(bpy.types.Operator):
    """Operator wrapper around transfer_names()."""
    bl_idname = "retico.mesh_transfer_names"
    bl_label = "Copy Object name to its Data name"
    bl_description = "Copy Object name to its Data name"

    @classmethod
    def poll(cls, context):
        # only meaningful when the view layer contains objects
        return len(context.view_layer.objects) > 0

    def execute(self, context):
        transfer_names()
        self.report({'INFO'}, "---[ Object name to Mesh ]---")
        return {'FINISHED'}
class RETICO_OT_mesh_set_autosmooth(bpy.types.Operator):
    """Operator wrapper around set_autosmooth(), fed by the scene angle."""
    bl_idname = "retico.mesh_set_autosmooth"
    bl_label = "Batch set autosmooth"
    bl_description = "Batch set autosmooth"

    @classmethod
    def poll(cls, context):
        # only meaningful when the view layer contains objects
        return len(context.view_layer.objects) > 0

    def execute(self, context):
        set_autosmooth(context.scene.retico_mesh_autosmooth_angle)
        self.report({'INFO'}, "---[ Autosmooth ]---")
        return {'FINISHED'}
class RETICO_OT_mesh_set_custom_normals(bpy.types.Operator):
    """Operator wrapper around set_custom_normals(); `apply` picks add/del."""
    bl_idname = "retico.mesh_set_custom_normals"
    bl_label = "Add or delete custom split normals"
    bl_description = "Add or delete custom split normals"
    apply: BoolProperty()

    @classmethod
    def poll(cls, context):
        # only meaningful when the view layer contains objects
        return len(context.view_layer.objects) > 0

    def execute(self, context):
        set_custom_normals(self.apply)
        self.report({'INFO'}, "---[ Custom Normals ]---")
        return {'FINISHED'}
class RETICO_OT_mesh_report_instances(bpy.types.Operator):
    """Operator wrapper around report_instances(); one INFO line per mesh."""
    bl_idname = "retico.mesh_report_instances"
    bl_label = "List objects using instances"
    bl_description = "List objects using instances"

    @classmethod
    def poll(cls, context):
        # only meaningful when the view layer contains objects
        return len(context.view_layer.objects) > 0

    def execute(self, context):
        message = report_instances()
        self.report({'INFO'}, "---[ Objects using instances ]---")
        if not message:
            self.report({'INFO'}, "No instances detected.")
            return {'FINISHED'}
        clipboard_parts = []
        for report in message:
            clipboard_parts.append("\r\n" + report)
            self.report({'INFO'}, report)
        if context.scene.retico_mesh_reports_to_clipboard:
            context.window_manager.clipboard = "".join(clipboard_parts)
        return {'FINISHED'}
"""
**********************************************************************
* Registration *
**********************************************************************
"""
# Panels and operators handled by register()/unregister(); parent panels
# are listed before their children so registration order is valid.
classes = (
    RETICO_PT_mesh,
    RETICO_PT_mesh_misc,
    RETICO_PT_mesh_normals,
    RETICO_PT_mesh_report,
    RETICO_OT_mesh_transfer_names,
    RETICO_OT_mesh_set_autosmooth,
    RETICO_OT_mesh_set_custom_normals,
    RETICO_OT_mesh_name_to_clipboard,
    RETICO_OT_mesh_report_instances,
)
def register():
    """Register every ReTiCo mesh class and declare its Scene properties."""
    from bpy.utils import register_class
    for klass in classes:
        register_class(klass)
    # Scene-level settings used by the panels and operators above
    Scene.retico_mesh_autosmooth_angle = FloatProperty(
        name="autosmooth angle",
        description="autosmooth angle",
        default=85.0,
        min=0.0,
        max=180.0,
    )
    Scene.retico_mesh_check_only_selected = BoolProperty(
        name="Mesh tab use selected only",
        description="Mesh operations applies on selection, or not",
        default=True
    )
    Scene.retico_mesh_reports_update_selection = BoolProperty(
        name="Report update selection",
        description="Reports modify user selection",
        default=False
    )
    Scene.retico_mesh_reports_to_clipboard = BoolProperty(
        name="Reports sent to clipboard",
        description="Reports sent to clipboard",
        default=False
    )
def unregister():
    """Drop the Scene properties and unregister classes in reverse order."""
    from bpy.utils import unregister_class
    del Scene.retico_mesh_autosmooth_angle
    del Scene.retico_mesh_reports_update_selection
    del Scene.retico_mesh_reports_to_clipboard
    del Scene.retico_mesh_check_only_selected
    for klass in reversed(classes):
        unregister_class(klass)
# Allow running this module directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
|
<reponame>vanhoefm/apbleed
# Scanning tests
# Copyright (c) 2013, <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import time
import logging
logger = logging.getLogger()
import os
import subprocess
import hostapd
def check_scan(dev, params, other_started=False):
    """Trigger a scan and verify start/result events carry the right id.

    With other_started=True, first consume one externally triggered scan
    cycle and verify our own scan id is NOT attached to it.
    """
    if not other_started:
        dev.dump_monitor()
    scan_id = dev.request("SCAN " + params)
    if "FAIL" in scan_id:
        raise Exception("Failed to start scan")
    scan_id = int(scan_id)
    id_tag = "id=" + str(scan_id)

    if other_started:
        # the foreign scan must complete first, without our id on its events
        ev = dev.wait_event(["CTRL-EVENT-SCAN-STARTED"])
        if ev is None:
            raise Exception("Other scan did not start")
        if id_tag in ev:
            raise Exception("Own scan id unexpectedly included in start event")
        ev = dev.wait_event(["CTRL-EVENT-SCAN-RESULTS"])
        if ev is None:
            raise Exception("Other scan did not complete")
        if id_tag in ev:
            raise Exception("Own scan id unexpectedly included in completed event")

    # our own scan: both events must carry our id
    ev = dev.wait_event(["CTRL-EVENT-SCAN-STARTED"])
    if ev is None:
        raise Exception("Scan did not start")
    if id_tag not in ev:
        raise Exception("Scan id not included in start event")
    ev = dev.wait_event(["CTRL-EVENT-SCAN-RESULTS"])
    if ev is None:
        raise Exception("Scan did not complete")
    if id_tag not in ev:
        raise Exception("Scan id not included in completed event")
def check_scan_retry(dev, params, bssid):
    """Scan with the given parameters until the BSS entry for bssid is fresh.

    Fix: forward `params` to check_scan — the original ignored the argument
    and always scanned with "freq=2412-2462,5180 use_id=1", silently
    dropping e.g. the "type=only" variants the callers pass in.

    Raises an Exception if the entry is still older than 1 s after 5 tries.
    """
    for i in range(0, 5):
        check_scan(dev, params)
        if int(dev.get_bss(bssid)['age']) <= 1:
            return
    raise Exception("Unexpectedly old BSS entry")
def test_scan(dev, apdev):
    """Control interface behavior on scan parameters"""
    hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test-scan" })
    bssid = apdev[0]['bssid']
    logger.info("Full scan")
    check_scan(dev[0], "use_id=1")
    logger.info("Limited channel scan")
    check_scan_retry(dev[0], "freq=2412-2462,5180 use_id=1", bssid)
    # wait long enough to allow next scans to be verified not to find the AP
    time.sleep(2)
    logger.info("Passive single-channel scan")
    check_scan(dev[0], "freq=2457 passive=1 use_id=1")
    logger.info("Active single-channel scan")
    check_scan(dev[0], "freq=2452 passive=0 use_id=1")
    # neither off-channel scan may have refreshed the AP's BSS entry
    if int(dev[0].get_bss(bssid)['age']) < 2:
        raise Exception("Unexpectedly updated BSS entry")
    logger.info("Active single-channel scan on AP's operating channel")
    check_scan_retry(dev[0], "freq=2412 passive=0 use_id=1", bssid)
def test_scan_only(dev, apdev):
    """Control interface behavior on scan parameters with type=only"""
    hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test-scan" })
    bssid = apdev[0]['bssid']
    logger.info("Full scan")
    check_scan(dev[0], "type=only use_id=1")
    logger.info("Limited channel scan")
    check_scan_retry(dev[0], "type=only freq=2412-2462,5180 use_id=1", bssid)
    # wait long enough to allow next scans to be verified not to find the AP
    time.sleep(2)
    logger.info("Passive single-channel scan")
    check_scan(dev[0], "type=only freq=2457 passive=1 use_id=1")
    logger.info("Active single-channel scan")
    check_scan(dev[0], "type=only freq=2452 passive=0 use_id=1")
    # neither off-channel scan may have refreshed the AP's BSS entry
    if int(dev[0].get_bss(bssid)['age']) < 2:
        raise Exception("Unexpectedly updated BSS entry")
    logger.info("Active single-channel scan on AP's operating channel")
    check_scan_retry(dev[0], "type=only freq=2412 passive=0 use_id=1", bssid)
def test_scan_external_trigger(dev, apdev):
    """Avoid operations during externally triggered scan"""
    hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test-scan" })
    bssid = apdev[0]['bssid']
    # start a scan behind wpa_supplicant's back, directly via iw/nl80211
    subprocess.call(['sudo', 'iw', dev[0].ifname, 'scan', 'trigger'])
    check_scan(dev[0], "use_id=1", other_started=True)
def test_scan_bss_expiration_count(dev, apdev):
    """BSS entry expiration based on scan results without match"""
    # 0 is rejected; expire entries after 2 scans without a match
    if "FAIL" not in dev[0].request("BSS_EXPIRE_COUNT 0"):
        raise Exception("Invalid BSS_EXPIRE_COUNT accepted")
    if "OK" not in dev[0].request("BSS_EXPIRE_COUNT 2"):
        raise Exception("BSS_EXPIRE_COUNT failed")
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test-scan" })
    bssid = apdev[0]['bssid']
    dev[0].scan(freq="2412", only_new=True)
    if bssid not in dev[0].request("SCAN_RESULTS"):
        raise Exception("BSS not found in initial scan")
    # disable the AP so following scans no longer find it
    hapd.request("DISABLE")
    dev[0].scan(freq="2412", only_new=True)
    if bssid not in dev[0].request("SCAN_RESULTS"):
        raise Exception("BSS not found in first scan without match")
    dev[0].scan(freq="2412", only_new=True)
    if bssid in dev[0].request("SCAN_RESULTS"):
        raise Exception("BSS found after two scans without match")
def test_scan_bss_expiration_age(dev, apdev):
    """BSS entry expiration based on age"""
    try:
        # malformed command must be rejected; then expire entries after 10 s
        if "FAIL" not in dev[0].request("BSS_EXPIRE_AGE COUNT 9"):
            raise Exception("Invalid BSS_EXPIRE_AGE accepted")
        if "OK" not in dev[0].request("BSS_EXPIRE_AGE 10"):
            raise Exception("BSS_EXPIRE_AGE failed")
        hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test-scan" })
        bssid = apdev[0]['bssid']
        dev[0].scan(freq="2412")
        if bssid not in dev[0].request("SCAN_RESULTS"):
            raise Exception("BSS not found in initial scan")
        # disable the AP so the entry is no longer refreshed
        hapd.request("DISABLE")
        logger.info("Waiting for BSS entry to expire")
        # 7 s < 10 s expiry: the entry must still be present here
        time.sleep(7)
        if bssid not in dev[0].request("SCAN_RESULTS"):
            raise Exception("BSS expired too quickly")
        ev = dev[0].wait_event(["CTRL-EVENT-BSS-REMOVED"], timeout=15)
        if ev is None:
            raise Exception("BSS entry expiration timed out")
        if bssid in dev[0].request("SCAN_RESULTS"):
            raise Exception("BSS not removed after expiration time")
    finally:
        # restore the default expiration age for subsequent test cases
        dev[0].request("BSS_EXPIRE_AGE 180")
def test_scan_filter(dev, apdev):
    """Filter scan results based on SSID"""
    try:
        # only SSIDs from configured networks may appear in scan results
        if "OK" not in dev[0].request("SET filter_ssids 1"):
            raise Exception("SET failed")
        dev[0].connect("test-scan", key_mgmt="NONE", only_add_network=True)
        hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test-scan" })
        bssid = apdev[0]['bssid']
        hostapd.add_ap(apdev[1]['ifname'], { "ssid": "test-scan2" })
        bssid2 = apdev[1]['bssid']
        dev[0].scan(freq="2412", only_new=True)
        # the configured SSID passes the filter; the other one must not
        if bssid not in dev[0].request("SCAN_RESULTS"):
            raise Exception("BSS not found in scan results")
        if bssid2 in dev[0].request("SCAN_RESULTS"):
            raise Exception("Unexpected BSS found in scan results")
    finally:
        # restore default (no filtering) for subsequent test cases
        dev[0].request("SET filter_ssids 0")
def test_scan_int(dev, apdev):
    """scan interval configuration

    Fix: replaced the Python 2-only statement `print times` with
    `print(times)`, which behaves identically on Python 2 (single
    parenthesised argument) and is valid Python 3.
    """
    try:
        # invalid then valid SCAN_INTERVAL values
        if "FAIL" not in dev[0].request("SCAN_INTERVAL -1"):
            raise Exception("Accepted invalid scan interval")
        if "OK" not in dev[0].request("SCAN_INTERVAL 1"):
            raise Exception("Failed to set scan interval")
        dev[0].connect("not-used", key_mgmt="NONE", scan_freq="2412",
                       wait_connect=False)
        times = {}
        for i in range(0, 3):
            logger.info("Waiting for scan to start")
            start = os.times()[4]
            ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=5)
            if ev is None:
                raise Exception("did not start a scan")
            stop = os.times()[4]
            times[i] = stop - start
            logger.info("Waiting for scan to complete")
            ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 10)
            if ev is None:
                raise Exception("did not complete a scan")
        print(times)
        # first scan starts immediately; the next two must honor the 1 s
        # interval (0.5..1.5 s tolerance)
        if times[0] > 1 or times[1] < 0.5 or times[1] > 1.5 or times[2] < 0.5 or times[2] > 1.5:
            raise Exception("Unexpected scan timing: " + str(times))
    finally:
        # restore the default scan interval for subsequent test cases
        dev[0].request("SCAN_INTERVAL 5")
def test_scan_bss_operations(dev, apdev):
    """Control interface behavior on BSS parameters"""
    hostapd.add_ap(apdev[0]['ifname'], { "ssid": "test-scan" })
    bssid = apdev[0]['bssid']
    hostapd.add_ap(apdev[1]['ifname'], { "ssid": "test2-scan" })
    bssid2 = apdev[1]['bssid']
    # Scan a few times so both APs are reliably in the BSS table.
    dev[0].scan(freq="2412")
    dev[0].scan(freq="2412")
    dev[0].scan(freq="2412")
    # MASK=0x1 limits BSS command output to the id= line only.
    id1 = dev[0].request("BSS FIRST MASK=0x1").splitlines()[0].split('=')[1]
    id2 = dev[0].request("BSS LAST MASK=0x1").splitlines()[0].split('=')[1]
    # 0x20001 additionally requests the delimiter/terminator markers.
    res = dev[0].request("BSS RANGE=ALL MASK=0x20001")
    if "id=" + id1 not in res:
        raise Exception("Missing BSS " + id1)
    if "id=" + id2 not in res:
        raise Exception("Missing BSS " + id2)
    if "====" not in res:
        raise Exception("Missing delim")
    if "####" not in res:
        raise Exception("Missing end")
    res = dev[0].request("BSS RANGE=ALL MASK=0x1").splitlines()
    if len(res) != 2:
        raise Exception("Unexpected result")
    # FIRST/LAST/ID-/NEXT- addressing must all resolve to the right entry.
    res = dev[0].request("BSS FIRST MASK=0x1")
    if "id=" + id1 not in res:
        raise Exception("Unexpected result: " + res)
    res = dev[0].request("BSS LAST MASK=0x1")
    if "id=" + id2 not in res:
        raise Exception("Unexpected result: " + res)
    res = dev[0].request("BSS ID-" + id1 + " MASK=0x1")
    if "id=" + id1 not in res:
        raise Exception("Unexpected result: " + res)
    res = dev[0].request("BSS NEXT-" + id1 + " MASK=0x1")
    if "id=" + id2 not in res:
        raise Exception("Unexpected result: " + res)
    # RANGE=<id> (no dash) is not a valid single-entry form: empty result.
    if len(dev[0].request("BSS RANGE=" + id2 + " MASK=0x1").splitlines()) != 0:
        raise Exception("Unexpected RANGE=1 result")
    # Open-ended and closed ranges, including ids past the end of the table.
    if len(dev[0].request("BSS RANGE=" + id1 + "- MASK=0x1").splitlines()) != 2:
        raise Exception("Unexpected RANGE=0- result")
    if len(dev[0].request("BSS RANGE=-" + id2 + " MASK=0x1").splitlines()) != 2:
        raise Exception("Unexpected RANGE=-1 result")
    if len(dev[0].request("BSS RANGE=" + id1 + "-" + id2 + " MASK=0x1").splitlines()) != 2:
        raise Exception("Unexpected RANGE=0-1 result")
    if len(dev[0].request("BSS RANGE=" + id2 + "-" + id2 + " MASK=0x1").splitlines()) != 1:
        raise Exception("Unexpected RANGE=1-1 result")
    if len(dev[0].request("BSS RANGE=" + str(int(id2) + 1) + "-" + str(int(id2) + 10) + " MASK=0x1").splitlines()) != 0:
        raise Exception("Unexpected RANGE=2-10 result")
    if len(dev[0].request("BSS RANGE=0-" + str(int(id2) + 10) + " MASK=0x1").splitlines()) != 2:
        raise Exception("Unexpected RANGE=0-10 result")
def test_scan_and_interface_disabled(dev, apdev):
    """Scan operation when interface gets disabled"""
    try:
        dev[0].request("SCAN")
        ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"])
        if ev is None:
            raise Exception("Scan did not start")
        # Disabling the interface mid-scan must abort the scan: no
        # scan-results event may ever be delivered.
        dev[0].request("DRIVER_EVENT INTERFACE_DISABLED")
        ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=7)
        if ev is not None:
            raise Exception("Scan completed unexpectedly")
        # verify that scan is rejected while the interface is disabled
        if "FAIL" not in dev[0].request("SCAN"):
            raise Exception("New scan request was accepted unexpectedly")
        # Re-enabling must make scanning work again.
        dev[0].request("DRIVER_EVENT INTERFACE_ENABLED")
        dev[0].scan(freq="2412")
    finally:
        # Re-enable unconditionally so later tests get a usable interface.
        dev[0].request("DRIVER_EVENT INTERFACE_ENABLED")
|
from queue import SimpleQueue
from file_ops import FileOps
class Node(FileOps):
    """
    A node is representative of the vertices in a graph.

    Relations to other nodes are kept in ``self.relations`` as a mapping
    of Node -> label: the keys are the related Node objects themselves
    (identity-hashed, since Node defines no __eq__/__hash__), not their
    uuids.
    """
    def __init__(self, uuid, data, file):
        """
        Initialize a Node object with uuid and data.
        Args:
            uuid (str): The uuid of the Node to be created.
            data (dict): The data to be assigned to the Node.
            file: Backing file used by the FileOps persistence layer.
        Returns:
            (Node): The initialized Node object.
        """
        self.id = uuid
        self.data = data
        # outgoing edges: {Node: label}
        self.relations = {}
        self.file = file
    def __str__(self):
        """
        Return the str representation of this Node object.
        Returns:
            (str): The str representation of this Node object.
        """
        return "id: {}, data: {}, relations: {}".format(
            self.id, str(self.data), str(self.relations)
        )
    def to_dict(self):
        """
        Return the dict representation of this Node object.
        Returns:
            (dict): The dict representation of this Node object.
        Note:
            The "relations" value still maps Node objects to labels, so
            the result is not directly JSON-serializable.
        """
        return {"id": self.id, "data": self.data, "relations": self.relations}
    @FileOps.save_on_update
    def relate_to(self, node, by=None, bidirectional=False):
        """
        Create a relation between this Node and another Node object.
        Args:
            node (Node): The other Node object to create a relation to.
            by (any|None): The label to assign to the newly established
            relation.
            bidirectional (bool|False): If True, the relation will be created
            both ways (from this Node to node and from node to this Node).
        Raises:
            Exception: If node is not of type Node.
            Exception: If specified relation already exists on this Node.
        """
        # node must be of type Node
        if not isinstance(node, Node):
            raise Exception("node must be a node object")
        # edge must not already exist
        if node in self.relations:
            raise Exception(
                "{} is already related to {} by label {}".format(
                    self.id, node.id, by
                )
            )
        # the reverse edge is only checked when it would be (re)created
        if self in node.relations and bidirectional:
            raise Exception(
                "{} is already related to {} by label {}".format(
                    node.id, self.id, by
                )
            )
        # add edge to node
        self.relations[node] = by
        if bidirectional:
            node.relations[self] = by
    def related_by(self, label):
        """
        Return a list of nodes related to this Node by
        label.
        Args:
            label (any): The label of the relation to search for.
        Returns:
            (list): List of Node objects related to this Node by label.
        """
        return [n for n, l in self.relations.items() if l == label]
    def related_difference(self, label_1, label_2):
        """
        Return a dict of nodes that are directly related by label_1
        and indirectly related by label_2. The value will be the
        number of times this indirect relation is found.
        Args:
            label_1 (any): The label of the direct relation to this Node.
            label_2 (any): The label of the indirect relations to find
            that are connected to this Node.
        Returns:
            (dict): A dict of Node(s) that are indirectly related by label_2
            and directly related by label_1. The value corresponding to each
            Node will be the number of times it is connected (degree of
            relation).
        """
        # build stack of nodes related to current
        # node by label_1
        stack = [n for n, l in self.relations.items() if l == label_1]
        # if stack is empty, no direct relation by label_1 exists,
        # return empty list
        if len(stack) == 0:
            return {}
        # create needed structures
        result = {}
        visited = set([self])
        # self and the label_1 neighbours are excluded from the result
        direct_relations = set([self] + stack)
        # iterative depth-first traversal over the whole reachable graph;
        # each node's edges are examined exactly once (visited guard)
        while stack:
            # pop a node from the stack
            node = stack.pop()
            # if the node has not been visited
            if node not in visited:
                # mark it as visited
                visited.add(node)
                # iterate through its relations
                for relation, label in node.relations.items():
                    # add each relation to the stack
                    stack.append(relation)
                    # if the label is equal to label_2, add the node to result;
                    # the count is per incoming label_2 edge, so a node
                    # reachable via several such edges accumulates a degree
                    if label == label_2 and relation not in direct_relations:
                        if relation in result:
                            result[relation] += 1
                        else:
                            result[relation] = 1
        # return resulting list of nodes
        return result
|
# -*- coding: utf-8 -*-
"""Using continuations as an escape mechanism."""
from ...syntax import macros, tco, continuations, call_cc
from ...ec import call_ec
def test():
    """Demonstrate escape continuations vs. unpythonic's continuations machinery.

    NOTE(review): the surrounding module imports syntactic macros
    (``from ...syntax import macros, tco, continuations, call_cc``), so this
    function only behaves as intended when the module is imported through the
    macro expander — presumably it cannot be run as plain Python; confirm.
    """
    # basic strategy using an escape continuation
    def double_odd(x, ec):
        if x % 2 == 0:  # reject even "x"
            ec("not odd")
        return 2*x
    @call_ec
    def result1(ec):
        y = double_odd(42, ec)
        z = double_odd(21, ec)
        return z
    @call_ec
    def result2(ec):
        y = double_odd(21, ec)
        z = double_odd(42, ec)
        return z
    # the escape fires on the first even argument in each case
    assert result1 == "not odd"
    assert result2 == "not odd"
    # should work also in a "with tco" block
    with tco:
        def double_odd(x, ec):
            if x % 2 == 0:  # reject even "x"
                ec("not odd")
            return 2*x
        @call_ec
        def result1(ec):
            y = double_odd(42, ec)
            z = double_odd(21, ec)  # avoid tail-calling because ec is not valid after result1() exits
            return z
        @call_ec
        def result2(ec):
            y = double_odd(21, ec)
            z = double_odd(42, ec)
            return z
        assert result1 == "not odd"
        assert result2 == "not odd"
    # can we do this using the **continuations** machinery?
    with continuations:
        def double_odd(x, ec, cc):
            if x % 2 == 0:
                cc = ec  # try to escape by overriding cc...
                return "not odd"
            return 2*x
        def main1(cc):
            # cc actually has a default, so it's ok to not pass anything as cc here.
            y = double_odd(42, ec=cc)  # y = "not odd"
            z = double_odd(21, ec=cc)  # we could tail-call, but let's keep this similar to the first example.
            return z
        def main2(cc):
            y = double_odd(21, ec=cc)
            z = double_odd(42, ec=cc)
            return z
        # ...but no call_cc[] anywhere, so cc is actually always
        # unpythonic.fun.identity, cannot perform an escape.
        assert main1() == 42
        assert main2() == "not odd"
    # to fix that, let's call_cc[]:
    with continuations:
        def double_odd(x, ec, cc):
            if x % 2 == 0:
                cc = ec  # escape by overriding cc (now it works!)
                return "not odd"
            return 2*x
        def main1(cc):
            y = call_cc[double_odd(42, ec=cc)]
            z = call_cc[double_odd(21, ec=cc)]
            return z
        def main2(cc):
            y = call_cc[double_odd(21, ec=cc)]
            z = call_cc[double_odd(42, ec=cc)]
            return z
        # call_cc[] captures the actual cont, so now this works as expected.
        assert main1() == "not odd"
        assert main2() == "not odd"
    # In each case, the second call_cc[] is actually redundant, because after
    # the second call to double_odd(), there is no more code to run in each
    # main function.
    #
    # We can just as well use a tail-call, optimizing away a redundant
    # continuation capture:
    with continuations:
        def double_odd(x, ec, cc):
            if x % 2 == 0:
                cc = ec
                return "not odd"
            return 2*x
        def main1(cc):
            y = call_cc[double_odd(42, ec=cc)]
            return double_odd(21, ec=cc)  # tail call, no further code to run in main1 so no call_cc needed.
        def main2(cc):
            y = call_cc[double_odd(21, ec=cc)]
            return double_odd(42, ec=cc)
        assert main1() == "not odd"
        assert main2() == "not odd"
    print("All tests PASSED")
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test()
|
<reponame>iltempe/osmosi
#!/usr/bin/env python
"""
@file runner.py
@author <NAME>
@date 2007-07-26
@version $Id$
test different traffic_light types
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2007-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import subprocess
import random
import shutil
sys.path.append(
os.path.join(os.path.dirname(sys.argv[0]), '..', '..', '..', '..', "tools"))
from sumolib import checkBinary # noqa
# Traffic-light control algorithms to compare, one SUMO run per type.
types = ["static", "actuated", "sotl_phase", "sotl_platoon", "sotl_request", "sotl_wave", "sotl_marching", "swarm"]
# Demand sweep definitions as "start;stop;step" in veh/h for the two
# directions (WE/EW and NS/SN respectively).
flow1def = "0;2000;600".split(";")
flow2def = "0;2000;600".split(";")
fillSteps = 120  # 3600
measureSteps = 600  # 36000
# total simulated steps: warm-up phase plus measurement phase
simSteps = fillSteps + measureSteps
def buildDemand(simSteps, pWE, pEW, pNS, pSN):
    """Write input_routes.rou.xml with randomized per-step demand.

    For each simulation step a vehicle is inserted independently on each
    of the four routes (WE, NS, EW, SN) with the given per-step
    probability — a Bernoulli approximation of a Poisson arrival process.

    Args:
        simSteps (int): number of simulation steps to generate demand for.
        pWE, pEW, pNS, pSN (float): per-step insertion probability per route.
    """
    # Use a context manager so the file is closed even if writing fails
    # (the original open()/close() pair could leak the handle on error).
    with open("input_routes.rou.xml", "w") as fd:
        #---routes---
        print("""<routes>
    <vType id="type1" accel="2.0" decel="5.0" sigma="0.0" length="6.5" maxSpeed="70"/>
    <route id="WE" edges="1i 3o 5o"/>
    <route id="NS" edges="2i 4o 6o"/>
    <route id="EW" edges="3i 1o 7o"/>
    <route id="SN" edges="4i 2o 8o"/>
    """, file=fd)
        lastVeh = 0
        vehNr = 0
        for i in range(simSteps):
            if random.uniform(0, 1) < pWE:  # Poisson distribution
                print('    <vehicle id="%i" type="type1" route="WE" depart="%i" departSpeed="13.89" />' % (
                    vehNr, i), file=fd)
                vehNr += 1
                lastVeh = i
            if random.uniform(0, 1) < pNS:
                print('    <vehicle id="%i" type="type1" route="NS" depart="%i" departSpeed="13.89" />' % (
                    vehNr, i), file=fd)
                vehNr += 1
                lastVeh = i
            if random.uniform(0, 1) < pEW:
                print('    <vehicle id="%i" type="type1" route="EW" depart="%i" departSpeed="13.89" />' % (
                    vehNr, i), file=fd)
                vehNr += 1
                lastVeh = i
            if random.uniform(0, 1) < pSN:
                print('    <vehicle id="%i" type="type1" route="SN" depart="%i" departSpeed="13.89" />' % (
                    vehNr, i), file=fd)
                vehNr += 1
                lastVeh = i
        print("</routes>", file=fd)
def patchTLSType(ifile, itype, ofile, otype):
    """Copy ifile to ofile line by line, replacing itype with otype.

    Used to instantiate the additional-file template for a concrete
    tls-type (the template contains a %tls_type% placeholder).

    Args:
        ifile (str): path of the template file to read.
        itype (str): substring to replace (e.g. '%tls_type%').
        ofile (str): path of the output file to write.
        otype (str): replacement text (the concrete tls type).
    """
    # Context managers guarantee both handles are closed even if an I/O
    # error occurs mid-copy (the original open()/close() pairs could leak).
    with open(ifile) as fdi, open(ofile, "w") as fdo:
        for line in fdi:
            fdo.write(line.replace(itype, otype))
def main():
    """Sweep demand levels and tls algorithms with one SUMO run each.

    For every (flow1, flow2) combination from flow1def/flow2def and every
    tls type in ``types``, generate demand, patch the additional-file
    template, run SUMO, and move the per-run outputs into results/.
    """
    # Narrowed from a bare "except:": only "directory already exists"
    # style failures (OSError) should be ignored; a bare except would
    # also swallow KeyboardInterrupt and genuine bugs.
    try:
        os.mkdir("results")
    except OSError:
        pass
    try:
        os.mkdir("gfx")
    except OSError:
        pass
    # locate the SUMO installation; environment variables take precedence
    sumoHome = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
    if "SUMO_HOME" in os.environ:
        sumoHome = os.environ["SUMO_HOME"]
    sumo = os.environ.get(
        "SUMO_BINARY", os.path.join(sumoHome, 'bin', 'sumo'))
    assert(sumo)
    for f1 in range(int(flow1def[0]), int(flow1def[1]), int(flow1def[2])):
        pWE = float(f1) / 3600  # [veh/s]
        pEW = pWE
        for f2 in range(int(flow2def[0]), int(flow2def[1]), int(flow2def[2])):
            pNS = float(f2) / 3600  # [veh/s]
            pSN = pNS
            print("Computing for %s<->%s" % (f1, f2))
            buildDemand(simSteps, pWE, pEW, pNS, pSN)
            for t in types:
                print(" for tls-type %s" % t)
                patchTLSType('input_additional_template.add.xml',
                             '%tls_type%', 'input_additional.add.xml', t)
                args = [sumo,
                        '--no-step-log',
                        #'--no-duration-log',
                        #'--verbose',
                        #'--duration-log.statistics',
                        '--net-file', 'input_net.net.xml',
                        '--route-files', 'input_routes.rou.xml',
                        '--additional-files', 'input_additional.add.xml',
                        '--tripinfo-output', 'results/tripinfos_%s_%s_%s.xml' % (
                            t, f1, f2),
                        '--summary-output', 'results/summary_%s_%s_%s.xml' % (
                            t, f1, f2),
                        '--device.emissions.probability', '1',
                        '--queue-output', 'results/queue_%s_%s_%s.xml' % (
                            t, f1, f2),
                        ]
                # NOTE(review): the return code is captured but never
                # checked, so a failed SUMO run goes unnoticed until the
                # shutil.move calls below fail — consider validating it.
                retCode = subprocess.call(args)
                # tag the fixed-name detector outputs with this run's params
                shutil.move(
                    "results/e2_output.xml", "results/e2_output_%s_%s_%s.xml" % (t, f1, f2))
                shutil.move("results/e2_tl0_output.xml",
                            "results/e2_tl0_output_%s_%s_%s.xml" % (t, f1, f2))
                shutil.move("results/edgeData_3600.xml",
                            "results/edgeData_3600_%s_%s_%s.xml" % (t, f1, f2))
                shutil.move("results/laneData_3600.xml",
                            "results/laneData_3600_%s_%s_%s.xml" % (t, f1, f2))
                shutil.move("results/edgesEmissions_3600.xml",
                            "results/edgesEmissions_3600_%s_%s_%s.xml" % (t, f1, f2))
                shutil.move("results/lanesEmissions_3600.xml",
                            "results/lanesEmissions_3600_%s_%s_%s.xml" % (t, f1, f2))
                shutil.move(
                    "results/TLSStates.xml", "results/TLSStates_%s_%s_%s.xml" % (t, f1, f2))
                shutil.move("results/TLSSwitchTimes.xml",
                            "results/TLSSwitchTimes_%s_%s_%s.xml" % (t, f1, f2))
                shutil.move("results/TLSSwitchStates.xml",
                            "results/TLSSwitchStates_%s_%s_%s.xml" % (t, f1, f2))
# Script entry point: run the full parameter sweep.
if __name__ == "__main__":
    main()
|
<filename>Image_cls/Original/experiments/classification.py
import os
from pprint import pprint
import torch
import torch.optim as optim
import passport_generator
from dataset import prepare_dataset, prepare_wm
from experiments.base import Experiment
from experiments.trainer import Trainer, Tester
from experiments.trainer_private import TesterPrivate
from experiments.utils import construct_passport_kwargs
from models.alexnet_normal import AlexNetNormal
from models.alexnet_passport import AlexNetPassport
from models.layers.conv2d import ConvBlock
from models.layers.passportconv2d import PassportBlock
from models.resnet_normal import ResNet18
from models.resnet_passport import ResNet18Passport
class ClassificationExperiment(Experiment):
    """Image-classification experiment with optional passport layers and
    backdoor (trigger-set) watermarking.

    All configuration (dataset, arch, lr_config, passport/backdoor flags,
    ...) comes from the base ``Experiment`` via ``args``.
    """
    def __init__(self, args):
        """Build datasets, model, optimizer/scheduler and trainer from args."""
        super().__init__(args)
        # mnist is greyscale; the other supported datasets are RGB
        self.in_channels = 1 if self.dataset == 'mnist' else 3
        self.num_classes = {
            'cifar10': 10,
            'cifar100': 100,
            'caltech-101': 101,
            'caltech-256': 256
        }[self.dataset]
        # CIFAR-style normalization constants; NOTE(review): not used in
        # this class directly — presumably consumed elsewhere, confirm
        self.mean = torch.tensor([0.4914, 0.4822, 0.4465])
        self.std = torch.tensor([0.2023, 0.1994, 0.2010])
        self.train_data, self.valid_data = prepare_dataset(self.args)
        self.wm_data = None
        # passport keys come from the trigger set or from validation data
        if self.use_trigger_as_passport:
            self.passport_data = prepare_wm('data/trigger_set/pics')
        else:
            self.passport_data = self.valid_data
        if self.train_backdoor:
            self.wm_data = prepare_wm('data/trigger_set/pics')
        self.construct_model()
        optimizer = optim.SGD(self.model.parameters(),
                              lr=self.lr,
                              momentum=0.9,
                              weight_decay=0.0005)
        if len(self.lr_config[self.lr_config['type']]) != 0:  # if no specify steps, then scheduler = None
            scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                       self.lr_config[self.lr_config['type']],
                                                       self.lr_config['gamma'])
        else:
            scheduler = None
        self.trainer = Trainer(self.model, optimizer, scheduler, self.device)
        if self.is_tl:
            self.finetune_load()
        else:
            self.makedirs_or_load()
    def construct_model(self):
        """Instantiate self.model (passport or normal variant of the arch)."""
        def setup_keys():
            # For non-random keys, a pretrained (non-passport) model is
            # needed as the source when generating passport keys.
            if self.key_type != 'random':
                if self.arch == 'alexnet':
                    pretrained_model = AlexNetNormal(self.in_channels, self.num_classes)
                else:
                    pretrained_model = ResNet18(num_classes=self.num_classes,
                                                norm_type=self.norm_type)
                pretrained_model.load_state_dict(torch.load(self.pretrained_path))
                pretrained_model = pretrained_model.to(self.device)
                self.setup_keys(pretrained_model)
        def load_pretrained():
            # warm-start the normal model from a checkpoint, if given
            if self.pretrained_path is not None:
                sd = torch.load(self.pretrained_path)
                model.load_state_dict(sd)
        if self.train_passport:
            passport_kwargs = construct_passport_kwargs(self)
            self.passport_kwargs = passport_kwargs
            print('Loading arch: ' + self.arch)
            if self.arch == 'alexnet':
                model = AlexNetPassport(self.in_channels, self.num_classes, passport_kwargs)
            else:
                model = ResNet18Passport(num_classes=self.num_classes,
                                         passport_kwargs=passport_kwargs)
            self.model = model.to(self.device)
            setup_keys()
        else:  # train normally or train backdoor
            print('Loading arch: ' + self.arch)
            if self.arch == 'alexnet':
                model = AlexNetNormal(self.in_channels, self.num_classes, self.norm_type)
            else:
                model = ResNet18(num_classes=self.num_classes, norm_type=self.norm_type)
            load_pretrained()
            self.model = model.to(self.device)
        pprint(self.model)
    def setup_keys(self, pretrained_model):
        """Draw passport key images and install them into self.model."""
        if self.key_type != 'random':
            n = 1 if self.key_type == 'image' else 20  # any number will do
            key_x, x_inds = passport_generator.get_key(self.passport_data, n)
            key_x = key_x.to(self.device)
            key_y, y_inds = passport_generator.get_key(self.passport_data, n)
            key_y = key_y.to(self.device)
            passport_generator.set_key(pretrained_model, self.model,
                                       key_x, key_y)
    def transfer_learning(self):
        """Fine-tune a non-passport clone of self.model on the TL dataset.

        The clone is trained; after every epoch its weights are copied
        back into self.model so watermark/passport metrics can be
        evaluated on the (attacked) original architecture.
        """
        if not self.is_tl:
            raise Exception('Please run with --transfer-learning')
        self.num_classes = {
            'cifar10': 10,
            'cifar100': 100,
            'caltech-101': 101,
            'caltech-256': 256
        }[self.tl_dataset]
        ##### load clone model #####
        print('Loading clone model')
        if self.arch == 'alexnet':
            clone_model = AlexNetNormal(self.in_channels,
                                        self.num_classes,
                                        self.norm_type)
        else:
            clone_model = ResNet18(num_classes=self.num_classes,
                                   norm_type=self.norm_type)
        print("CLONE MODEL", clone_model)
        ##### load / reset weights of passport layers for clone model #####
        # Passport models have extra/renamed parameters, so a direct
        # state-dict copy can fail; fall back to per-module copying with
        # strict=False plus explicit bn scale/bias reconstruction.
        try:
            clone_model.load_state_dict(self.model.state_dict())
        except:
            print('Having problem to direct load state dict, loading it manually')
            if self.arch == 'alexnet':
                for clone_m, self_m in zip(clone_model.features, self.model.features):
                    try:
                        clone_m.load_state_dict(self_m.state_dict())
                    except:
                        print('Having problem to load state dict usually caused by missing keys, load by strict=False')
                        clone_m.load_state_dict(self_m.state_dict(), False)  # load conv weight, bn running mean
                        clone_m.bn.weight.data.copy_(self_m.get_scale().detach().view(-1))
                        clone_m.bn.bias.data.copy_(self_m.get_bias().detach().view(-1))
            else:
                passport_settings = self.passport_config
                for l_key in passport_settings:  # layer
                    if isinstance(passport_settings[l_key], dict):
                        for i in passport_settings[l_key]:  # sequential
                            for m_key in passport_settings[l_key][i]:  # convblock
                                clone_m = clone_model.__getattr__(l_key)[int(i)].__getattr__(m_key)  # type: ConvBlock
                                self_m = self.model.__getattr__(l_key)[int(i)].__getattr__(m_key)  # type: PassportBlock
                                try:
                                    clone_m.load_state_dict(self_m.state_dict())
                                except:
                                    print(f'{l_key}.{i}.{m_key} cannot load state dict directly')
                                    clone_m.load_state_dict(self_m.state_dict(), False)
                                    clone_m.bn.weight.data.copy_(self_m.get_scale().detach().view(-1))
                                    clone_m.bn.bias.data.copy_(self_m.get_bias().detach().view(-1))
                    else:
                        clone_m = clone_model.__getattr__(l_key)
                        self_m = self.model.__getattr__(l_key)
                        try:
                            clone_m.load_state_dict(self_m.state_dict())
                        except:
                            print(f'{l_key} cannot load state dict directly')
                            clone_m.load_state_dict(self_m.state_dict(), False)
                            clone_m.bn.weight.data.copy_(self_m.get_scale().detach().view(-1))
                            clone_m.bn.bias.data.copy_(self_m.get_bias().detach().view(-1))
        clone_model.to(self.device)
        print('Loaded clone model')
        ##### dataset is created at constructor #####
        ##### tl scheme setup #####
        if self.tl_scheme == 'rtal':
            # rtal = reset last layer + train all layer
            # ftal = train all layer
            try:
                clone_model.classifier.reset_parameters()
            except:
                clone_model.linear.reset_parameters()
        ##### optimizer setup #####
        optimizer = optim.SGD(clone_model.parameters(),
                              lr=self.lr,
                              momentum=0.9,
                              weight_decay=0.0005)
        if len(self.lr_config[self.lr_config['type']]) != 0:  # if no specify steps, then scheduler = None
            scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                       self.lr_config[self.lr_config['type']],
                                                       self.lr_config['gamma'])
        else:
            scheduler = None
        self.trainer = Trainer(clone_model,
                               optimizer,
                               scheduler,
                               self.device)
        # testers evaluate the ORIGINAL model (with TL weights copied in)
        tester = Tester(self.model,
                        self.device)
        tester_passport = TesterPrivate(self.model,
                                        self.device)
        history_file = os.path.join(self.logdir, 'history.csv')
        best_file = os.path.join(self.logdir, 'best.txt')
        first = True
        best_acc = 0
        best_ep = 1
        for ep in range(1, self.epochs + 1):
            train_metrics = self.trainer.train(ep, self.train_data)
            valid_metrics = self.trainer.test(self.valid_data)
            ##### load transfer learning weights from clone model #####
            # reverse direction of the copy above: clone -> self.model
            try:
                self.model.load_state_dict(clone_model.state_dict())
            except:
                if self.arch == 'alexnet':
                    for clone_m, self_m in zip(clone_model.features, self.model.features):
                        try:
                            self_m.load_state_dict(clone_m.state_dict())
                        except:
                            self_m.load_state_dict(clone_m.state_dict(), False)
                else:
                    passport_settings = self.passport_config
                    for l_key in passport_settings:  # layer
                        if isinstance(passport_settings[l_key], dict):
                            for i in passport_settings[l_key]:  # sequential
                                for m_key in passport_settings[l_key][i]:  # convblock
                                    clone_m = clone_model.__getattr__(l_key)[int(i)].__getattr__(m_key)
                                    self_m = self.model.__getattr__(l_key)[int(i)].__getattr__(m_key)
                                    try:
                                        self_m.load_state_dict(clone_m.state_dict())
                                    except:
                                        self_m.load_state_dict(clone_m.state_dict(), False)
                        else:
                            clone_m = clone_model.__getattr__(l_key)
                            self_m = self.model.__getattr__(l_key)
                            try:
                                self_m.load_state_dict(clone_m.state_dict())
                            except:
                                self_m.load_state_dict(clone_m.state_dict(), False)
            clone_model.to(self.device)
            self.model.to(self.device)
            # watermark / passport metrics on the attacked original model
            wm_metrics = {}
            if self.train_backdoor:
                wm_metrics = tester.test(self.wm_data, 'WM Result')
            if self.train_passport:
                res = tester_passport.test_signature()
                for key in res: wm_metrics['passport_' + key] = res[key]
            metrics = {}
            for key in train_metrics: metrics[f'train_{key}'] = train_metrics[key]
            for key in valid_metrics: metrics[f'valid_{key}'] = valid_metrics[key]
            for key in wm_metrics: metrics[f'old_wm_{key}'] = wm_metrics[key]
            self.append_history(history_file, metrics, first)
            first = False
            if self.save_interval and ep % self.save_interval == 0:
                self.save_model(f'epoch-{ep}.pth')
                self.save_model(f'tl-epoch-{ep}.pth', clone_model)
            if best_acc < metrics['valid_acc']:
                print(f'Found best at epoch {ep}\n')
                best_acc = metrics['valid_acc']
                self.save_model('best.pth')
                self.save_model('tl-best.pth', clone_model)
                best_ep = ep
            self.save_last_model()
        # record the best validation accuracy and epoch for this run
        f = open(best_file,'a')
        f.write(str(best_acc) + "\n")
        f.write("best epoch: %s"%str(best_ep) + '\n')
        f.flush()
    def training(self):
        """Standard training loop with history logging and checkpointing."""
        best_acc = float('-inf')
        history_file = os.path.join(self.logdir, 'history.csv')
        first = True
        best_file = os.path.join(self.logdir, 'best.txt')
        best_ep = 1
        if self.save_interval > 0:
            self.save_model('epoch-0.pth')
        for ep in range(1, self.epochs + 1):
            train_metrics = self.trainer.train(ep, self.train_data, self.wm_data)
            print(f'Sign Detection Accuracy: {train_metrics["sign_acc"] * 100:6.4f}')
            valid_metrics = self.trainer.test(self.valid_data, 'Testing Result')
            wm_metrics = {}
            if self.train_backdoor:
                wm_metrics = self.trainer.test(self.wm_data, 'WM Result')
            # flatten all metric dicts into one prefixed row for the CSV
            metrics = {}
            for key in train_metrics: metrics[f'train_{key}'] = train_metrics[key]
            for key in valid_metrics: metrics[f'valid_{key}'] = valid_metrics[key]
            for key in wm_metrics: metrics[f'wm_{key}'] = wm_metrics[key]
            self.append_history(history_file, metrics, first)
            first = False
            if self.save_interval and ep % self.save_interval == 0:
                self.save_model(f'epoch-{ep}.pth')
            if best_acc < metrics['valid_acc']:
                print(f'Found best at epoch {ep}\n')
                best_acc = metrics['valid_acc']
                self.save_model('best.pth')
                best_ep = ep
            self.save_last_model()
        # record the best validation accuracy and epoch for this run
        f = open(best_file,'a')
        f.write(str(best_acc) + '\n')
        f.write("best epoch: %s"%str(best_ep) + '\n')
        f.flush()
    def evaluate(self):
        """Run a single evaluation pass over the validation set."""
        self.trainer.test(self.valid_data)
|
"""
Testing rebin histogram values.
"""
import numpy as np
from numpy.random import uniform
from numpy.testing import assert_allclose
from scipy.interpolate import splrep, splint
import uncertainties.unumpy as unp
import rebin
from bounded_splines import BoundedUnivariateSpline, BoundedRectBivariateSpline
# ---------------------------------------------------------------------------- #
# Tests for piecewise continuous rebinning
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def test_x2_same_as_x1():
    """
    x2 same as x1
    """
    nbins = 6
    # identical old and new bin edges
    edges_old = np.linspace(0., 1., nbins + 1)
    edges_new = np.linspace(0., 1., nbins + 1)
    # arbitrary smooth distribution over the old bins
    counts_old = 1. + np.sin(edges_old[:-1] * np.pi) / np.ediff1d(edges_old)
    # rebinning onto the very same edges must reproduce the input exactly
    counts_new = rebin.rebin(edges_old, counts_old, edges_new,
                             interp_kind='piecewise_constant')
    assert_allclose(counts_new, counts_old)
# ---------------------------------------------------------------------------- #
def test_x2_surrounds_x1():
    """
    x2 range surrounds x1 range
    """
    # 2 old bins on [0, 1]; 3 new bins on [-0.1, 1.2] extending past both ends
    edges_old = np.linspace(0., 1., 3)
    edges_new = np.linspace(-0.1, 1.2, 4)
    # arbitrary smooth distribution over the old bins
    counts_old = 1. + np.sin(edges_old[:-1] * np.pi) / np.ediff1d(edges_old)
    counts_new = rebin.rebin(edges_old, counts_old, edges_new,
                             interp_kind='piecewise_constant')
    # expected result computed directly from the old per-bin averages
    avg_old = counts_old / np.ediff1d(edges_old)
    expected = [avg_old[0] * (edges_new[1] - 0.),
                avg_old[0] * (edges_old[1] - edges_new[1])
                + avg_old[1] * (edges_new[2] - edges_old[1]),
                avg_old[1] * (edges_old[-1] - edges_new[-2])]
    assert_allclose(counts_new, expected)
    # x2 covers all of x1, so total counts are conserved
    assert_allclose(counts_new.sum(), counts_old.sum())
# ---------------------------------------------------------------------------- #
def test_x2_lower_than_x1():
    """
    x2 range is completely lower than x1 range
    """
    # 2 old bins on [0, 1]; 3 new bins on [-0.2, 0], entirely below
    edges_old = np.linspace(0., 1., 3)
    edges_new = np.linspace(-0.2, -0.0, 4)
    # arbitrary smooth distribution over the old bins
    counts_old = 1. + np.sin(edges_old[:-1] * np.pi) / np.ediff1d(edges_old)
    counts_new = rebin.rebin(edges_old, counts_old, edges_new,
                             interp_kind='piecewise_constant')
    # no overlap with the old range -> all-zero output
    assert_allclose(counts_new, [0., 0., 0.])
    assert_allclose(counts_new.sum(), 0.)
# ---------------------------------------------------------------------------- #
def test_x2_above_x1():
    """
    x2 range is completely above x1 range
    """
    # 20 old bins on [0, 1]; 30 new bins on [1.2, 10], entirely above
    edges_old = np.linspace(0., 1., 21)
    edges_new = np.linspace(1.2, 10., 31)
    # arbitrary smooth distribution over the old bins
    counts_old = 1. + np.sin(edges_old[:-1] * np.pi) / np.ediff1d(edges_old)
    counts_new = rebin.rebin(edges_old, counts_old, edges_new,
                             interp_kind='piecewise_constant')
    # no overlap with the old range -> every new bin is empty
    assert_allclose(counts_new, np.zeros((30,)))
    assert_allclose(counts_new.sum(), 0.)
# ---------------------------------------------------------------------------- #
def test_x2_in_x1():
    """
    x2 only has one bin, and it is surrounded by x1 range
    """
    # 4 old bins on [0, 1]; a single new bin [0.3, 0.65] inside them
    edges_old = np.linspace(0., 1., 5)
    edges_new = np.linspace(0.3, 0.65, 2)
    # arbitrary smooth distribution over the old bins
    counts_old = 1. + np.sin(edges_old[:-1] * np.pi) / np.ediff1d(edges_old)
    counts_new = rebin.rebin(edges_old, counts_old, edges_new,
                             interp_kind='piecewise_constant')
    # the new bin partially overlaps old bins 1 and 2; sum the two pieces
    avg_old = counts_old / np.ediff1d(edges_old)
    expected = (avg_old[1] * (edges_old[2] - edges_new[0])
                + avg_old[2] * (edges_new[1] - edges_old[2]))
    assert_allclose(counts_new, expected)
# ---------------------------------------------------------------------------- #
def test_x2_in_x1_2():
    """
    x2 has a couple of bins, each of which span more than one original bin
    """
    # old size
    m = 10
    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.array([0.25, 0.55, 0.75])
    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
    # attach random relative uncertainties to the old bin contents
    y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))
    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
    # compute answer here to check rebin: each new bin is a weighted sum
    # of whole old bins plus half of the two straddled edge bins
    y_new_here = unp.uarray(np.zeros(2), np.zeros(2))
    y_new_here[0] = 0.5 * y_old[2] + y_old[3] + y_old[4] + 0.5 * y_old[5]
    y_new_here[1] = 0.5 * y_old[5] + y_old[6] + 0.5 * y_old[7]
    # mean (nominal value) comparison
    assert_allclose(unp.nominal_values(y_new),
                    unp.nominal_values(y_new_here))
    # uncertainty (standard deviation) comparison
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_here))
# ---------------------------------------------------------------------------- #
def test_y1_uncertainties():
    """
    x2 range surrounds x1 range, y1 has uncertainties
    """
    # old size
    m = 2
    # new size
    n = 3
    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.linspace(-0.1, 1.2, n+1)
    # some arbitrary distribution
    y_old = 1. + np.sin(x_old[:-1]*np.pi) / np.ediff1d(x_old)
    # with uncertainties
    y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))
    # rebin
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind='piecewise_constant')
    # compute answer here to check rebin
    y_old_ave = y_old / np.ediff1d(x_old)
    y_new_here = np.array(
        [y_old_ave[0]*(x_new[1]-0.),
         y_old_ave[0]*(x_old[1]-x_new[1]) + y_old_ave[1]*(x_new[2]-x_old[1]),
         y_old_ave[1]*(x_old[-1]-x_new[-2])]
    )
    # mean or nominal value comparison
    assert_allclose(unp.nominal_values(y_new),
                    unp.nominal_values(y_new_here))
    # uncertainty (standard deviation) comparison
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_here))
    # totals agree since x2 covers all of x1
    assert_allclose(unp.nominal_values(y_new).sum(),
                    unp.nominal_values(y_new_here).sum())
# ---------------------------------------------------------------------------- #
# Tests for cubic-spline rebinning
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def test_x2_surrounds_x1_with_constant_distribution():
    """
    x2 domain completely surrounds x1 domain
    """
    # 20 old bins on [0, 1]; 30 new bins on [-0.5, 1.5]
    edges_old = np.linspace(0., 1., 21)
    edges_new = np.linspace(-0.5, 1.5, 31)
    # unit-valued bounded spline: bin integrals are just interval lengths
    flat = BoundedUnivariateSpline([0, .1, .2, 1], [1, 1, 1, 1], s=0.)
    counts_old = np.array([flat.integral(a, b)
                           for a, b in zip(edges_old[:-1], edges_old[1:])])
    expected = np.array([flat.integral(a, b)
                         for a, b in zip(edges_new[:-1], edges_new[1:])])
    # cubic-spline rebinning must reproduce the spline's own integrals
    counts_new = rebin.rebin(edges_old, counts_old, edges_new, interp_kind=3)
    assert_allclose(counts_new, expected)
# ---------------------------------------------------------------------------- #
def test_x2_left_overlap_x1_with_constant_distribution():
    """
    x2 domain overlaps x1 domain from the left
    """
    # 20 old bins on [0, 1]; 30 new bins on [-0.75, 0.45]
    edges_old = np.linspace(0., 1., 21)
    edges_new = np.linspace(-0.75, 0.45, 31)
    # unit-valued bounded spline: bin integrals are just interval lengths
    flat = BoundedUnivariateSpline([0, .1, .2, 1], [1, 1, 1, 1], s=0.)
    counts_old = np.array([flat.integral(a, b)
                           for a, b in zip(edges_old[:-1], edges_old[1:])])
    expected = np.array([flat.integral(a, b)
                         for a, b in zip(edges_new[:-1], edges_new[1:])])
    # cubic-spline rebinning must reproduce the spline's own integrals
    counts_new = rebin.rebin(edges_old, counts_old, edges_new, interp_kind=3)
    assert_allclose(counts_new, expected)
# ---------------------------------------------------------------------------- #
def test_x2_right_overlap_x1_with_constant_distribution():
    """
    x2 domain overlaps x1 domain from the right
    """
    # 20 old bins on [0, 1]; 30 new bins on [0.95, 1.05]
    edges_old = np.linspace(0., 1., 21)
    edges_new = np.linspace(0.95, 1.05, 31)
    # unit-valued bounded spline: bin integrals are just interval lengths
    flat = BoundedUnivariateSpline([0, .1, .2, 1], [1, 1, 1, 1], s=0.)
    counts_old = np.array([flat.integral(a, b)
                           for a, b in zip(edges_old[:-1], edges_old[1:])])
    expected = np.array([flat.integral(a, b)
                         for a, b in zip(edges_new[:-1], edges_new[1:])])
    # cubic-spline rebinning must reproduce the spline's own integrals;
    # atol needed because several expected values are exactly zero
    counts_new = rebin.rebin(edges_old, counts_old, edges_new, interp_kind=3)
    assert_allclose(counts_new, expected, atol=1e-15)
# ---------------------------------------------------------------------------- #
def test_x1_surrounds_x2_with_constant_distribution():
    """
    x1 domain surrounds x2
    """
    # 20 old bins on [0, 1]; 30 new bins on the interior [0.05, 0.26]
    edges_old = np.linspace(0., 1., 21)
    edges_new = np.linspace(0.05, 0.26, 31)
    # unit-valued bounded spline: bin integrals are just interval lengths
    flat = BoundedUnivariateSpline([0, .1, .2, 1], [1, 1, 1, 1], s=0.)
    counts_old = np.array([flat.integral(a, b)
                           for a, b in zip(edges_old[:-1], edges_old[1:])])
    expected = np.array([flat.integral(a, b)
                         for a, b in zip(edges_new[:-1], edges_new[1:])])
    # cubic-spline rebinning must reproduce the spline's own integrals
    counts_new = rebin.rebin(edges_old, counts_old, edges_new, interp_kind=3)
    assert_allclose(counts_new, expected)
# ---------------------------------------------------------------------------- #
def test_x2_surrounds_x1_sine_spline():
    """
    new bin edges start below the old domain and end inside it; the
    reference result is assembled by hand from per-subbin integrals of the
    cubic spline through a sine-modulated histogram
    """
    # old size
    m = 5
    # new size
    n = 6
    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])
    # union of the new edges with the old edges they straddle
    subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6, 0.73])
    y_old = 1.+np.sin(x_old[:-1]*np.pi)
    # compute spline ----------------------------------
    # spline knots: bin midpoints, padded with the outer edges so the fit
    # covers the whole old domain
    x_mids = x_old[:-1] + 0.5*np.ediff1d(x_old)
    xx = np.hstack([x_old[0], x_mids, x_old[-1]])
    yy = np.hstack([y_old[0], y_old, y_old[-1]])
    # build spline
    spl = splrep(xx, yy)
    area_old = np.array(
        [ splint(x_old[i],x_old[i+1], spl) for i in range(m) ])
    # computing subbin areas
    area_subbins = np.zeros((subbins.size-1,))
    for i in range(area_subbins.size):
        a, b = subbins[i:i+2]
        # clip each subbin to the support of the old grid; subbins entirely
        # outside keep zero area
        a = max([a,x_old[0]])
        b = min([b,x_old[-1]])
        if b>a:
            area_subbins[i] = splint(a, b, spl)
    # summing subbin contributions in y_new_ref: each old-bin count is
    # redistributed in proportion to the spline area of the covered subbin
    y_new_ref = np.zeros((x_new.size-1,))
    y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
    y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
    y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
    y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]
    y_new_ref[5] = y_old[1] * area_subbins[6] / area_old[1]
    y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
    y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]
    # call rebin function
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)
    assert_allclose(y_new, y_new_ref)
# ---------------------------------------------------------------------------- #
def test_y1_uncertainties_spline_with_constant_distribution():
    """
    cubic-spline rebinning with uncertainties attached to y1; both the
    nominal values and the propagated standard deviations are checked
    against a hand-built reference
    """
    # old size
    m = 5
    # new size
    n = 6
    # bin edges
    x_old = np.linspace(0., 1., m+1)
    x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])
    # union of the new edges with the old edges they straddle
    subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6, 0.73])
    y_old = 1.+np.sin(x_old[:-1]*np.pi)
    # compute spline ----------------------------------
    x_mids = x_old[:-1] + 0.5*np.ediff1d(x_old)
    xx = np.hstack([x_old[0], x_mids, x_old[-1]])
    yy = np.hstack([y_old[0], y_old, y_old[-1]])
    # build spline
    spl = splrep(xx, yy)
    area_old = np.array(
        [ splint(x_old[i],x_old[i+1], spl) for i in range(m) ])
    # with uncertainties
    y_old = unp.uarray(y_old, 0.1*y_old*uniform((m,)))
    # computing subbin areas
    area_subbins = np.zeros((subbins.size-1,))
    for i in range(area_subbins.size):
        a, b = subbins[i:i+2]
        # clip each subbin to the support of the old grid
        a = max([a,x_old[0]])
        b = min([b,x_old[-1]])
        if b>a:
            area_subbins[i] = splint(a, b, spl)
    # summing subbin contributions in y_new_ref
    a = np.zeros((x_new.size-1,))
    y_new_ref = unp.uarray(a,a)
    y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
    y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
    y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
    y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]
    y_new_ref[5] = y_old[1] * area_subbins[6] / area_old[1]
    y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
    y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]
    # call rebin function
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)
    # mean or nominal value comparison
    assert_allclose(unp.nominal_values(y_new),
                    unp.nominal_values(y_new_ref))
    # standard-deviation comparison
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_ref))
# ---------------------------------------------------------------------------- #
# Tests for 2d rebinning
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def test_2d_same():
    """
    x1, y1 == x2, y2 implies z1 == z2
    2d
    """
    rows, cols = 20, 30
    x_edges = np.linspace(0., 1., rows + 1)
    y_edges = np.linspace(-0.5, 1.5, cols + 1)
    z_in = np.random.random((rows, cols))
    # rebinning onto the identical grid must reproduce the input
    z_out = rebin.rebin2d(x_edges, y_edges, z_in, x_edges, y_edges)
    assert_allclose(z_in, z_out)
# ---------------------------------------------------------------------------- #
def test_2d_constant_distribution():
    """
    various new domains with a constant underlying distribution
    2d
    """
    # old size
    m = 8
    n = 11
    # new size
    p = 5
    q = 14
    # target rectangles (x_lo, x_hi, y_lo, y_hi) covering all overlap cases:
    # inside, partial overlap on either axis, surrounding, and disjoint
    new_bounds = [ (0., 1., -1.5, 1.7),
                   (0., 1., -1.5, 0.7),
                   (0., 1., -1.5, -0.7),
                   (-1., 1.5, -1.5, 1.7),
                   (-1., 0.5, -1., 0.5),
                   (0.1, 0.6, 0.1, 0.5),
                   (0.01, 0.02, -10.0, 20.7)]
    for (a,b,c,d) in new_bounds:
        # bin edges
        x_old = np.linspace(0., 1., m+1)
        y_old = np.linspace(-0.5, 1.5, n+1)
        x_new = np.linspace(a, b, p+1)
        y_new = np.linspace(c, d, q+1)
        # constant spline
        z_old = np.ones((m+1,n+1))
        mms_spline = BoundedRectBivariateSpline(x_old, y_old, z_old, s=0.)
        # old-bin contents: exact integrals of the constant spline per cell
        z_old = np.zeros((m,n))
        for i in range(m):
            for j in range(n):
                z_old[i,j] = mms_spline.integral(x_old[i], x_old[i+1],
                                                 y_old[j], y_old[j+1])
        # reference: exact integrals over the new cells
        z_new_mms = np.zeros((p,q))
        for i in range(p):
            for j in range(q):
                z_new_mms[i,j] = mms_spline.integral(x_new[i], x_new[i+1],
                                                     y_new[j], y_new[j+1])
        # rebin
        z_new = rebin.rebin2d(x_old, y_old, z_old, x_new, y_new)
        assert_allclose(z_new, z_new_mms)
def test_GH9():
    """
    regression test (GH issue 9): piecewise-constant rebinning where the new
    edges are offset from, and narrower than, the old ones
    """
    x_old = np.array([1.5, 2.5, 3.5, 4.5, 5.5, 6.5])
    y_old = np.array([10, 10, 10, 10, 10])
    x_new = np.array([1.7, 2.27332857, 2.84665714, 3.41998571,
                      3.99331429, 4.56664286])
    y_new = rebin.rebin(x_old, y_old, x_new)
    # constant density of 10 per unit width, new bin width ~0.57332857
    assert_allclose(y_new,
                    [5.7332857] * 5)
    # with uncertainties
    y_old = np.array([11., 12., 13., 14., 15.])
    y_old = unp.uarray(y_old, 0.1 * y_old)
    # rebin
    y_new = rebin.rebin_piecewise_constant(x_old, y_old, x_new)
    # compute answer here to check rebin
    y_old_ave = y_old / np.diff(x_old)
    # each new bin: sum of overlaps with the old bins, weighted by density
    y_new_here = np.array(
        [y_old_ave[0] * (x_new[1] - x_new[0]),
         y_old_ave[0] * (x_old[1] - x_new[1]) +
         y_old_ave[1] * (x_new[2] - x_old[1]),
         y_old_ave[1] * (x_new[3] - x_new[2]),
         y_old_ave[1] * (x_old[2] - x_new[3]) +
         y_old_ave[2] * (x_new[4] - x_old[2]),
         y_old_ave[3] * (x_new[5] - x_old[3]) +
         y_old_ave[2] * (x_old[3] - x_new[4])])
    # mean or nominal value comparison
    # NOTE(review): nominal-value check left disabled — presumably the
    # known-failing case from the GH issue; confirm before re-enabling
    # assert_allclose(unp.nominal_values(y_new),
    #                unp.nominal_values(y_new_here))
    # mean or nominal value comparison
    assert_allclose(unp.std_devs(y_new),
                    unp.std_devs(y_new_here))
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# NOTE(review): the shebang above is on the second line so it is inert.
# Python 2 script (print statements): extracts frames from negative-sample
# videos and (in the commented-out section) runs py-faster-rcnn hand
# detection on each frame, logging detections to a text file.
import os, sys
os.environ['GLOG_minloglevel'] = '3'  # silence caffe/glog output below FATAL-ish level
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.test import vis_detections
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
# Make sure that caffe is on the python path:
caffe_root = './caffe-fast-rcnn/'
os.chdir(caffe_root)
sys.path.insert(0, os.path.join(caffe_root, 'python'))
import caffe
import cv2
import argparse
from PIL import Image
# detector classes: background plus the single 'hand' class
CLASSES = ('__background__','hand')
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    # hard-coded local model paths; alternates kept commented for reference
    prototxt =  "/Users/momo/wkspace/caffe_space/detection/py-faster-rcnn/models/MMCV5S8/faster_rcnn_end2end/test.prototxt"
#    caffemodel = "/Users/momo/wkspace/caffe_space/detection/py-faster-rcnn/models/MMCV5S8/mmcv5stride8bn_iter_280000.caffemodel"
    caffemodel = "/Users/momo/wkspace/caffe_space/detection/py-faster-rcnn/models/MMCV5S8/mmcv5stride8bn128_neg01_iter_100000.caffemodel"
#    prototxt =  "/Users/momo/Desktop/gesture/from113/MMCV5_stride16/test.prototxt"
#    caffemodel = "/Users/momo/Desktop/sdk/momocv2_model/original_model/object_detect/mmcv5stride16_iter_5250000.caffemodel"
    caffe.set_mode_cpu()
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    print '\n\nLoaded network {:s}'.format(caffemodel)
    # test-time configuration: input scaling and detection thresholds
    cfg.TRAIN.IMAGES_LIST ="/Users/momo/wkspace/BC1D8DD0-85AD-11E8-982C-994D6EEB64BE20180712_L.jpg"
    cfg.TEST.SCALES = [144,]
    cfg.TEST.MAX_SIZE = 256
    cfg.DEDUP_BOXES = 1./8.
    CONF_THRESH = 0.98
    NMS_THRESH = 0.01
    # I/O locations: video source dir, frame dump dir, results dir
    fromDir = "//Volumes/song/testVideos/test4neg/momoLive/"
    oriDir = "/Volumes/song/testVideos/test4neg/oriPic/"
    readFile = open(fromDir + "../momoLive4neg.txt", "r")
    # results file name derives from the model name and confidence threshold
    retfilename = 'neg_'+caffemodel.split('.')[0].split('/')[-1] + '_' + str(CONF_THRESH).split('.')[-1]
    toDir = '/Users/momo/wkspace/caffe_space/detection/py-faster-rcnn/retTests/ret/' + retfilename
    if not os.path.isdir(toDir):
        os.makedirs(toDir)
    writeFile = open(toDir+"/../"+retfilename+".txt", "w")
    filelists = readFile.readlines()
    print filelists
    for filename in filelists:
        print "filename:", filename
        # each list line starts with the video base name (no extension)
        video_name = filename.split()[0]
        video = cv2.VideoCapture(fromDir + video_name + '.mp4')
        success, im = video.read()
        numFrame = 0
        while success:
            numFrame += 1
            # dump every frame as <video>_f<N>.jpg; detection itself is
            # currently disabled (commented out below)
            savename = filename.split()[0] + '_f' + str(numFrame) + '.jpg'
            cv2.imwrite(oriDir+savename, im)
#            scores, boxes = im_detect(net, im)
#            dets = np.hstack((boxes, scores)).astype(np.float32)
#
#            keep = nms(dets, NMS_THRESH)
#            dets = dets[keep, :]
#            inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
#
#            nhand = 0
#            for i in xrange(dets.shape[0]):
#                if (dets[i][4] > CONF_THRESH):
#                    nhand += 1
#
#            if nhand > 0:
#                writeFile.write(savename + ' ' + str(nhand) + ' ')
#                for i in xrange(dets.shape[0]):
#                    if (dets[i][4] > CONF_THRESH):
#                        writeFile.write('hand '  \
#                                        + str(int(dets[i][0])) + ' '  \
#                                        + str(int(dets[i][1])) + ' ' \
#                                        + str(int(dets[i][2])) + ' ' \
#                                        + str(int(dets[i][3])) + ' ')
#                writeFile.write('\n')
#                for i in xrange(dets.shape[0]):
#                    if (dets[i][4] > CONF_THRESH):
#                        cv2.rectangle(im, (dets[i][0], dets[i][1]), (dets[i][2], dets[i][3]), (255, 0, 0), 1)
#                        cv2.putText(im, str(dets[i][4]), (dets[i][0], dets[i][1]), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
#                cv2.imwrite(toDir + savename, im)
#
#            cv2.imshow("negs", im)
#            cv2.waitKey(1)
            success, im = video.read()
    writeFile.close()
    readFile.close()
<reponame>bdevl/PGMCPC
import numpy as np
import torch
from fawkes.Expressions import FastRadialBasisFunction
from bottleneck.flux import FluxConstraintReducedOrderModel
import dolfin as df
class QuerryPoint(object):
    """A single query of the physics model.

    Bundles a log-transformed parameter vector ``x`` with boundary
    conditions ``bc`` and lazily assembles/caches the corresponding linear
    system (K, f) on first access.
    """
    def __init__(self, physics, x, bc):
        assert isinstance(x, np.ndarray)
        assert not isinstance(physics, dict)
        assert physics.Vc.dim() == x.size
        assert x.ndim == 1
        self._physics = physics
        self._x = x
        self._bc = bc
        # lazily assembled system matrix / right-hand side (see K, f)
        self._K = None
        self._f = None
    @property
    def physics(self):
        return self._physics
    @property
    def bc(self):
        # boundary conditions associated with this query point
        return self._bc
    @property
    def x(self):
        # beware of naming: this is log-transformed x
        return self._x
    @property
    def K(self):
        # system matrix; assembled and cached on first access
        if self._K is None:
            self._assemble_system()
        return self._K
    @property
    def f(self):
        # right-hand side; assembled and cached on first access
        if self._f is None:
            self._assemble_system()
        return self._f
    @property
    def dim_in(self):
        # dimensionality of the (log-transformed) parameter vector
        return self._x.size
    @property
    def dim_out(self):
        # dimensionality of the model output / solution
        return self._physics.dim_out
    def _assemble_system(self):
        # caching
        # note: x is stored log-transformed, hence np.exp before assembly
        self._K, self._f = self._physics.assemble_system(np.exp(self._x), bc=self._bc, only_free_dofs = True)
    def construct_querry_weak_galerkin(self, V):
        """Project the system onto test directions V; returns (V^T K, V^T f)."""
        assert V.shape[0] == self.K.shape[0]
        assert V.shape[0] == self.f.shape[0]
        Gamma = V.T @ self.K
        alpha = V.T @ self.f
        return Gamma, alpha
class QuerryPointEnsemble(object):
    """Ordered collection of QuerryPoint objects with tensor export helpers."""
    def __init__(self, QPs):
        self._QPs = QPs
    def X(self, dtype, device):
        """Stack the log-transformed parameter vectors into an (N, dim_in) tensor."""
        X = torch.zeros(len(self), self._QPs[0].dim_in, dtype=dtype, device=device)
        for n, qp in enumerate(self._QPs):
            X[n,:] = torch.tensor(qp.x, dtype=dtype, device=device)
        return X
    def __iter__(self):
        yield from self._QPs
    def __getitem__(self, item):
        return self._QPs[item]
    def __len__(self):
        return len(self._QPs)
    @property
    def dim_out(self):
        # output dimension; assumes all member query points share it
        return self._QPs[0].dim_out
    @property
    def N(self):
        # number of query points (alias for len(self))
        return len(self)
    @classmethod
    def FromDataSet(cls, dataset, physics):
        """Build an ensemble from a dataset exposing 'X_DG' (double tensor)
        and 'BCE' (per-sample boundary conditions) entries."""
        assert not isinstance(physics, dict)
        QPs = list()
        X_DG = dataset.get('X_DG')
        BCE = dataset.get('BCE')
        assert X_DG.dtype == torch.double
        for n in range(dataset.N):
            x = X_DG[n,:].detach().numpy().flatten()
            QPs.append(QuerryPoint(physics, x, BCE[n]))
        return cls(QPs)
class BaseSampler():
    """Abstract base for samplers that produce weak-Galerkin test directions
    for a single query point. Subclasses implement m, _sample/sample,
    precision_mask and is_constant.
    """
    def __init__(self, qp):
        self._qp = qp
    @property
    def m(self):
        # number of virtual observables produced — subclass responsibility
        raise NotImplementedError
    @property
    def qp(self):
        return self._qp
    @property
    def dim(self):
        # dimension of the test directions (rows of V)
        return self.qp.dim_out
    @property
    def type(self):
        # NOTE(review): relies on a subclass setting self._type; no subclass
        # visible in this module defines it — confirm before using
        return self._type.lower()
    def sample_V(self):
        """Return the raw matrix of test directions V (dim_out x m)."""
        return self._sample()
    def sample(self):
        # return projected system (Gamma, alpha) — subclass responsibility
        raise NotImplementedError
    @property
    def precision_mask(self):
        raise NotImplementedError
    @property
    def is_constant(self):
        raise NotImplementedError
    @property
    def fixed_precision(self):
        # negative mask entries flag non-learnable precision; fixed iff all are
        return np.all(self.precision_mask < 0)
    def __call__(self, *args, **kwargs):
        return self.sample(*args, **kwargs)
class ConstLengthScaleGenerator():
    """Callable that always yields the same, pre-configured length scale.

    Deterministic stand-in for stochastic length-scale generators.
    """
    def __init__(self, l):
        # fixed value handed back on every call
        self._length_scale = l
    def __call__(self):
        """Return the constant length scale."""
        return self._length_scale
class RadialBasisFunctionSampler(BaseSampler):
    """Sampler drawing randomly centred radial-basis-function test directions
    by interpolating a parametrized RBF expression on the FEM space.
    """
    def __init__(self, qp, l, N_aux):
        super().__init__(qp=qp)
        assert l is not None
        self._V = self._qp.physics.V
        self._free_dofs = self._qp.bc.free_dofs('fom')
        # symbolic RBF expression plus assignable handles for its centre r0
        # and length scale
        self._rbf, self._r0, self._l_handle = FastRadialBasisFunction(self._V.ufl_element())
        self._l_scale = l
        self._N = N_aux
    def _sample_rbf(self, only_free_dofs = True):
        # random centre drawn uniformly in [0,1]^2 — assumes a unit-square
        # domain; TODO confirm
        a = np.random.uniform()
        b = np.random.uniform()
        r0_ = df.Constant(( a ,b ))
        self._r0.assign(r0_)
        self._l_handle.assign(self._l_scale)
        vec = df.interpolate(self._rbf, self._V).vector().get_local()
        if only_free_dofs:
            return vec[self._free_dofs]
        else:
            return vec
    @property
    def m(self):
        # one virtual observable per auxiliary RBF direction
        return self._N
    @property
    def is_constant(self):
        # a fresh random centre is drawn on every call
        return False
    def illustrate(self):
        """Plot one sampled RBF over the full function space (debug aid)."""
        v = self._sample_rbf(only_free_dofs = False)
        f = df.Function(self._V)
        f.vector()[:] = v
        df.plot(f)
    @property
    def precision_mask(self):
        # negative entries: precision not learned for these observables
        return -np.ones(self.m)
    def _sample(self):
        # one sampled RBF per column of V
        V = np.zeros((self._qp.dim_out, self._N))
        for n in range(self._N):
            V[:,n] = self._sample_rbf(only_free_dofs=True)
        return V
    def sample(self):
        """Return (Gamma, alpha) projected onto freshly sampled RBFs."""
        V = self._sample()
        return self._qp.construct_querry_weak_galerkin(V)
class GaussianSketchingSampler(BaseSampler):
    """Sampler producing Gaussian random-sketching test directions.

    Each call draws N_aux i.i.d. standard-normal vectors (one per column of
    V) and projects the query's system onto them.
    """
    def __init__(self, qp, N_aux):
        super().__init__(qp=qp)
        # number of random test vectors per sample
        self._N = N_aux
    @property
    def m(self):
        """Number of virtual observables contributed by this sampler."""
        return self._N
    @property
    def is_constant(self):
        # a fresh random sketch is drawn on every call
        return False
    @property
    def precision_mask(self):
        # negative entries: precision not learned for these observables
        return -np.ones(self.m)
    def _sample(self):
        # draw column-by-column (preserves the RNG draw order per column)
        columns = [np.random.normal(0, 1, (self._qp.dim_out)) for _ in range(self._N)]
        return np.stack(columns, axis=1)
    def sample(self):
        """Return (Gamma, alpha) for a freshly drawn Gaussian sketch."""
        return self._qp.construct_querry_weak_galerkin(self._sample())
class ConcatenatedSamplers(BaseSampler):
    """Compose several samplers over the same query point into one.

    Aggregates member sizes and precision masks; on sampling, stacks the
    members' V matrices column-wise (resp. stacks Gamma row-wise and
    concatenates alpha).
    """
    def __init__(self, samplers):
        # the query point is exposed via the first member sampler on demand
        super().__init__(qp=None)
        self._samplers = samplers
    @property
    def qp(self):
        # bugfix: previously read `self.samplers[0].qp`, but the member list
        # is stored as `self._samplers` — accessing qp raised AttributeError
        return self._samplers[0].qp
    @property
    def m(self):
        """Total number of virtual observables across all member samplers."""
        return sum(v.m for v in self._samplers)
    @property
    def is_constant(self):
        # constant only if every member yields a deterministic system
        return all(v.is_constant for v in self._samplers)
    @property
    def precision_mask(self):
        return np.concatenate([sampler.precision_mask for sampler in self._samplers])
    def _sample(self):
        # stack each member's raw test directions column-wise
        return np.hstack([sampler.sample_V() for sampler in self._samplers])
    def sample(self):
        """Return stacked (Gamma, alpha) from all member samplers."""
        cache = [sampler() for sampler in self._samplers]
        return np.vstack([c[0] for c in cache]), np.concatenate([c[1] for c in cache])
class CoarseGrainedResidualSampler(BaseSampler):
    """Deterministic sampler projecting onto a fixed coarse-grained basis W."""
    def __init__(self, qp, W):
        super().__init__(qp=qp)
        self._V = W
        # projection is constant, so assemble (Gamma, alpha) once up front
        self._Gamma_cgr, self._alpha_cgr = self._qp.construct_querry_weak_galerkin(W)
    @property
    def m(self):
        # one virtual observable per column of W
        return self._alpha_cgr.size
    @property
    def is_constant(self):
        return True
    def _sample(self):
        # "sampling" just returns the fixed basis
        return self._V
    @property
    def precision_mask(self):
        # infinite precision
        return -np.ones(self.m)
    def sample(self):
        """Return the precomputed (Gamma, alpha)."""
        return self._Gamma_cgr, self._alpha_cgr
class FluxConstrainSampler(BaseSampler):
    """Deterministic sampler enforcing a precomputed flux constraint."""
    def __init__(self, qp, FluxConstrain):
        super().__init__(qp=qp)
        if not FluxConstrain.initialized:
            raise RuntimeError('Initialize flux-constrain first')
        # constraint system is fixed per query point; assemble once
        self._Gamma_fc, self._alpha_fc = FluxConstrain.assemble_reduced(np.exp(qp.x), qp.bc)
    @property
    def m(self):
        return self._alpha_fc.size
    @property
    def is_constant(self):
        return True
    @property
    def precision_mask(self):
        # NOTE(review): positive entries, unlike the -ones used by the other
        # samplers — presumably marks these precisions as learnable; confirm
        return np.ones(self.m)
    def sample(self):
        """Return the precomputed constraint system (Gamma, alpha)."""
        return self._Gamma_fc, self._alpha_fc
    def _sample(self):
        # no raw V matrix exists for this constraint-based sampler
        raise NotImplementedError
class LinearQuerry(object):
    """Holds the (possibly re-sampled) projected system (Gamma, alpha) of a
    query point as torch double tensors, for use by a VirtualObservable.
    """
    def __init__(self, querry_point, sampler, dtype, device):
        self._sampler = sampler
        self._querry_point = querry_point
        # populated by resample() during _init
        self._Gamma = None
        self._GammaTransposed = None
        self._alpha = None
        self._dtype = dtype
        self._device = device
        self._init()
    def _init(self):
        # force an initial draw so Gamma/alpha are never None after init
        self.resample(ForceResample=True)
    @property
    def Gamma(self):
        return self._Gamma
    @property
    def GammaTransposed(self):
        # cached transpose, kept in sync by the setters / resample
        return self._GammaTransposed
    @property
    def alpha(self):
        return self._alpha
    @property
    def device(self):
        return self._device
    @property
    def dtype(self):
        return self._dtype
    @Gamma.setter
    def Gamma(self, value):
        # all internal linear algebra is carried out in double precision
        assert value.dtype == torch.double
        self._Gamma = value
    @GammaTransposed.setter
    def GammaTransposed(self, value):
        assert value.dtype == torch.double
        self._GammaTransposed = value
    @alpha.setter
    def alpha(self, value):
        assert value.dtype == torch.double
        self._alpha = value
    @property
    def m(self):
        # number of 'virtual observables'
        return self.Gamma.shape[0]
    def resample(self, ForceResample = False):
        """Redraw (Gamma, alpha) from the sampler; constant samplers are
        only re-queried when ForceResample is True."""
        if not self._sampler.is_constant or ForceResample:
            Gamma, alpha = self._sampler()
            self.Gamma = torch.tensor(Gamma, dtype=torch.double, device=self.device)
            self.alpha = torch.tensor(alpha, dtype=torch.double, device=self.device)
            self.GammaTransposed = self.Gamma.t()
    @property
    def dim_out(self):
        # i.e. dimension of y. different solution?
        return self.Gamma.shape[1]
    @property
    def precision_mask(self):
        return self._sampler.precision_mask
    def temporary_set_galerkin_manually(self, V):
        """Override (Gamma, alpha) with a projection onto a caller-supplied
        basis V, bypassing the sampler (debug/experimentation aid)."""
        Gamma, alpha = self._querry_point.construct_querry_weak_galerkin(V)
        Gamma = torch.tensor(Gamma, dtype=torch.double, device=self.device)
        alpha = torch.tensor(alpha, dtype= torch.double, device=self.device)
        self.Gamma = Gamma
        self.GammaTransposed = Gamma.t()
        self.alpha = alpha
        # NOTE(review): self.precision is not read anywhere in this module —
        # confirm it is consumed elsewhere
        self.precision = (-1)*torch.ones(self.m, dtype=torch.double, device=self.device)
    def add_galerkin_sampler(self, sampler):
        # intentionally a no-op placeholder
        pass
    def add_flux_constraint(self):
        raise NotImplementedError
class QuerryEnsemble(object):
    """Collection of LinearQuerry objects sharing dtype/device settings."""
    def __init__(self, querries, dtype, device):
        self._querries = querries
        self._dtype = dtype
        self._device = device
    def __len__(self):
        return len(self._querries)
    @property
    def N(self):
        return len(self)
    @property
    def m(self):
        # total number of 'pieces of information'
        return sum([querry.m for querry in self._querries])
    @property
    def dtype(self):
        return self._dtype
    @property
    def device(self):
        return self._device
    @property
    def precision_mask(self):
        # assumes that they are identical
        return self._querries[0].precision_mask
    def resample(self, ForceResample = False):
        """Redraw every member query's (Gamma, alpha)."""
        for q in self:
            q.resample(ForceResample=ForceResample)
    @property
    def dim_out(self):
        # output dimension; assumes all members share it
        return self._querries[0].dim_out
    def __getitem__(self, item):
        return self._querries[item]
    def __iter__(self):
        yield from self._querries
    @classmethod
    def FromQuerryPointEnsemble(cls, QuerryPointEnsemble, physics, CGR, flux, N_gaussian, N_rbf, l_rbf = None, *, dtype=None, device=None):
        """Build a LinearQuerry per query point, composing the requested
        samplers (coarse-grained residual, flux constraint, Gaussian
        sketching, RBF) into one sampler per query.

        Note: here `physics` is a dict carrying at least the reduced basis
        'W' — unlike QuerryPoint, which requires a non-dict physics object.
        """
        assert isinstance(physics, dict)
        W = physics['W']
        if W is None:
            raise NotImplementedError('need to provide W (as numpy array)')
        # W must be a tall reduced basis (more rows than columns)
        assert W.shape[0] > W.shape[1]
        assert isinstance(W, np.ndarray)
        assert dtype is not None
        assert device is not None
        querries = list()
        if flux:
            # assemble necessary quantities to derive test functions for flux
            fluxconstr = FluxConstraintReducedOrderModel(physics)
            fluxconstr.create_measures()
        for qp in QuerryPointEnsemble:
            samplers = list()
            if CGR:
                samplers.append(CoarseGrainedResidualSampler(qp=qp, W=W))
            if flux:
                samplers.append(FluxConstrainSampler(qp, fluxconstr))
            if N_gaussian > 0:
                samplers.append(GaussianSketchingSampler(qp=qp, N_aux=N_gaussian))
            if N_rbf > 0:
                assert l_rbf is not None
                samplers.append(RadialBasisFunctionSampler(qp=qp, l = l_rbf, N_aux=N_rbf))
            # avoid the concatenation wrapper when only one sampler is active
            if len(samplers) == 1:
                sampler = samplers[0]
            else:
                sampler = ConcatenatedSamplers(samplers)
            querries.append(LinearQuerry(qp, sampler, dtype=dtype, device=device))
        return cls(querries, dtype=dtype, device=device)
class BaseVirtualObservable(object):
    """Abstract base for virtual observables attached to one query point.

    Subclasses provide the posterior summaries (mean, vars), the observable
    count m, and the update/resample machinery.
    """
    def __init__(self, querry_point, dtype, device):
        assert isinstance(querry_point, QuerryPoint)
        self._querry_point = querry_point
        self._dtype = dtype
        self._device = device
    @property
    def device(self):
        return self._device
    @property
    def dtype(self):
        return self._dtype
    @property
    def querry_point(self):
        return self._querry_point
    @property
    def m(self):
        # number of virtual observables — subclass responsibility
        raise NotImplementedError
    @property
    def d_y(self):
        # dimension of the latent solution y
        return self._querry_point.dim_out
    @property
    def mean(self):
        raise NotImplementedError
    @property
    def vars(self):
        raise NotImplementedError
    def resample(self):
        raise NotImplementedError
    def update_precision(self, *args, **kwargs):
        raise NotImplementedError
    def update(self, *args, **kwargs):
        raise NotImplementedError
class VirtualObservable(BaseVirtualObservable):
    """Linear-Gaussian virtual observable built from a LinearQuerry.

    `update` computes the Gaussian posterior over y given the virtual
    observations Gamma y ~ alpha (with per-observable variances) and a
    diagonal Gaussian prior (g, 1/prec).
    """
    def __init__(self, querry, querry_point, dtype, device):
        super().__init__(querry_point, dtype, device)
        assert isinstance(querry, LinearQuerry)
        self._querry = querry
        # posterior summaries, filled in by update()
        self._mean = None
        self._vars = None
        # per-observable variances, set externally via the setter
        self._vo_variances = None
    @property
    def querry(self):
        return self._querry
    @property
    def mean(self):
        return self._mean
    @property
    def vars(self):
        return self._vars
    @property
    def m(self):
        return self._querry.m
    @property
    def vo_variances(self):
        return self._vo_variances
    @vo_variances.setter
    def vo_variances(self, value):
        assert value.dtype == torch.double
        assert value.device == self.device
        self._vo_variances = value
    def resample(self, ForceResample = False):
        # delegate to the underlying query (redraws Gamma/alpha)
        self._querry.resample(ForceResample = ForceResample)
    @torch.no_grad()
    def update(self, g, prec, iteration, *, ForceUpdate = False):
        """Recompute posterior mean/vars of y from prior (g, 1/prec).

        All linear algebra is done in double precision via a Cholesky
        factorization of the m x m observation-space matrix Lambda.
        """
        if not ForceUpdate:
            raise RuntimeError
        g = g.to(dtype=torch.double)
        prec = prec.to(dtype=torch.double)
        # dirty solution
        self._Gamma = self._querry.Gamma.t()
        self._GammaTransposed = self.querry.GammaTransposed.t()
        self._alpha = self.querry.alpha
        cov = 1/prec
        Lambda = torch.einsum('im, m, sm -> is', [self._GammaTransposed, cov, self._GammaTransposed]) # checked this; seems to be okay
        Lambda += torch.diag(self.vo_variances)
        # NOTE(review): torch.cholesky is deprecated in newer torch releases
        # (torch.linalg.cholesky is the replacement) — confirm target version
        L = torch.cholesky(Lambda)
        LambdaInv = torch.cholesky_inverse(L)
        solvec = LambdaInv @ (self._GammaTransposed @ g - self._alpha)
        mean = g - torch.einsum('i, mi, m -> i', [cov, self._GammaTransposed, solvec])
        A = self._GammaTransposed * cov
        # diagonal of the posterior-covariance correction term
        postcov_diag_subtractor = torch.einsum('si, sm, mi -> i', [A, LambdaInv, A])
        self._mean = mean
        self._vars = cov - postcov_diag_subtractor
class EnergyVirtualObservable(BaseVirtualObservable):
    """Virtual observable based on the (tempered) energy of the linear
    system K y = f; the posterior mean is refined by subspace iterations
    using randomly sampled test directions, with an annealing temperature.
    """
    def __init__(self, querry_point, num_iterations_per_update, stochastic_subspace = None, sampler = None, l = 0.1, dtype = None, device = None):
        if dtype is None or device is None:
            raise ValueError('need to provide dtype and device')
        super().__init__(querry_point, dtype=dtype, device=device)
        self._stochastic_subspace = stochastic_subspace
        self._num_iterations_per_update = num_iterations_per_update
        # default sampler: random RBF directions of dimension stochastic_subspace
        if sampler is None:
            if stochastic_subspace is None:
                raise ValueError
            sampler = RadialBasisFunctionSampler(self._querry_point, l=l, N_aux=stochastic_subspace)
        self._sampler = sampler
        # annealing state; a schedule (if set) drives _temperature
        self._temperature = 1
        self._temperature_schedule = None
        self._mean = None
        self._vars = None
        # posterior summaries kept as numpy arrays; exposed as torch tensors
        self._mean_np = None
        self._vars_np = None
        # cached diagonal of K used for the (diagonal) variance update
        self._K_diag_np = self._querry_point.K.diagonal()
        self._forced_temperature = None
    @property
    def temperature(self):
        # a forced temperature overrides the scheduled one
        if self._forced_temperature is None:
            return self._temperature
        else:
            return self._forced_temperature
    def force_temperature(self, value):
        """Pin the temperature, bypassing any schedule."""
        self._forced_temperature = value
    @property
    def mean(self):
        return torch.tensor(self._mean_np, dtype=torch.double, device=self.device)
    @property
    def vars(self):
        return torch.tensor(self._vars_np, dtype=torch.double, device=self.device)
    @property
    def m(self):
        # the energy acts as a single scalar virtual observable
        return 1
    def resample(self, ForceResample = False):
        # nothing to be done
        pass
    def set_temperature(self, temperature):
        assert temperature >= 0
        self._temperature = temperature
    def _init(self):
        # lazily initialize the running mean estimate to zeros
        if self._mean_np is None:
            self._mean_np = np.zeros(self._querry_point.dim_out)
    def set_temperature_schedule(self, type, T_init, T_final, num_steps):
        """Install a 'linear' or 'exponential' annealing schedule."""
        assert type.lower() in ['linear', 'exponential']
        if type.lower() == 'linear':
            self._temperature_schedule = LinearTemperatureSchedule(T_init, T_final, num_steps)
        elif type.lower() == 'exponential':
            self._temperature_schedule = ExponentialTemperatureSchedule(T_init, T_final, num_steps)
        else:
            raise Exception
    def set_linear_temperature_schedule(self, T_init = 1, T_final = 0.0001, num_steps = None):
        if num_steps is None:
            raise ValueError
        self._temperature_schedule = LinearTemperatureSchedule(T_init, T_final, num_steps)
    def update_precision(self, iteration):
        """Advance the temperature according to the schedule (no-op when a
        forced temperature is active)."""
        if self._forced_temperature is not None:
            return
        if self._temperature_schedule is None:
            raise RuntimeError
        self._temperature = self._temperature_schedule.get_temperature(iteration)
    @torch.no_grad()
    def update(self, g, prec, iteration, *, ForceUpdate = False):
        """Refine the mean by projected linear solves of
        (diag(prec) + K/T) y = f/T + prec*g on random subspaces V."""
        if not ForceUpdate:
            raise RuntimeError
        inv_temperature = 1/self.temperature
        # diagonal variance approximation of the tempered posterior
        self._vars_np = 1/(prec.detach().cpu().numpy() + inv_temperature * self._K_diag_np)
        self._init()
        A = np.diag(prec.detach().cpu().numpy()) + inv_temperature * self._querry_point.K
        b = inv_temperature * self._querry_point.f + prec.detach().cpu().numpy() *g.detach().cpu().numpy()
        for n in range(self._num_iterations_per_update):
            V = self._sampler.sample_V()
            M = np.array(V.T @ A @ V)
            # Galerkin correction of the current mean within span(V)
            self._mean_np = self._mean_np - V @ np.linalg.solve(M, V.T @ np.array(A @ self._mean_np - b).flatten())
    def __repr__(self):
        s = 'Energy virtual Observable | Current temperature = {}'.format(self._temperature)
        return s
class BaseVirtualObservablesEnsemble(object):
    """Base container coordinating a set of virtual observables.

    Caches stacked mean/vars tensors across members and dispatches
    update/resample calls; subclasses implement update_vo_precision.
    """
    def __init__(self, QuerryPointEnsemble, virtual_observables, dtype, device):
        self._QuerryPointEnsemble = QuerryPointEnsemble
        self._dtype, self._device = dtype, device
        self._virtual_observables = virtual_observables
        # all members must expose the same number of observables
        m_target = self._virtual_observables[0].m
        for vo in virtual_observables:
            assert vo.m == m_target
        # stacked caches, invalidated by flush_cache()
        self._mean = None
        self._vars = None
    @property
    def dtype(self):
        return self._dtype
    @property
    def device(self):
        return self._device
    @property
    def X(self):
        # NOTE(review): returns the bound method QuerryPointEnsemble.X, not
        # its result — callers must invoke it with (dtype, device); confirm
        return self._QuerryPointEnsemble.X
    def __getitem__(self, item):
        return self._virtual_observables[item]
    def __iter__(self):
        yield from self._virtual_observables
    def __len__(self):
        return len(self._virtual_observables)
    def flush_cache(self):
        # invalidate stacked mean/vars so they are rebuilt on next access
        self._vars = None
        self._mean = None
    @property
    def mean(self):
        """Stacked (N, dim_out) posterior means of all member observables."""
        if self._mean is None:
            mean = torch.zeros(self.N, self.dim_out, device=self.device, dtype=self.dtype)
            for n in range(self.N):
                val = self._virtual_observables[n].mean
                assert val.dtype == torch.double
                assert val.device == self.device
                mean[n,:] = val
            self._mean = mean
        return self._mean.detach()
    @property
    def vars(self):
        """Stacked (N, dim_out) posterior variances of all member observables."""
        if self._vars is None:
            vars = torch.zeros(self.N, self.dim_out, device=self.device, dtype=self.dtype)
            for n in range(self.N):
                val = self._virtual_observables[n].vars
                assert val.dtype == torch.double
                assert val.device == self.device
                vars[n,:] = val
            self._vars = vars
        return self._vars.detach()
    @property
    def logsigma(self):
        # log standard deviations derived from the stacked variances
        return 0.5*torch.log(self.vars)
    @property
    def M(self):
        # total number of virtual observables across all members
        return sum((a.m for a in self))
    @property
    def m(self):
        # observables per member (identical for all, asserted in __init__)
        return self._virtual_observables[0].m
    @property
    def dim_out(self):
        return self[0].d_y
    @property
    def N(self):
        return len(self)
    def update(self, G, PREC, iteration, writer = None):
        """Update precisions, then each member posterior from rows of G/PREC."""
        self.update_vo_precision(iteration, writer)
        for n, virtual_observable in enumerate(self._virtual_observables):
            virtual_observable.update(G[n,:], PREC[n,:], iteration, ForceUpdate = True)
        self.flush_cache()
    def update_vo_precision(self, iteration):
        # subclass responsibility (subclasses also accept a writer argument)
        raise NotImplementedError
    def resample(self, ForceResample = False):
        for vo in self._virtual_observables:
            vo.resample(ForceResample=ForceResample)
class VirtualObservablesEnsemble(BaseVirtualObservablesEnsemble):
    """Ensemble of linear-Gaussian VirtualObservables with a shared Gamma
    hyperprior over the virtual-observable precisions.

    `update_vo_precision` performs the closed-form beta update of the Gamma
    posterior and pushes the resulting mean variances into every member.
    """
    def __init__(self, QuerryPointEnsemble, QuerryEnsemble, dtype, device):
        virtual_observables = list()
        for querry, querry_point in zip(QuerryEnsemble, QuerryPointEnsemble):
            virtual_observables.append(VirtualObservable(querry, querry_point, dtype=dtype, device=device))
        super().__init__(QuerryPointEnsemble, virtual_observables, dtype=dtype, device=device)
        self._QuerryEnsemble = QuerryEnsemble
        # Gamma hyperprior parameters for the observable precisions
        self._alpha_0 = 1e-6
        self._beta_0 = 1e-6
        self._prec_alpha = 0.5*self.N + self._alpha_0
        self._prec_beta = torch.ones(self.m, dtype=torch.double, device=self.device)
        # lazily computed caches
        self._infinite_precision_mask = None
        self._learnable_precision_indeces = None
        self._constant_precision = None
        self._mean_vo_variances = None
        self._mean_vo_variances = self._get_mean_vo_variances()
        self._set_member_variance_values(self._mean_vo_variances)
        # the first update_vo_precision call is skipped (members not yet updated)
        self._precision_initialized = False
    @property
    def m_free(self):
        raise NotImplementedError
    @property
    def fixed_precision(self):
        # True iff every observable component has non-learnable precision
        if self._constant_precision is None:
            self._constant_precision = np.all(self.infinite_precision_mask.detach().cpu().numpy())
        return self._constant_precision
    @property
    def learnable_precision_indeces(self):
        """Indices of observable components with learnable precision."""
        if self._learnable_precision_indeces is None:
            # NOTE(review): dtype=torch.double for index values looks suspect
            # (torch.long would be required for tensor indexing) — confirm usage
            self._learnable_precision_indeces = torch.tensor(np.where(np.invert(self.infinite_precision_mask.detach().cpu().numpy()))[0], dtype=torch.double, device=self.device)
        # bugfix: the cached tensor was computed but never returned, so this
        # property always yielded None
        return self._learnable_precision_indeces
    @property
    def infinite_precision_mask(self):
        # negative mask entries denote infinite (non-learnable) precision
        if self._infinite_precision_mask is None:
            self._infinite_precision_mask = torch.tensor(self._QuerryEnsemble[0].precision_mask < 0, dtype=torch.bool, device=self.device)
        return self._infinite_precision_mask
    def _get_mean_vo_variances(self):
        # posterior-mean variances under the Gamma model; exactly zero where
        # precision is infinite
        mean_vars = self._prec_beta / (self._prec_alpha + 1)
        mean_vars[self.infinite_precision_mask] = 0
        return mean_vars
    def _set_member_variance_values(self, mean_vo_vars):
        # broadcast the shared variance vector to every member observable
        for vo in self:
            vo.vo_variances = mean_vo_vars
    @torch.no_grad()
    def update_vo_precision(self, iteration, writer = None):
        """Closed-form Gamma beta update from the members' current residual
        statistics; no-op on the very first call and when all precisions
        are fixed."""
        if not self._precision_initialized:
            self._precision_initialized = True
            return
        if self[0].mean is None or self[0].vars is None:
            raise RuntimeError
        if not self.fixed_precision:
            beta = torch.zeros(self.m, dtype=torch.double, device=self.device)
            for ii, vo in enumerate(self):
                Gamma = vo.querry.Gamma
                mean = vo.mean
                alpha = vo.querry.alpha
                vars = vo.vars
                # expected squared residual of Gamma y - alpha under q(y)
                beta = beta + (Gamma @ mean - alpha)**2 + (Gamma**2 @ vars)
            self._prec_beta = 0.5*beta + self._beta_0
            self._mean_vo_variances = self._get_mean_vo_variances()
            self._set_member_variance_values(self._mean_vo_variances)
            if writer is not None:
                writer.add_scalar('Monitor/Mean_VO_variances', torch.mean(self._mean_vo_variances), global_step=iteration)
class EnergyVirtualObservablesEnsemble(BaseVirtualObservablesEnsemble):
    """Ensemble of EnergyVirtualObservables sharing one annealing schedule."""
    def __init__(self, QuerryPointEnsemble, num_iterations_per_update, sampler, dtype, device):
        members = [
            EnergyVirtualObservable(qp, num_iterations_per_update, sampler=sampler, dtype=dtype, device=device)
            for qp in QuerryPointEnsemble
        ]
        super().__init__(QuerryPointEnsemble, members, dtype=dtype, device=device)
    def force_temperature(self, value):
        """Pin every member observable to a fixed temperature."""
        for observable in self:
            observable.force_temperature(value)
    def set_temperature(self, *args, **kwargs):
        for observable in self:
            observable.set_temperature(*args, **kwargs)
    def set_temperature_schedule(self, type, **kwargs):
        """Install the same named schedule on every member."""
        for observable in self:
            observable.set_temperature_schedule(type, **kwargs)
    def set_linear_temperature_schedule(self, *args, **kwargs):
        for observable in self:
            observable.set_linear_temperature_schedule(*args, **kwargs)
    def update_vo_precision(self, iteration, writer = None):
        """Advance each member's temperature; optionally log the shared value."""
        for observable in self._virtual_observables:
            observable.update_precision(iteration)
        if writer is not None:
            writer.add_scalar('Monitoring/Temperature', self._virtual_observables[0].temperature, global_step = iteration)
class TemperatureSchedule(object):
    """Abstract interface for annealing-temperature schedules."""
    def __init__(self):
        # no state at the base level
        pass
    def get_temperature(self, iteration):
        """Return the temperature for `iteration` (subclass hook)."""
        raise NotImplementedError
class LinearTemperatureSchedule(TemperatureSchedule):
    """Linearly anneal the temperature from T_init down to T_final."""
    def __init__(self, T_init, T_final, num_steps):
        super().__init__()
        assert num_steps > 1
        assert T_final < T_init
        self._T_init = T_init
        self._T_final = T_final
        self._num_steps = num_steps
    def get_temperature(self, iteration):
        """Temperature at `iteration`; errors when past the schedule length."""
        if iteration > self._num_steps:
            raise RuntimeError
        # fraction of the schedule completed (reaches 1 at num_steps - 1)
        progress = iteration / (self._num_steps - 1)
        return self._T_init + progress * (self._T_final - self._T_init)
class ExponentialTemperatureSchedule(TemperatureSchedule):
    """Exponentially anneal the temperature from T_init down to T_final."""
    def __init__(self, T_init, T_final, num_steps):
        super().__init__()
        assert num_steps > 1
        assert T_final < T_init
        self._T_init = T_init
        self._T_final = T_final
        self._num_steps = num_steps
        # decay rate chosen so that T(num_steps - 1) == T_final
        self._lmbda = - np.log(T_final / T_init)
    def get_temperature(self, iteration):
        """Temperature at `iteration`; errors when past the schedule length."""
        if iteration > self._num_steps:
            raise RuntimeError
        progress = iteration / (self._num_steps - 1)
        return self._T_init * np.exp(-self._lmbda * progress)
|
<reponame>jkhenning/autokeras
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.util import nest
from autokeras.engine import adapter as adapter_module
CATEGORICAL = 'categorical'
NUMERICAL = 'numerical'
class InputAdapter(adapter_module.Adapter):
    def check(self, x):
        """Validate that *x* is a numerical numpy array or a tf.data.Dataset."""
        if isinstance(x, np.ndarray):
            if not np.issubdtype(x.dtype, np.number):
                raise TypeError('Expect the data to Input to be numerical, but got '
                                '{type}.'.format(type=x.dtype))
        elif not isinstance(x, tf.data.Dataset):
            raise TypeError('Expect the data to Input to be numpy.ndarray or '
                            'tf.data.Dataset, but got {type}.'.format(type=type(x)))
class ImageInputAdapter(adapter_module.Adapter):
    def check(self, x):
        """Validate that *x* is a numerical 3-D/4-D array or a tf.data.Dataset."""
        if isinstance(x, np.ndarray):
            if x.ndim not in [3, 4]:
                raise ValueError('Expect the data to ImageInput to have 3 or 4 '
                                 'dimensions, but got input shape {shape} with {ndim} '
                                 'dimensions'.format(shape=x.shape, ndim=x.ndim))
            if not np.issubdtype(x.dtype, np.number):
                raise TypeError('Expect the data to ImageInput to be numerical, but got '
                                '{type}.'.format(type=x.dtype))
        elif not isinstance(x, tf.data.Dataset):
            raise TypeError('Expect the data to ImageInput to be numpy.ndarray or '
                            'tf.data.Dataset, but got {type}.'.format(type=type(x)))
    def convert_to_dataset(self, x):
        """Insert a trailing channel axis for 3-D arrays, then defer to the base."""
        if isinstance(x, np.ndarray) and x.ndim == 3:
            x = np.expand_dims(x, axis=3)
        return super().convert_to_dataset(x)
class TextInputAdapter(adapter_module.Adapter):
    """Adapter that validates and converts 1-D string data for TextInput."""
    def check(self, x):
        """Validate that *x* is a 1-D string numpy array or a tf.data.Dataset."""
        if not isinstance(x, (np.ndarray, tf.data.Dataset)):
            raise TypeError('Expect the data to TextInput to be numpy.ndarray or '
                            'tf.data.Dataset, but got {type}.'.format(type=type(x)))
        if isinstance(x, np.ndarray) and x.ndim != 1:
            raise ValueError('Expect the data to TextInput to have 1 dimension, but '
                             'got input shape {shape} with {ndim} dimensions'.format(
                                 shape=x.shape,
                                 ndim=x.ndim))
        if isinstance(x, np.ndarray) and not np.issubdtype(x.dtype, np.character):
            raise TypeError('Expect the data to TextInput to be strings, but got '
                            '{type}.'.format(type=x.dtype))
    def convert_to_dataset(self, x):
        """Convert a 1-D string array to a dataset of (1,)-shaped slices.

        Bug fix: the original accessed ``x.shape`` before checking the input
        type, raising AttributeError for tf.data.Dataset inputs even though
        check() explicitly accepts them. Datasets are now passed through.
        """
        if isinstance(x, np.ndarray):
            if len(x.shape) == 1:
                x = x.reshape(-1, 1)
            x = tf.data.Dataset.from_tensor_slices(x)
        return x
class StructuredDataInputAdapter(adapter_module.Adapter):
    """Adapter for tabular data.

    Validates input, converts it to a dataset of string rows, and infers
    per-column types (categorical vs numerical) from value statistics.
    """
    def __init__(self, column_names=None, column_types=None, **kwargs):
        super().__init__(**kwargs)
        self.column_names = column_names
        self.column_types = column_types
        # Variables for inferring column types.
        self.count_nan = None
        self.count_numerical = None
        self.count_categorical = None
        self.count_unique_numerical = []
        self.num_col = None
    def get_config(self):
        """Serialize the inferred column statistics with the base config."""
        config = super().get_config()
        config.update({
            'count_nan': self.count_nan,
            'count_numerical': self.count_numerical,
            'count_categorical': self.count_categorical,
            'count_unique_numerical': self.count_unique_numerical,
            'num_col': self.num_col
        })
        return config
    @classmethod
    def from_config(cls, config):
        """Rebuild an adapter from a get_config() dictionary.

        Bug fix: the original never returned ``obj``, so from_config always
        produced None and round-tripping a config silently lost the adapter.
        """
        obj = super().from_config(config)
        obj.count_nan = config['count_nan']
        obj.count_numerical = config['count_numerical']
        obj.count_categorical = config['count_categorical']
        obj.count_unique_numerical = config['count_unique_numerical']
        obj.num_col = config['num_col']
        return obj
    def check(self, x):
        """Validate type and resolve/validate column names against the data."""
        if not isinstance(x, (pd.DataFrame, np.ndarray)):
            raise TypeError('Unsupported type {type} for '
                            '{name}.'.format(type=type(x),
                                             name=self.__class__.__name__))
        # Extract column_names from pd.DataFrame.
        if isinstance(x, pd.DataFrame) and self.column_names is None:
            self.column_names = list(x.columns)
        # column_types is provided by user
        if self.column_types:
            for column_name in self.column_types:
                if column_name not in self.column_names:
                    raise ValueError('Column_names and column_types are '
                                     'mismatched. Cannot find column name '
                                     '{name} in the data.'.format(
                                         name=column_name))
        # Generate column_names.
        if self.column_names is None:
            if self.column_types:
                raise ValueError('Column names must be specified.')
            self.column_names = [index for index in range(x.shape[1])]
        # Check if column_names has the correct length.
        if len(self.column_names) != x.shape[1]:
            raise ValueError('Expect column_names to have length {expect} '
                             'but got {actual}.'.format(
                                 expect=x.shape[1],
                                 actual=len(self.column_names)))
    def convert_to_dataset(self, x):
        """Convert the table to a dataset of unicode rows.

        NOTE(review): ``np.unicode`` was removed in NumPy 1.20; on modern
        NumPy this needs ``np.str_`` — confirm against the pinned version.
        """
        if isinstance(x, pd.DataFrame):
            # Convert x, y, validation_data to tf.Dataset.
            x = x.values.astype(np.unicode)
        if isinstance(x, np.ndarray):
            x = x.astype(np.unicode)
        dataset = tf.data.Dataset.from_tensor_slices(x)
        return dataset
    def fit(self, dataset):
        """Accumulate per-column statistics over the dataset, then infer types."""
        super().fit(dataset)
        for x in dataset:
            self.update(x)
        self.infer_column_types()
    def update(self, x):
        """Update nan/numerical/categorical counters with one row of strings."""
        # Calculate the statistics.
        x = nest.flatten(x)[0].numpy()
        if self.num_col is None:
            # First row: size the counters to the column count.
            self.num_col = len(x)
            self.count_nan = np.zeros(self.num_col)
            self.count_numerical = np.zeros(self.num_col)
            self.count_categorical = np.zeros(self.num_col)
            for i in range(len(x)):
                self.count_unique_numerical.append({})
        for i in range(self.num_col):
            x[i] = x[i].decode('utf-8')
            if x[i] == 'nan':
                self.count_nan[i] += 1
            elif x[i] == 'True':
                self.count_categorical[i] += 1
            elif x[i] == 'False':
                self.count_categorical[i] += 1
            else:
                try:
                    tmp_num = float(x[i])
                    self.count_numerical[i] += 1
                    if tmp_num not in self.count_unique_numerical[i]:
                        self.count_unique_numerical[i][tmp_num] = 1
                    else:
                        self.count_unique_numerical[i][tmp_num] += 1
                except ValueError:
                    # Anything that is not nan/bool/number counts as categorical.
                    self.count_categorical[i] += 1
    def infer_column_types(self):
        """Classify each column, keeping any user-provided types.

        A column is categorical if any categorical value was seen, or if the
        ratio of distinct numerical values is below 5%.
        NOTE(review): a column consisting only of 'nan' divides by a zero
        count here (numpy emits a warning and yields nan -> NUMERICAL);
        confirm that is the intended fallback.
        """
        column_types = {}
        for i in range(self.num_col):
            if self.count_categorical[i] > 0:
                column_types[self.column_names[i]] = CATEGORICAL
            elif len(self.count_unique_numerical[i])/self.count_numerical[i] < 0.05:
                column_types[self.column_names[i]] = CATEGORICAL
            else:
                column_types[self.column_names[i]] = NUMERICAL
        # Partial column_types is provided.
        if self.column_types is None:
            self.column_types = {}
        for key, value in column_types.items():
            if key not in self.column_types:
                self.column_types[key] = value
class TimeseriesInputAdapter(adapter_module.Adapter):
    """Adapter for timeseries data: validates a 2-D table and converts it to
    a dataset of sliding windows of length ``lookback``."""
    def __init__(self,
                 lookback=None,
                 column_names=None,
                 column_types=None,
                 **kwargs):
        super().__init__(**kwargs)
        # Window length used by convert_to_dataset; must be set before
        # conversion (no default is applied here).
        self.lookback = lookback
        self.column_names = column_names
        self.column_types = column_types
    def get_config(self):
        """Serialize lookback and column metadata with the base config."""
        config = super().get_config()
        config.update({
            'lookback': self.lookback,
            'column_names': self.column_names,
            'column_types': self.column_types
        })
        return config
    def check(self, x):
        """Record any information needed by transform.

        Validates the input type and dimensionality, then resolves and
        validates column names against the data's second axis.
        """
        if not isinstance(x, (pd.DataFrame, np.ndarray, tf.data.Dataset)):
            raise TypeError('Expect the data in TimeseriesInput to be numpy.ndarray'
                            ' or tf.data.Dataset or pd.DataFrame, but got {type}.'.
                            format(type=type(x)))
        if isinstance(x, np.ndarray) and x.ndim != 2:
            raise ValueError('Expect the data in TimeseriesInput to have 2 dimension'
                             ', but got input shape {shape} with {ndim} '
                             'dimensions'.format(
                                 shape=x.shape,
                                 ndim=x.ndim))
        # Extract column_names from pd.DataFrame.
        if isinstance(x, pd.DataFrame) and self.column_names is None:
            self.column_names = list(x.columns)
        # column_types is provided by user
        if self.column_types:
            for column_name in self.column_types:
                if column_name not in self.column_names:
                    raise ValueError('Column_names and column_types are '
                                     'mismatched. Cannot find column name '
                                     '{name} in the data.'.format(
                                         name=column_name))
        # Generate column_names.
        if self.column_names is None:
            if self.column_types:
                raise ValueError('Column names must be specified.')
            self.column_names = [index for index in range(x.shape[1])]
        # Check if column_names has the correct length.
        if len(self.column_names) != x.shape[1]:
            raise ValueError('Expect column_names to have length {expect} '
                             'but got {actual}.'.format(
                                 expect=x.shape[1],
                                 actual=len(self.column_names)))
    def convert_to_dataset(self, x):
        """Convert rows to float32 sliding windows of length ``lookback``.

        Windows advance by one row and incomplete trailing windows are
        dropped. NOTE(review): the window contents are materialized eagerly
        into a Python list before rebuilding a dataset — fine for small
        data, memory-heavy for long series; confirm acceptable upstream.
        """
        if isinstance(x, pd.DataFrame):
            # Convert x, y, validation_data to tf.Dataset.
            x = x.values.astype(np.float32)
        if isinstance(x, np.ndarray):
            x = x.astype(np.float32)
            x = tf.data.Dataset.from_tensor_slices(x)
        x = x.window(self.lookback, shift=1, drop_remainder=True)
        final_data = []
        for window in x:
            final_data.append([elems.numpy() for elems in window])
        final_data = tf.data.Dataset.from_tensor_slices(final_data)
        return final_data
|
<gh_stars>0
""" Movable AABBs in space.
Classes:
Body
"""
from __future__ import annotations # NOTE: This is necessary below Python 3.10
from .aabb import AABB
from .collision import (CollisionData, get_axis_collision_distances,
get_axis_collision_times, get_collision_normals)
from .object2d import Object2D
from .space import Space
from pyglet.math import Vec2
from typing import Optional
class Body(AABB):
    """ A moving bounding box in 2D space. Contains some helper methods for
    finding the nearest collision in the space, and resolving the collision.

    Collision detection uses the swept-AABB technique: entry/exit times are
    computed per axis in units of one velocity step (0..1 = this frame).
    """
    def __init__(
            self,
            x: float, # From `Object2D`
            y: float, # From `Object2D`
            w: float, # From `AABB`
            h: float, # From `AABB`
            layer: int = AABB.DEFAULT_LAYER, # From `AABB`
            mask: int = AABB.DEFAULT_LAYER, # Use our default layer from before
            parent: Optional[Object2D] = None # From `Object2D`
    ):
        super().__init__(x, y, w, h, layer, parent) # Initialise AABB fields
        # Bitmask of layers this body collides against; tested with
        # `self.mask & other.layer` in get_nearest_collision.
        self.mask = mask
    def get_collision_data(self, other: AABB, velocity: Vec2) -> CollisionData:
        """ Get the collision data between this and another bounding box, using
        a given velocity.

        Returns a CollisionData with: whether a collision occurs within this
        step, the (fractional) time of collision, and the collision normals.
        """
        # Get Collision Distances
        x_entry_dist, x_exit_dist = get_axis_collision_distances(
            self.global_x, self.w, velocity.x,
            other.global_x, other.w
        )
        y_entry_dist, y_exit_dist = get_axis_collision_distances(
            self.global_y, self.h, velocity.y,
            other.global_y, other.h
        )
        entry_distances = Vec2(x_entry_dist, y_entry_dist)
        # Get Collision Times (distances normalized by velocity per axis)
        x_entry_time, x_exit_time = get_axis_collision_times(
            x_entry_dist, x_exit_dist,
            velocity.x
        )
        y_entry_time, y_exit_time = get_axis_collision_times(
            y_entry_dist, y_exit_dist,
            velocity.y
        )
        entry_times = Vec2(x_entry_time, y_entry_time)
        # Use closest entry and furthest exit: both axes must overlap for a
        # real collision, so entry is the later axis entry, exit the earlier
        # axis exit.
        entry_time = max(x_entry_time, y_entry_time)
        exit_time = min(x_exit_time, y_exit_time)
        # Was there a collision?
        collided = not (
            # No motion
            entry_time > exit_time
            # Or collision already happened
            or exit_time <= 0
            # Or collision happens further than 1 time step away
            or entry_time > 1
        )
        # Get collision normals (zero vector when no collision occurred)
        normals = get_collision_normals(
            entry_times,
            entry_distances,
        ) if collided else Vec2(0, 0)
        # Return data
        return CollisionData(
            collided,
            # Use whichever is nearest to resolve ongoing collisions in the
            # neatest manner.
            entry_time if abs(entry_time) < abs(exit_time) else exit_time,
            normals,
        )
    def get_nearest_collision(
        self,
        space: Space,
        velocity: Vec2,
    ) -> Optional[CollisionData]:
        """ Finds the nearest collision in the space, if any. """
        # Broad phase: an AABB covering the whole swept motion, used to skip
        # boxes that cannot possibly be hit this step.
        broad_phase = self.get_broad_phase(velocity)
        closest_data: Optional[CollisionData] = None
        # Loop over every box in the space
        for other in space:
            # Check if a collision is possible (skip self, honor layer mask,
            # and require broad-phase overlap)
            if other is not self and self.mask & other.layer and broad_phase.is_colliding_aabb(other):
                # Get data
                data = self.get_collision_data(other, velocity)
                if (
                    # No collision yet
                    closest_data is None
                    # New collision is nearer
                    or data.collision_time < closest_data.collision_time
                ) and data.collided: # Check there actually was a collision
                    closest_data = data
        return closest_data
    def move(self, space: Space, velocity: Vec2) -> Vec2:
        """ Moves as far as possible in one iteration, returning the remaining
        velocity calculated using the slide method.
        """
        nearest_collision = self.get_nearest_collision(space, velocity)
        if nearest_collision is None:
            self.position += velocity # Move all the way
            new_velocity = Vec2(0, 0) # No more velocity left over
        else:
            # Move to point of collision
            self.x += velocity.x * nearest_collision.collision_time
            self.y += velocity.y * nearest_collision.collision_time
            # Calculate dot product of normals and velocity.
            # NOTE(review): the components are deliberately swapped
            # (velocity.x * normals.y + velocity.y * normals.x) — this
            # projects the remaining motion onto the surface tangent for the
            # slide response; confirm against the normals convention in
            # get_collision_normals.
            dot_product = (
                velocity.x * nearest_collision.normals.y
                + velocity.y * nearest_collision.normals.x
            ) * (1-nearest_collision.collision_time)
            # Determine new velocity (tangential component only)
            new_velocity = Vec2(
                dot_product * nearest_collision.normals.y,
                dot_product * nearest_collision.normals.x
            )
        return new_velocity
    def move_and_slide(
        self,
        space: Space,
        velocity: Vec2,
        max_bounce: int = 3,
    ):
        """ Repeatedly moves with the given velocity until it equals 0 or the
        maximum bounces in one frame have been reached.
        """
        counter = 0
        # Move until velocity is zero
        while velocity != Vec2(0, 0) and counter < max_bounce:
            velocity = self.move(space, velocity)
            counter += 1 # Increment max bounces counter
|
<gh_stars>1-10
import mlrun.api.schemas
import mlrun.utils.singleton
from mlrun.api.utils.clients import nuclio
from mlrun.config import config, default_config
from mlrun.runtimes.utils import resolve_mpijob_crd_version
from mlrun.utils import logger
class ClientSpec(metaclass=mlrun.utils.singleton.Singleton,):
    """Singleton assembling the server-side spec sent to mlrun clients.

    Values are read from the live server config; settings that still equal
    their defaults are omitted (sent as None) so clients can fall back to
    their own values.
    """
    def __init__(self):
        # Nuclio version is resolved lazily and cached for the process
        # lifetime (see _resolve_nuclio_version).
        self._cached_nuclio_version = None
    def get_client_spec(self):
        """Build and return the ClientSpec schema object from server config."""
        mpijob_crd_version = resolve_mpijob_crd_version(api_context=True)
        return mlrun.api.schemas.ClientSpec(
            version=config.version,
            namespace=config.namespace,
            docker_registry=config.httpdb.builder.docker_registry,
            remote_host=config.remote_host,
            mpijob_crd_version=mpijob_crd_version,
            ui_url=config.resolve_ui_url(),
            artifact_path=config.artifact_path,
            spark_app_image=config.spark_app_image,
            spark_app_image_tag=config.spark_app_image_tag,
            spark_history_server_path=config.spark_history_server_path,
            kfp_image=config.kfp_image,
            dask_kfp_image=config.dask_kfp_image,
            api_url=config.httpdb.api_url,
            nuclio_version=self._resolve_nuclio_version(),
            # These don't have a default value, but we don't send them if they are not set to allow the client to know
            # when to use server value and when to use client value (server only if set). Since their default value is
            # empty and not set is also empty we can use the same _get_config_value_if_not_default
            default_function_priority_class_name=self._get_config_value_if_not_default(
                "default_function_priority_class_name"
            ),
            valid_function_priority_class_names=self._get_config_value_if_not_default(
                "valid_function_priority_class_names"
            ),
            # These have a default value, therefore we want to send them only if their value is not the default one
            # (otherwise clients don't know when to use server value and when to use client value)
            ui_projects_prefix=self._get_config_value_if_not_default(
                "ui.projects_prefix"
            ),
            scrape_metrics=self._get_config_value_if_not_default("scrape_metrics"),
            hub_url=self._get_config_value_if_not_default("hub_url"),
            default_function_node_selector=self._get_config_value_if_not_default(
                "default_function_node_selector"
            ),
            igz_version=self._get_config_value_if_not_default("igz_version"),
            auto_mount_type=self._get_config_value_if_not_default(
                "storage.auto_mount_type"
            ),
            auto_mount_params=self._get_config_value_if_not_default(
                "storage.auto_mount_params"
            ),
            spark_operator_version=self._get_config_value_if_not_default(
                "spark_operator_version"
            ),
            default_tensorboard_logs_path=self._get_config_value_if_not_default(
                "default_tensorboard_logs_path"
            ),
        )
    def _get_config_value_if_not_default(self, config_key):
        """Return the config value at dotted *config_key*, or None if it still
        equals the default config's value for that key.

        NOTE(review): if an intermediate key is missing from default_config,
        the "" fallback would break the next .get() — assumes every dotted
        prefix exists in default_config; confirm.
        """
        config_key_parts = config_key.split(".")
        current_config_value = config
        current_default_config_value = default_config
        # Walk both trees in lockstep along the dotted path.
        for config_key_part in config_key_parts:
            current_config_value = getattr(current_config_value, config_key_part)
            current_default_config_value = current_default_config_value.get(
                config_key_part, ""
            )
        if current_config_value == current_default_config_value:
            return None
        else:
            return current_config_value
    # if nuclio version specified on mlrun config set it likewise,
    # if not specified, get it from nuclio api client
    # since this is a heavy operation (sending requests to API), and it's unlikely that the version
    # will change - cache it (this means if we upgrade nuclio, we need to restart mlrun to re-fetch the new version)
    def _resolve_nuclio_version(self):
        """Return the nuclio version, from config or the nuclio dashboard,
        caching the first non-empty result for the process lifetime."""
        if not self._cached_nuclio_version:
            # config override everything
            nuclio_version = config.nuclio_version
            if not nuclio_version and config.nuclio_dashboard_url:
                try:
                    nuclio_client = nuclio.Client()
                    nuclio_version = nuclio_client.get_dashboard_version()
                except Exception as exc:
                    # Best-effort: log and fall through with an empty version.
                    logger.warning("Failed to resolve nuclio version", exc=str(exc))
            self._cached_nuclio_version = nuclio_version
        return self._cached_nuclio_version
|
import os
import numpy
from subprocess import Popen, PIPE
import pymesh
import tempfile
from default_config.global_vars import apbs_bin, pdb2pqr_bin, multivalue_bin
import random
"""
Modified from:
computeAPBS.py: Wrapper function to compute the Poisson Boltzmann electrostatics for a surface using APBS.
<NAME> - LPDI STI EPFL 2019
"""
def computeAPBS(vertices, pdb_file, tmp_file_base):
    """
    Calls APBS, pdb2pqr, and multivalue and returns the charges per vertex.

    Args:
        vertices: iterable of (x, y, z) vertex coordinates.
        pdb_file: path to the input PDB file.
        tmp_file_base: path prefix used for all intermediate files
            (everything matching ``tmp_file_base*`` is deleted at the end).

    Returns:
        numpy array with one charge value per vertex.
    """
    # SECURITY NOTE(review): these commands are built by string interpolation
    # and run via os.system — paths with spaces or shell metacharacters will
    # break or be executed. Do not pass untrusted paths; consider
    # subprocess.run([...], shell=False) as a follow-up.
    pdb2pqr = pdb2pqr_bin + " --ff=parse --whitespace --noopt --apbs-input %s %s"
    make_pqr = pdb2pqr % (pdb_file, tmp_file_base)
    os.system(make_pqr)
    apbs = apbs_bin + " %s"
    make_apbs = apbs % (tmp_file_base + ".in")
    os.system(make_apbs)
    # Write the vertex coordinates for multivalue; the context manager closes
    # the file even if a write fails.
    with open(tmp_file_base + ".csv", "w") as vertfile:
        for vert in vertices:
            vertfile.write("{},{},{}\n".format(vert[0], vert[1], vert[2]))
    multivalue = multivalue_bin + " %s %s %s"
    make_multivalue = multivalue % (tmp_file_base + ".csv",
                                    tmp_file_base + ".dx",
                                    tmp_file_base + "_out.csv")
    os.system(make_multivalue)
    # Read the charge file (fix: the original never closed this handle).
    charges = numpy.array([0.0] * len(vertices))
    with open(tmp_file_base + "_out.csv") as chargefile:
        for ix, line in enumerate(chargefile.readlines()):
            charges[ix] = float(line.split(",")[3])
    # Clean up all intermediate files.
    os.system("rm " + tmp_file_base + "*")
    os.system("rm io.mc")
    return charges
""" ORIGINAL FUNCTION
'''
computeAPBS.py: Wrapper function to compute the Poisson Boltzmann electrostatics for a surface using APBS.
<NAME> - LPDI STI EPFL 2019
This file is part of MaSIF.
Released under an Apache License 2.0
'''
def computeAPBS(vertices, pdb_file, tmp_file_base = tempfile.mktemp()):
#Calls APBS, pdb2pqr, and multivalue and returns the charges per vertex
#fields = tmp_file_base.split("/")[0:-1]
#directory = "/".join(fields) + "/"
fields = tmp_file_base
directory = str(fields) + "/"
filename_base = tmp_file_base.split("/")[-1]
pdbname = pdb_file.split("/")[-1]
args = [
pdb2pqr_bin,
"--ff=parse",
"--whitespace",
"--noopt",
"--apbs-input",
pdbname,
filename_base,
]
p2 = Popen(args, stdout=PIPE, stderr=PIPE, cwd=directory)
stdout, stderr = p2.communicate()
args = [apbs_bin, filename_base + ".in"]
p2 = Popen(args, stdout=PIPE, stderr=PIPE, cwd=directory)
stdout, stderr = p2.communicate()
vertfile = open(directory + "/" + filename_base + ".csv", "w")
for vert in vertices:
vertfile.write("{},{},{}\n".format(vert[0], vert[1], vert[2]))
vertfile.close()
args = [
multivalue_bin,
filename_base + ".csv",
filename_base + ".dx",
filename_base + "_out.csv",
]
p2 = Popen(args, stdout=PIPE, stderr=PIPE, cwd=directory)
stdout, stderr = p2.communicate()
# Read the charge file
chargefile = open(tmp_file_base + "_out.csv")
charges = numpy.array([0.0] * len(vertices))
for ix, line in enumerate(chargefile.readlines()):
charges[ix] = float(line.split(",")[3])
return charges
""" |
<reponame>bryantChhun/napari-gui<filename>napari/components/_viewer/view/main.py
from qtpy.QtCore import QCoreApplication, Qt, QSize
from qtpy.QtWidgets import QWidget, QSlider, QVBoxLayout, QSplitter
from qtpy.QtGui import QCursor, QPixmap
from vispy.scene import SceneCanvas, PanZoomCamera
from ..._dims.view import QtDims
from ....resources import resources_dir
from .controls import QtControls
import os.path as osp
from ....resources import resources_dir
from ....util.theme import template, palettes
palette = palettes['dark']
class QtViewer(QSplitter):
    """Main Qt widget for the viewer: a splitter holding the controls panel,
    the vispy canvas with dimension sliders, and the layer list."""
    # The themed stylesheet is loaded once at class-definition time and
    # shared by all instances.
    with open(osp.join(resources_dir, 'stylesheet.qss'), 'r') as f:
        raw_stylesheet = f.read()
        themed_stylesheet = template(raw_stylesheet, **palette)
    def __init__(self, viewer):
        """Build the Qt scene around *viewer* and register event forwarding.

        Parameters
        ----------
        viewer : Viewer
            The model object; a back-reference (`viewer._qtviewer`) is set so
            the model can reach this widget.
        """
        super().__init__()
        QCoreApplication.setAttribute(
            Qt.AA_UseStyleSheetPropagationInWidgetStyles, True
        )
        self.setStyleSheet(self.themed_stylesheet)
        self.viewer = viewer
        self.viewer._qtviewer = self
        self.canvas = SceneCanvas(keys=None, vsync=True)
        self.canvas.native.setMinimumSize(QSize(100, 100))
        # Forward all canvas mouse/key events to the top layer (see the
        # on_* handlers below).
        self.canvas.connect(self.on_mouse_move)
        self.canvas.connect(self.on_mouse_press)
        self.canvas.connect(self.on_mouse_release)
        self.canvas.connect(self.on_key_press)
        self.canvas.connect(self.on_key_release)
        self.view = self.canvas.central_widget.add_view()
        # Set 2D camera (the camera will scale to the contents in the scene)
        self.view.camera = PanZoomCamera(aspect=1)
        # flip y-axis to have correct aligment
        self.view.camera.flip = (0, 1, 0)
        self.view.camera.set_range()
        # Disable vispy's default viewbox key handling (module-level no-op).
        self.view.camera.viewbox_key_event = viewbox_key_event
        center = QWidget()
        layout = QVBoxLayout()
        layout.setContentsMargins(15, 20, 15, 10)
        layout.addWidget(self.canvas.native)
        dimsview = QtDims(self.viewer.dims)
        layout.addWidget(dimsview)
        center.setLayout(layout)
        # Add controls, center, and layerlist
        self.control_panel = QtControls(viewer)
        self.addWidget(self.control_panel)
        self.addWidget(center)
        self.addWidget(self.viewer.layers._qt)
        # Named cursors available via set_cursor.
        self._cursors = {
            'disabled': QCursor(
                QPixmap(':/icons/cursor/cursor_disabled.png')
                .scaled(20, 20)),
            'cross': Qt.CrossCursor,
            'forbidden': Qt.ForbiddenCursor,
            'pointing': Qt.PointingHandCursor,
            'standard': QCursor()
        }
    def set_cursor(self, cursor, size=10):
        """Set the canvas cursor by name; 'square' scales a pixmap to *size*
        (falling back to a crosshair outside 10..300 px)."""
        if cursor == 'square':
            if size < 10 or size > 300:
                q_cursor = self._cursors['cross']
            else:
                q_cursor = QCursor(QPixmap(':/icons/cursor/cursor_square.png')
                                   .scaledToHeight(size))
        else:
            q_cursor = self._cursors[cursor]
        self.canvas.native.setCursor(q_cursor)
    def on_mouse_move(self, event):
        """Called whenever mouse moves over canvas.
        """
        layer = self.viewer._top
        if layer is not None:
            layer.on_mouse_move(event)
    def on_mouse_press(self, event):
        """Called whenever mouse pressed in canvas.
        """
        layer = self.viewer._top
        if layer is not None:
            layer.on_mouse_press(event)
    def on_mouse_release(self, event):
        """Called whenever mouse released in canvas.
        """
        layer = self.viewer._top
        if layer is not None:
            layer.on_mouse_release(event)
    def on_key_press(self, event):
        """Called whenever key pressed in canvas.

        Global key bindings take precedence over the top layer; auto-repeat
        events do not re-trigger bindings.
        """
        if (event.text in self.viewer.key_bindings and not
                event.native.isAutoRepeat()):
            self.viewer.key_bindings[event.text](self.viewer)
            return
        layer = self.viewer._top
        if layer is not None:
            layer.on_key_press(event)
    def on_key_release(self, event):
        """Called whenever key released in canvas.
        """
        layer = self.viewer._top
        if layer is not None:
            layer.on_key_release(event)
def viewbox_key_event(event):
    """Swallow ViewBox key events so vispy's default camera key handling
    never fires.

    Parameters
    ----------
    event : instance of Event
        The event.
    """
    return None
|
#!/usr/bin/env python
# ** The MIT License **
#
# Copyright (c) 2007 <NAME> (aka Insanum)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Dude... just buy us a beer. :-)
#
# XXX Todo/Cleanup XXX
# threading is currently broken when getting event list
# if threading works then move pageToken processing from GetAllEvents to thread
# support different types of reminders plus multiple ones (popup, sms, email)
# add caching, should be easy (dump all calendar JSON data to file)
# add support for multiline description input in the 'add' and 'edit' commands
# maybe add support for freebusy ?
#############################################################################
# #
# ( ( ( #
# ( ( ( )\ ) ( )\ ) )\ ) #
# )\ ) )\ )\ (()/( )\ (()/( (()/( #
# (()/( (((_)((((_)( /(_))(((_) /(_)) /(_)) #
# /(_))_ )\___ )\ _ )\ (_)) )\___ (_)) (_)) #
# (_)) __|((/ __|(_)_\(_)| | ((/ __|| | |_ _| #
# | (_ | | (__ / _ \ | |__ | (__ | |__ | | #
# \___| \___|/_/ \_\ |____| \___||____||___| #
# #
# Author: <NAME> <http://www.insanum.com> #
# <NAME> <http://github.com/tresni> #
# <NAME> #
# Home: https://github.com/insanum/gcalcli #
# #
# Everything you need to know (Google API Calendar v3): http://goo.gl/HfTGQ #
# #
#############################################################################
from __future__ import print_function, absolute_import
# These are standard libraries and should never fail
import sys
import os
import re
import shlex
import time
import textwrap
import signal
import json
import random
from datetime import datetime, timedelta, date
from unicodedata import east_asian_width
from collections import namedtuple
from gcalcli.validators import (
get_input, get_override_color_id, STR_NOT_EMPTY, PARSABLE_DATE, STR_TO_INT,
VALID_COLORS, STR_ALLOW_EMPTY, REMINDER)
# Required 3rd party libraries
try:
from dateutil.tz import tzlocal
from dateutil.parser import parse
import httplib2
from six import next
from six.moves import input, range, zip, map, cPickle as pickle
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client import tools
except ImportError as exc: # pragma: no cover
print("ERROR: Missing module - %s" % exc.args[0])
sys.exit(1)
# Package local imports
from gcalcli import __program__, __version__
from gcalcli import utils
from gcalcli.argparsers import get_argument_parser, handle_unparsed
from gcalcli.utils import _u, days_since_epoch
from gcalcli.printer import Printer, valid_color_name
from gcalcli.exceptions import GcalcliError
EventTitle = namedtuple('EventTitle', ['title', 'color'])
CalName = namedtuple('CalName', ['name', 'color'])
class GoogleCalendarInterface:
cache = {}
allCals = []
allEvents = []
now = datetime.now(tzlocal())
agenda_length = 5
maxRetries = 5
authHttp = None
calService = None
urlService = None
ACCESS_OWNER = 'owner'
ACCESS_WRITER = 'writer'
ACCESS_READER = 'reader'
ACCESS_FREEBUSY = 'freeBusyReader'
UNIWIDTH = {'W': 2, 'F': 2, 'N': 1, 'Na': 1, 'H': 1, 'A': 1}
def __init__(self, cal_names=[], printer=Printer(), **options):
self.cals = []
self.printer = printer
self.options = options
self.details = options.get('details', {})
# stored as detail, but provided as option: TODO: fix that
self.details['width'] = options.get('width', 80)
self._get_cached()
self._select_cals(cal_names)
    def _select_cals(self, selected_names):
        """Populate self.cals from *selected_names* (CalName tuples).

        An exact summary match wins outright for that name; otherwise every
        calendar whose summary matches the name as a case-insensitive regex
        is included. An empty selection means all calendars.
        """
        if self.cals:
            raise GcalcliError('this object should not already have cals')
        if not selected_names:
            self.cals = self.allCals
            return
        for cal_name in selected_names:
            matches = []
            for self_cal in self.allCals:
                # For exact match, we should match only 1 entry and accept
                # the first entry. Should honor access role order since
                # it happens after _get_cached()
                if cal_name.name == self_cal['summary']:
                    # This makes sure that if we have any regex matches
                    # that we toss them out in favor of the specific match
                    matches = [self_cal]
                    self_cal['colorSpec'] = cal_name.color
                    break
                # Otherwise, if the calendar matches as a regex, append
                # it to the list of potential matches
                elif re.search(cal_name.name, self_cal['summary'], flags=re.I):
                    matches.append(self_cal)
                    self_cal['colorSpec'] = cal_name.color
            # Add relevant matches to the list of calendars we want to
            # operate against.
            # NOTE(review): a calendar matched by several names is appended
            # once per name (duplicates possible) — confirm intended.
            self.cals += matches
@staticmethod
def _localize_datetime(dt):
if not hasattr(dt, 'tzinfo'): # Why are we skipping these?
return dt
if dt.tzinfo is None:
return dt.replace(tzinfo=tzlocal())
else:
return dt.astimezone(tzlocal())
def _retry_with_backoff(self, method):
for n in range(0, self.maxRetries):
try:
return method.execute()
except HttpError as e:
error = json.loads(e.content)
error = error.get('error')
if error.get('code') == '403' and \
error.get('errors')[0].get('reason') \
in ['rateLimitExceeded', 'userRateLimitExceeded']:
time.sleep((2 ** n) + random.random())
else:
raise
return None
    def _google_auth(self):
        """Return an authorized httplib2.Http object, caching it on self.

        Credentials are persisted in the config folder (or ~/.gcalcli_oauth);
        if missing or invalid, the interactive OAuth2 flow is run.
        """
        from argparse import Namespace
        if not self.authHttp:
            if self.options['configFolder']:
                storage = Storage(
                    os.path.expanduser(
                        "%s/oauth" % self.options['configFolder']))
            else:
                storage = Storage(os.path.expanduser('~/.gcalcli_oauth'))
            credentials = storage.get()
            if credentials is None or credentials.invalid:
                # Run the interactive flow; tools.run_flow persists the new
                # credentials into `storage` as a side effect.
                credentials = tools.run_flow(
                    OAuth2WebServerFlow(
                        client_id=self.options['client_id'],
                        client_secret=self.options['client_secret'],
                        scope=['https://www.googleapis.com/auth/calendar',
                               'https://www.googleapis.com/auth/urlshortener'],
                        user_agent=__program__ + '/' + __version__),
                    storage,
                    Namespace(**self.options))
            self.authHttp = credentials.authorize(httplib2.Http())
        return self.authHttp
def _cal_service(self):
if not self.calService:
self.calService = \
build(serviceName='calendar',
version='v3',
http=self._google_auth())
return self.calService
def _url_service(self):
if not self.urlService:
self._google_auth()
self.urlService = \
build(serviceName='urlshortener',
version='v1',
http=self._google_auth())
return self.urlService
    def _get_cached(self):
        """Populate self.allCals, from the pickle cache when allowed,
        otherwise by paging through the calendarList API (and re-caching).
        """
        if self.options['configFolder']:
            cacheFile = os.path.expanduser(
                "%s/cache" % self.options['configFolder'])
        else:
            cacheFile = os.path.expanduser('~/.gcalcli_cache')
        if self.options['refresh_cache']:
            try:
                os.remove(cacheFile)
            except OSError:
                pass
                # fall through
        self.cache = {}
        self.allCals = []
        if self.options['use_cache']:
            # note that we need to use pickle for cache data since we stuff
            # various non-JSON data in the runtime storage structures
            try:
                with open(cacheFile, 'rb') as _cache_:
                    self.cache = pickle.load(_cache_)
                    self.allCals = self.cache['allCals']
                # XXX assuming data is valid, need some verification check here
                # NOTE(review): only IOError is caught — a corrupt pickle
                # (UnpicklingError/KeyError) would propagate; confirm intended.
                return
            except IOError:
                pass
                # fall through
        # Cache miss (or caching disabled): page through the full calendar
        # list, honoring nextPageToken.
        calList = self._retry_with_backoff(
            self._cal_service().calendarList().list())
        while True:
            for cal in calList['items']:
                self.allCals.append(cal)
            pageToken = calList.get('nextPageToken')
            if pageToken:
                calList = self._retry_with_backoff(
                    self._cal_service().calendarList().list(
                        pageToken=pageToken))
            else:
                break
        # Sort so access-role precedence holds for _select_cals' exact match.
        self.allCals.sort(key=lambda x: x['accessRole'])
        if self.options['use_cache']:
            self.cache['allCals'] = self.allCals
            with open(cacheFile, 'wb') as _cache_:
                pickle.dump(self.cache, _cache_)
def _shorten_url(self, url):
if self.details.get('url', False) != "short":
return url
# Note that when authenticated to a google account different shortUrls
# can be returned for the same longUrl. See: http://goo.gl/Ya0A9
shortUrl = self._retry_with_backoff(
self._url_service().url().insert(body={'longUrl': url}))
return shortUrl['id']
def _calendar_color(self, event, override_color=False):
ansi_codes = {
"1": "brightblue",
"2": "brightgreen",
"3": "brightmagenta",
"4": "magenta",
"5": "brightyellow",
"6": "brightred",
"7": "brightcyan",
"8": "brightblack",
"9": "blue",
"10": "green",
"11": "red"
}
if event.get('gcalcli_cal') is None:
return 'default'
else:
cal = event['gcalcli_cal']
if override_color:
return ansi_codes[event['colorId']]
elif cal.get('colorSpec', None):
return cal['colorSpec']
elif cal['accessRole'] == self.ACCESS_OWNER:
return self.options['color_owner']
elif cal['accessRole'] == self.ACCESS_WRITER:
return self.options['color_writer']
elif cal['accessRole'] == self.ACCESS_READER:
return self.options['color_reader']
elif cal['accessRole'] == self.ACCESS_FREEBUSY:
return self.options['color_freebusy']
else:
return 'default'
def _valid_title(self, event):
if 'summary' in event and event['summary'].strip():
return event['summary']
else:
return "(No title)"
def _isallday(self, event):
return event['s'].hour == 0 and event['s'].minute == 0 and \
event['e'].hour == 0 and event['e'].minute == 0
def _cal_monday(self, day_num):
"""Shift the day number if we're doing cal monday, or cal_weekend is
false, since that also means we're starting on day 1"""
if self.options['cal_monday'] or not self.options['cal_weekend']:
day_num -= 1
if day_num < 0:
day_num = 6
return day_num
def _event_time_in_range(self, e_time, r_start, r_end):
return e_time >= r_start and e_time < r_end
def _event_spans_time(self, e_start, e_end, time_point):
return e_start < time_point and e_end >= time_point
def _format_title(self, event, allday=False):
titlestr = self._valid_title(event)
if allday:
return titlestr
elif self.options['military']:
return ' '.join([event['s'].strftime("%H:%M"), titlestr])
else:
return ' '.join([event['s'].strftime("%I:%M").lstrip('0') +
event['s'].strftime('%p').lower(), titlestr])
def _add_reminders(self, event, reminders=None):
if reminders or not self.options['default_reminders']:
event['reminders'] = {'useDefault': False,
'overrides': []}
for r in reminders:
n, m = utils.parse_reminder(r)
event['reminders']['overrides'].append({'minutes': n,
'method': m})
return event
    def _get_week_events(self, start_dt, end_dt, event_list):
        """Bucket event_list into a 7-slot list (one list per weekday)
        of EventTitle entries for the week [start_dt, end_dt], inserting
        a "now" marker line when the current time falls in this week.

        Returns a list of 7 lists of EventTitle namedtuples.
        """
        week_events = [[] for _ in range(7)]
        now_in_week = True
        if self.now < start_dt or self.now > end_dt:
            now_in_week = False
        for event in event_list:
            # %w gives Sunday-based day number; _cal_monday rotates it
            # when the week starts on Monday
            event_daynum = self._cal_monday(int(event['s'].strftime("%w")))
            event_allday = self._isallday(event)
            event_end_date = event['e']
            if event_allday:
                # NOTE(slwaqo): in allDay events end date is always set as
                # day+1 and hour 0:00 so to not display it one day more, it's
                # necessary to lower it by one day
                event_end_date = event['e'] - timedelta(days=1)
            event_is_today = self._event_time_in_range(
                event['s'], start_dt, end_dt)
            event_continues_today = self._event_spans_time(
                event['s'], event_end_date, start_dt)
            # NOTE(slawqo): it's necessary to process events which starts in
            # current period of time but for all day events also to process
            # events which was started before current period of time and are
            # still continue in current period of time
            if event_is_today or (event_allday and event_continues_today):
                force_now_marker = False
                if now_in_week:
                    if (days_since_epoch(self.now) <
                            days_since_epoch(event['s'])):
                        # now is on a day before this event: marker goes at
                        # the end of the previous day's cell.
                        # NOTE(review): event_daynum - 1 wraps to -1 (the
                        # last slot) when event_daynum is 0 -- confirm that
                        # placement is intended.
                        force_now_marker = False
                        week_events[event_daynum - 1].append(
                            EventTitle(
                                '\n' + self.options['cal_width'] * '-',
                                self.options['color_now_marker']))
                    elif self.now <= event['s']:
                        # add a line marker before next event
                        force_now_marker = False
                        week_events[event_daynum].append(
                            EventTitle(
                                '\n' + self.options['cal_width'] * '-',
                                self.options['color_now_marker']))
                    # We don't want to recolor all day events, but ignoring
                    # them leads to issues where the "now" marker misprints
                    # into the wrong day. This resolves the issue by skipping
                    # all day events for specific coloring but not for previous
                    # or next events
                    elif self.now >= event['s'] and \
                            self.now <= event_end_date and \
                            not event_allday:
                        # line marker is during the event (recolor event)
                        force_now_marker = True
                if force_now_marker:
                    event_color = self.options['color_now_marker']
                else:
                    if self.options['override_color'] and event.get('colorId'):
                        event_color = self._calendar_color(
                            event, override_color=True)
                    else:
                        event_color = self._calendar_color(
                            event)
                # NOTE(slawqo): for all day events it's necessary to add event
                # to more than one day in week_events
                titlestr = self._format_title(event, allday=event_allday)
                if event_allday and event['s'] < event_end_date:
                    if event_end_date > end_dt:
                        end_daynum = 6
                    else:
                        end_daynum = \
                            self._cal_monday(
                                int(event_end_date.strftime("%w")))
                    if event_daynum > end_daynum:
                        event_daynum = 0
                    for day in range(event_daynum, end_daynum + 1):
                        week_events[day].append(
                            EventTitle('\n' + titlestr, event_color))
                else:
                    # newline and empty string are the keys to turn off
                    # coloring
                    week_events[event_daynum].append(
                        EventTitle('\n' + titlestr, event_color))
        return week_events
def _printed_len(self, string):
# We need to treat everything as unicode for this to actually give
# us the info we want. Date string were coming in as `str` type
# so we convert them to unicode and then check their size. Fixes
# the output issues we were seeing around non-US locale strings
return sum(
self.UNIWIDTH[east_asian_width(char)] for char in _u(string))
def _word_cut(self, word):
stop = 0
for i, char in enumerate(word):
stop += self._printed_len(char)
if stop >= self.options['cal_width']:
return stop, i + 1
    def _next_cut(self, string, cur_print_len):
        """Find a cut point for *string* given cur_print_len columns of
        the cell already consumed.

        Returns (printed_width, char_index); prefers to cut between
        words, falling back to _word_cut when the first word alone
        overflows the cell.
        """
        print_len = 0
        words = _u(string).split()
        for i, word in enumerate(words):
            word_len = self._printed_len(word)
            if (cur_print_len + word_len + print_len) >= \
                    self.options['cal_width']:
                cut_idx = len(' '.join(words[:i]))
                # if the first word is too long, we cannot cut between words
                if cut_idx == 0:
                    return self._word_cut(word)
                return (print_len, cut_idx)
            # NOTE(review): "+ i" adds i columns here, not one per joining
            # space -- looks suspicious given the trailing comment; verify
            # against upstream before changing.
            print_len += word_len + i # +i for the space between words
        # NOTE(review): relies on the loop variable `i` after the loop;
        # raises NameError if *string* splits into no words.
        return (print_len, len(' '.join(words[:i])))
def _get_cut_index(self, event_string):
print_len = self._printed_len(event_string)
# newline in string is a special case
idx = event_string.find('\n')
if idx > -1 and idx <= self.options['cal_width']:
return (self._printed_len(event_string[:idx]),
len(event_string[:idx]))
if print_len <= self.options['cal_width']:
return (print_len, len(event_string))
else:
# we must cut: _next_cut will loop until we find the right spot
return self._next_cut(event_string, 0)
    def _GraphEvents(self, cmd, startDateTime, count, eventList):
        """Render a 'calw'/'calm' ASCII calendar grid via the printer.

        cmd is 'calw' (week view) or 'calm' (month view); startDateTime
        is the first visible date; count is the number of week rows;
        eventList holds the pre-sorted events to place in the grid.
        """
        # ignore started events (i.e. events that start previous day and end
        # start day)
        color_border = self.options['color_border']
        while (len(eventList) and eventList[0]['s'] < startDateTime):
            eventList = eventList[1:]
        day_width_line = self.options['cal_width'] * self.printer.art['hrz']
        days = 7 if self.options['cal_weekend'] else 5
        # Get the localized day names... January 1, 2001 was a Monday
        day_names = [date(2001, 1, i + 1).strftime('%A') for i in range(days)]
        if not self.options['cal_monday'] or not self.options['cal_weekend']:
            day_names = day_names[6:] + day_names[:6]
        def build_divider(left, center, right):
            # one horizontal border row built from the printer's box art
            return (
                self.printer.art[left] + day_width_line +
                ((days - 1) * (self.printer.art[center] + day_width_line)) +
                self.printer.art[right])
        week_top = build_divider('ulc', 'ute', 'urc')
        week_divider = build_divider('lte', 'crs', 'rte')
        week_bottom = build_divider('llc', 'bte', 'lrc')
        empty_day = self.options['cal_width'] * ' '
        if cmd == 'calm':
            # month titlebar
            month_title_top = build_divider('ulc', 'hrz', 'urc')
            self.printer.msg(month_title_top + '\n', color_border)
            month_title = startDateTime.strftime('%B %Y')
            month_width = (self.options['cal_width'] * days) + (days - 1)
            month_title += ' ' * (month_width - self._printed_len(month_title))
            self.printer.art_msg('vrt', color_border)
            self.printer.msg(month_title, self.options['color_date'])
            self.printer.art_msg('vrt', color_border)
            month_title_bottom = build_divider('lte', 'ute', 'rte')
            self.printer.msg('\n' + month_title_bottom + '\n', color_border)
        else:
            # week titlebar
            # month title bottom takes care of this when cmd='calm'
            self.printer.msg(week_top + '\n', color_border)
        # weekday labels
        self.printer.art_msg('vrt', color_border)
        for day_name in day_names:
            day_name += ' ' * (
                self.options['cal_width'] - self._printed_len(day_name))
            self.printer.msg(day_name, self.options['color_date'])
            self.printer.art_msg('vrt', color_border)
        self.printer.msg('\n' + week_divider + '\n', color_border)
        cur_month = startDateTime.strftime("%b")
        # get date range objects for the first week
        if cmd == 'calm':
            day_num = self._cal_monday(int(startDateTime.strftime("%w")))
            startDateTime = (startDateTime - timedelta(days=day_num))
        startWeekDateTime = startDateTime
        endWeekDateTime = (startWeekDateTime + timedelta(days=7))
        for i in range(count):
            # create and print the date line for a week
            for j in range(days):
                if cmd == 'calw':
                    d = (startWeekDateTime +
                         timedelta(days=j)).strftime("%d %b")
                else:  # (cmd == 'calm'):
                    d = (startWeekDateTime +
                         timedelta(days=j)).strftime("%d")
                    # month view blanks out dates from neighboring months
                    if cur_month != (startWeekDateTime +
                                     timedelta(days=j)).strftime("%b"):
                        d = ''
                tmpDateColor = self.options['color_date']
                if self.now.strftime("%d%b%Y") == \
                        (startWeekDateTime + timedelta(days=j)).strftime("%d%b%Y"):
                    # highlight today's cell
                    tmpDateColor = self.options['color_now_marker']
                    d += " **"
                d += ' ' * (self.options['cal_width'] - self._printed_len(d))
                # print dates
                self.printer.art_msg('vrt', color_border)
                self.printer.msg(d, tmpDateColor)
            self.printer.art_msg('vrt', color_border)
            self.printer.msg('\n')
            week_events = self._get_week_events(
                startWeekDateTime, endWeekDateTime, eventList)
            # get date range objects for the next week
            startWeekDateTime = endWeekDateTime
            endWeekDateTime = (endWeekDateTime + timedelta(days=7))
            while True:
                # keep looping over events by day, printing one line at a time
                # stop when everything has been printed
                done = True
                self.printer.art_msg('vrt', color_border)
                for j in range(days):
                    if not week_events[j]:
                        # no events today
                        self.printer.msg(
                            empty_day + self.printer.art['vrt'],
                            color_border)
                        continue
                    curr_event = week_events[j][0]
                    print_len, cut_idx = self._get_cut_index(curr_event.title)
                    padding = ' ' * (self.options['cal_width'] - print_len)
                    self.printer.msg(
                        curr_event.title[:cut_idx] + padding,
                        curr_event.color)
                    # trim what we've already printed
                    trimmed_title = curr_event.title[cut_idx:].strip()
                    if trimmed_title == '':
                        week_events[j].pop(0)
                    else:
                        week_events[j][0] = \
                            curr_event._replace(title=trimmed_title)
                        done = False
                    self.printer.art_msg('vrt', color_border)
                self.printer.msg('\n')
                if done:
                    break
            # NOTE(review): the condition below is just "i < count - 1";
            # the last week row gets the bottom border, others a divider.
            if i < range(count)[len(range(count)) - 1]:
                self.printer.msg(week_divider + '\n', color_border)
            else:
                self.printer.msg(week_bottom + '\n', color_border)
def _tsv(self, startDateTime, eventList):
for event in eventList:
if self.options['ignore_started'] and (event['s'] < self.now):
continue
if self.options['ignore_declined'] and self._DeclinedEvent(event):
continue
output = "%s\t%s\t%s\t%s" % (_u(event['s'].strftime('%Y-%m-%d')),
_u(event['s'].strftime('%H:%M')),
_u(event['e'].strftime('%Y-%m-%d')),
_u(event['e'].strftime('%H:%M')))
if self.details.get('url'):
output += "\t%s" % (self._shorten_url(event['htmlLink'])
if 'htmlLink' in event else '')
output += "\t%s" % (self._shorten_url(event['hangoutLink'])
if 'hangoutLink' in event else '')
output += "\t%s" % _u(self._valid_title(event).strip())
if self.details.get('location'):
output += "\t%s" % (_u(event['location'].strip())
if 'location' in event else '')
if self.details.get('description'):
output += "\t%s" % (_u(event['description'].strip())
if 'description' in event else '')
if self.details.get('calendar'):
output += "\t%s" % _u(event['gcalcli_cal']['summary'].strip())
if self.details.get('email'):
output += "\t%s" % (event['creator']['email'].strip()
if 'email' in event['creator'] else '')
output = "%s\n" % output.replace('\n', '''\\n''')
sys.stdout.write(_u(output))
    def _PrintEvent(self, event, prefix):
        """Pretty-print one event line (time + title) plus any detail
        sections enabled in self.details (calendar, url, location,
        attendees, attachments, length, reminders, email, description).

        *prefix* is the date string printed before the time column, or
        falsy to print indent-width spaces instead.
        """
        def _formatDescr(descr, indent, box):
            # wrap description text to the detail width; in box mode each
            # wrapped line is framed with vertical box-art characters
            wrapper = textwrap.TextWrapper()
            if box:
                wrapper.initial_indent = (indent + ' ')
                wrapper.subsequent_indent = (indent + ' ')
                wrapper.width = (self.details.get('width') - 2)
            else:
                wrapper.initial_indent = indent
                wrapper.subsequent_indent = indent
                wrapper.width = self.details.get('width')
            new_descr = ""
            for line in descr.split("\n"):
                if box:
                    tmpLine = wrapper.fill(line)
                    for singleLine in tmpLine.split("\n"):
                        singleLine = singleLine.ljust(
                            self.details.get('width'), ' ')
                        new_descr += singleLine[:len(indent)] + \
                            self.printer.art['vrt'] + \
                            singleLine[(len(indent) + 1):
                                       (self.details.get('width') - 1)] + \
                            self.printer.art['vrt'] + '\n'
                else:
                    new_descr += wrapper.fill(line) + "\n"
            return new_descr.rstrip()
        indent = 10 * ' '
        detailsIndent = 19 * ' '
        if self.options['military']:
            timeFormat = '%-5s'
            tmpTimeStr = event['s'].strftime("%H:%M")
        else:
            timeFormat = '%-7s'
            tmpTimeStr = \
                event['s'].strftime("%I:%M").lstrip('0').rjust(5) + \
                event['s'].strftime('%p').lower()
        if not prefix:
            prefix = indent
        self.printer.msg(prefix, self.options['color_date'])
        happeningNow = event['s'] <= self.now <= event['e']
        allDay = self._isallday(event)
        if self.options['override_color'] and event.get('colorId'):
            if happeningNow and not allDay:
                eventColor = self.options['color_now_marker']
            else:
                eventColor = self._calendar_color(
                    event, override_color=True)
        else:
            # recolor only timed events that are happening right now
            eventColor = self.options['color_now_marker'] \
                if happeningNow and not allDay \
                else self._calendar_color(event)
        if allDay:
            # all-day events print a blank time column
            fmt = ' ' + timeFormat + ' %s\n'
            self.printer.msg(
                fmt % ('', self._valid_title(event).strip()),
                eventColor)
        else:
            fmt = ' ' + timeFormat + ' %s\n'
            self.printer.msg(
                fmt % (
                    tmpTimeStr, self._valid_title(event).strip()),
                eventColor)
        if self.details.get('calendar'):
            xstr = "%s Calendar: %s\n" % (
                detailsIndent, event['gcalcli_cal']['summary'])
            self.printer.msg(xstr, 'default')
        if self.details.get('url') and 'htmlLink' in event:
            hLink = self._shorten_url(event['htmlLink'])
            xstr = "%s Link: %s\n" % (detailsIndent, hLink)
            self.printer.msg(xstr, 'default')
        if self.details.get('url') and 'hangoutLink' in event:
            hLink = self._shorten_url(event['hangoutLink'])
            xstr = "%s Hangout Link: %s\n" % (detailsIndent, hLink)
            self.printer.msg(xstr, 'default')
        if self.details.get('location') and \
                'location' in event and \
                event['location'].strip():
            xstr = "%s Location: %s\n" % (
                detailsIndent,
                event['location'].strip()
            )
            self.printer.msg(xstr, 'default')
        if self.details.get('attendees') and 'attendees' in event:
            xstr = "%s Attendees:\n" % (detailsIndent)
            self.printer.msg(xstr, 'default')
            # list the organizer first, unless it's the current user
            if 'self' not in event['organizer']:
                xstr = "%s %s: <%s>\n" % (
                    detailsIndent,
                    event['organizer'].get('displayName', 'Not Provided')
                        .strip(),
                    event['organizer'].get('email', 'Not Provided').strip()
                )
                self.printer.msg(xstr, 'default')
            for attendee in event['attendees']:
                if 'self' not in attendee:
                    xstr = "%s %s: <%s>\n" % (
                        detailsIndent,
                        attendee.get('displayName', 'Not Provided').strip(),
                        attendee.get('email', 'Not Provided').strip()
                    )
                    self.printer.msg(xstr, 'default')
        if self.details.get('attachments') and 'attachments' in event:
            xstr = "%s Attachments:\n" % (detailsIndent)
            self.printer.msg(xstr, 'default')
            for attendee in event['attachments']:
                xstr = "%s %s\n%s -> %s\n" % (
                    detailsIndent,
                    attendee.get('title', 'Not Provided').strip(),
                    detailsIndent,
                    attendee.get('fileUrl', 'Not Provided').strip()
                )
                self.printer.msg(xstr, 'default')
        if self.details.get('length'):
            diffDateTime = (event['e'] - event['s'])
            xstr = "%s Length: %s\n" % (detailsIndent, diffDateTime)
            self.printer.msg(xstr, 'default')
        if self.details.get('reminders') and 'reminders' in event:
            if event['reminders']['useDefault'] is True:
                xstr = "%s Reminder: (default)\n" % (detailsIndent)
                self.printer.msg(xstr, 'default')
            elif 'overrides' in event['reminders']:
                for rem in event['reminders']['overrides']:
                    xstr = "%s Reminder: %s %d minutes\n" % \
                        (detailsIndent, rem['method'], rem['minutes'])
                    self.printer.msg(xstr, 'default')
        if self.details.get('email') and \
                'email' in event['creator'] and \
                event['creator']['email'].strip():
            xstr = "%s Email: %s\n" % (
                detailsIndent,
                event['creator']['email'].strip()
            )
            self.printer.msg(xstr, 'default')
        if self.details.get('description') and \
                'description' in event and \
                event['description'].strip():
            descrIndent = detailsIndent + ' '
            box = True # leave old non-box code for option later
            if box:
                topMarker = (descrIndent +
                             self.printer.art['ulc'] +
                             (self.printer.art['hrz'] *
                              ((self.details.get('width') - len(descrIndent)) -
                               2)) +
                             self.printer.art['urc'])
                botMarker = (descrIndent +
                             self.printer.art['llc'] +
                             (self.printer.art['hrz'] *
                              ((self.details.get('width') - len(descrIndent)) -
                               2)) +
                             self.printer.art['lrc'])
                xstr = "%s Description:\n%s\n%s\n%s\n" % (
                    detailsIndent,
                    topMarker,
                    _formatDescr(event['description'].strip(),
                                 descrIndent, box),
                    botMarker
                )
            else:
                marker = descrIndent + '-' * \
                    (self.details.get('width') - len(descrIndent))
                xstr = "%s Description:\n%s\n%s\n%s\n" % (
                    detailsIndent,
                    marker,
                    _formatDescr(event['description'].strip(),
                                 descrIndent, box),
                    marker
                )
            self.printer.msg(xstr, 'default')
def _delete_event(self, event):
if self.expert:
self._retry_with_backoff(
self._cal_service().events().
delete(calendarId=event['gcalcli_cal']['id'],
eventId=event['id']))
self.printer.msg('Deleted!\n', 'red')
return
self.printer.msg('Delete? [N]o [y]es [q]uit: ', 'magenta')
val = input()
if not val or val.lower() == 'n':
return
elif val.lower() == 'y':
self._retry_with_backoff(
self._cal_service().events().
delete(calendarId=event['gcalcli_cal']['id'],
eventId=event['id']))
self.printer.msg('Deleted!\n', 'red')
elif val.lower() == 'q':
sys.stdout.write('\n')
sys.exit(0)
else:
self.printer.err_msg('Error: invalid input\n')
sys.stdout.write('\n')
sys.exit(1)
def _SetEventStartEnd(self, start, end, event):
event['s'] = parse(start)
event['e'] - parse(end)
if self.options.get('allday'):
event['start'] = {'date': start,
'dateTime': None,
'timeZone': None}
event['end'] = {'date': end,
'dateTime': None,
'timeZone': None}
else:
event['start'] = {'date': None,
'dateTime': start,
'timeZone': event['gcalcli_cal']['timeZone']}
event['end'] = {'date': None,
'dateTime': end,
'timeZone': event['gcalcli_cal']['timeZone']}
return event
    def _edit_event(self, event):
        """Interactive edit loop for *event*.

        Prompts for a field, updates the local event copy, and re-prints
        it; 's' patches the accumulated changes back through the API,
        'q' exits the program, Enter/'n' returns without saving.
        """
        while True:
            self.printer.msg(
                'Edit?\n[N]o [s]ave [q]uit [t]itle [l]ocation [w]hen ' +
                'len[g]th [r]eminder [c]olor [d]escr: ', 'magenta')
            val = input()
            if not val or val.lower() == 'n':
                return
            elif val.lower() == 'c':
                val = get_input(self.printer, 'Color: ', VALID_COLORS)
                if val:
                    self.options['override_color'] = True
                    event['colorId'] = get_override_color_id(val)
            elif val.lower() == 's':
                # copy only editable event details for patching
                mod_event = {}
                keys = ['summary', 'location', 'start', 'end', 'reminders',
                        'description', 'colorId']
                for k in keys:
                    if k in event:
                        mod_event[k] = event[k]
                self._retry_with_backoff(
                    self._cal_service().events().
                    patch(calendarId=event['gcalcli_cal']['id'],
                          eventId=event['id'],
                          body=mod_event))
                self.printer.msg("Saved!\n", 'red')
                return
            elif not val or val.lower() == 'q':
                # NOTE(review): "not val" is unreachable here -- an empty
                # answer already returned in the first branch above.
                sys.stdout.write('\n')
                sys.exit(0)
            elif val.lower() == 't':
                val = get_input(self.printer, 'Title: ', STR_NOT_EMPTY)
                if val.strip():
                    event['summary'] = val.strip()
            elif val.lower() == 'l':
                val = get_input(self.printer, 'Location: ', STR_ALLOW_EMPTY)
                if val.strip():
                    event['location'] = val.strip()
            elif val.lower() == 'w':
                val = get_input(self.printer, 'When: ', PARSABLE_DATE).strip()
                if val:
                    # preserve the event's current length in minutes
                    td = (event['e'] - event['s'])
                    length = ((td.days * 1440) + (td.seconds / 60))
                    all_day = self.options.get('allday')
                    try:
                        new_start, new_end = utils.get_times_from_duration(
                            val, length, all_day)
                    except ValueError as exc:
                        self.printer.err_msg(str(exc))
                        sys.exit(1)
                    event = self._SetEventStartEnd(new_start, new_end, event)
            elif val.lower() == 'g':
                val = get_input(
                    self.printer, 'Length (mins): ', STR_TO_INT)
                if val:
                    all_day = self.options.get('allday')
                    try:
                        new_start, new_end = utils.get_times_from_duration(
                            event['start']['dateTime'], val,
                            all_day)
                        event = self._SetEventStartEnd(new_start, new_end, event)
                    except ValueError as exc:
                        self.printer.err_msg(str(exc))
            elif val.lower() == 'r':
                # collect reminder specs until '.'; mirrors _add_reminders
                rem = []
                while True:
                    r = get_input(
                        self.printer, "Enter a valid reminder or '.' to"
                        "end: ", REMINDER)
                    if r == '.':
                        break
                    rem.append(r)
                if rem or not self.options['default_reminders']:
                    event['reminders'] = {'useDefault': False,
                                          'overrides': []}
                    for r in rem:
                        n, m = utils.parse_reminder(r)
                        event['reminders']['overrides'].append({'minutes': n,
                                                                'method': m})
                else:
                    event['reminders'] = {'useDefault': True,
                                          'overrides': []}
            elif val.lower() == 'd':
                val = get_input(self.printer, 'Description: ', STR_ALLOW_EMPTY)
                if val.strip():
                    event['description'] = val.strip()
            else:
                self.printer.err_msg('Error: invalid input\n')
                sys.stdout.write('\n')
                sys.exit(1)
            # re-display the event after every accepted edit
            self._PrintEvent(
                event, event['s'].strftime('\n%Y-%m-%d'))
def _iterate_events(
self, start_datetime, event_list, year_date=False, work=None):
selected = 0
if len(event_list) == 0:
self.printer.msg('\nNo Events Found...\n', 'yellow')
return selected
# 10 chars for day and length must match 'indent' in _PrintEvent
day_format = '\n%Y-%m-%d' if year_date else '\n%a %b %d'
day = ''
for event in event_list:
if self.options['ignore_started'] and (event['s'] < self.now):
continue
if self.options['ignore_declined'] and self._DeclinedEvent(event):
continue
selected += 1
tmp_day_str = event['s'].strftime(day_format)
prefix = None
if year_date or tmp_day_str != day:
day = prefix = tmp_day_str
self._PrintEvent(event, prefix)
if work:
work(event)
return selected
def _GetAllEvents(self, cal, events, end):
eventList = []
while 1:
if 'items' not in events:
break
for event in events['items']:
event['gcalcli_cal'] = cal
if 'status' in event and event['status'] == 'cancelled':
continue
if 'dateTime' in event['start']:
event['s'] = parse(event['start']['dateTime'])
else:
# all date events
event['s'] = parse(event['start']['date'])
event['s'] = self._localize_datetime(event['s'])
if 'dateTime' in event['end']:
event['e'] = parse(event['end']['dateTime'])
else:
# all date events
event['e'] = parse(event['end']['date'])
event['e'] = self._localize_datetime(event['e'])
# For all-day events, Google seems to assume that the event
# time is based in the UTC instead of the local timezone. Here
# we filter out those events start beyond a specified end time.
if end and (event['s'] >= end):
continue
# http://en.wikipedia.org/wiki/Year_2038_problem
# Catch the year 2038 problem here as the python dateutil
# module can choke throwing a ValueError exception. If either
# the start or end time for an event has a year '>= 2038' dump
# it.
if event['s'].year >= 2038 or event['e'].year >= 2038:
continue
eventList.append(event)
pageToken = events.get('nextPageToken')
if pageToken:
events = self._retry_with_backoff(
self._cal_service().events().
list(calendarId=cal['id'], pageToken=pageToken))
else:
break
return eventList
def _search_for_events(self, start, end, search_text):
event_list = []
for cal in self.cals:
work = self._cal_service().events().\
list(calendarId=cal['id'],
timeMin=start.isoformat() if start else None,
timeMax=end.isoformat() if end else None,
q=search_text if search_text else None,
singleEvents=True)
events = self._retry_with_backoff(work)
event_list.extend(self._GetAllEvents(cal, events, end))
event_list.sort(key=lambda x: x['s'])
return event_list
def _DeclinedEvent(self, event):
if 'attendees' in event:
attendee = [a for a in event['attendees']
if a['email'] == event['gcalcli_cal']['id']][0]
if attendee and attendee['responseStatus'] == 'declined':
return True
return False
def ListAllCalendars(self):
accessLen = 0
for cal in self.allCals:
length = len(cal['accessRole'])
if length > accessLen:
accessLen = length
if accessLen < len('Access'):
accessLen = len('Access')
format = ' %0' + str(accessLen) + 's %s\n'
self.printer.msg(
format % ('Access', 'Title'), self.options['color_title'])
self.printer.msg(
format % ('------', '-----'), self.options['color_title'])
for cal in self.allCals:
self.printer.msg(
format % (cal['accessRole'], cal['summary']),
self._calendar_color(cal))
def _display_queried_events(
self, start, end, search=None, year_date=False):
event_list = self._search_for_events(start, end, search)
if self.options.get('tsv'):
return self._tsv(start, event_list)
else:
return self._iterate_events(start, event_list, year_date=year_date)
def TextQuery(self, search_text='', start=None, end=None):
if not search_text:
# the empty string would get *ALL* events...
raise GcalcliError('Search text is required.')
return self._display_queried_events(start, end, search_text, True)
def AgendaQuery(self, start=None, end=None):
if not start:
start = self.now.replace(hour=0, minute=0, second=0, microsecond=0)
if not end:
end = (start + timedelta(days=self.agenda_length))
return self._display_queried_events(start, end)
    def CalQuery(self, cmd, start_text='', count=1):
        """Display a week ('calw') or month ('calm') calendar grid.

        *start_text* is an optional parseable date (defaults to today);
        *count* is the number of weeks for 'calw' and is recomputed for
        'calm' to cover the whole month.
        """
        if not start_text:
            # convert now to midnight this morning and use for default
            start = self.now.replace(hour=0,
                                     minute=0,
                                     second=0,
                                     microsecond=0)
        else:
            try:
                start = utils.get_time_from_str(start_text)
                start = start.replace(hour=0, minute=0, second=0,
                                      microsecond=0)
            except Exception:
                self.printer.err_msg(
                    'Error: failed to parse start time\n')
                return
        # convert start date to the beginning of the week or month
        if cmd == 'calw':
            dayNum = self._cal_monday(int(start.strftime("%w")))
            start = (start - timedelta(days=dayNum))
            end = (start + timedelta(days=(count * 7)))
        else:  # cmd == 'calm':
            # rewind to the 1st, then find the 1st of the next month
            start = (start - timedelta(days=(start.day - 1)))
            endMonth = (start.month + 1)
            endYear = start.year
            if endMonth == 13:
                endMonth = 1
                endYear += 1
            end = start.replace(month=endMonth, year=endYear)
            daysInMonth = (end - start).days
            offsetDays = int(start.strftime('%w'))
            if self.options['cal_monday']:
                offsetDays -= 1
                if offsetDays < 0:
                    offsetDays = 6
            # enough week rows to cover the whole month
            totalDays = (daysInMonth + offsetDays)
            count = int(totalDays / 7)
            if totalDays % 7:
                count += 1
        eventList = self._search_for_events(start, end, None)
        self._GraphEvents(cmd, start, count, eventList)
def QuickAddEvent(self, event_text, reminders=None):
"""Wrapper around Google Calendar API's quickAdd"""
if not event_text:
raise GcalcliError('event_text is required for a quickAdd')
if len(self.cals) != 1:
# TODO: get a better name for this exception class
# and use it elsewhere
raise GcalcliError('You must only specify a single calendar\n')
new_event = self._retry_with_backoff(
self._cal_service().events().quickAdd(
calendarId=self.cals[0]['id'], text=event_text))
if reminders or not self.options['default_reminders']:
rem = {}
rem['reminders'] = {'useDefault': False,
'overrides': []}
for r in reminders:
n, m = utils.parse_reminder(r)
rem['reminders']['overrides'].append({'minutes': n,
'method': m})
new_event = self._retry_with_backoff(
self._cal_service().events().
patch(calendarId=self.cals[0]['id'],
eventId=new_event['id'],
body=rem))
if self.details.get('url'):
hlink = self._shorten_url(new_event['htmlLink'])
self.printer.msg('New event added: %s\n' % hlink, 'green')
return new_event
def AddEvent(
self, title, where, start, end, descr, who, reminders, color):
if len(self.cals) != 1:
# TODO: get a better name for this exception class
# and use it elsewhere
raise GcalcliError('You must only specify a single calendar\n')
event = {}
event['summary'] = title
if self.options['allday']:
event['start'] = {'date': start}
event['end'] = {'date': end}
else:
event['start'] = {'dateTime': start,
'timeZone': self.cals[0]['timeZone']}
event['end'] = {'dateTime': end,
'timeZone': self.cals[0]['timeZone']}
if where:
event['location'] = where
if descr:
event['description'] = descr
if color:
event['colorId'] = get_override_color_id(color)
event['attendees'] = list(map(lambda w: {'email': w}, who))
event = self._add_reminders(event, reminders)
events = self._cal_service().events()
request = events.insert(calendarId=self.cals[0]['id'], body=event)
new_event = self._retry_with_backoff(request)
if self.details.get('url'):
hlink = self._shorten_url(new_event['htmlLink'])
self.printer.msg('New event added: %s\n' % hlink, 'green')
return new_event
def ModifyEvents(
self, work, search_text, start=None, end=None, expert=False):
if not search_text:
raise GcalcliError('The empty string would get *ALL* events')
event_list = self._search_for_events(start, end, search_text)
self.expert = expert
return self._iterate_events(
self.now, event_list, year_date=True, work=work)
    def Remind(self, minutes, command, use_reminders=False):
        """Check for events between now and now+minutes. If use_reminders then
        only remind if now >= event['start'] - reminder"""
        # perform a date query for now + minutes + slip
        start = self.now
        end = (start + timedelta(minutes=(minutes + 5)))
        eventList = self._search_for_events(start, end, None)
        message = ''
        for event in eventList:
            # skip this event if it already started
            # XXX maybe add a 2+ minute grace period here...
            if event['s'] < self.now:
                continue
            # not sure if 'reminders' always in event
            if use_reminders and 'reminders' in event \
                    and 'overrides' in event['reminders']:
                if all(event['s'] - timedelta(minutes=r['minutes']) > self.now
                        for r in event['reminders']['overrides']):
                    # don't remind if all reminders haven't arrived yet
                    continue
            if self.options.get('military'):
                tmp_time_str = event['s'].strftime('%H:%M')
            else:
                tmp_time_str = \
                    event['s'].strftime('%I:%M').lstrip('0') + \
                    event['s'].strftime('%p').lower()
            message += '%s %s\n' % \
                (tmp_time_str, self._valid_title(event).strip())
        if not message:
            return
        # substitute the accumulated message for any '%s' placeholder
        # argument of the notification command
        cmd = shlex.split(command)
        for i, a in zip(range(len(cmd)), cmd):
            if a == '%s':
                cmd[i] = message
        # fork: the child replaces itself with the notifier command while
        # the parent returns immediately
        pid = os.fork()
        if not pid:
            os.execvp(cmd[0], cmd)
    def ImportICS(self, verbose=False, dump=False, reminders=None,
                  icsFile=None):
        """Import VEVENTs from an ICS file (or stdin) into the single
        selected calendar.

        *dump* only prints the parsed events (implies verbose); when
        *verbose*, each event is echoed and confirmed interactively;
        *reminders* are attached to every imported event.
        """
        def CreateEventFromVOBJ(ve):
            # Translate one vobject VEVENT component into a Calendar API
            # event body; returns None when it lacks dtstart/dtend.
            event = {}
            if verbose:
                print("+----------------+")
                print("| Calendar Event |")
                print("+----------------+")
            if hasattr(ve, 'summary'):
                if verbose:
                    print("Event........%s" % ve.summary.value)
                event['summary'] = ve.summary.value
            if hasattr(ve, 'location'):
                if verbose:
                    print("Location.....%s" % ve.location.value)
                event['location'] = ve.location.value
            if not hasattr(ve, 'dtstart') or not hasattr(ve, 'dtend'):
                self.printer.err_msg(
                    'Error: event does not have a dtstart and dtend!\n')
                return None
            if verbose:
                if ve.dtstart.value:
                    print("Start........%s" % ve.dtstart.value.isoformat())
                if ve.dtend.value:
                    print("End..........%s" % ve.dtend.value.isoformat())
                if ve.dtstart.value:
                    print("Local Start..%s" % self._localize_datetime(
                        ve.dtstart.value))
                if ve.dtend.value:
                    print("Local End....%s" % self._localize_datetime(
                        ve.dtend.value))
            if hasattr(ve, 'rrule'):
                if verbose:
                    print("Recurrence...%s" % ve.rrule.value)
                event['recurrence'] = ["RRULE:" + ve.rrule.value]
            if hasattr(ve, 'dtstart') and ve.dtstart.value:
                # XXX
                # Timezone madness! Note that we're using the timezone for the
                # calendar being added to. This is OK if the event is in the
                # same timezone. This needs to be changed to use the timezone
                # from the DTSTART and DTEND values. Problem is, for example,
                # the TZID might be "Pacific Standard Time" and Google expects
                # a timezone string like "America/Los_Angeles". Need to find a
                # way in python to convert to the more specific timezone
                # string.
                # XXX
                # print ve.dtstart.params['X-VOBJ-ORIGINAL-TZID'][0]
                # print self.cals[0]['timeZone']
                # print dir(ve.dtstart.value.tzinfo)
                # print vars(ve.dtstart.value.tzinfo)
                start = ve.dtstart.value.isoformat()
                if isinstance(ve.dtstart.value, datetime):
                    event['start'] = {'dateTime': start,
                                      'timeZone': self.cals[0]['timeZone']}
                else:
                    event['start'] = {'date': start}
                event = self._add_reminders(event, reminders)
                # Can only have an end if we have a start, but not the other
                # way around apparently... If there is no end, use the start
                if hasattr(ve, 'dtend') and ve.dtend.value:
                    end = ve.dtend.value.isoformat()
                    if isinstance(ve.dtend.value, datetime):
                        event['end'] = {'dateTime': end,
                                        'timeZone': self.cals[0]['timeZone']}
                    else:
                        event['end'] = {'date': end}
                else:
                    event['end'] = event['start']
            if hasattr(ve, 'description') and ve.description.value.strip():
                descr = ve.description.value.strip()
                if verbose:
                    print("Description:\n%s" % descr)
                event['description'] = descr
            if hasattr(ve, 'organizer'):
                if ve.organizer.value.startswith("MAILTO:"):
                    email = ve.organizer.value[7:]
                else:
                    email = ve.organizer.value
                if verbose:
                    print("organizer:\n %s" % email)
                event['organizer'] = {'displayName': ve.organizer.name,
                                      'email': email}
            if hasattr(ve, 'attendee_list'):
                if verbose:
                    print("attendees:")
                event['attendees'] = []
                for attendee in ve.attendee_list:
                    if attendee.value.upper().startswith("MAILTO:"):
                        email = attendee.value[7:]
                    else:
                        email = attendee.value
                    if verbose:
                        print(" %s" % email)
                    event['attendees'].append({'displayName': attendee.name,
                                               'email': email})
            return event
        try:
            # vobject is an optional dependency, needed only for import
            import vobject
        except ImportError:
            self.printer.err_msg(
                'Python vobject module not installed!\n')
            sys.exit(1)
        if dump:
            verbose = True
        if not dump and len(self.cals) != 1:
            raise GcalcliError('Must specify a single calendar\n')
        f = sys.stdin
        if icsFile:
            try:
                f = icsFile
            except Exception as e:
                # NOTE(review): a bare assignment cannot raise, so this
                # except branch looks unreachable -- confirm intent.
                self.printer.err_msg('Error: ' + str(e) + '!\n')
                sys.exit(1)
        while True:
            # read ICS components one at a time until the stream is done
            try:
                v = next(vobject.readComponents(f))
            except StopIteration:
                break
            for ve in v.vevent_list:
                event = CreateEventFromVOBJ(ve)
                if not event:
                    continue
                if dump:
                    continue
                if not verbose:
                    # non-verbose: insert immediately without confirmation
                    newEvent = self._retry_with_backoff(
                        self._cal_service().events().
                        insert(calendarId=self.cals[0]['id'],
                               body=event))
                    hlink = self._shorten_url(newEvent.get('htmlLink'))
                    self.printer.msg(
                        'New event added: %s\n' % hlink, 'green')
                    continue
                self.printer.msg('\n[S]kip [i]mport [q]uit: ', 'magenta')
                val = input()
                if not val or val.lower() == 's':
                    continue
                if val.lower() == 'i':
                    newEvent = self._retry_with_backoff(
                        self._cal_service().events().
                        insert(calendarId=self.cals[0]['id'],
                               body=event))
                    hlink = self._shorten_url(newEvent.get('htmlLink'))
                    self.printer.msg('New event added: %s\n' % hlink, 'green')
                elif val.lower() == 'q':
                    sys.exit(0)
                else:
                    self.printer.err_msg('Error: invalid input\n')
                    sys.exit(1)
        # TODO: return the number of events added
        return True
def parse_cal_names(cal_names):
    """Split "name#color" calendar specs into CalName tuples.

    Args:
        cal_names: iterable of strings, each "name" or "name#colorname".

    Returns:
        List of CalName(name, color); color defaults to 'default'.
        Duplicate names collapse to the last color given.

    Raises:
        ValueError: if a spec contains more than one '#'.
    """
    cal_colors = {}
    for name in cal_names:
        # "name#color" -> at most two parts; a bare name keeps the default.
        parts = name.split("#")
        if len(parts) > 2:
            raise ValueError('Cannot parse calendar name: "%s"' % name)
        cal_name = parts[0]
        cal_color = valid_color_name(parts[1]) if len(parts) == 2 else 'default'
        cal_colors[cal_name] = cal_color
    return [CalName(name=k, color=cal_colors[k]) for k in cal_colors.keys()]
def run_add_prompt(parsed_args, printer):
    """Interactively prompt for any 'add' event fields the user left unset."""
    # Free-text fields asked first, in a fixed order.
    for attr, prompt, validator in [
            ('title', 'Title: ', STR_NOT_EMPTY),
            ('where', 'Location: ', STR_ALLOW_EMPTY),
            ('when', 'When: ', PARSABLE_DATE)]:
        if getattr(parsed_args, attr) is None:
            setattr(parsed_args, attr, get_input(printer, prompt, validator))
    # Duration units depend on whether this is an all-day event.
    if parsed_args.duration is None:
        unit = 'days' if parsed_args.allday else 'minutes'
        parsed_args.duration = get_input(
            printer, 'Duration (%s): ' % unit, STR_TO_INT)
    for attr, prompt, validator in [
            ('description', 'Description: ', STR_ALLOW_EMPTY),
            ('event_color', 'Color: ', VALID_COLORS)]:
        if getattr(parsed_args, attr) is None:
            setattr(parsed_args, attr, get_input(printer, prompt, validator))
    # Collect reminders until the user enters a lone '.'.
    if not parsed_args.reminders:
        while True:
            reply = get_input(
                printer, "Enter a valid reminder or '.' to end: ", REMINDER)
            if reply == '.':
                break
            mins, method = utils.parse_reminder(str(reply))
            parsed_args.reminders.append(str(mins) + ' ' + method)
def main():
    """Entry point: parse argv (plus ~/.gcalclirc flag files) and dispatch a sub-command."""
    parser = get_argument_parser()
    try:
        argv = sys.argv[1:]
        gcalclirc = os.path.expanduser('~/.gcalclirc')
        if os.path.exists(gcalclirc):
            # We want .gcalclirc to be sourced before any other --flagfile
            # params since we may be told to use a specific config folder, we
            # need to store generated argv in temp variable
            tmp_argv = ["@%s" % gcalclirc, ] + argv
        else:
            tmp_argv = argv
        (parsed_args, unparsed) = parser.parse_known_args(tmp_argv)
    except Exception as e:
        sys.stderr.write(str(e))
        parser.print_usage()
        sys.exit(1)
    if parsed_args.configFolder:
        # Create the config folder on first use.
        if not os.path.exists(os.path.expanduser(parsed_args.configFolder)):
            os.makedirs(os.path.expanduser(parsed_args.configFolder))
        if os.path.exists(os.path.expanduser("%s/gcalclirc" %
                                             parsed_args.configFolder)):
            rc_path = ["@%s/gcalclirc" % parsed_args.configFolder, ]
            # --includeRc keeps ~/.gcalclirc in effect alongside the
            # folder-local rc; otherwise the folder rc replaces it.
            if not parsed_args.includeRc:
                tmp_argv = rc_path + argv
            else:
                tmp_argv = rc_path + tmp_argv
        # Re-parse so flags from the config-folder rc file take effect.
        (parsed_args, unparsed) = parser.parse_known_args(tmp_argv)
    printer = Printer(
        conky=parsed_args.conky, use_color=parsed_args.color,
        art_style=parsed_args.lineart)
    if unparsed:
        try:
            parsed_args = handle_unparsed(unparsed, parsed_args)
        except Exception as e:
            sys.stderr.write(str(e))
            parser.print_usage()
            sys.exit(1)
    if parsed_args.locale:
        try:
            utils.set_locale(parsed_args.locale)
        except ValueError as exc:
            # Bad locale is non-fatal; report and continue with the default.
            printer.err_msg(str(exc))
    if len(parsed_args.calendar) == 0:
        # No --calendar given: fall back to the configured defaults.
        parsed_args.calendar = parsed_args.defaultCalendar
    cal_names = parse_cal_names(parsed_args.calendar)
    gcal = GoogleCalendarInterface(
        cal_names=cal_names, printer=printer, **vars(parsed_args))
    try:
        # Dispatch on the sub-command name.
        if parsed_args.command == 'list':
            gcal.ListAllCalendars()
        elif parsed_args.command == 'agenda':
            gcal.AgendaQuery(start=parsed_args.start, end=parsed_args.end)
        elif parsed_args.command == 'calw':
            gcal.CalQuery(
                parsed_args.command, count=parsed_args.weeks,
                start_text=parsed_args.start)
        elif parsed_args.command == 'calm':
            gcal.CalQuery(parsed_args.command, start_text=parsed_args.start)
        elif parsed_args.command == 'quick':
            if not parsed_args.text:
                printer.err_msg('Error: invalid event text\n')
                sys.exit(1)
            # allow unicode strings for input
            gcal.QuickAddEvent(
                _u(parsed_args.text), reminders=parsed_args.reminders)
        elif parsed_args.command == 'add':
            if parsed_args.prompt:
                run_add_prompt(parsed_args, printer)
            # calculate "when" time:
            try:
                estart, eend = utils.get_times_from_duration(
                    parsed_args.when, parsed_args.duration,
                    parsed_args.allday)
            except ValueError as exc:
                printer.err_msg(str(exc))
                # Since we actually need a valid start and end time in order to
                # add the event, we cannot proceed.
                raise
            gcal.AddEvent(parsed_args.title, parsed_args.where, estart, eend,
                          parsed_args.description, parsed_args.who,
                          parsed_args.reminders, parsed_args.event_color)
        elif parsed_args.command == 'search':
            gcal.TextQuery(
                parsed_args.text[0], start=parsed_args.start,
                end=parsed_args.end)
        elif parsed_args.command == 'delete':
            gcal.ModifyEvents(
                gcal._delete_event, parsed_args.text[0],
                start=parsed_args.start, end=parsed_args.end,
                expert=parsed_args.iamaexpert)
        elif parsed_args.command == 'edit':
            gcal.ModifyEvents(
                gcal._edit_event, parsed_args.text[0],
                start=parsed_args.start, end=parsed_args.end)
        elif parsed_args.command == 'remind':
            gcal.Remind(
                parsed_args.minutes, parsed_args.cmd,
                use_reminders=parsed_args.use_reminders)
        elif parsed_args.command == 'import':
            gcal.ImportICS(
                parsed_args.verbose, parsed_args.dump,
                parsed_args.reminders, parsed_args.file)
    except GcalcliError as exc:
        printer.err_msg(str(exc))
        sys.exit(1)
def SIGINT_handler(signum, frame):
    """Exit with status 1 on Ctrl-C instead of dumping a traceback."""
    print('Signal caught, bye!', file=sys.stderr)
    sys.exit(1)
# Install the Ctrl-C handler at import time so long-running commands and
# interactive prompts can be aborted cleanly.
signal.signal(signal.SIGINT, SIGINT_handler)
if __name__ == '__main__':
    main()
|
from collections import deque
from hwt.hdl.constants import DIRECTION
from hwt.hdl.types.bits import Bits
from hwt.interfaces.std import Clk, Signal, VectSignal
from hwt.interfaces.tristate import TristateSig
from hwt.simulator.agentBase import SyncAgentBase
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.param import Param
from pyMathBitPrecise.bit_utils import mask, get_bit
from hwtSimApi.agents.base import AgentBase
from hwtSimApi.hdlSimulator import HdlSimulator
from hwtSimApi.process_utils import OnRisingCallbackLoop, OnFallingCallbackLoop
from hwtSimApi.triggers import WaitCombRead, WaitWriteOnly, Timer
class SpiAgent(SyncAgentBase):
    """
    Simulation agent for SPI interface

    :ivar ~.txData: data to transceiver container
    :ivar ~.rxData: received data
    :ivar ~.chipSelects: values of chip select

    chipSelects, rxData and txData are lists of integers
    """
    # Number of bits shifted per data word.
    BITS_IN_WORD = 8

    def __init__(self, sim: HdlSimulator, intf: "Spi", allowNoReset=False):
        AgentBase.__init__(self, sim, intf)
        # word-level queues visible to the user
        self.txData = deque()
        self.rxData = deque()
        self.chipSelects = deque()
        # bit-level buffers for the word currently being shifted
        self._txBitBuff = deque()
        self._rxBitBuff = deque()
        # all-ones chip-select value == no slave selected
        self.csMask = mask(intf.cs._dtype.bit_length())
        self.slaveEn = False
        # resolve clk and rstn
        self.clk = self.intf._getAssociatedClk()._sigInside
        self.rst, self.rstOffIn = self._discoverReset(
            intf, allowNoReset=allowNoReset)
        # read on rising edge write on falling
        self.monitorRx = OnRisingCallbackLoop(
            self.sim, self.clk, self.monitorRx, self.getEnable)
        self.monitorTx = OnFallingCallbackLoop(
            self.sim, self.clk, self.monitorTx, self.getEnable)
        self.driverRx = OnFallingCallbackLoop(
            self.sim, self.clk, self.driverRx, self.getEnable)
        self.driverTx = OnRisingCallbackLoop(
            self.sim, self.clk, self.driverTx, self.getEnable)

    def setEnable(self, en):
        """Enable/disable this agent and all of its clocked callback loops."""
        self._enabled = en
        self.monitorRx.setEnable(en)
        self.monitorTx.setEnable(en)
        self.driverRx.setEnable(en)
        self.driverTx.setEnable(en)

    def splitBits(self, v):
        """Split integer v into a deque of single bits, MSB first."""
        return deque([get_bit(v, i)
                      for i in range(self.BITS_IN_WORD - 1, -1, -1)])

    def mergeBits(self, bits):
        """Merge bit values (MSB first) into one BITS_IN_WORD-wide Bits value."""
        t = Bits(self.BITS_IN_WORD, False)
        val = 0
        vld_mask = 0
        for v in bits:
            val <<= 1
            val |= v.val
            vld_mask <<= 1
            vld_mask |= v.vld_mask
        return t.getValueCls()(t, val, vld_mask)

    def readRxSig(self, sig):
        """Sample one bit from sig; queue a word once BITS_IN_WORD bits arrived."""
        d = sig.read()
        bits = self._rxBitBuff
        bits.append(d)
        if len(bits) == self.BITS_IN_WORD:
            self.rxData.append(self.mergeBits(bits))
            # BUGFIX: start a fresh deque; previously this was reset to a
            # plain list, inconsistent with __init__ and the tx buffer.
            self._rxBitBuff = deque()

    def writeTxSig(self, sig):
        """Drive the next bit on sig, refilling the bit buffer from txData."""
        bits = self._txBitBuff
        if not bits:
            if not self.txData:
                return
            d = self.txData.popleft()
            bits = self._txBitBuff = self.splitBits(d)
        sig.write(bits.popleft())

    def monitorRx(self):
        """Monitor: sample mosi on rising clk edges while any slave is selected."""
        yield WaitCombRead()
        if self.notReset():
            cs = self.intf.cs.read()
            cs = int(cs)
            if cs != self.csMask:  # if any slave is enabled
                if not self._rxBitBuff:
                    # remember which slave this new word belongs to
                    self.chipSelects.append(cs)
                self.readRxSig(self.intf.mosi)

    # def monitorTx_pre_set(self):
    #    yield WaitWriteOnly()
    #    self.writeTxSig(self.intf.miso)

    def monitorTx(self):
        """Monitor: drive miso just after the edge while a slave is selected."""
        yield WaitCombRead()
        if self.notReset():
            cs = self.intf.cs.read()
            cs = int(cs)
            if cs != self.csMask:
                yield Timer(1)
                yield WaitWriteOnly()
                self.writeTxSig(self.intf.miso)

    def driverRx(self):
        """Driver: sample miso on falling clk edges while a slave is enabled."""
        yield WaitCombRead()
        if self.notReset() and self.slaveEn:
            self.readRxSig(self.intf.miso)

    def driverTx(self):
        """Driver: drive cs and mosi on rising clk edges; deselect when idle."""
        yield WaitCombRead()
        if self.notReset():
            if not self._txBitBuff:
                try:
                    cs = self.chipSelects.popleft()
                except IndexError:
                    # nothing left to send -> deselect all slaves
                    self.slaveEn = False
                    yield WaitWriteOnly()
                    self.intf.cs.write(self.csMask)
                    return
                self.slaveEn = True
                yield WaitWriteOnly()
                self.intf.cs.write(cs)
            yield WaitWriteOnly()
            self.writeTxSig(self.intf.mosi)

    def getDrivers(self):
        return [self.driverRx(), self.driverTx()]

    def getMonitors(self):
        return [self.monitorRx(),
                # self.monitorTx_pre_set(),
                self.monitorTx()]
# http://www.corelis.com/education/SPI_Tutorial.htm
class Spi(Interface):
    """
    Bare SPI interface (Serial peripheral interface)

    .. hwt-autodoc::
    """
    def _config(self):
        # hwt interface parameters: slave count, optional data lines,
        # and the SPI clock frequency.
        self.SLAVE_CNT = Param(1)
        self.HAS_MISO = Param(True)
        self.HAS_MOSI = Param(True)
        self.FREQ = Param(Clk.DEFAULT_FREQ)
    def _declr(self):
        self.clk = Clk()
        # propagate the configured frequency to the clock signal
        self.clk.FREQ = self.FREQ
        # at least one data direction must be present
        assert self.HAS_MOSI or self.HAS_MISO
        if self.HAS_MOSI:
            self.mosi = Signal()  # master out slave in
        if self.HAS_MISO:
            self.miso = Signal(masterDir=DIRECTION.IN)  # master in slave out
        if self.SLAVE_CNT is not None:
            self.cs = VectSignal(self.SLAVE_CNT)  # chip select (one-hot-low per slave)
        self._associatedClk = self.clk
class SpiTristate(Spi):
    """
    SPI interface where mosi and miso signal are merged into one tri-state wire

    .. hwt-autodoc::
    """
    def _config(self):
        Spi._config(self)
        # width of the shared tri-state data bus
        self.DATA_WIDTH = Param(1)

    def _declr(self):
        self.clk = Clk()
        # BUGFIX: propagate the configured frequency to the clock, as
        # Spi._declr does; previously the FREQ parameter was silently
        # ignored in this variant.
        self.clk.FREQ = self.FREQ
        with self._paramsShared():
            self.io = TristateSig()  # mosi and miso in one wire
        self.cs = VectSignal(self.SLAVE_CNT)  # chip select
        self._associatedClk = self.clk
class QSPI(SpiTristate):
    """
    SPI interface with 4 tristate data wires

    .. hwt-autodoc::
    """
    def _config(self):
        # Call Spi._config directly (not SpiTristate._config) so DATA_WIDTH
        # is declared exactly once, here with the quad-SPI width.
        Spi._config(self)
        self.DATA_WIDTH = Param(4)
|
#Draws PRC and ROC curves for predicted and true values
import argparse
from sklearn.metrics import precision_recall_curve,average_precision_score,roc_curve
import matplotlib.pyplot as plt
import h5py
import pickle
import numpy as np
from random import random
from operator import add
from pylab import rcParams
# Use a large 10x10-inch canvas so the side-by-side PRC/ROC subplots stay legible.
rcParams['figure.figsize'] = 10, 10
def parse_args():
    """Build and parse the command-line arguments for curve generation."""
    parser = argparse.ArgumentParser("generate PRC & ROC curves")
    # All options are plain string arguments with no defaults.
    for flag in ("--truth_hdf5", "--prediction_pickle", "--out_prefix",
                 "--labels", "--title"):
        parser.add_argument(flag)
    return parser.parse_args()
def filter_vals(precision, recall):
    """Resample a curve onto the fixed descending grid 1.00, 0.99, ..., 0.01.

    Each grid point carries the precision of the most recently matched
    (2-decimal-rounded) recall value; 0 is used before the first match.

    Returns:
        (filtered_precision, filtered_recall), both of length 100.
    """
    # Sort jointly by recall, descending (ties broken by precision).
    paired = sorted(zip(recall, precision), reverse=True)
    recall_sorted = [r for r, _ in paired]
    precision_sorted = [p for _, p in paired]
    recall_rounded = [round(r, 2) for r in recall_sorted]
    grid = [i / 100.0 for i in range(100, 0, -1)]
    filtered_precision = []
    filtered_recall = []
    current = 0
    for level in grid:
        if level in recall_rounded:
            current = precision_sorted[recall_rounded.index(level)]
        filtered_precision.append(current)
        filtered_recall.append(level)
    return filtered_precision, filtered_recall
def main():
    """Draw per-task PRC and ROC curves plus their across-task means.

    Loads predictions (pickle) and ground truth (hdf5), plots one curve per
    task on shared PRC/ROC axes, writes the averaged metrics to
    <out_prefix>.average.metrics and shows the figure.
    """
    args = parse_args()
    with open(args.prediction_pickle, 'rb') as handle:
        y_pred = pickle.load(handle)
    y_true = h5py.File(args.truth_hdf5, 'r')
    y_true = np.asarray(y_true['Y']['default_output_mode_name'])
    # First line of the labels file is a header; task names start at column 1.
    with open(args.labels, 'r') as label_file:
        labels = label_file.read().strip().split('\n')[0].split('\t')[1::]
    mean_precision = []
    mean_recall = []
    mean_fpr = []
    mean_tpr = []
    # initialize figure: PRC on the left, ROC on the right
    fig1 = plt.figure()
    ax1 = fig1.add_subplot(121)
    ax2 = fig1.add_subplot(122)
    # compute precision & recall (and fpr/tpr) for each task
    for i in range(len(labels)):
        precision, recall, thresholds = precision_recall_curve(
            y_true[:, i], y_pred[:, i])
        fpr, tpr, thresholds = roc_curve(y_true[:, i], y_pred[:, i])
        cur_color = ([random(), random(), random()])
        ax1.step(recall, precision, color=cur_color, alpha=0.2,
                 where='post', label=labels[i])
        ax2.step(fpr, tpr, color=cur_color, alpha=0.2,
                 where='post', label=labels[i])
        # Resample both curves onto the fixed 1.00..0.01 grid so tasks can
        # be averaged element-wise.
        precision, recall = filter_vals(precision, recall)
        tpr, fpr = filter_vals(tpr, fpr)
        # BUGFIX: map() returns a lazy iterator in Python 3 with no len()
        # and single-use semantics, which broke averaging for >2 tasks.
        # Materialize each running sum as a list.
        if len(mean_precision) == 0:
            mean_precision = precision
        else:
            mean_precision = list(map(add, mean_precision, precision))
        if len(mean_recall) == 0:
            mean_recall = recall
        else:
            mean_recall = list(map(add, mean_recall, recall))
        if len(mean_tpr) == 0:
            mean_tpr = tpr
        else:
            mean_tpr = list(map(add, mean_tpr, tpr))
        if len(mean_fpr) == 0:
            mean_fpr = fpr
        else:
            mean_fpr = list(map(add, mean_fpr, fpr))
    # get average metric values
    mean_precision = [i / len(labels) for i in mean_precision]
    mean_recall = [i / len(labels) for i in mean_recall]
    mean_tpr = [i / len(labels) for i in mean_tpr]
    mean_fpr = [i / len(labels) for i in mean_fpr]
    ax1.step(mean_recall, mean_precision, color=(0, 0, 0), linewidth=2,
             where='post', label="mean")
    ax1.set_xlabel('Recall')
    ax1.set_ylabel('Precision')
    ax1.set_ylim([0.0, 1.05])
    ax1.set_xlim([0.0, 1.0])
    ax1.set_title(args.title)
    ax2.step(mean_fpr, mean_tpr, color=(0, 0, 0), linewidth=2,
             where='post', label='mean')
    ax2.set_xlabel('FPR')
    ax2.set_ylabel('TPR')
    ax2.set_ylim([0.0, 1.05])
    ax2.set_xlim([0.0, 1.0])
    ax2.set_title(args.title)
    # generate output file (context manager closes the handle deterministically)
    with open(args.out_prefix + '.average.metrics', 'w') as outf:
        outf.write('Precision\tRecall\tFPR\tTPR\n')
        for i in range(len(mean_precision)):
            outf.write(str(mean_precision[i]) + '\t' + str(mean_recall[i]) +
                       '\t' + str(mean_fpr[i]) + '\t' + str(mean_tpr[i]) + '\n')
    # plt.savefig(args.out_prefix+".png",dpi=100)
    plt.show()
# Script entry point.
if __name__=="__main__":
    main()
|
<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ====================================================================
# @author: <NAME>
# @since: 11/02/2017
# @summary: A module with angle and coordinate transformations.
# @note: Parts of this file came from angle_utilities.py written by <NAME> of PCG at Cornell.
# Redistributed with permission.
# ====================================================================
# Provides functionality to convert between UV coordinates and angles as well
# as other useful angle utilities.
#
# Copyright 2014-2015 Program of Computer Graphics, Cornell University
# 580 Rhodes Hall
# Cornell University
# Ithaca NY 14853
# Web: http://www.graphics.cornell.edu/
#
# Not for commercial use. Do not redistribute without permission.
# ====================================================================
import math
import numpy as np
import common
'''
Convert a sky coordinate (azimuth, altitude) to fisheye UV coordinate (0-1, 0-1).
Note that images in this application were taken with North facing downward, so we must account for this in UV.
Note sampling pattern coordinates in this application were measured in altitude, but calculation below requires zenith.
Note altering of zenith to account for warp of lens used:
http://paulbourke.net/dome/fisheyecorrect/
'''
def SkyCoord2FisheyeUV(azimuth, altitude, lenswarp=True):
    """Map a sky coordinate (azimuth, altitude, degrees) to fisheye UV in [0, 1]."""
    # Photos were captured with North facing down and viewed from inside the
    # dome (east/west mirrored), so remap the azimuth into the image's polar
    # frame before converting.
    azimuth_deg = 360 - ((azimuth + 270) % 360)
    # the lens polynomials below take zenith, not altitude
    zenith_deg = 90 - altitude
    # degrees -> radians
    theta = azimuth_deg * math.pi / 180.0
    phi = zenith_deg * math.pi / 180.0
    # Radial distance from image center: prefer the measured lens-warp
    # polynomial (http://paulbourke.net/dome/fisheyecorrect/), falling back
    # to the ideal-lens polynomial.
    if lenswarp and len(common.LensWarp) > 0:
        r = np.polyval(common.LensWarp, phi)
    else:
        r = np.polyval(common.LensIdeal, phi)
    # polar -> cartesian, then rescale from [-1, 1] to [0, 1]
    u = 0.5 * (r * math.cos(theta)) + 0.5
    v = 0.5 * (r * math.sin(theta)) + 0.5
    return u, v
'''
Convert a fisheye UV coordinate (0-1, 0-1) to a sky coordinate (azimuth, altitude).
'''
def FisheyeUV2SkyCoord(u, v, lenswarp=True):
    """Map a fisheye UV coordinate in [0, 1] back to (azimuth, altitude) degrees."""
    # [0, 1] -> [-1, 1]
    x = (u - 0.5) * 2
    y = (v - 0.5) * 2
    r = math.sqrt((x * x) + (y * y))
    # azimuth, rotated so North points straight down in the image
    theta = math.atan2(x, y)
    theta = (theta + 2 * math.pi) % (2 * math.pi)
    # Invert the lens model: measured warp polynomial when available,
    # otherwise the ideal lens.
    phi = np.polyval(common.LensWarpInv, r) \
        if lenswarp and len(common.LensWarpInv) > 0 \
        else np.polyval(common.LensIdealInv, r)
    # zenith -> altitude
    alt = (math.pi / 2) - phi
    # radians -> degrees
    return theta * 180.0 / math.pi, alt * 180.0 / math.pi
'''
Convert an image pixel coordinate to a fisheye UV coordinate (0-1, 0-1).
'''
def Pixel2FisheyeUV(x, y, width, height):
    """Map an image pixel (x, y) to fisheye UV.

    Assumes the fisheye circle is height x height and horizontally centered,
    so x is shifted by the left margin before normalizing by height.
    """
    left_margin = int(width / 2) - int(height / 2)
    return (x - left_margin) / height, y / height
'''
Take in a pair of (azimuth, altitude) sky coordintes and return the corresponding central angle between them.
https://en.wikipedia.org/wiki/Great-circle_distance#Formulas
'''
def CentralAngle(a, b, inRadians=False):
    """Return the central angle (radians) between two (azimuth, altitude) coords.

    Uses the spherical law of cosines:
    https://en.wikipedia.org/wiki/Great-circle_distance#Formulas

    Args:
        a, b: (azimuth, altitude) pairs, in degrees unless inRadians is True.

    Returns:
        The central angle between a and b, always in radians.
    """
    if not inRadians:
        a = (math.radians(a[0]), math.radians(a[1]))
        b = (math.radians(b[0]), math.radians(b[1]))
    cos_angle = (math.sin(a[1]) * math.sin(b[1]) +
                 math.cos(a[1]) * math.cos(b[1]) * math.cos(abs(a[0] - b[0])))
    # BUGFIX: clamp to [-1, 1] -- rounding can push the value a few ulps
    # outside the domain of acos (e.g. for identical coordinates), which
    # previously raised a math domain ValueError.
    return math.acos(max(-1.0, min(1.0, cos_angle)))
|
# Run a whole brain searchlight
# Import libraries
import nibabel as nib
import numpy as np
from mpi4py import MPI
from brainiak.searchlight.searchlight import Searchlight
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV
from sklearn.svm import SVC
from scipy.spatial.distance import euclidean
import os
import pickle
from utils import results_path
# Import additional libraries you need
# Root directory containing per-subject searchlight inputs (BOLD, masks, labels).
fs_data_dir = os.path.expanduser(results_path + '/searchlight_data')
# Number of subjects in the leave-one-subject-out analysis.
num_subj = 3
# Load and prepare data for one subject
def load_fs_data(sub_id, mask=''):
    """Load one subject's BOLD data plus a brain mask and volume header info.

    Args:
        sub_id: 1-based subject index (formatted as sub-XX).
        mask: mask name prefix; '' selects the whole-brain mask.

    Returns:
        (bold_data, brain_mask, affine_mat, dimsize)
    """
    # resolve the subject's data file and the requested mask file
    sub_dir = os.path.join(fs_data_dir, 'sub-%.2d' % (sub_id))
    mask_name = 'wb_mask.nii.gz' if mask == '' else '{}_mask.nii.gz'.format(mask)
    mask_file = os.path.join(fs_data_dir, mask_name)
    # load BOLD data, keeping affine/voxel sizes so results can be saved later
    nii = nib.load(os.path.join(sub_dir, 'data.nii.gz'))
    bold_data = nii.get_data()
    affine_mat = nii.affine
    dimsize = nii.header.get_zooms()
    # load the mask volume
    brain_mask = nib.load(mask_file).get_data()
    return bold_data, brain_mask, affine_mat, dimsize
def load_fs_label(sub_id, mask=''):
    """Load one subject's condition labels from label.npz (key 'label').

    The mask argument is accepted for signature symmetry with load_fs_data
    but is not used here.
    """
    label_path = os.path.join(
        fs_data_dir, 'sub-%.2d' % (sub_id), 'label.npz')
    archive = np.load(label_path)
    return archive['label']
# Data Path
data_path = os.path.expanduser(results_path + '/searchlight_results')
# if not os.path.exists(data_path):
# os.makedirs(data_path)
# Pull out the MPI information
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
# load the whole-brain mask
mask_file = os.path.join(fs_data_dir, 'wb_mask.nii.gz')
mask = nib.load(mask_file)
mask = mask.get_data()
# Loop over subjects: only rank 0 loads the (large) BOLD volumes; the other
# ranks append None and receive their share via sl.distribute below.
data = []
bcvar = []
for sub_id in range(1,num_subj+1):
    if rank == 0:
        # NOTE(review): this rebinds `mask` to the subject mask returned by
        # load_fs_data, so rank 0 and the other ranks may use different masks
        # for coords/distribute below -- confirm this is intended.
        data_i, mask, affine_mat, dimsize = load_fs_data(sub_id)
        data.append(data_i)
    else:
        data.append(None)
    # labels are small; every rank loads them so they can be broadcast
    bcvar_i = load_fs_label(sub_id)
    bcvar.append(bcvar_i)
# searchlight geometry / parallelism parameters
sl_rad = 1
max_blk_edge = 5
pool_size = 1
# voxel coordinates inside the mask, used to un-flatten results later
coords = np.where(mask)
# Create the searchlight object
sl = Searchlight(sl_rad=sl_rad,max_blk_edge=max_blk_edge)
# print("Setup searchlight inputs")
# print("Number of subjects: " + str(len(data)))
# print("Input data shape: " + str(data[0].shape))
# print("Input mask shape: " + str(mask.shape) + "\n")
# Distribute the information to the searchlights (preparing it to run)
sl.distribute(data, mask)
# Broadcast variables
sl.broadcast(bcvar)
# Set up the kernel function, in this case an SVM
def calc_svm(data, sl_mask, myrad, bcvar):
    """Searchlight kernel: leave-one-subject-out linear-SVM accuracy.

    data:    list of 4D searchlight volumes, one per subject
    sl_mask: searchlight mask defining the voxel count
    myrad:   searchlight radius (unused here)
    bcvar:   list of label vectors, one per subject

    Returns a list with one held-out accuracy per subject.
    """
    n_voxels = sl_mask.shape[0] * sl_mask.shape[1] * sl_mask.shape[2]
    n_epochs = data[0].shape[3]
    accuracy = []
    # Hold each subject out once, training on all the others.
    for test_id in range(len(data)):
        # held-out subject's data, flattened to (epochs, voxels)
        test_X = data[test_id].reshape(n_voxels, n_epochs).T
        test_y = bcvar[test_id]
        # stack the remaining subjects as training data
        train_y = []
        train_X = np.empty((0, n_voxels))
        for train_id in range(len(data)):
            if train_id == test_id:
                continue
            train_y.extend(list(bcvar[train_id]))
            train_X = np.concatenate(
                (train_X, data[train_id].reshape(n_voxels, n_epochs).T))
        train_y = np.array(train_y)
        # train and score a linear SVM
        model = SVC(kernel='linear', C=1)
        model.fit(train_X, train_y)
        accuracy.append(model.score(test_X, test_y))
    return accuracy
# Run the searchlight analysis
print("Begin SearchLight in rank %s\n" % rank)
all_sl_result = sl.run_searchlight(calc_svm, pool_size=pool_size)
print("End SearchLight in rank %s\n" % rank)
# Only save the data if this is the first core
if rank == 0:
    # keep only in-mask results; out-of-mask searchlights return None
    all_sl_result = all_sl_result[mask==1]
    all_sl_result = [num_subj*[0] if not n else n for n in all_sl_result] # replace all None
    # The average result across subjects, accumulated in the loop below.
    avg_vol = np.zeros((mask.shape[0], mask.shape[1], mask.shape[2]))
    # Loop over subjects
    for sub_id in range(1,num_subj+1):
        # this subject's held-out accuracy at every searchlight center
        sl_result = [r[sub_id-1] for r in all_sl_result]
        # reshape the flat result back into the mask volume
        result_vol = np.zeros((mask.shape[0], mask.shape[1], mask.shape[2]))
        result_vol[coords[0], coords[1], coords[2]] = sl_result
        # Convert the output into what can be used
        result_vol = result_vol.astype('double')
        result_vol[np.isnan(result_vol)] = 0 # If there are nans we want them as 0
        # Add the processed result_vol into avg_vol
        avg_vol += result_vol
        # Save the per-subject volume with the original affine/voxel sizes
        output_name = os.path.join(data_path, 'subj%s_whole_brain_SL.nii.gz' % (sub_id))
        sl_nii = nib.Nifti1Image(result_vol, affine_mat)
        hdr = sl_nii.header
        hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2]))
        nib.save(sl_nii, output_name)  # Save
    # Save the average result
    output_name = os.path.join(data_path, 'avg%s_whole_brain_SL.nii.gz' % (num_subj))
    sl_nii = nib.Nifti1Image(avg_vol/num_subj, affine_mat)
    hdr = sl_nii.header
    hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2]))
    nib.save(sl_nii, output_name)  # Save
print('Finished searchlight')
<reponame>Madision-Jack/blade-build
# Copyright (c) 2012-2014 <NAME> and contributors
# pylint: skip-file
# type: ignore
import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
import sys
import time
from collections import Sequence
from contextlib import contextmanager
from errno import EINVAL, ENOENT
from operator import attrgetter
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
# Python 2/3 compatibility shims: expose the same helper names under both majors.
try:
    from urllib import quote as urlquote, quote as urlquote_from_bytes
except ImportError:
    from urllib.parse import quote as urlquote, quote_from_bytes as urlquote_from_bytes
# intern() moved to sys.intern in Python 3.
try:
    intern = intern
except NameError:
    intern = sys.intern
# basestring does not exist in Python 3; fall back to str.
try:
    basestring = basestring
except NameError:
    basestring = str
# Symlink support detection: assume yes, disable on old Windows/Python.
supports_symlinks = True
try:
    import nt
except ImportError:
    nt = None
else:
    if sys.getwindowsversion()[:2] >= (6, 0) and sys.version_info >= (3, 2):
        from nt import _getfinalpathname
    else:
        supports_symlinks = False
        _getfinalpathname = None
__all__ = [
    "PurePath", "PurePosixPath", "PureWindowsPath",
    "Path", "PosixPath", "WindowsPath",
    ]
#
# Internals
#
# True when running under Python 2; gates the byte-encoding shims below.
_py2 = sys.version_info < (3,)
_py2_fs_encoding = 'ascii'
def _py2_fsencode(parts):
    """Encode unicode parts to the py2 filesystem encoding, pass others through."""
    # py2 => minimal unicode support
    encoded = []
    for part in parts:
        if isinstance(part, unicode):
            encoded.append(part.encode(_py2_fs_encoding))
        else:
            encoded.append(part)
    return encoded
def _is_wildcard_pattern(pat):
# Whether this pattern needs actual matching using fnmatch, or can
# be looked up directly as a file.
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
    """A flavour implements a particular (platform-specific) set of path
    semantics."""
    def __init__(self):
        # Pre-bind the separator join; self.sep is defined by subclasses.
        self.join = self.sep.join
    def parse_parts(self, parts):
        """Split raw path strings into (drive, root, parsed components).

        Parts are scanned right-to-left; empty and '.' components are
        dropped, and scanning stops at the first component that carries a
        drive or root (everything to its left is irrelevant).
        """
        if _py2:
            parts = _py2_fsencode(parts)
        parsed = []
        sep = self.sep
        altsep = self.altsep
        drv = root = ''
        it = reversed(parts)
        for part in it:
            if not part:
                continue
            if altsep:
                # normalize the alternate separator (e.g. '/' on Windows)
                part = part.replace(altsep, sep)
            drv, root, rel = self.splitroot(part)
            if sep in rel:
                for x in reversed(rel.split(sep)):
                    if x and x != '.':
                        # intern components: many paths share the same names
                        parsed.append(intern(x))
            else:
                if rel and rel != '.':
                    parsed.append(intern(rel))
            if drv or root:
                if not drv:
                    # If no drive is present, try to find one in the previous
                    # parts. This makes the result of parsing e.g.
                    # ("C:", "/", "a") reasonably intuitive.
                    for part in it:
                        drv = self.splitroot(part)[0]
                        if drv:
                            break
                break
        if drv or root:
            # the anchor is stored as the first component
            parsed.append(drv + root)
        parsed.reverse()
        return drv, root, parsed
    def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
        """
        Join the two paths represented by the respective
        (drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
        """
        if root2:
            if not drv2 and drv:
                # second path is rooted but driveless: keep the first drive
                return drv, root2, [drv + root2] + parts2[1:]
        elif drv2:
            if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
                # Same drive => second path is relative to the first
                return drv, root, parts + parts2[1:]
        else:
            # Second path is non-anchored (common case)
            return drv, root, parts + parts2
        return drv2, root2, parts2
class _WindowsFlavour(_Flavour):
    """Windows path semantics: drives, UNC shares and extended-path prefixes."""
    # Reference for Windows paths can be found at
    # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx
    sep = '\\'
    altsep = '/'
    has_drv = True
    pathmod = ntpath
    is_supported = (nt is not None)
    # all single-letter drive names, both cases
    drive_letters = (
        set(chr(x) for x in range(ord('a'), ord('z') + 1)) |
        set(chr(x) for x in range(ord('A'), ord('Z') + 1))
    )
    ext_namespace_prefix = '\\\\?\\'
    # device names that Windows reserves in any directory
    reserved_names = (
        set(['CON', 'PRN', 'AUX', 'NUL']) |
        set(['COM%d' % i for i in range(1, 10)]) |
        set(['LPT%d' % i for i in range(1, 10)])
    )
    # Interesting findings about extended paths:
    # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
    #   but '\\?\c:/a' is not
    # - extended paths are always absolute; "relative" extended paths will
    #   fail.
    def splitroot(self, part, sep=sep):
        """Split *part* into (drive, root, rest), handling '\\\\?\\' and UNC forms."""
        first = part[0:1]
        second = part[1:2]
        if (second == sep and first == sep):
            # XXX extended paths should also disable the collapsing of "."
            # components (according to MSDN docs).
            prefix, part = self._split_extended_path(part)
            first = part[0:1]
            second = part[1:2]
        else:
            prefix = ''
        third = part[2:3]
        if (second == sep and first == sep and third != sep):
            # is a UNC path:
            # vvvvvvvvvvvvvvvvvvvvv root
            # \\machine\mountpoint\directory\etc\...
            #            directory ^^^^^^^^^^^^^^
            index = part.find(sep, 2)
            if index != -1:
                index2 = part.find(sep, index + 1)
                # a UNC path can't have two slashes in a row
                # (after the initial two)
                if index2 != index + 1:
                    if index2 == -1:
                        index2 = len(part)
                    if prefix:
                        return prefix + part[1:index2], sep, part[index2 + 1:]
                    else:
                        return part[:index2], sep, part[index2 + 1:]
        drv = root = ''
        if second == ':' and first in self.drive_letters:
            # drive-letter path, e.g. 'c:\...'
            drv = part[:2]
            part = part[2:]
            first = third
        if first == sep:
            root = first
            part = part.lstrip(sep)
        return prefix + drv, root, part
    def casefold(self, s):
        # Windows filesystems are case-insensitive.
        return s.lower()
    def casefold_parts(self, parts):
        return [p.lower() for p in parts]
    def resolve(self, path):
        """Resolve to a final path via the OS when possible; None means fall back."""
        s = str(path)
        if not s:
            return os.getcwd()
        if _getfinalpathname is not None:
            return self._ext_to_normal(_getfinalpathname(s))
        # Means fallback on absolute
        return None
    def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
        # Peel off the '\\?\' (and UNC marker) prefix, returning (prefix, rest).
        prefix = ''
        if s.startswith(ext_prefix):
            prefix = s[:4]
            s = s[4:]
            if s.startswith('UNC\\'):
                prefix += s[:3]
                s = '\\' + s[3:]
        return prefix, s
    def _ext_to_normal(self, s):
        # Turn back an extended path into a normal DOS-like path
        return self._split_extended_path(s)[1]
    def is_reserved(self, parts):
        # NOTE: the rules for reserved names seem somewhat complicated
        # (e.g. r"..\NUL" is reserved but not r"foo\NUL").
        # We err on the side of caution and return True for paths which are
        # not considered reserved by Windows.
        if not parts:
            return False
        if parts[0].startswith('\\\\'):
            # UNC paths are never reserved
            return False
        return parts[-1].partition('.')[0].upper() in self.reserved_names
    def make_uri(self, path):
        """Return a file: URI for *path*."""
        # Under Windows, file URIs use the UTF-8 encoding.
        drive = path.drive
        if len(drive) == 2 and drive[1] == ':':
            # It's a path on a local drive => 'file:///c:/a/b'
            rest = path.as_posix()[2:].lstrip('/')
            return 'file:///%s/%s' % (
                drive, urlquote_from_bytes(rest.encode('utf-8')))
        else:
            # It's a path on a network drive => 'file://host/share/a/b'
            return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
class _PosixFlavour(_Flavour):
    """POSIX path semantics: '/' separator, no drives, case-sensitive."""
    sep = '/'
    altsep = ''
    has_drv = False
    pathmod = posixpath
    is_supported = (os.name != 'nt')
    def splitroot(self, part, sep=sep):
        """Split *part* into ('', root, rest); POSIX paths have no drive."""
        if part and part[0] == sep:
            stripped_part = part.lstrip(sep)
            # According to POSIX path resolution:
            # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
            # "A pathname that begins with two successive slashes may be
            # interpreted in an implementation-defined manner, although more
            # than two leading slashes shall be treated as a single slash".
            if len(part) - len(stripped_part) == 2:
                return '', sep * 2, stripped_part
            else:
                return '', sep, stripped_part
        else:
            return '', '', part
    def casefold(self, s):
        # POSIX is case-sensitive: identity.
        return s
    def casefold_parts(self, parts):
        return parts
    def resolve(self, path):
        """Resolve symlinks and '.'/'..' components to an absolute path string."""
        sep = self.sep
        accessor = path._accessor
        # Maps visited paths to their resolution; None marks a resolution
        # still in progress, which signals a symlink loop when revisited.
        seen = {}
        def _resolve(path, rest):
            if rest.startswith(sep):
                path = ''
            for name in rest.split(sep):
                if not name or name == '.':
                    # current dir
                    continue
                if name == '..':
                    # parent dir
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + sep + name
                if newpath in seen:
                    # Already seen this path
                    path = seen[newpath]
                    if path is not None:
                        # use cached value
                        continue
                    # The symlink is not resolved, so we must have a symlink loop.
                    raise RuntimeError("Symlink loop from %r" % newpath)
                # Resolve the symbolic link
                try:
                    target = accessor.readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL:
                        raise
                    # Not a symlink
                    path = newpath
                else:
                    seen[newpath] = None # not resolved symlink
                    path = _resolve(path, target)
                    seen[newpath] = path # resolved symlink
            return path
        # NOTE: according to POSIX, getcwd() cannot contain path components
        # which are symlinks.
        base = '' if path.is_absolute() else os.getcwd()
        return _resolve(base, str(path)) or sep
    def is_reserved(self, parts):
        # no reserved names on POSIX
        return False
    def make_uri(self, path):
        # We represent the path using the local filesystem encoding,
        # for portability to other applications.
        bpath = bytes(path)
        return 'file://' + urlquote_from_bytes(bpath)
# Singleton flavour instances shared by all pure path objects.
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
    """An accessor implements a particular (system-specific or not) way of
    accessing paths on the filesystem."""
    # Marker base class: concrete accessors (see _NormalAccessor below)
    # supply stat/open/listdir/... taking a path object as first argument.
class _NormalAccessor(_Accessor):
    """Accessor that forwards to the os module, stringifying path objects."""

    def _wrap_strfunc(strfunc):
        # Wrap a one-path os function so it accepts a path object.
        @functools.wraps(strfunc)
        def wrapped(pathobj, *args):
            return strfunc(str(pathobj), *args)
        return staticmethod(wrapped)

    def _wrap_binary_strfunc(strfunc):
        # Wrap a two-path os function (e.g. rename) likewise.
        @functools.wraps(strfunc)
        def wrapped(pathobjA, pathobjB, *args):
            return strfunc(str(pathobjA), str(pathobjB), *args)
        return staticmethod(wrapped)

    stat = _wrap_strfunc(os.stat)
    lstat = _wrap_strfunc(os.lstat)
    open = _wrap_strfunc(os.open)
    listdir = _wrap_strfunc(os.listdir)
    chmod = _wrap_strfunc(os.chmod)
    if hasattr(os, "lchmod"):
        lchmod = _wrap_strfunc(os.lchmod)
    else:
        def lchmod(self, pathobj, mode):
            raise NotImplementedError("lchmod() not available on this system")
    mkdir = _wrap_strfunc(os.mkdir)
    unlink = _wrap_strfunc(os.unlink)
    rmdir = _wrap_strfunc(os.rmdir)
    rename = _wrap_binary_strfunc(os.rename)
    if sys.version_info >= (3, 3):
        replace = _wrap_binary_strfunc(os.replace)
    if nt:
        if supports_symlinks:
            symlink = _wrap_binary_strfunc(os.symlink)
        else:
            # BUGFIX: staticmethod so the (a, b, target_is_directory) call
            # signature matches the other branches; previously the implicit
            # ``self`` shifted the arguments and callers got a TypeError
            # instead of this NotImplementedError.
            @staticmethod
            def symlink(a, b, target_is_directory):
                raise NotImplementedError("symlink() not available on this system")
    else:
        # Under POSIX, os.symlink() takes two args
        @staticmethod
        def symlink(a, b, target_is_directory):
            return os.symlink(str(a), str(b))

    utime = _wrap_strfunc(os.utime)

    # Helper for resolve()
    def readlink(self, path):
        return os.readlink(path)
_normal_accessor = _NormalAccessor()
#
# Globbing helpers
#
@contextmanager
def _cached(func):
try:
func.__cached__
yield func
except AttributeError:
cache = {}
def wrapper(*args):
try:
return cache[args]
except KeyError:
value = cache[args] = func(*args)
return value
wrapper.__cached__ = True
try:
yield wrapper
finally:
cache.clear()
def _make_selector(pattern_parts):
pat = pattern_parts[0]
child_parts = pattern_parts[1:]
if pat == '**':
cls = _RecursiveWildcardSelector
elif '**' in pat:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
elif _is_wildcard_pattern(pat):
cls = _WildcardSelector
else:
cls = _PreciseSelector
return cls(pat, child_parts)
# Memoize selector construction when lru_cache is available: pattern
# tuples are hashable and selector chains are stateless between walks.
if hasattr(functools, "lru_cache"):
    _make_selector = functools.lru_cache()(_make_selector)
class _Selector:
    """A selector matches a specific glob pattern part against the children
    of a given path."""

    def __init__(self, child_parts):
        self.child_parts = child_parts
        # Chain to a selector for the remaining parts, or terminate.
        self.successor = (_make_selector(child_parts) if child_parts
                          else _TerminatingSelector())

    def select_from(self, parent_path):
        """Iterate over all child paths of `parent_path` matched by this
        selector. This can contain parent_path itself."""
        path_cls = type(parent_path)
        return self._select_from(parent_path,
                                 path_cls.is_dir,
                                 path_cls.exists,
                                 parent_path._accessor.listdir)
class _TerminatingSelector:
def _select_from(self, parent_path, is_dir, exists, listdir):
yield parent_path
class _PreciseSelector(_Selector):
    """Selector for a literal (non-wildcard) path component."""

    def __init__(self, name, child_parts):
        self.name = name
        _Selector.__init__(self, child_parts)

    def _select_from(self, parent_path, is_dir, exists, listdir):
        # Guard clauses: only descend through existing directories.
        if not is_dir(parent_path):
            return
        child = parent_path._make_child_relpath(self.name)
        if not exists(child):
            return
        for p in self.successor._select_from(child, is_dir, exists, listdir):
            yield p
class _WildcardSelector(_Selector):
    """Selector matching one path component against a shell-style pattern."""

    def __init__(self, pat, child_parts):
        # Translate the glob pattern to a regular expression once.
        self.pat = re.compile(fnmatch.translate(pat))
        _Selector.__init__(self, child_parts)

    def _select_from(self, parent_path, is_dir, exists, listdir):
        if not is_dir(parent_path):
            return
        casefold = parent_path._flavour.casefold
        match = self.pat.match
        for entry in listdir(parent_path):
            if match(casefold(entry)):
                child = parent_path._make_child_relpath(entry)
                for p in self.successor._select_from(child, is_dir, exists, listdir):
                    yield p
class _RecursiveWildcardSelector(_Selector):
    """Selector for the '**' component: matches this directory and every
    directory below it, recursively."""

    def __init__(self, pat, child_parts):
        _Selector.__init__(self, child_parts)

    def _iterate_directories(self, parent_path, is_dir, listdir):
        # Yield parent_path and, depth-first, every directory under it.
        yield parent_path
        for name in listdir(parent_path):
            path = parent_path._make_child_relpath(name)
            if is_dir(path):
                for p in self._iterate_directories(path, is_dir, listdir):
                    yield p

    def _select_from(self, parent_path, is_dir, exists, listdir):
        if not is_dir(parent_path):
            return
        # Cache listdir() results for the walk: the same directory may be
        # listed both here and by the successor selectors.
        with _cached(listdir) as listdir:
            yielded = set()
            try:
                successor_select = self.successor._select_from
                for starting_point in self._iterate_directories(parent_path, is_dir, listdir):
                    for p in successor_select(starting_point, is_dir, exists, listdir):
                        # Overlapping sub-walks can produce the same path
                        # more than once; de-duplicate.
                        if p not in yielded:
                            yield p
                            yielded.add(p)
            finally:
                yielded.clear()
#
# Public API
#
class _PathParents(Sequence):
"""This object provides sequence-like access to the logical ancestors
of a path. Don't try to construct it yourself."""
__slots__ = ('_pathcls', '_drv', '_root', '_parts')
def __init__(self, path):
# We don't store the instance to avoid reference cycles
self._pathcls = type(path)
self._drv = path._drv
self._root = path._root
self._parts = path._parts
def __len__(self):
if self._drv or self._root:
return len(self._parts) - 1
else:
return len(self._parts)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self):
raise IndexError(idx)
return self._pathcls._from_parsed_parts(self._drv, self._root,
self._parts[:-idx - 1])
def __repr__(self):
return "<{0}.parents>".format(self._pathcls.__name__)
class PurePath(object):
    """PurePath represents a filesystem path and offers operations which
    don't imply any actual filesystem I/O. Depending on your system,
    instantiating a PurePath will return either a PurePosixPath or a
    PureWindowsPath object. You can also instantiate either of these classes
    directly, regardless of your system.
    """
    __slots__ = (
        '_drv', '_root', '_parts',
        # Lazily-computed caches (see __str__, __hash__, parts, _cparts).
        '_str', '_hash', '_pparts', '_cached_cparts',
    )

    def __new__(cls, *args):
        """Construct a PurePath from one or several strings and or existing
        PurePath objects. The strings and path objects are combined so as
        to yield a canonicalized path, which is incorporated into the
        new PurePath object.
        """
        if cls is PurePath:
            # Dispatch to the concrete flavour matching the host OS.
            cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
        return cls._from_parts(args)

    def __reduce__(self):
        # Using the parts tuple helps share interned path parts
        # when pickling related paths.
        return (self.__class__, tuple(self._parts))

    @classmethod
    def _parse_args(cls, args):
        # This is useful when you don't want to create an instance, just
        # canonicalize some constructor arguments.
        parts = []
        for a in args:
            if isinstance(a, PurePath):
                parts += a._parts
            elif isinstance(a, basestring):
                parts.append(a)
            else:
                raise TypeError(
                    "argument should be a path or str object, not %r"
                    % type(a))
        return cls._flavour.parse_parts(parts)

    @classmethod
    def _from_parts(cls, args, init=True):
        # We need to call _parse_args on the instance, so as to get the
        # right flavour.
        self = object.__new__(cls)
        drv, root, parts = self._parse_args(args)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self

    @classmethod
    def _from_parsed_parts(cls, drv, root, parts, init=True):
        # Fast constructor for already-canonicalized components.
        self = object.__new__(cls)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self

    @classmethod
    def _format_parsed_parts(cls, drv, root, parts):
        if drv or root:
            # parts[0] is the anchor itself; join the rest after it.
            return drv + root + cls._flavour.join(parts[1:])
        else:
            return cls._flavour.join(parts)

    def _init(self):
        # Overridden in concrete Path
        pass

    def _make_child(self, args):
        # Combine self with extra components, respecting anchors in args.
        drv, root, parts = self._parse_args(args)
        drv, root, parts = self._flavour.join_parsed_parts(
            self._drv, self._root, self._parts, drv, root, parts)
        return self._from_parsed_parts(drv, root, parts)

    def __str__(self):
        """Return the string representation of the path, suitable for
        passing to system calls."""
        try:
            return self._str
        except AttributeError:
            # Empty path renders as '.' (current directory).
            self._str = self._format_parsed_parts(self._drv, self._root,
                                                  self._parts) or '.'
            return self._str

    def as_posix(self):
        """Return the string representation of the path with forward (/)
        slashes."""
        f = self._flavour
        return str(self).replace(f.sep, '/')

    def __bytes__(self):
        """Return the bytes representation of the path. This is only
        recommended to use under Unix."""
        if sys.version_info < (3, 2):
            raise NotImplementedError("needs Python 3.2 or later")
        return os.fsencode(str(self))

    def __repr__(self):
        return "{0}({1!r})".format(self.__class__.__name__, self.as_posix())

    def as_uri(self):
        """Return the path as a 'file' URI."""
        if not self.is_absolute():
            raise ValueError("relative path can't be expressed as a file URI")
        return self._flavour.make_uri(self)

    @property
    def _cparts(self):
        # Cached casefolded parts, for hashing and comparison
        try:
            return self._cached_cparts
        except AttributeError:
            self._cached_cparts = self._flavour.casefold_parts(self._parts)
            return self._cached_cparts

    def __eq__(self, other):
        # Paths of different flavours never compare equal.
        if not isinstance(other, PurePath):
            return NotImplemented
        return self._cparts == other._cparts and self._flavour is other._flavour

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        try:
            return self._hash
        except AttributeError:
            self._hash = hash(tuple(self._cparts))
            return self._hash

    def __lt__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts < other._cparts

    def __le__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts <= other._cparts

    def __gt__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts > other._cparts

    def __ge__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts >= other._cparts

    drive = property(attrgetter('_drv'),
                     doc="""The drive prefix (letter or UNC path), if any.""")

    root = property(attrgetter('_root'),
                    doc="""The root of the path, if any.""")

    @property
    def anchor(self):
        """The concatenation of the drive and root, or ''."""
        anchor = self._drv + self._root
        return anchor

    @property
    def name(self):
        """The final path component, if any."""
        parts = self._parts
        # An anchored path with only its anchor part has no name.
        if len(parts) == (1 if (self._drv or self._root) else 0):
            return ''
        return parts[-1]

    @property
    def suffix(self):
        """The final component's last suffix, if any."""
        name = self.name
        i = name.rfind('.')
        # A leading or trailing dot is not a suffix ('.bashrc', 'foo.').
        if 0 < i < len(name) - 1:
            return name[i:]
        else:
            return ''

    @property
    def suffixes(self):
        """A list of the final component's suffixes, if any."""
        name = self.name
        if name.endswith('.'):
            return []
        name = name.lstrip('.')
        return ['.' + suffix for suffix in name.split('.')[1:]]

    @property
    def stem(self):
        """The final path component, minus its last suffix."""
        name = self.name
        i = name.rfind('.')
        if 0 < i < len(name) - 1:
            return name[:i]
        else:
            return name

    def with_name(self, name):
        """Return a new path with the file name changed."""
        if not self.name:
            raise ValueError("%r has an empty name" % (self,))
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])

    def with_suffix(self, suffix):
        """Return a new path with the file suffix changed (or added, if none)."""
        # XXX if suffix is None, should the current suffix be removed?
        drv, root, parts = self._flavour.parse_parts((suffix,))
        # The suffix must be a single bare component starting with '.'.
        if drv or root or len(parts) != 1:
            raise ValueError("Invalid suffix %r" % (suffix))
        suffix = parts[0]
        if not suffix.startswith('.'):
            raise ValueError("Invalid suffix %r" % (suffix))
        name = self.name
        if not name:
            raise ValueError("%r has an empty name" % (self,))
        old_suffix = self.suffix
        if not old_suffix:
            name = name + suffix
        else:
            name = name[:-len(old_suffix)] + suffix
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])

    def relative_to(self, *other):
        """Return the relative path to another path identified by the passed
        arguments. If the operation is not possible (because this is not
        a subpath of the other path), raise ValueError.
        """
        # For the purpose of this method, drive and root are considered
        # separate parts, i.e.:
        # Path('c:/').relative_to('c:') gives Path('/')
        # Path('c:/').relative_to('/') raise ValueError
        if not other:
            raise TypeError("need at least one argument")
        parts = self._parts
        drv = self._drv
        root = self._root
        if root:
            abs_parts = [drv, root] + parts[1:]
        else:
            abs_parts = parts
        to_drv, to_root, to_parts = self._parse_args(other)
        if to_root:
            to_abs_parts = [to_drv, to_root] + to_parts[1:]
        else:
            to_abs_parts = to_parts
        n = len(to_abs_parts)
        cf = self._flavour.casefold_parts
        # Our path must start with the other path's parts (casefolded).
        if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
            formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
            raise ValueError("{!r} does not start with {!r}"
                             .format(str(self), str(formatted)))
        return self._from_parsed_parts('', root if n == 1 else '',
                                       abs_parts[n:])

    @property
    def parts(self):
        """An object providing sequence-like access to the
        components in the filesystem path."""
        # We cache the tuple to avoid building a new one each time .parts
        # is accessed. XXX is this necessary?
        try:
            return self._pparts
        except AttributeError:
            self._pparts = tuple(self._parts)
            return self._pparts

    def joinpath(self, *args):
        """Combine this path with one or several arguments, and return a
        new path representing either a subpath (if all arguments are relative
        paths) or a totally different path (if one of the arguments is
        anchored).
        """
        return self._make_child(args)

    def __truediv__(self, key):
        return self._make_child((key,))

    def __rtruediv__(self, key):
        return self._from_parts([key] + self._parts)

    # Python 2 compatibility aliases.
    if sys.version_info < (3,):
        __div__ = __truediv__
        __rdiv__ = __rtruediv__

    @property
    def parent(self):
        """The logical parent of the path."""
        drv = self._drv
        root = self._root
        parts = self._parts
        # The anchor is its own parent.
        if len(parts) == 1 and (drv or root):
            return self
        return self._from_parsed_parts(drv, root, parts[:-1])

    @property
    def parents(self):
        """A sequence of this path's logical parents."""
        return _PathParents(self)

    def is_absolute(self):
        """True if the path is absolute (has both a root and, if applicable,
        a drive)."""
        if not self._root:
            return False
        return not self._flavour.has_drv or bool(self._drv)

    def is_reserved(self):
        """Return True if the path contains one of the special names reserved
        by the system, if any."""
        return self._flavour.is_reserved(self._parts)

    def match(self, path_pattern):
        """
        Return True if this path matches the given pattern.
        """
        cf = self._flavour.casefold
        path_pattern = cf(path_pattern)
        drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
        if not pat_parts:
            raise ValueError("empty pattern")
        if drv and drv != cf(self._drv):
            return False
        if root and root != cf(self._root):
            return False
        parts = self._cparts
        if drv or root:
            # An anchored pattern must match the whole path.
            if len(pat_parts) != len(parts):
                return False
            pat_parts = pat_parts[1:]
        elif len(pat_parts) > len(parts):
            return False
        # A relative pattern matches against the trailing components.
        for part, pat in zip(reversed(parts), reversed(pat_parts)):
            if not fnmatch.fnmatchcase(part, pat):
                return False
        return True
class PurePosixPath(PurePath):
    """PurePath subclass with POSIX (non-Windows) path semantics; offers
    no filesystem I/O."""
    _flavour = _posix_flavour
    __slots__ = ()
class PureWindowsPath(PurePath):
    """PurePath subclass with Windows path semantics (drives, case
    folding); offers no filesystem I/O."""
    _flavour = _windows_flavour
    __slots__ = ()
# Filesystem-accessing classes
class Path(PurePath):
    """PurePath subclass that performs actual filesystem I/O through its
    accessor.  Instantiating Path yields a PosixPath or WindowsPath
    depending on the host OS."""
    __slots__ = (
        '_accessor',
    )

    def __new__(cls, *args, **kwargs):
        if cls is Path:
            cls = WindowsPath if os.name == 'nt' else PosixPath
        self = cls._from_parts(args, init=False)
        # Refuse to create e.g. a WindowsPath on POSIX: its I/O would fail.
        if not self._flavour.is_supported:
            raise NotImplementedError("cannot instantiate %r on your system"
                                      % (cls.__name__,))
        self._init()
        return self

    def _init(self,
              # Private non-constructor arguments
              template=None,
              ):
        # Inherit the accessor from *template* (another Path) if given.
        if template is not None:
            self._accessor = template._accessor
        else:
            self._accessor = _normal_accessor

    def _make_child_relpath(self, part):
        # This is an optimization used for dir walking. `part` must be
        # a single part relative to this path.
        parts = self._parts + [part]
        return self._from_parsed_parts(self._drv, self._root, parts)

    def _opener(self, name, flags, mode=0o666):
        # A stub for the opener argument to built-in open()
        return self._accessor.open(self, flags, mode)

    def _raw_open(self, flags, mode=0o777):
        """
        Open the file pointed by this path and return a file descriptor,
        as os.open() does.
        """
        return self._accessor.open(self, flags, mode)

    # Public API

    @classmethod
    def cwd(cls):
        """Return a new path pointing to the current working directory
        (as returned by os.getcwd()).
        """
        return cls(os.getcwd())

    def iterdir(self):
        """Iterate over the files in this directory. Does not yield any
        result for the special paths '.' and '..'.
        """
        for name in self._accessor.listdir(self):
            if name in ('.', '..'):
                # Yielding a path object for these makes little sense
                continue
            yield self._make_child_relpath(name)

    def glob(self, pattern):
        """Iterate over this subtree and yield all existing files (of any
        kind, including directories) matching the given pattern.
        """
        pattern = self._flavour.casefold(pattern)
        drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
        if drv or root:
            raise NotImplementedError("Non-relative patterns are unsupported")
        selector = _make_selector(tuple(pattern_parts))
        for p in selector.select_from(self):
            yield p

    def rglob(self, pattern):
        """Recursively yield all existing files (of any kind, including
        directories) matching the given pattern, anywhere in this subtree.
        """
        pattern = self._flavour.casefold(pattern)
        drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
        if drv or root:
            raise NotImplementedError("Non-relative patterns are unsupported")
        # Equivalent to glob() with '**/' prepended to the pattern.
        selector = _make_selector(("**",) + tuple(pattern_parts))
        for p in selector.select_from(self):
            yield p

    def absolute(self):
        """Return an absolute version of this path. This function works
        even if the path doesn't point to anything.
        No normalization is done, i.e. all '.' and '..' will be kept along.
        Use resolve() to get the canonical path to a file.
        """
        # XXX untested yet!
        if self.is_absolute():
            return self
        # FIXME this must defer to the specific flavour (and, under Windows,
        # use nt._getfullpathname())
        obj = self._from_parts([os.getcwd()] + self._parts, init=False)
        obj._init(template=self)
        return obj

    def resolve(self):
        """
        Make the path absolute, resolving all symlinks on the way and also
        normalizing it (for example turning slashes into backslashes under
        Windows).
        """
        s = self._flavour.resolve(self)
        if s is None:
            # No symlink resolution => for consistency, raise an error if
            # the path doesn't exist or is forbidden
            self.stat()
            s = str(self.absolute())
        # Now we have no symlinks in the path, it's safe to normalize it.
        normed = self._flavour.pathmod.normpath(s)
        obj = self._from_parts((normed,), init=False)
        obj._init(template=self)
        return obj

    def stat(self):
        """
        Return the result of the stat() system call on this path, like
        os.stat() does.
        """
        return self._accessor.stat(self)

    def owner(self):
        """
        Return the login name of the file owner.
        """
        # Imported lazily: the pwd module is POSIX-only.
        import pwd
        return pwd.getpwuid(self.stat().st_uid).pw_name

    def group(self):
        """
        Return the group name of the file gid.
        """
        # Imported lazily: the grp module is POSIX-only.
        import grp
        return grp.getgrgid(self.stat().st_gid).gr_name

    def open(self, mode='r', buffering=-1, encoding=None,
             errors=None, newline=None):
        """
        Open the file pointed by this path and return a file object, as
        the built-in open() function does.
        """
        if sys.version_info >= (3, 3):
            return io.open(str(self), mode, buffering, encoding, errors, newline,
                           opener=self._opener)
        else:
            return io.open(str(self), mode, buffering, encoding, errors, newline)

    def touch(self, mode=0o666, exist_ok=True):
        """
        Create this file with the given access mode, if it doesn't exist.
        """
        if exist_ok:
            # First try to bump modification time
            # Implementation note: GNU touch uses the UTIME_NOW option of
            # the utimensat() / futimens() functions.
            t = time.time()
            try:
                self._accessor.utime(self, (t, t))
            except OSError:
                # Avoid exception chaining
                pass
            else:
                return
        flags = os.O_CREAT | os.O_WRONLY
        if not exist_ok:
            flags |= os.O_EXCL
        fd = self._raw_open(flags, mode)
        os.close(fd)

    def mkdir(self, mode=0o777, parents=False):
        """Create a new directory at this path; with parents=True, also
        create any missing ancestors (like `mkdir -p`)."""
        if not parents:
            self._accessor.mkdir(self, mode)
        else:
            try:
                self._accessor.mkdir(self, mode)
            except OSError as e:
                if e.errno != ENOENT:
                    raise
                # Missing parent: create ancestors first, then retry.
                self.parent.mkdir(parents=True)
                self._accessor.mkdir(self, mode)

    def chmod(self, mode):
        """
        Change the permissions of the path, like os.chmod().
        """
        self._accessor.chmod(self, mode)

    def lchmod(self, mode):
        """
        Like chmod(), except if the path points to a symlink, the symlink's
        permissions are changed, rather than its target's.
        """
        self._accessor.lchmod(self, mode)

    def unlink(self):
        """
        Remove this file or link.
        If the path is a directory, use rmdir() instead.
        """
        self._accessor.unlink(self)

    def rmdir(self):
        """
        Remove this directory. The directory must be empty.
        """
        self._accessor.rmdir(self)

    def lstat(self):
        """
        Like stat(), except if the path points to a symlink, the symlink's
        status information is returned, rather than its target's.
        """
        return self._accessor.lstat(self)

    def rename(self, target):
        """
        Rename this path to the given path.
        """
        self._accessor.rename(self, target)

    def replace(self, target):
        """
        Rename this path to the given path, clobbering the existing
        destination if it exists.
        """
        if sys.version_info < (3, 3):
            raise NotImplementedError("replace() is only available "
                                      "with Python 3.3 and later")
        self._accessor.replace(self, target)

    def symlink_to(self, target, target_is_directory=False):
        """
        Make this path a symlink pointing to the given path.
        Note the order of arguments (self, target) is the reverse of os.symlink's.
        """
        self._accessor.symlink(target, self, target_is_directory)

    # Convenience functions for querying the stat results

    def exists(self):
        """
        Whether this path exists.
        """
        try:
            self.stat()
        except OSError as e:
            if e.errno != ENOENT:
                raise
            return False
        return True

    def is_dir(self):
        """
        Whether this path is a directory.
        """
        try:
            return S_ISDIR(self.stat().st_mode)
        except OSError as e:
            if e.errno != ENOENT:
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False

    def is_file(self):
        """
        Whether this path is a regular file (also True for symlinks pointing
        to regular files).
        """
        try:
            return S_ISREG(self.stat().st_mode)
        except OSError as e:
            if e.errno != ENOENT:
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False

    def is_symlink(self):
        """
        Whether this path is a symbolic link.
        """
        try:
            return S_ISLNK(self.lstat().st_mode)
        except OSError as e:
            if e.errno != ENOENT:
                raise
            # Path doesn't exist
            return False

    def is_block_device(self):
        """
        Whether this path is a block device.
        """
        try:
            return S_ISBLK(self.stat().st_mode)
        except OSError as e:
            if e.errno != ENOENT:
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False

    def is_char_device(self):
        """
        Whether this path is a character device.
        """
        try:
            return S_ISCHR(self.stat().st_mode)
        except OSError as e:
            if e.errno != ENOENT:
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False

    def is_fifo(self):
        """
        Whether this path is a FIFO.
        """
        try:
            return S_ISFIFO(self.stat().st_mode)
        except OSError as e:
            if e.errno != ENOENT:
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False

    def is_socket(self):
        """
        Whether this path is a socket.
        """
        try:
            return S_ISSOCK(self.stat().st_mode)
        except OSError as e:
            if e.errno != ENOENT:
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
class PosixPath(Path, PurePosixPath):
    """Concrete path with POSIX semantics and filesystem I/O."""
    __slots__ = ()
class WindowsPath(Path, PureWindowsPath):
    """Concrete path with Windows semantics and filesystem I/O."""
    __slots__ = ()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.