text stringlengths 0 27.1M | meta dict |
|---|---|
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.optim as optim
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
import torchvision.utils as utils
import numpy as np
import time
import torch.nn.functional as F
from torch.nn import init
from PIL import Image
import vgg
import argparse
import model
def main():
    """Run neural artistic style transfer.

    Optimizes a copy of the content image with L-BFGS so that its VGG19
    features match the content image while its Gram matrices match the
    style image. Saves one intermediate image per iteration.
    """
    parser = argparse.ArgumentParser(description='Pytorch implementation of Neural Artistic Style Transfer')
    parser.add_argument('--w_content', default=1.0, type=float, help='Weight for content loss')
    parser.add_argument('--w_style', default=10000.0, type=float, help='Weight for style loss')
    parser.add_argument('--img_content', default='content.jpg', help='Image name for content')
    parser.add_argument('--img_style', default='style.jpg', help='Image name for style')
    parser.add_argument('--iteration', '-i', default=50, type=int, help='Total iteration')
    args = parser.parse_args()

    ### Setting parameters ###
    w_content = args.w_content
    w_style = args.w_style
    iteration = args.iteration

    ### Load Model ###
    # Frozen, pretrained VGG19 used only as a feature extractor.
    net = vgg.vgg19(pretrained=True).cuda().eval()

    ### Load Images ###
    # image_modify starts from the content image and is the only tensor optimized.
    image_content, image_style = model.image_loader(args.img_content, args.img_style)
    image_modify = image_content.clone()
    image_modify.requires_grad = True

    ### Iteration ###
    net_m, content_losses, style_losses = model.get_layer_out(net, image_content, image_style)
    optimi = optim.LBFGS([image_modify])
    for epoch in range(iteration):
        def closure():
            # L-BFGS may re-evaluate the loss several times per step.
            optimi.zero_grad()
            net_m(image_modify)
            content_loss_sum = 0.0
            style_loss_sum = 0.0
            for c in content_losses:
                content_loss_sum += c.loss
            for s in style_losses:
                style_loss_sum += s.loss
            loss = style_loss_sum * w_style + content_loss_sum * w_content
            loss.backward()
            print('epoch: {}, loss: {} / {} / {}'.format(
                epoch, loss.data, style_loss_sum.data * w_style, content_loss_sum.data * w_content))
            return loss
        optimi.step(closure)
        # Keep pixel values in a valid image range after each optimizer step.
        image_modify.data.clamp_(0, 1)
        # BUGFIX: output filename was misspelled 'outout{}.jpg'.
        utils.save_image(torch.squeeze(image_modify), 'output{}.jpg'.format(epoch))


if __name__ == '__main__':
    main()
| {
"alphanum_fraction": 0.6761268781,
"author": null,
"avg_line_length": 36.303030303,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6432abbd895214c5aa3c38290cb18e9ea7ccbdb4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d9f7ce294abe7e4fda8d35509f49259fec8d1f62",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "minkyu-choi04/research_paper_implementation",
"max_forks_repo_path": "main.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d9f7ce294abe7e4fda8d35509f49259fec8d1f62",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "minkyu-choi04/research_paper_implementation",
"max_issues_repo_path": "main.py",
"max_line_length": 142,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "d9f7ce294abe7e4fda8d35509f49259fec8d1f62",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "minkyu-choi04/research_paper_implementation",
"max_stars_repo_path": "main.py",
"max_stars_repo_stars_event_max_datetime": "2020-04-19T15:17:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-04-19T15:17:45.000Z",
"num_tokens": 538,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2396
} |
import os
import glob
import subprocess
from numpy import testing
import numpy as np
from numpy.core.numeric import NaN
from numpy.lib.function_base import average
from numpy.lib.shape_base import split
import pandas as pd
import json
import io
import matplotlib.pyplot as plt
# Genetic algorithm in the future?
"""
json_pandas_molecules dataframe
'name', 'exc', 'nm','osci', 'method_basis_set', 'orbital_Numbers', 'HOMO','LUMO', 'generalSMILES', 'localName', 'parts', 'SMILES'
"""
def json_pandas_molecule(path_results):
    """Load a results JSON and flatten each molecule's excitations into rows.

    Returns a DataFrame with one row per excitation; molecule-level fields
    (name, HOMO, LUMO, SMILES, ...) are repeated on every row via `meta`.

    BUGFIX: removed the initial json_normalize / column-reorder block whose
    result was immediately overwritten (dead code).
    """
    dat = pd.read_json(path_results)
    df = pd.json_normalize(data=dat['molecules'], record_path='excitations',
                           meta=['name', 'HOMO', 'LUMO', 'SMILES',
                                 'generalSMILES', 'localName', 'parts'])
    # Fix the column order expected by downstream consumers.
    return df[[
        'name', 'exc', 'nm', 'osci', 'method_basis_set', 'orbital_Numbers',
        'HOMO', 'LUMO', 'generalSMILES', 'localName', 'parts', 'SMILES',
    ]]
def json_pandas_molecule_BM(path_results):
    """Flatten benchmark-molecule JSON into one row per excitation (incl. 'exp').

    NOTE(review): this definition is shadowed by a later function of the same
    name in this file and is therefore never the one actually called.
    """
    dat = pd.read_json(path_results)
    FIELDS = ["name", "localName", "generalSMILES"]
    df = pd.json_normalize(dat["molecules"])
    df[FIELDS]
    molecule_cols = ['name', 'parts', 'generalSMILES', 'localName',
                     'SMILES', 'excitations', 'HOMO', 'LUMO', 'exp']
    df = df[molecule_cols]
    # Re-normalize with one row per excitation, carrying molecule metadata.
    df = pd.json_normalize(data=dat['molecules'], record_path='excitations',
                           meta=['name', 'HOMO', 'LUMO', 'SMILES',
                                 'generalSMILES', 'localName', 'parts', 'exp'])
    ordered = ['name', 'exc', 'nm', 'osci', 'method_basis_set', 'orbital_Numbers',
               'HOMO', 'LUMO', 'generalSMILES', 'localName', 'parts', 'SMILES', 'exp']
    return df[ordered]
def json_pandas_molecule_BM(path_results):
    """Flatten benchmark-molecule JSON into one row per excitation.

    Same as json_pandas_molecule but also carries the experimental value
    ('exp') on every row.

    BUGFIX: removed the initial json_normalize / column-reorder block whose
    result was immediately overwritten (dead code).
    """
    dat = pd.read_json(path_results)
    df = pd.json_normalize(data=dat['molecules'], record_path='excitations',
                           meta=['name', 'HOMO', 'LUMO', 'SMILES',
                                 'generalSMILES', 'localName', 'parts', 'exp'])
    # Fix the column order expected by downstream consumers.
    return df[[
        'name', 'exc', 'nm', 'osci', 'method_basis_set', 'orbital_Numbers',
        'HOMO', 'LUMO', 'generalSMILES', 'localName', 'parts', 'SMILES', 'exp',
    ]]
"""
Excitation pandas:::
LocalName generalSMILES Excitation1 Excitation2 Excitation3
"""
def nm_osci_df(df):
    """Return (nm, osci, generalSMILES), sorted by nm then osci, both descending."""
    subset = df[['nm', 'osci', 'generalSMILES']]
    return subset.sort_values(by=['nm', 'osci'], ascending=[False, False])
def name_nm_df(df):
    """Project df down to just the 'name' and 'nm' columns."""
    return df.loc[:, ['name', 'nm']]
def name_nm_osci_LUMO_df(df):
    """Project df down to name, nm, osci and LUMO."""
    wanted = ['name', 'nm', 'osci', 'LUMO']
    return df[wanted]
def name_nm_osci_LUMO_exc_df(df):
    """Project df down to name, nm, osci, LUMO and exc."""
    wanted = ['name', 'nm', 'osci', 'LUMO', 'exc']
    return df[wanted]
def gen_allowed_dict(df):
    """Map every piece appearing in any dye 'name' (underscore-separated) to True.

    The flags are later flipped to False by the scoring/filtering passes.

    BUGFIX: removed the unused `a, b, d = names[0..2]` unpack, which both did
    nothing and crashed on names without exactly three parts.
    """
    allowed_dict = {}
    for name in df['name']:
        for piece in name.split('_'):
            allowed_dict[piece] = True
    return allowed_dict
def acquire_averages(df, piece_dict, allowed_dict):
    """Average nm/osci/LUMO per piece over first excitations of allowed dyes.

    A row contributes to a piece when the piece's key is a substring of the
    dye name, all three underscore-separated name parts are currently allowed,
    and exc == 1. Mutates and returns piece_dict, mapping
    key -> [avg_nm, avg_osci, avg_LUMO]. Prints a summary per key.
    """
    for key in piece_dict:
        nms, oscis, lumos = [], [], []
        for _, row in df.iterrows():
            d, b, a = row['name'].split("_")
            parts_allowed = allowed_dict[d] and allowed_dict[b] and allowed_dict[a]
            if key in row['name'] and parts_allowed and row['exc'] == 1:
                nms.append(row['nm'])
                oscis.append(row['osci'])
                lumos.append(row['LUMO'])
        piece_dict[key] = [sum(nms) / len(nms),
                           sum(oscis) / len(oscis),
                           sum(lumos) / len(lumos)]
    for key, value in piece_dict.items():
        print("KEY: %s, nm: %.1f" % (key, value[0]))
    print()
    return piece_dict
def evalAllowed(piece_dict, allowed_dict):
    """Ban pieces whose averages miss the targets.

    A piece is disallowed when its avg nm < 430, avg osci < 0.1, or
    avg LUMO > -0.9. Mutates and returns allowed_dict.
    """
    for key, stats in piece_dict.items():
        if stats[0] < 430 or stats[1] < 0.1 or stats[2] > -0.9:
            allowed_dict[key] = False
    return allowed_dict
def score_pieces(df):
    """Initialize per-piece score accumulators (the scoring itself is disabled).

    Builds zeroed [nm, osci, LUMO] entries for every acceptor, backbone and
    donor seen in df['name']; the averaging/filtering pipeline below is
    commented out. Returns None.
    """
    allowed_dict = gen_allowed_dict(df)
    eA_dict, eD_dict, bb_dict = {}, {}, {}
    for dye_name in df['name']:
        parts = dye_name.split('_')
        eA_dict[parts[0]] = [0, 0, 0]  # nm_avg, osci_avg, LUMO_avg placeholders
        bb_dict[parts[1]] = [0, 0, 0]
        eD_dict[parts[2]] = [0, 0, 0]
    '''
    Need to update to
    if LUMO < -0.9:
    0.5*lambda/650 + 0.3*f + 0.2*LUMO/-0.1.3
    '''
    # Disabled pipeline, kept for reference:
    #bb_dict = acquire_averages(df, bb_dict, allowed_dict)
    #eA_dict = acquire_averages(df, eA_dict, allowed_dict)
    #eD_dict = acquire_averages(df, eD_dict, allowed_dict)
    #allowed_dict = evalAllowed(bb_dict, allowed_dict)
    #allowed_dict = evalAllowed(eA_dict, allowed_dict)
    #allowed_dict = evalAllowed(eD_dict, allowed_dict)
    return
'''
col = df.loc[: , "salary_1":"salary_3"]
where "salary_1" is the start column name and "salary_3" is the end column name
df['salary_mean'] = col.mean(axis=1)
'''
def score_structures(df):
    """Attach a 'score' column blending nm (0.85), osci (0.10) and LUMO (0.05).

    nm is normalized against a 650 nm target and LUMO against -1.3 eV.
    Mutates and returns df.
    """
    nm_term = 0.85 * df["nm"] / 650
    osci_term = 0.10 * df['osci']
    lumo_term = 0.05 * df['LUMO'] / -1.3
    df["score"] = nm_term + osci_term + lumo_term
    return df
def total_allowed_dict(allowed_dict):
    """Return how many pieces are still allowed (truthy flag values).

    IMPROVED: manual counter loop replaced with an idiomatic generator sum.
    """
    return sum(1 for allowed in allowed_dict.values() if allowed)
def acquire_allowed(allowed_dict):
    """Split the flag dict into (allowed_keys, banned_keys) lists."""
    allowed = [key for key, flag in allowed_dict.items() if flag]
    banned = [key for key, flag in allowed_dict.items() if not flag]
    return allowed, banned
def score_piece(df, banned_lst=None, structures=['bb'], col_name='CAM-B3LYP/6-311G(d,p)',
                score_type='nm', above_score=460):
    """Score structures by each piece (EA, ED, or BB) and ban under-performers.

    score_type can either be 'nm' or 'score'; above_score sets the bar the
    piece's average must exceed, e.g. if the average 'nm' for a backbone is
    below above_score, the backbone is banned from competing again.
    Returns (allowed, banned_lst).

    BUGFIX: banned_lst previously defaulted to a shared mutable list that was
    appended to, so bans silently accumulated across separate calls; it now
    defaults to a fresh list. Also removed a duplicated `if key in structures`
    check.
    """
    if banned_lst is None:
        banned_lst = []
    df = score_structures(df)
    allowed_dict = gen_allowed_dict(df)
    pieces = {'ea': [], 'bb': [], 'ed': []}
    total_allowed_dict(allowed_dict)
    pos = -1
    for key, allowed in allowed_dict.items():
        score_lst = []
        for index, row in df.iterrows():
            if row['exc'] == 1 and allowed and key in row['name'] and row['method_basis_set'] == col_name:
                # Record which slot (ea/bb/ed) this piece occupies in the name.
                name_split = row['name'].split("_")
                for n, part in enumerate(name_split):
                    if part == key:
                        pos = n
                if score_type == 'score':
                    score_lst.append(row['score'])
                elif score_type == 'nm':
                    score_lst.append(row["nm"])
        if pos == 0:
            pos = 'ea'
        elif pos == 1:
            pos = 'bb'
        elif pos == 2:
            pos = 'ed'
        score_avg = sum(score_lst) / len(score_lst)
        pieces[pos].append([key, score_avg])
    for kind, scored in pieces.items():
        if kind in structures:
            # Ban every piece of the requested kind whose average misses the bar.
            for piece_name, avg in sorted(scored, key=lambda x: x[1], reverse=True):
                if avg < above_score:
                    allowed_dict[piece_name] = False
    total_allowed_dict(allowed_dict)
    allowed, banned = acquire_allowed(allowed_dict)
    banned_lst.extend(banned)
    return allowed, banned_lst
def df_molecules_to_df_method_basisset(df_molecules, method_basis_set=[]):
    # Pivot the long per-excitation frame into one row per molecule, with one
    # nm column per method/basis-set combination.
    # NOTE(review): method_basis_set=[] is a mutable default; it is only read
    # here, but callers should still pass their own list.
    df = {
        "Name": [],
    }
    for i in method_basis_set:
        df[i] = []
    df = pd.DataFrame(df)
    #print(df)
    for i1, r1 in df_molecules.iterrows():
        #print(r1['name'])
        #print(df.Name)
        #print(r1['name'] in df.Name)
        """
        method_basis_set_lst = ['-' for i in method_basis_set]
        method_basis_set_lst.insert(0, r1['name'])
        df.loc[len(df)] = method_basis_set_lst
        Names = pd.Series(df['Name'])
        print(df)
        print(r1['name'])
        print(df['Name'])
        print()
        break
        """
        Names = [str(i) for i in df['Name'].values]
        if str(r1['name']) not in Names:
            # First row seen for this molecule: start a new output row that is
            # pre-filled with the method names, overwriting the slot matching
            # this row's method with its nm value.
            method_basis_set_lst = [i for i in method_basis_set]
            for n, j in enumerate(method_basis_set_lst):
                #print(j, r1['method_basis_set'])
                if str(j) == str(r1['method_basis_set']):
                    method_basis_set_lst[n] = r1['nm']
            method_basis_set_lst.insert(0, r1['name'])
            #if r1['name'] == "1ed_16b_1ea":
            #    print(method_basis_set_lst)
            df.loc[len(df)] = method_basis_set_lst
        else:
            #df.ix[df['id'] == 12, ['uid','gid']] = ['IN','IN-1']
            # Molecule already present: only a first excitation (exc == 1)
            # updates the existing row's method column.
            for j in method_basis_set:
                if str(j) == r1['method_basis_set']:
                    if r1['exc'] == 1:
                        df.loc[df['Name'] == r1['name'], [j]] = [r1['nm']]
    #nm = df.sort_values([method_basis_set[0]], ascending=(False))
    return df
def df_molecules_BM_to_df_method_basisset(df_molecules, method_basis_set=[]):
    # Benchmark variant of df_molecules_to_df_method_basisset: same pivot to
    # one row per molecule, plus an 'Exp' column for the experimental value.
    # NOTE(review): method_basis_set=[] is a mutable default; it is only read
    # here, but callers should still pass their own list.
    df = {
        "Name": [],
    }
    for i in method_basis_set:
        df[i] = []
    df['Exp'] = []
    df = pd.DataFrame(df)
    #print(df)
    for i1, r1 in df_molecules.iterrows():
        #print(r1['name'])
        #print(df.Name)
        #print(r1['name'] in df.Name)
        """
        method_basis_set_lst = ['-' for i in method_basis_set]
        method_basis_set_lst.insert(0, r1['name'])
        df.loc[len(df)] = method_basis_set_lst
        Names = pd.Series(df['Name'])
        print(df)
        print(r1['name'])
        print(df['Name'])
        print()
        break
        """
        Names = [str(i) for i in df['Name'].values]
        if str(r1['name']) not in Names:
            # First row seen for this molecule: new output row pre-filled with
            # the method names, this row's method slot replaced by its nm, and
            # the experimental value appended at the end.
            method_basis_set_lst = [i for i in method_basis_set]
            for n, j in enumerate(method_basis_set_lst):
                #print(j, r1['method_basis_set'])
                if str(j) == str(r1['method_basis_set']):
                    method_basis_set_lst[n] = r1['nm']
            method_basis_set_lst.insert(0, r1['name'])
            method_basis_set_lst.append(r1['exp'])
            #if r1['name'] == "1ed_16b_1ea":
            #    print(method_basis_set_lst)
            df.loc[len(df)] = method_basis_set_lst
        else:
            #df.ix[df['id'] == 12, ['uid','gid']] = ['IN','IN-1']
            # Molecule already present: only a first excitation (exc == 1)
            # updates the method column and the Exp column.
            for j in method_basis_set:
                if str(j) == r1['method_basis_set']:
                    if r1['exc'] == 1:
                        #df.loc[df['Name'] == r1['name'], [j]] = [r1['nm']]
                        df.loc[df['Name'] == r1['name'], [j, 'Exp']] = [r1['nm'], r1['exp']]
    #nm = df.sort_values([method_basis_set[0]], ascending=(False))
    return df
def convert_df_nm_to_eV(df, columns_convert=["Exp"]):
    """Convert the given wavelength columns (nm) to photon energies (eV) in place.

    Uses E = h*c / lambda with c expressed in nm/s; since the transform is its
    own inverse, the same call also maps eV back to nm. Returns df.
    """
    h = 6.626E-34          # Planck constant, J*s
    c = 3E17               # speed of light, nm/s
    Joules_to_eV = 1.602E-19
    for column in columns_convert:
        df[column] = h * c / (df[column] * Joules_to_eV)
    return df
def convert_df_eV_to_nm(df, columns_convert=["Exp"]):
    """Convert the given energy columns (eV) back to wavelengths (nm) in place.

    Identical formula to convert_df_nm_to_eV — lambda = h*c / E is its own
    inverse with c in nm/s. Returns df.
    """
    h = 6.626E-34          # Planck constant, J*s
    c = 3E17               # speed of light, nm/s
    Joules_to_eV = 1.602E-19
    for column in columns_convert:
        df[column] = h * c / (df[column] * Joules_to_eV)
    return df
def plot_methods(df,
                 weighted_avg=['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
                 headers_colors=[
                     ['CAM-B3LYP/6-311G(d,p)', 'blue'], ['BHandHLYP/6-311G(d,p)', 'red'],
                     ['PBE0/6-311G(d,p)', 'orange'], ['Weighted Average', 'green']
                 ],
                 outname='dye_plot_weighted.png',
                 exp=False,
                 weights=[0.6594543456, 0.3405456544],
                 units='eV',
                 sort_by='Weighted Avg.',
                 transparent=False,
                 ):
    """Plot each method's excitation energies across dyes, sorted by sort_by.

    df must be df_method_basisset. When exp is True an 'Exp.' series is added
    and the mean |weighted average - experiment| difference is printed.

    BUGFIX: the function used to insert into the shared default headers_colors
    list, so it grew by one 'Exp.' entry on every exp=True call; it now works
    on a per-call copy. The bare `except` around plotting is narrowed to the
    IndexError it was guarding (more columns than label/color pairs).
    """
    headers_colors = [list(pair) for pair in headers_colors]
    df = df.drop(['Name'], axis=1)
    df = df.apply(lambda x: pd.to_numeric(x, errors='coerce')).dropna()
    df['Weighted Avg.'] = df['CAM-B3LYP/6-311G(d,p)'] * weights[0] + df['PBE1PBE/6-311G(d,p)'] * weights[1]
    if exp:
        df = df.sort_values(['Weighted Avg.'], ascending=False)
        dif = (df['Weighted Avg.'] - df['Exp']).abs().mean()
        print("average difference", dif)
        headers_colors.insert(3, ['Exp.', 'black'])
        df = df.sort_values(sort_by, ascending=False)
    else:
        df = df.sort_values([sort_by], ascending=False)
    fig = plt.figure(dpi=400)
    dye_cnt = range(len(df['Weighted Avg.']))
    for ind, col in enumerate(df[::-1]):
        try:
            plt.plot(
                dye_cnt, list(df[col]),
                label=headers_colors[ind][0],
                color=headers_colors[ind][1],
                linewidth=1
            )
        except IndexError:
            # More data columns than configured label/color pairs.
            print("Error in color and label.\nNo label or specific color assigned\n\n")
            plt.plot(
                dye_cnt, list(df[col]),
                linewidth=1
            )
    plt.xlabel("Theoretical Dyes Sorted by the Weighted Average Excitation Energy")
    plt.ylabel("Excitation Energy (%s)" % units)
    plt.grid(color='grey', which='major',
             axis='y',
             linestyle='-', linewidth=0.2)
    plt.legend()
    print(outname)
    print(os.getcwd())
    plt.savefig(outname, transparent=transparent)
def plot_solvents(df, outname, units='eV', exp=True, transparent=True,
                  solvents=["Dichloromethane", 'N,N-Dimethylformamide', 'Tetrahydrofuran'],
                  functionals=["CAM-B3LYP", "PBE1PBE", 'bhandhlyp',]
                  ):
    # One figure per functional: plot every column whose name contains that
    # functional, labeled by the solvent name embedded in the column (columns
    # with no solvent marker are labeled 'Vacuum'). Saves "<functional>.png".
    # NOTE(review): the `outname` parameter is accepted but never used — the
    # output filename comes from the functional name.
    df = df.drop(['Name'], axis=1)
    df = df.apply(lambda x: pd.to_numeric(x, errors='coerce')).dropna()
    headers_colors=[]
    clean_solv = []
    for solv in solvents:
        # Column names use the cleaned, lowercased solvent spelling.
        clean_solv.append(clean_solvent(solv).lower())
    if exp:
        #df = df.sort_values(['Exp'], ascending=False)
        # df = df.sort_values(['Weighted Avg.'], ascending=False)
        # dif = (df['Weighted Avg.'] - df['Exp']).abs().mean()
        # print("average difference", dif)
        headers_colors.append(['Exp.', 'black'])
        #df = df.sort_values(sort_by, ascending=False)
    # else:
        #df = df.sort_values([sort_by], ascending=False)
    for k in functionals:
        fig = plt.figure(dpi=400)
        # dye_cnt = range(len(df['Weighted Avg.']))
        dye_cnt = range(df.shape[0])
        for ind, col in enumerate(df[::-1]):
            if k in col:
                # print(ind)
                # print(col)
                # print(headers_colors[ind][0])
                # print(list(df[col]))
                print(col)
                label = ''
                # NOTE(review): this inner loop reuses (shadows) the outer
                # loop variable `ind`; harmless here since the outer `ind` is
                # not read afterwards, but worth confirming before refactors.
                for ind, solv in enumerate(clean_solv):
                    if solv in col:
                        label = solvents[ind]
                if label == '':
                    label = 'Vacuum'
                # try:
                plt.plot(
                    dye_cnt, list(df[col]),
                    label=label,
                    # color=headers_colors[ind][1],
                    linewidth=1
                )
                # except:
                #     print("Error in color and label.\nNo label or specific color assigned\n\n")
                #     plt.plot(
                #         dye_cnt, list(df[col]),
                #         linewidth=1
                #     )
        # plt.title('Methods on Theoretical Dyes')
        plt.xlabel("Benchmark Dyes")
        plt.ylabel("Excitation Energy (%s)" % units)
        plt.grid(color='grey', which='major',
                 axis='y',
                 linestyle='-', linewidth=0.2)
        plt.legend()
        # print(col)
        print(os.getcwd(), k)
        plt.savefig("%s.png" % (k), transparent=transparent)
def plot_methods_og(df,
                    weighted_avg=['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
                    headers_colors=[['CAM-B3LYP/6-311G(d,p)', 'blue'], ['BHandHLYP/6-311G(d,p)', 'purple'], ['PBE0/6-311G(d,p)', 'red'], ['Weighted Average', 'green']],
                    ):
    """Original plotting routine: one line per method, dyes sorted descending
    by the plain mean of CAM-B3LYP and PBE1PBE energies.

    df must be df_method_basisset. Saves "dyes_theor_methods.png".
    """
    df = df.drop(['Name'], axis=1)
    df = df.apply(lambda column: pd.to_numeric(column, errors='coerce')).dropna()
    df['Weighted Avg.'] = df[['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)']].mean(axis=1)
    df = df.sort_values(['Weighted Avg.'], ascending=False)
    fig = plt.figure(dpi=400)
    x_positions = range(len(df['Weighted Avg.']))
    for idx, column in enumerate(df[::-1]):
        print(idx)
        print(headers_colors[idx][0])
        label, color = headers_colors[idx]
        plt.plot(x_positions, list(df[column]), label=label, color=color, linewidth=1)
    plt.title('Methods on Theoretical Dyes')
    plt.xlabel("Theoretical Dyes Sorted by the Weighted Average Excitation Energy")
    plt.ylabel("Excitation Energy (nm)")
    plt.grid(color='grey', which='major', axis='y', linestyle='-', linewidth=0.2)
    plt.legend()
    plt.savefig("dyes_theor_methods.png")
def plot_methods_BM(
    df,
    exp_data={
        "dyes": ['AP25', 'D1', 'D3', 'XY1', 'ZL003'],
        "CAM": [-127.31, -39.04, -22.85, -34.71, -20.29],
        "PBE": [-13.80, 141.99, 238.93, 125.85, 91.99],
        "Weighted": [-89.85, 20.70, 63.54, 18.28, 16.76],
    },
):
    """Plot per-method differences from experiment for the benchmark dyes.

    BUGFIX: exp_data was only defined inside a docstring, so every call raised
    NameError; it is now a keyword argument with the same values (mirroring
    plot_methods_exp). The df parameter is accepted for signature
    compatibility but unused, as before.
    """
    fig = plt.figure(dpi=400)
    plt.axhline(y=0, color='black', linestyle='-', linewidth=0.5)
    plt.plot(
        exp_data["dyes"], exp_data['CAM'],
        label="CAM-B3LYP/6-311G(d,p)", color='blue',
    )
    plt.plot(
        exp_data["dyes"], exp_data['PBE'],
        label="PBE0/6-311G(d,p)", color='red',
    )
    plt.plot(
        exp_data["dyes"], exp_data['Weighted'],
        label="Weighted Average", color='green',
    )
    # Experiment is the reference, so it sits on the zero line.
    zeros = [0 for _ in range(len(exp_data['dyes']))]
    plt.plot(
        exp_data["dyes"], zeros,
        '.',
        label="Experiment", color='black',
    )
    plt.grid(color='grey', which='major',
             axis='y',
             linestyle='-', linewidth=0.2)
    plt.title('Methods Compared with Experimental Dyes\n')
    plt.ylim([-150, 300])
    plt.xlabel("Experimental Dyes")
    plt.ylabel("Experimental Difference (nm)")
    plt.legend()
    plt.savefig("dyes_exp_methods.png")
def plot_methods_exp(
    exp_data={
        "dyes": ['AP25', 'D1', 'D3', 'XY1', 'ZL003'],
        "CAM": [-127.31, -39.04, -22.85, -34.71, -20.29],
        "PBE": [-13.80, 141.99, 238.93, 125.85, 91.99],
        "Weighted": [-89.85, 20.70, 63.54, 18.28, 16.76],
    }
):
    """Plot per-method differences from experiment for a fixed set of dyes.

    Saves "dyes_exp_methods.png"; experiment is drawn as dots on the zero line.
    """
    fig = plt.figure(dpi=400)
    plt.axhline(y=0, color='black', linestyle='-', linewidth=0.5)
    series = [
        ('CAM', "CAM-B3LYP/6-311G(d,p)", 'blue'),
        ('PBE', "PBE0/6-311G(d,p)", 'red'),
        ('Weighted', "Weighted Average", 'green'),
    ]
    for data_key, label, color in series:
        plt.plot(exp_data["dyes"], exp_data[data_key], label=label, color=color)
    zeros = [0] * len(exp_data['dyes'])
    plt.plot(exp_data["dyes"], zeros, '.', label="Experiment", color='black')
    plt.grid(color='grey', which='major', axis='y', linestyle='-', linewidth=0.2)
    plt.title('Methods Compared with Experimental Dyes\n')
    plt.ylim([-150, 300])
    plt.xlabel("Experimental Dyes")
    plt.ylabel("Experimental Difference (nm)")
    plt.legend()
    plt.savefig("dyes_exp_methods.png")
def df_conv_energy(df, min_num=300):
    """Convert columns between nm and eV via E = h*c/lambda.

    Only values greater than min_num are transformed (so nm values become eV
    while already-small eV values are untouched); rows that are non-numeric in
    any processed column are dropped. The 'Name' column is skipped. Returns df.
    """
    h = 6.626E-34         # Planck constant, J*s
    c = 2.998E17          # speed of light, nm/s
    J_over_eV = 1.602E-19
    for col in df:
        if col == 'Name':
            continue
        df = df[pd.to_numeric(df[col], errors='coerce').notnull()]
        df[col] = df[col].mask(df[col] > min_num, h * c / (df[col] * J_over_eV))
    return df
def df_diff_std(df, col_names=['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)']):
    """Add the difference between two method columns and its z-score.

    Creates '<a> - <b>' and 'std(<a> - <b>)' columns, sorts ascending by the
    z-score column, prints the dtypes, and returns the sorted frame.

    BUGFIX: the original called df.dtypes() — dtypes is a property, so every
    call raised TypeError — and returned None; it now returns the result.
    """
    dif_col = "%s - %s" % (col_names[0], col_names[1])
    df[dif_col] = df[col_names[0]] - df[col_names[1]]
    dif_std_col = "std(%s)" % dif_col
    std_val = df[dif_col].std(axis=0)
    mean_val = df[dif_col].mean(axis=0)
    # z-score: (x - mu) / sigma
    df[dif_std_col] = (df[dif_col] - mean_val) / std_val
    df = df.sort_values([dif_std_col], ascending=True)
    print(df.dtypes)
    return df
def mean_abs_error_weighted(df, methods=['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'], weights=[0.6594543456, 0.3405456544]):
    """Mean absolute error of the two-method weighted blend against df['Exp'].

    Side effect: stores the blend in df['Weighted Avg.'].
    """
    blended = df[methods[0]] * weights[0] + df[methods[1]] * weights[1]
    df['Weighted Avg.'] = blended
    return (blended - df['Exp']).abs().mean()
def mean_abs_error(df, method='Dif. CAM-B3LYP/6-311G(d,p)'):
    """Mean of absolute values in the given difference column."""
    column = df[method]
    return column.abs().mean()
def weighted_avg_df(df, methods=['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'], weights=[0.6594543456, 0.3405456544]):
    """Add a 'Weighted Avg.' column = w0*methods[0] + w1*methods[1]; return df."""
    first, second = methods[0], methods[1]
    df['Weighted Avg.'] = df[first] * weights[0] + df[second] * weights[1]
    return df
def benchmarkFlow(path_benchmark="Benchmark/benchmarks.json"):
    """End-to-end benchmark report: load, convert units, plot, export CSV.

    BUGFIX: DataFrame.append was removed in pandas 2.0; the hand-entered dye
    rows are now added with pd.concat.
    """
    df_molecules = json_pandas_molecule_BM(path_benchmark)
    methods_basissets = ['CAM-B3LYP/6-311G(d,p)', 'bhandhlyp/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)']
    df = df_molecules_BM_to_df_method_basisset(df_molecules, methods_basissets)
    convert_lst = methods_basissets.copy()
    convert_lst.append("Exp")
    print(df)
    df = convert_df_nm_to_eV(df, convert_lst)
    # Dyes whose calculations were entered by hand (values already in eV).
    unlucky = {
        "AP25": [2.329644, 2.295717, 1.920780, 1.880036],
        "D1": [2.337250, 2.285609, 1.742975, 2.176884],
        "D3": [2.301722, 2.209749, 1.549403, 2.207872],
        "XY1": [2.398999, 2.314932, 1.839675, 2.247870],
        "NL6": [2.250481, 2.239272, 1.383166, 2.050367],
        "ZL003": [2.488369, 2.437129, 2.031108, 2.390798],
        "JW1": [2.320036, 2.302322, 1.910812, 2.103091],
    }
    extra_rows = [
        {'Name': key,
         methods_basissets[0]: val[0],
         methods_basissets[1]: val[1],
         methods_basissets[2]: val[2],
         'Exp': val[3]}
        for key, val in unlucky.items()
    ]
    df = pd.concat([df, pd.DataFrame(extra_rows)], ignore_index=True)
    # NOTE(review): the nm<->eV transform is applied twice more below,
    # mirroring the original flow (the transform is its own inverse).
    df = convert_df_nm_to_eV(df, convert_lst)
    plot_methods(df, exp=True)
    df = convert_df_nm_to_eV(df, convert_lst)
    df_dif = df_differences_exp(df, methods_basissets)
    df_dif = weighted_avg_df(df, convert_lst)
    print(df_dif)
    df_dif.to_csv("benchmarks.csv", index=False)
    print(mean_abs_error_weighted(df))
    print(mean_abs_error(df_dif, "Dif. PBE1PBE/6-311G(d,p)"))
def benchamrkPredictPCE(
    path_benchmark="Benchmark/benchmarks.json",
    path_ipce="src/ipce.csv",
    extra_values={
        "AP25": [2.329644, 2.295717, 1.920780, 1.880036],
        "D1": [2.337250, 2.285609, 1.742975, 2.176884],
        "D3": [2.301722, 2.209749, 1.549403, 2.207872],
        "XY1": [2.398999, 2.314932, 1.839675, 2.247870],
        "NL6": [2.250481, 2.239272, 1.383166, 2.050367],
        "ZL003": [2.488369, 2.437129, 2.031108, 2.390798],
        "JW1": [2.320036, 2.302322, 1.910812, 2.103091],
    },
):
    """Estimate PCE bounds for benchmark dyes from computed excitation energies.

    Joins the computed weighted-average energies with measured IPCE data,
    derives Jsc/Voc estimates under high/low fill-factor assumptions, writes
    pce_predict.csv and prints the resulting frame.

    BUGFIX: DataFrame.append was removed in pandas 2.0; manual rows are now
    added with pd.concat.
    NOTE(review): extra_values is accepted but, as in the original, the
    hard-coded `unlucky` dict (same content) is what is actually used.
    """
    df_molecules = json_pandas_molecule_BM(path_benchmark)
    methods_basissets = ['CAM-B3LYP/6-311G(d,p)', 'bhandhlyp/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)']
    df = df_molecules_BM_to_df_method_basisset(df_molecules, methods_basissets)
    convert_lst = methods_basissets.copy()
    convert_lst.append("Exp")
    df = convert_df_nm_to_eV(df, convert_lst)
    # Dyes whose calculations were entered by hand (values already in eV).
    unlucky = {
        "AP25": [2.329644, 2.295717, 1.920780, 1.880036],
        "D1": [2.337250, 2.285609, 1.742975, 2.176884],
        "D3": [2.301722, 2.209749, 1.549403, 2.207872],
        "XY1": [2.398999, 2.314932, 1.839675, 2.247870],
        "NL6": [2.250481, 2.239272, 1.383166, 2.050367],
        "ZL003": [2.488369, 2.437129, 2.031108, 2.390798],
        "JW1": [2.320036, 2.302322, 1.910812, 2.103091],
    }
    extra_rows = [
        {'Name': key,
         methods_basissets[0]: val[0],
         methods_basissets[1]: val[1],
         methods_basissets[2]: val[2],
         'Exp': val[3]}
        for key, val in unlucky.items()
    ]
    df = pd.concat([df, pd.DataFrame(extra_rows)], ignore_index=True)
    df = weighted_avg_df(df, convert_lst)
    print(df)
    ipce = pd.read_csv(path_ipce)
    ipce['IPCE'] = ipce['IPCE'].astype(float)
    ipce['Abs. Max'] = ipce['Abs. Max'].astype(float)
    ipce = convert_df_nm_to_eV(ipce, ['IPCE', 'Abs. Max'])
    # Align both frames by dye name before the column-wise concat.
    ipce = ipce.sort_values(['Name'], axis=0).reset_index(drop=True)
    del ipce['Name']
    df = df.sort_values(['Name'], axis=0).reset_index(drop=True)
    df2 = pd.concat([df, ipce], axis=1).reindex(ipce.index)
    # Estimate the IPCE onset from the mean offset between absorption max and IPCE.
    avg_ipce_from_abs_max = (df2['Abs. Max'] - df2['IPCE']).mean()
    print('avg:', avg_ipce_from_abs_max)
    df2['Comp. IPCE'] = df2['Weighted Avg.'] - avg_ipce_from_abs_max
    h = 6.626E-34        # Planck constant, J*s
    c = 3E17             # speed of light, nm/s
    J_to_eV = 1.602E-19
    del df2['CAM-B3LYP/6-311G(d,p)']
    del df2['bhandhlyp/6-311G(d,p)']
    del df2['PBE1PBE/6-311G(d,p)']
    del df2['Exp']
    FF_h = 0.75          # optimistic fill factor
    FF_l = 0.60          # pessimistic fill factor
    I_o = 1
    energy_cut_off = 400  # nm
    df2['Comp. Jsc'] = ((h * c / (df2['Comp. IPCE'] * J_to_eV)) - energy_cut_off) / 100 * 7.5
    df2['Comp. Voc_h'] = (df2['Weighted Avg.'] - 0.4)
    df2['Comp. Voc_l'] = (df2['Weighted Avg.'] - 0.6)
    df2['Comp. PCE Voc_l FF_l'] = df2['Comp. Jsc'] * df2['Comp. Voc_l'] * FF_l / I_o
    df2['Comp. PCE Voc_l FF_h'] = df2['Comp. Jsc'] * df2['Comp. Voc_l'] * FF_h / I_o
    df2['Comp. PCE Voc_h FF_l'] = df2['Comp. Jsc'] * df2['Comp. Voc_h'] * FF_l / I_o
    df2['Comp. PCE Voc_h FF_h'] = df2['Comp. Jsc'] * df2['Comp. Voc_h'] * FF_h / I_o
    df2['Exp. PCE Voc_l FF_l'] = (((h * c) / (df2['IPCE'] * J_to_eV) - energy_cut_off) / 100 * 7.5) * (df2['Abs. Max'] - 0.6) * FF_l
    df2['Exp. PCE Voc_l FF_h'] = (((h * c) / (df2['IPCE'] * J_to_eV) - energy_cut_off) / 100 * 7.5) * (df2['Abs. Max'] - 0.6) * FF_h
    df2['Exp. PCE Voc_h FF_l'] = (((h * c) / (df2['IPCE'] * J_to_eV) - energy_cut_off) / 100 * 7.5) * (df2['Abs. Max'] - 0.4) * FF_l
    df2['Exp. PCE Voc_h FF_h'] = (((h * c) / (df2['IPCE'] * J_to_eV) - energy_cut_off) / 100 * 7.5) * (df2['Abs. Max'] - 0.4) * FF_h
    df2.to_csv("pce_predict.csv")
    print(df2)
def df_differences_exp(df, methods):
    """Add 'Dif. <method>' columns (method - Exp); print each mean difference."""
    for method in methods:
        col = 'Dif. %s' % method
        df[col] = df[method] - df['Exp']
        print('Avg. Dif. %s' % method, df[col].mean(axis=0))
    return df
def theoretical_dyes_basis_set_out(
    path_results_json,
    methods_basissets=['CAM-B3LYP/6-311G(d,p)', 'bhandhlyp/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
    units='eV',
    output_csv='',
    output_graph='',
    output_latex='',
    plot_js={
        "weighted_avg": ['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
        "headers_colors": [
            ['CAM-B3LYP/6-311G(d,p)', 'blue'], ['BHandHLYP/6-311G(d,p)', 'red'],
            ['PBE0/6-311G(d,p)', 'orange'], ['Weighted Average', 'green']
        ],
        "weights": [0.71, 0.29],
    }
):
    """Export theoretical-dye results as CSV, plot and/or LaTeX table.

    An empty output_csv / output_graph / output_latex string disables that
    output. units='eV' converts nm values before exporting.

    BUGFIX: the LaTeX table was written to '<output_csv>.tex' instead of
    honoring output_latex.
    """
    df_molecules = json_pandas_molecule(path_results_json)
    df = df_molecules_to_df_method_basisset(df_molecules, methods_basissets)
    if units.lower() == 'ev':
        df = df_conv_energy(df)
    if output_csv != '':
        df2 = df.sort_values(methods_basissets[0], ascending=True)
        df2.to_csv('%s.csv' % output_csv, index=False)
    if output_graph != '':
        print("working on graph")
        plot_methods(df, weighted_avg=plot_js['weighted_avg'], headers_colors=plot_js['headers_colors'],
                     weights=plot_js['weights'], outname=output_graph, transparent=True)
    if output_latex != '':
        df2 = df.sort_values(methods_basissets[0], ascending=True)
        df2.to_latex('%s.tex' % output_latex, index=False)
    print(df)
def benchmarks_dyes_basis_set_out(
    path_results_json,
    methods_basissets=['CAM-B3LYP/6-311G(d,p)', 'bhandhlyp/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
    units='eV',
    output_csv='',
    output_graph='',
    output_latex='',
    plot_js={
        "weighted_avg": ['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
        "headers_colors": [
            ['CAM-B3LYP/6-311G(d,p)', 'blue'], ['BHandHLYP/6-311G(d,p)', 'red'],
            ['PBE0/6-311G(d,p)', 'orange'], ['Weighted Average', 'green']
        ],
        "weights": [0.71, 0.29],
    }
):
    """Export benchmark-dye results as CSV, plot and/or LaTeX table.

    Loads the benchmark JSON, adds hand-entered dyes, handles unit
    conversion, then writes whichever outputs have non-empty names.

    BUGFIXES: DataFrame.append (removed in pandas 2.0) replaced with
    pd.concat; the LaTeX table was written to '<output_csv>.tex' instead of
    honoring output_latex.
    """
    df_molecules = json_pandas_molecule_BM(path_results_json)
    df = df_molecules_BM_to_df_method_basisset(df_molecules, methods_basissets)
    convert_lst = methods_basissets.copy()
    convert_lst.append("Exp")
    print(df)
    df = convert_df_nm_to_eV(df, convert_lst)
    # Dyes whose calculations were entered by hand (values already in eV).
    unlucky = {
        "AP25": [2.329644, 2.295717, 1.920780, 1.880036],
        "D1": [2.337250, 2.285609, 1.742975, 2.176884],
        "D3": [2.301722, 2.209749, 1.549403, 2.207872],
        "XY1": [2.398999, 2.314932, 1.839675, 2.247870],
        "NL6": [2.250481, 2.239272, 1.383166, 2.050367],
        "ZL003": [2.488369, 2.437129, 2.031108, 2.390798],
        "JW1": [2.320036, 2.302322, 1.910812, 2.103091],
    }
    extra_rows = [
        {'Name': key,
         methods_basissets[0]: val[0],
         methods_basissets[1]: val[1],
         methods_basissets[2]: val[2],
         'Exp': val[3]}
        for key, val in unlucky.items()
    ]
    df = pd.concat([df, pd.DataFrame(extra_rows)], ignore_index=True)
    # The nm<->eV transform is its own inverse; 'nm' output converts back.
    if units.lower() == 'nm':
        df = convert_df_nm_to_eV(df, convert_lst)
    elif units.lower() == 'ev':
        pass
    else:
        print("unit not acceptable")
        df = convert_df_nm_to_eV(df, convert_lst)
    if units.lower() == 'ev':
        df = df_conv_energy(df)
    if output_csv != '':
        df2 = df.sort_values(methods_basissets[0], ascending=False)
        df2.to_csv('%s.csv' % output_csv, index=False)
    if output_graph != '':
        print("working on graph")
        plot_methods(df, weighted_avg=plot_js['weighted_avg'], headers_colors=plot_js['headers_colors'],
                     weights=plot_js['weights'], outname=output_graph, exp=True, sort_by='Exp', transparent=True)
    if output_latex != '':
        df2 = df.sort_values(methods_basissets[0], ascending=True)
        df2.to_latex('%s.tex' % output_latex, index=False)
    print(df)
def clean_solvent(solvent):
    """Strip '-' and ',' from a solvent name (matches column-name mangling)."""
    return ''.join(ch for ch in solvent if ch not in '-,')
# def mean_abs_error_weighted(df, methods=['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'], weights=[0.6594543456, 0.3405456544]):
# df['Weighted Avg.'] = df[methods[0]]*weights[0] + df[methods[1]]*weights[1]
# return (df['Weighted Avg.'] - df['Exp']).abs().mean()
# def mean_abs_error(df, method='Dif. CAM-B3LYP/6-311G(d,p)'):
# return df[method].abs().mean()
# def weighted_avg_df(df, methods=['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'], weights=[0.6594543456, 0.3405456544]):
# df['Weighted Avg.'] = df[methods[0]]*weights[0] + df[methods[1]]*weights[1]
# return df
def solvent_mean_abs_error(
    df,
    methods_basissets_avg=['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
    solvents=['dichloromethane', 'n,n-dimethylformamide', 'tetrahydrofuran'],
):
    """Add zero-bias weighted-average columns to *df* and report their MAE.

    For the vacuum columns and for each solvent's columns, solve for weights
    (c1, c2) with c1 + c2 == 1 such that the mean signed error of
    c1*method0 + c2*method1 against 'Exp' is zero, store the blended column
    in *df* (as 'vacuum_MAE' / '<solvent>_MAE'), and print the weights, the
    (near-zero) mean signed error and the mean absolute error.

    Args:
        df (pandas.DataFrame): must contain both method columns, 'Exp', and,
            for each solvent, '<method>_<solvent>' columns.
        methods_basissets_avg (list): the two method/basis-set column names.
        solvents (list): solvent names; '-' and ',' are stripped to match the
            column naming used elsewhere in this module.
    """

    def _fit_weighted_column(col_a, col_b, out_col, header):
        # Solve for the zero-mean-error blend of col_a and col_b, store it
        # as out_col, and print its statistics.
        avg1 = (df[col_a] - df['Exp']).mean()
        avg2 = (df[col_b] - df['Exp']).mean()
        # Assumes avg1 and avg2 have opposite signs: solve c*avg1 + avg2 = 0
        # for c, then normalize the ratio (c, 1) into fractions summing to 1.
        c = -avg2 / avg1
        c1 = c / (c + 1)
        c2 = 1 / (c + 1)
        df[out_col] = df[col_a] * c1 + df[col_b] * c2
        weighted_avg = (df[out_col] - df['Exp']).mean()  # ~0 by construction
        mae = (df[out_col] - df['Exp']).abs().mean()
        print("%sContributions: %s %.2f %s %.2f" % (header, methods_basissets_avg[0], c1, methods_basissets_avg[1], c2) )
        print("This should be approximately zero:", weighted_avg)
        print("Mean Absolute Error of Weighted:", mae, '\n')

    # Same normalization as clean_solvent(): drop '-' and ',' from the names.
    solvents = [s.replace("-", '').replace(",", '') for s in solvents]
    # Vacuum (plain method columns); the leading '\n' matches the original output.
    _fit_weighted_column(methods_basissets_avg[0], methods_basissets_avg[1],
                         'vacuum_MAE', "\n")
    for i in solvents:
        print("Solvent: %s" % i)
        _fit_weighted_column("%s_%s" % (methods_basissets_avg[0], i),
                             "%s_%s" % (methods_basissets_avg[1], i),
                             '%s_MAE' % i, "")
def benchmarks_solvation(
    path_results_json,
    methods_basissets=['CAM-B3LYP/6-311G(d,p)', 'bhandhlyp/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
    solvents=['dichloromethane', 'n,n-dimethylformamide', 'tetrahydrofuran'],
    units='eV',
    output_csv='',
    output_graph='',
    output_latex='',
    plot_js = {
        "weighted_avg" :['CAM-B3LYP/6-311G(d,p)','PBE1PBE/6-311G(d,p)'],
        "headers_colors":[
            ['CAM-B3LYP/6-311G(d,p)', 'blue'], ['BHandHLYP/6-311G(d,p)', 'red'], ['PBE0/6-311G(d,p)', 'orange'], ['Weighted Average', 'green']
        ],
        "weights":[0.6594543456, 0.3405456544],
    },
    weight_solvents=True,
):
    """Build the solvation benchmark table and optionally export csv/graph/latex.

    Args:
        path_results_json (str): path to the benchmark results json file.
        methods_basissets (list): method/basis-set column names to tabulate.
        solvents (list): solvent names; a "<method>_<solvent>" column is
            produced for each method/solvent pair.
        units (str): 'eV' or 'nm'; controls the wavelength->energy conversion.
        output_csv (str): basename for a csv export ('' disables it).
        output_graph (str): basename for a png export ('' disables it).
        output_latex (str): basename for a latex export ('' disables it).
        plot_js (dict): plotting options (not used by plot_solvents; kept for
            interface compatibility with the other benchmark exporters).
        weight_solvents (bool): when True, print the weighted-average MAE
            report for vacuum and each solvent.
    """
    df_molecules = json_pandas_molecule_BM(path_results_json)
    # Expand the column list with one "<method>_<solvent>" entry per pair.
    method_solvent = methods_basissets.copy()
    for i in methods_basissets:
        for j in solvents:
            name = "%s_%s" % (i, clean_solvent(j))
            method_solvent.append(name)
    df = df_molecules_BM_to_df_method_basisset(df_molecules, method_solvent)
    convert_lst = methods_basissets.copy()
    convert_lst.append("Exp")
    # NOTE(review): this converts unconditionally, and the units check below
    # may convert again -- confirm the double conversion is intended.
    df = convert_df_nm_to_eV(df, convert_lst)
    # Hand-entered values for molecules missing from the json results:
    # [method0, method1, method2, Exp] per molecule name.
    unlucky = {
        "AP25": [2.329644,2.295717,1.920780,1.880036],
        "D1": [2.337250,2.285609,1.742975,2.176884],
        "D3": [2.301722,2.209749,1.549403,2.207872],
        "XY1": [2.398999,2.314932,1.839675,2.247870],
        "NL6": [2.250481,2.239272,1.383166,2.050367],
        "ZL003": [2.488369,2.437129,2.031108,2.390798],
        "JW1": [2.320036,2.302322,1.910812,2.103091],
    }
    for key, val in unlucky.items():
        row = {
            'Name': key,
            methods_basissets[0]: val[0],
            methods_basissets[1]: val[1],
            methods_basissets[2]: val[2],
            'Exp': val[3],
        }
        # DataFrame.append was removed in pandas 2.0; concat is the
        # supported equivalent.
        df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
    if units.lower() == 'nm':
        df = convert_df_nm_to_eV(df, convert_lst)
    elif units.lower() == 'ev':
        pass
    else:
        print("unit not acceptable")
        df = convert_df_nm_to_eV(df, convert_lst)
    if units.lower() == 'ev':
        df = df_conv_energy(df)
    if weight_solvents:
        # NOTE(review): called with its own defaults, so the methods_basissets
        # and solvents arguments above are not forwarded -- confirm intended.
        solvent_mean_abs_error(df)
    if output_csv != '':
        df2 = df.sort_values(methods_basissets[0], ascending=False)
        df2.to_csv('%s.csv' % output_csv, index=False)
    if output_graph != '':
        print("working on graph") if False else None  # graph handled below
        plot_solvents(df, outname='%s.png' % (output_graph))
    if output_latex != '':
        df2 = df.sort_values(methods_basissets[0], ascending=False)
        # Bug fix: the .tex file was previously named after output_csv.
        df2.to_latex('%s.tex' % output_latex, index=False)
df2 = pd.concat([df, ipce], axis=1).reindex(ipce.index)
#print(ipce['Name'])
#print(df['Name'])
avg_ipce_from_abs_max = (df2['Abs. Max'] - df2['IPCE']).mean()
print('avg:', avg_ipce_from_abs_max)
df2['Comp. IPCE'] = df2['Weighted Avg.'] - avg_ipce_from_abs_max
h = 6.626E-34
c = 3E17
J_to_eV = 1.602E-19
del df2['CAM-B3LYP/6-311G(d,p)']
del df2['bhandhlyp/6-311G(d,p)']
del df2['PBE1PBE/6-311G(d,p)']
del df2['Exp']
FF_h = 0.75
FF_l = 0.60
I_o = 1
energy_cut_off = 400
#energy_cut_off = h*c/(energy_cut_off*J_to_eV)
df2['Comp. Jsc'] = ((h*c/(df2['Comp. IPCE'] * J_to_eV)) - energy_cut_off)/100*7.5
df2['Comp. Voc_h'] = (df2['Weighted Avg.'] - 0.4)
df2['Comp. Voc_l'] = (df2['Weighted Avg.'] - 0.6)
df2['Comp. PCE Voc_l FF_l'] = df2['Comp. Jsc'] * df2['Comp. Voc_l'] * FF_l / I_o
df2['Comp. PCE Voc_l FF_h'] = df2['Comp. Jsc'] * df2['Comp. Voc_l'] * FF_h / I_o
df2['Comp. PCE Voc_h FF_l'] = df2['Comp. Jsc'] * df2['Comp. Voc_h'] * FF_l / I_o
df2['Comp. PCE Voc_h FF_h'] = df2['Comp. Jsc'] * df2['Comp. Voc_h'] * FF_h / I_o
df2['Exp. PCE Voc_l FF_l'] = (((h*c)/(df2['IPCE'] * J_to_eV) - energy_cut_off) / 100*7.5) * (df2['Abs. Max'] - 0.6) * FF_l
df2['Exp. PCE Voc_l FF_h'] = (((h*c)/(df2['IPCE'] * J_to_eV) - energy_cut_off) / 100*7.5) * (df2['Abs. Max'] - 0.6) * FF_h
df2['Exp. PCE Voc_h FF_l'] = (((h*c)/(df2['IPCE'] * J_to_eV) - energy_cut_off) / 100*7.5) * (df2['Abs. Max'] - 0.4) * FF_l
df2['Exp. PCE Voc_h FF_h'] = (((h*c)/(df2['IPCE'] * J_to_eV) - energy_cut_off) / 100*7.5) * (df2['Abs. Max'] - 0.4) * FF_h
"""
df2['Comp. Jsc'] = h*c/((df2['Comp. IPCE'] + energy_cut_off)*J_to_eV) /100*7.5
df2['Comp. Voc_h'] = (df2['Weighted Avg.'] - 0.4)
df2['Comp. Voc_l'] = (df2['Weighted Avg.'] - 0.6)
df2['Comp. PCE Voc_l FF_l'] = df2['Comp. Jsc'] * df2['Comp. Voc_l'] * FF_l / I_o
df2['Comp. PCE Voc_l FF_h'] = df2['Comp. Jsc'] * df2['Comp. Voc_l'] * FF_h / I_o
df2['Comp. PCE Voc_h FF_l'] = df2['Comp. Jsc'] * df2['Comp. Voc_h'] * FF_l / I_o
df2['Comp. PCE Voc_h FF_h'] = df2['Comp. Jsc'] * df2['Comp. Voc_h'] * FF_h / I_o
df2['Exp. PCE Voc_l FF_l'] = ((df2['IPCE'] + energy_cut_off) / 100*7.5) * (df2['Abs. Max'] - 0.6) * FF_l
df2['Exp. PCE Voc_l FF_h'] = ((df2['IPCE'] + energy_cut_off) / 100*7.5) * (df2['Abs. Max'] - 0.6) * FF_h
df2['Exp. PCE Voc_h FF_l'] = ((df2['IPCE'] + energy_cut_off) / 100*7.5) * (df2['Abs. Max'] - 0.4) * FF_l
df2['Exp. PCE Voc_h FF_h'] = ((df2['IPCE'] + energy_cut_off) / 100*7.5) * (df2['Abs. Max'] - 0.4) * FF_h
"""
df2.to_csv("pce_predict.csv")
print(df2)
def df_differences_exp(df, methods):
    """Add a 'Dif. <method>' column (method minus 'Exp') for each method.

    Prints each method name and the mean of its difference column, then
    returns the (mutated) DataFrame.
    """
    for method in methods:
        print(method)
        diff_col = 'Dif. %s' % method
        df[diff_col] = df[method] - df['Exp']
        print('Avg. Dif. %s' % method, df[diff_col].mean(axis=0))
    return df
### mean absolute error
def main():
    """Normalize the working directory, then run the theoretical-dye export.

    Expects to be launched from the project root or from its src/ or
    results/ subdirectory; otherwise it warns and proceeds anyway.
    """
    # Hop up to the project root when launched from src/ or results/.
    location = os.getcwd().split('/')[-1]
    if location == 'src' or location == 'results':
        os.chdir("..")
    else:
        print("need to be in src, results or Dyes directory")
    # Theoretical data: export table, latex and graph for these two methods,
    # blended with the weights below.
    methods_basissets = ['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)']
    plot_js = {
        "weighted_avg": ['CAM-B3LYP/6-311G(d,p)', 'PBE1PBE/6-311G(d,p)'],
        "headers_colors": [
            ['CAM-B3LYP/6-311G(d,p)', 'blue'],
            ['BHandHLYP/6-311G(d,p)', 'red'],
            ['PBE0/6-311G(d,p)', 'orange'],
            ['Weighted Average', 'green'],
        ],
        "weights": [0.71, 0.29],
    }
    theoretical_dyes_basis_set_out(
        'results.json',
        output_csv='theoretical',
        output_latex='theoretical',
        output_graph='theoretical',
        plot_js=plot_js,
        methods_basissets=methods_basissets,
    )
if __name__ == "__main__":
main() | {
"alphanum_fraction": 0.5591706821,
"author": null,
"avg_line_length": 35.9634340223,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "57ba114517c7b4c5b8c764b78852129b570ee384",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a74bc6b9bb1101a9da80e664efa3361ce5a75691",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Awallace3/Dyes",
"max_forks_repo_path": "src/gather_results.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a74bc6b9bb1101a9da80e664efa3361ce5a75691",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Awallace3/Dyes",
"max_issues_repo_path": "src/gather_results.py",
"max_line_length": 199,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "a74bc6b9bb1101a9da80e664efa3361ce5a75691",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Awallace3/Dyes",
"max_stars_repo_path": "src/gather_results.py",
"max_stars_repo_stars_event_max_datetime": "2021-06-02T21:00:07.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-02T21:00:07.000Z",
"num_tokens": 14452,
"path": null,
"reason": "import numpy,from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 45242
} |
[STATEMENT]
lemma source_all_outarcs_T:
"\<lbrakk>undirected_tree G; tail G e = root; e \<in> arcs G\<rbrakk> \<Longrightarrow> e \<in> arcs T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>undirected_tree G; tail G e = root; e \<in> arcs G\<rbrakk> \<Longrightarrow> e \<in> arcs T
[PROOF STEP]
using source_no_inarc_T undir_arcs_compl_un_eq_arcs
[PROOF STATE]
proof (prove)
using this:
head G ?e = root \<Longrightarrow> ?e \<notin> arcs T
undirected_tree G \<Longrightarrow> {e2 \<in> arcs G. \<exists>e1\<in>arcs T. head G e2 = tail G e1 \<and> head G e1 = tail G e2} \<union> arcs T = arcs G
goal (1 subgoal):
1. \<lbrakk>undirected_tree G; tail G e = root; e \<in> arcs G\<rbrakk> \<Longrightarrow> e \<in> arcs T
[PROOF STEP]
by blast | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Query_Optimization_Directed_Tree_Additions",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 319,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
\documentclass[11pt, compress]{beamer}
\usepackage{preamb}
\usepackage{tikz,tkz-tab}
\usepackage{tkz-euclide}
\usepackage{movie15}
\usepackage{hyperref}
\setbeamertemplate{navigation symbols}{}
\usetheme{Warsaw}
\setbeamertemplate{theorem begin}{{
\inserttheoremheadfont
\inserttheoremname
\inserttheorempunctuation
}}
\setbeamertemplate{theorem end}{}
\newtheorem{proposition}[theorem]{Proposition}
\theoremstyle{definition}
\newtheorem{mydef}[theorem]{Définition}
\makeatletter
%\captionsetup[figure]{labelformat=empty}
\definecolor{beamer@blendedpurp}{RGB}{241, 148, 138}
% 0.8,0.2,0.3 rouge carmin presque rose assez élégant avec rgb
%235 77 77 corail
% .75 ,.2,.2 rouge clair
\setbeamercolor{structure}{fg=beamer@blendedpurp}
\setbeamercolor*{palette quaternary}{fg=black,bg=white!80!gray } %bg=couleur à gauche header back
\makeatother
%\setbeamercolor{section in head/foot}{} no touch en fait casse tout
%\setbeamercolor{subsection in head/foot}{fg=black,bg=gray!30} idem
\makeatletter
\defbeamertemplate*{footline}{shadow theme}
{%
\leavevmode%
\hbox{\begin{beamercolorbox}[wd=.5\paperwidth,ht=2.5ex,dp=1.125ex,leftskip=.3cm,rightskip=.3cm plus1fil]{title in head/foot}%
\usebeamerfont{title in head/foot}\insertshorttitle%
\end{beamercolorbox}}%
\begin{beamercolorbox}[wd=.5\paperwidth,ht=2.5ex,dp=1.125ex,leftskip=.3cm plus1fil,rightskip=.3cm]{author in head/foot}%
\usebeamerfont{author in head/foot}\hfill\insertframenumber\,/\,\inserttotalframenumber
\end{beamercolorbox}%
\vskip0pt%
}
\setbeamertemplate{section in toc}{\inserttocsectionnumber.~\inserttocsection}
%\setbeamertemplate{section in toc}{\textcolor{structure.fg}{$\blacktriangleright$}\hspace{1.2 em}~\inserttocsection \\}
%\setbeamertemplate{section in toc}{\inserttocsectionnumber.~\inserttocsection}
\setbeamercolor*{section in toc}{fg=black}
\setbeamercolor*{enumerate item}{fg=black}
\setbeamercolor*{enumerate subitem}{fg=black}
\newcommand*{\rom}[1]{\expandafter\@slowromancap\romannumeral #1@}
\makeatother
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% end styling beamer %%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{tcolorbox}
\newtcolorbox{mybox}{colback=red!5!white,colframe=red!75!black}
\title{Montpellier Network's Package}
\subtitle{Project HMMA$238$}
\author{\vspace*{-1.5cm}Fanchon Herman, Ryma Lakehal et Sahbane Abdesstar}
\date{\vspace*{-2cm}25 Juin 2020}
\institute[Montpellier University]{Montpellier University}
\titlegraphic{%
\makebox[0.9\paperwidth]{%
\includegraphics[scale=.07]{logo_fds.png}%
\hfill%
\includegraphics[scale=.2]{um.png}
\hspace{.25cm}
}%
}
\begin{document}
{
\def\mytitleframe{\bgroup
\makeatletter
\setbeamertemplate{footline}
{%
\leavevmode%
\hbox{\begin{beamercolorbox}[wd=.5\paperwidth,ht=2.5ex,dp=1.125ex,leftskip=.3cm,rightskip=.3cm plus1fil]{title in head/foot}%
\usebeamerfont{title in head/foot}\insertshorttitle%
\end{beamercolorbox}}%
\begin{beamercolorbox}[wd=.5\paperwidth,ht=2.5ex,dp=1.125ex,leftskip=.3cm plus1fil,rightskip=.3cm]{author in head/foot}%
\usebeamerfont{author in head/foot}%\hfill\insertframenumber\,/\,\inserttotalframenumber
\end{beamercolorbox}%
\vskip0pt%
}
\maketitle
\egroup
\addtocounter{framenumber}{-1}
}
\makeatother
\mytitleframe
}
\section*{Contents}
\begin{frame}
\frametitle{Table of contents}
\tableofcontents
\end{frame}
\section[Intro]{Introduction}
\begin{frame}{Introduction}
\begin{block}{}
Package python : \href{https://github.com/fanchonherman/project_network}{\beamergotobutton{Github network}}
\end{block}
\begin{block}{Project subject}
\begin{enumerate}[label=$\bullet$]
\item videos and widget,
\item transport: car, bike and walk,
\item from La Maison du Lez to Place Eugène Bataillon,
\item shortest path.
\end{enumerate}
\end{block}
\end{frame}
\begin{frame}{Important functions}
\begin{columns}
\column{0.50\linewidth}
\begin{enumerate}[label=$\bullet$]
\item<1-> type\_transport,
\item<2-> distance\_type\_transport,
\item<3-> times,
\item<4-> animation\_type\_transport.
\end{enumerate}
\column{0.50\linewidth}
\centering
\includegraphics[scale=.3]{logo_bike.jpg}
\end{columns}
\end{frame}
\section[Visu]{Visualization of the shortest path}
\subsection{Walk}
\begin{frame}{Visualization of the shortest path}
\begin{block}{}
net.type\_transport('walk')
\end{block}
\begin{figure}[H]
\centering
\includegraphics[scale=.45]{walk.png}
\caption{Visualization of the shortest path on foot.}
\label{fig:walk}
\end{figure}
\end{frame}
\subsection{Car}
\begin{frame}{}
\begin{block}{}
net.type\_transport('drive')
\end{block}
\begin{figure}[H]
\centering
\includegraphics[scale=.45]{drive.png}
\caption{Visualization of the shortest path by car.}
\label{fig:car}
\end{figure}
\end{frame}
\subsection{Bike}
\begin{frame}{}
\begin{block}{}
net.type\_transport('bike')
\end{block}
\begin{figure}[H]
\centering
\includegraphics[scale=.4]{bike.png}
\caption{Visualization of the shortest path by bike.}
\label{fig:bike}
\end{figure}
To see the animations and the widget, click on this link \href{https://github.com/fanchonherman/project_network/tree/master/report}{\beamergotobutton{report}} then launch the notebook.
\end{frame}
\section[Times]{Study time of functions}
\begin{frame}{Study time of functions}
\begin{figure}[H]
\centering
\includegraphics[scale=.5]{histogram.png}
\caption{Histogram of the time of functions according to the type of transport.}
\label{fig:histogram}
\end{figure}
\end{frame}
\begin{frame}{Study time of animation function with "TimestampedGeoJson"}
\begin{figure}[H]
\centering
\includegraphics[scale=.5]{GeoJson_histogram.pdf}
\caption{Histogram of the time of functions according to the type of transport.}
\label{fig:histogram}
\end{figure}
\end{frame}
\section[Conclu]{Conclusion}
\begin{frame}{Conclusion}
\begin{block}{What we have learnt}
\begin{enumerate}[label=$\bullet$]
\item osmnx,
\item networkx,
\item improvements.
\end{enumerate}
\end{block}
\begin{block}{Upgrades}
\begin{enumerate}[label=$\bullet$]
\item computation time,
\item display informations on the map. %ex la vitesse de la personne
\end{enumerate}
\end{block}
\end{frame}
\begin{frame}{Animation by storing images}
\center \Large \Huge Animation by storing images
\section{Animation by storing images}
\end{frame}
\begin{frame}{Steps}
$\bullet$ Creating a list of more than 200 points situated on the path between the origin and the destination points. \\
$\bullet$ For each point, creating an image of the graph showing the icon on that point. \\
$\bullet$ Storing the images one by one in a folder named "temp", created on the fly during the process. \\
$\bullet$ Creating the animation by repeatedly showing the images one by one.
\end{frame}
\begin{frame}{Generating points}
\center \Large \Huge Generating points
\section{Generating points}
\end{frame}
\begin{frame}{Generating points}
$\bullet$ route variable is not enough.\\
$\bullet$ points\_generate().\\
$\bullet$ Removing some points that are too close to each others. \\
$\bullet$ Adding more points.
\end{frame}
\begin{frame}{Generating images}
\center \Large \Huge Generating images
\section{Generating images}
\end{frame}
\begin{frame}{Translation}
$\bullet$ AnnotationBbox().\\
$\bullet$ for each point, create an image showing the icon in the point.\\
$\bullet$ translation movement. \\
\end{frame}
\begin{frame}{Rotation}
$\bullet$ PILLOW package.\\
$\bullet$ rotate() method on the Image object.\\
$\bullet$ $bearing(p(i-1), p(i)) + 180 - bearing(p(i+1), p(i)).$ \\
\end{frame}
\begin{frame}{Pedestrian walking effect}
$\bullet$ PILLOW package.\\
$\bullet$ mirror() method of the ImageOps module.\\
$\bullet$ alternate between the icon and it's mirror image. \\
\end{frame}
\begin{frame}{Animation}
\center \Large \Huge Animation
\section{Animation}
\end{frame}
\begin{frame}{Animation}
$\bullet$ Transforming png images to np.array using the imageio package.\\
$\bullet$ storing the np.arrays one by one in a list.\\
$\bullet$ using animation.FuncAnimation() to animate them. \\
\end{frame}
\end{document} | {
"alphanum_fraction": 0.7111268771,
"author": null,
"avg_line_length": 29.4668989547,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "55291657d4b72305710e382dbdfe0ff6ca964fda",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-01-26T00:50:14.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-04-10T15:37:14.000Z",
"max_forks_repo_head_hexsha": "749b87b056e14fc0541a48281e2396c80aff0a21",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "fanchonherman/project_network",
"max_forks_repo_path": "beamer/beamer_network.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "749b87b056e14fc0541a48281e2396c80aff0a21",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "fanchonherman/project_network",
"max_issues_repo_path": "beamer/beamer_network.tex",
"max_line_length": 184,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "749b87b056e14fc0541a48281e2396c80aff0a21",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fanchonherman/project_network",
"max_stars_repo_path": "beamer/beamer_network.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2563,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 8457
} |
# -*- coding: utf-8 -*-
"""This module is mean to be used to get the main training data for train the model to be used on ml_rivets.mll node
This code is to be used on maya with numpy library
MIT License
Copyright (c) 2020 Mauro Lopez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import json
import time
import logging
import meshData
import transformData
import constants
import numpy as np
from maya import cmds
_logger = logging.getLogger(__name__)
def closestTriangleToTransform(transform, meshName):
    """Find the mesh triangle closest to a transform's world-space position.

    Args:
        transform (str): name of the transform whose position is queried
        meshName (str): name of the mesh to search for the triangle

    Returns:
        list: the 3 mesh vertex ids of the closest triangle
    """
    worldPos = np.array(cmds.xform(transform, q=1, ws=1, t=1), dtype=np.double)
    triVertices, meshPoints = meshData.getMeshData(meshName)
    facesPerVertex = meshData.getMeshVertexFaces(triVertices)
    return meshData.getClosestTriangle(worldPos, meshPoints, facesPerVertex, triVertices)
def filterUnnecesaryTransforms(meshName, transforms, vertices, tol=0.1):
    """Remove all controls that won't affect the driven position.

    Args:
        meshName (str): name of the mesh to get the deformation
        transforms (list): transform names to evaluate
        vertices (list): list of vertices to check the deformation on
        tol (float, optional): how much vertex deformation is accepted. Defaults to 0.1.

    Returns:
        set: the drivers that actually affect the vertex positions
    """
    points = meshData.getMeshPoints(meshName)
    contrlDict = transformData.getAttrDict(transforms)
    results = set()
    # .items() (not Py2-only .iteritems()) so this also runs on Python 3 Maya.
    for control, attrDict in contrlDict.items():
        if testControl(control, attrDict, meshName, vertices, points, tol):
            results.add(control)
    return results
def testControl(control, attrDict, meshName, vertices, points, tol=0.1):
"""test if a control affect any vertices positions
Args:
control (sre): name of the control to evaluate
meshName (str): name of the mesh to get the deformation
attrDict (str): control attributes to edit to check the vertice deformation
points (np.array): cached mesh positions
tol (float, optional): how much vertex deformation is accepted. Defaults to 0.1.
Returns:
bool: True if the control modify any of the vertices
"""
for attr, limits in attrDict.iteritems():
for lim in limits:
transformData.setAtributes(control, attr, value=lim)
deltaPoints = meshData.getVerticesDeltas(meshName, vertices, points)
if np.linalg.norm(deltaPoints) > tol:
transformData.setAtributes(control, attr, value=0)
return True
transformData.setAtributes(control, attr, value=0)
return False
def saveJsonFile(filePath, myData):
    """Serialize data to a JSON file.

    Args:
        filePath (str): filepath where to save the data
        myData: JSON-serializable data to write
    """
    # 'w' (text mode), not 'wb': json.dump writes str objects, which a
    # binary-mode file rejects under Python 3.
    with open(filePath, 'w') as myfile:
        json.dump(myData, myfile, indent=4)
def readJsonFile(filePath):
    """Read data from a json file.

    Args:
        filePath (str): location of the json file

    Returns:
        The data decoded from the json file.
    """
    with open(filePath, 'r') as sourceFile:
        return json.load(sourceFile)
def resetControls(controlsDict):
    """Set every attribute from the control dict back to 0.

    Args:
        controlsDict (dict): {controlName: {attrName: limits}}
    """
    # .items() (not Py2-only .iteritems()) so this also runs on Python 3 Maya.
    for control, attrDict in controlsDict.items():
        for attr in attrDict.keys():
            transformData.setAtributes(control, attr, value=0)
def getData(mesh, driverList, drivenList, folderData, filePrefix='', samples=1000):
    """Set random values on the driver attributes to generate the data needed
    to train the model that predicts rivet positions.

    Args:
        mesh (str): name of the mesh to get the deformation
        driverList (list): transforms whose attributes are randomized
        drivenList (list): transforms that will receive the rivets
        folderData (str): folder path where the training data is saved
        filePrefix (str, optional): prefix for the saved file names
        samples (int, optional): how many random samples to create
            (more samples: slower but more accurate results). Defaults to 1000.
    """
    start = time.time()
    vertices = list()
    if not os.path.exists(folderData):
        os.makedirs(folderData)
    # Collect the triangle vertices each driven transform should follow.
    for driven in drivenList:
        _logger.info('Getting closest vertices for {}'.format(driven))
        vertices.extend(closestTriangleToTransform(driven, mesh))
    _logger.info('> filtering driver list')
    controlsDict = transformData.getAttrDict(driverList)
    resetControls(controlsDict)
    # Keep only drivers whose attributes actually move the sampled vertices.
    filterdeDrivers = list(filterUnnecesaryTransforms(mesh, driverList, vertices, tol=0.1))
    _logger.info("Driver filtered from {} to {}".format(len(driverList), len(filterdeDrivers)))
    points = meshData.getMeshPoints(mesh)
    # Output locations: model inputs (local matrices), outputs (vertex
    # displacements) and the driver/driven name mapping.
    csvIn = os.path.join(folderData, '{}{}.{}'.format(filePrefix,
                                                      constants.INFILESUFIX,
                                                      constants.NUMPYIOFORMAT))
    csvOut = os.path.join(folderData, '{}{}.{}'.format(filePrefix,
                                                       constants.OUTFILESUFIX,
                                                       constants.NUMPYIOFORMAT))
    transformsPath = os.path.join(folderData, '{}{}.{}'.format(filePrefix,
                                                               constants.TRFFILESUFIX,
                                                               constants.DATAIOFORMAT))
    # Pose the rig randomly *samples* times and record matrix/displacement pairs.
    localMatrices = list()
    dsplcs = list()
    for i in range(samples):
        _logger.info('> Building sample ' + str(i))
        localMtx = np.array([], dtype=np.double)
        for control in filterdeDrivers:
            # .items() (not Py2-only .iteritems()) for Python 3 Maya.
            for attr, limits in controlsDict[control].items():
                transformData.setRandomAttributes(control, attr, limits)
            localMtx = np.append(localMtx, transformData.getControlLocalMatrix(control))
        dsplc = np.array(meshData.getVerticesDeltas(mesh, vertices, points), dtype=np.double).flatten()
        localMatrices.append(localMtx)
        dsplcs.append(dsplc)
    localMatrices = np.stack(localMatrices)
    dsplcs = np.stack(dsplcs)
    np.savetxt(csvIn, localMatrices)
    _logger.info("{} saved".format(csvIn))
    np.savetxt(csvOut, dsplcs)
    _logger.info("{} saved".format(csvOut))
    saveJsonFile(transformsPath, {'drivers': filterdeDrivers, 'drivens': drivenList})
    _logger.info("{} saved".format(transformsPath))
    resetControls(controlsDict)
    end = time.time()
    _logger.info('Process ended in {} sec'.format(end - start))
return csvIn, csvOut, transformsPath | {
"alphanum_fraction": 0.6728855721,
"author": null,
"avg_line_length": 40.4020100503,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3a293ce6548d8d43978c10e01b0520c5c348a410",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 13,
"max_forks_repo_forks_event_max_datetime": "2022-03-19T06:31:42.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-05-24T07:35:51.000Z",
"max_forks_repo_head_hexsha": "68ce7abe77fdeb3fc1224b98dd1fc58532eeba3d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "1fth3n3ls3/maya-ml-rivet",
"max_forks_repo_path": "pyutils/getRivetsSceneData.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "68ce7abe77fdeb3fc1224b98dd1fc58532eeba3d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "1fth3n3ls3/maya-ml-rivet",
"max_issues_repo_path": "pyutils/getRivetsSceneData.py",
"max_line_length": 116,
"max_stars_count": 42,
"max_stars_repo_head_hexsha": "8148c31edde8863ec388194fd2f58157990fd284",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fruityvince/maya-ml-rivet",
"max_stars_repo_path": "pyutils/getRivetsSceneData.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-13T00:59:59.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-20T07:10:06.000Z",
"num_tokens": 1778,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8040
} |
"""
Cython wrapper to provide python interfaces to
PROJ.4 (http://trac.osgeo.org/proj/) functions.
Performs cartographic transformations and geodetic computations.
The Proj class can convert from geographic (longitude,latitude)
to native map projection (x,y) coordinates and vice versa, or
from one map projection coordinate system directly to another.
The module variable pj_list is a dictionary containing all the
available projections and their descriptions.
The Geod class can perform forward and inverse geodetic, or
Great Circle, computations. The forward computation involves
determining latitude, longitude and back azimuth of a terminus
point given the latitude and longitude of an initial point, plus
azimuth and distance. The inverse computation involves
determining the forward and back azimuths and distance given the
latitudes and longitudes of an initial and terminus point.
Input coordinates can be given as python arrays, lists/tuples,
scalars or numpy/Numeric/numarray arrays. Optimized for objects
that support the Python buffer protocol (regular python and
numpy array objects).
Download: http://python.org/pypi/pyproj
Requirements: python 2.4 or higher.
Example scripts are in 'test' subdirectory of source distribution.
The 'test()' function will run the examples in the docstrings.
Contact: Jeffrey Whitaker <jeffrey.s.whitaker@noaa.gov
copyright (c) 2006 by Jeffrey Whitaker.
Permission to use, copy, modify, and distribute this software
and its documentation for any purpose and without fee is hereby
granted, provided that the above copyright notice appear in all
copies and that both the copyright notice and this permission
notice appear in supporting documentation. THE AUTHOR DISCLAIMS
ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT
SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """
import sys
from pyproj import _proj
from pyproj.datadir import pyproj_datadir
# Re-export the C extension's version string and datapath setter so callers
# can use them as pyproj.__version__ / pyproj.set_datapath.
__version__ = _proj.__version__
set_datapath = _proj.set_datapath
from array import array
import os, math
#import numpy as np
# Python 2/3 compatibility
# string_types is used for isinstance() checks on proj4 init strings; the
# unexecuted py2 branch references basestring, which only exists on Python 2.
if sys.version_info[0] == 2: # Python 2
    string_types = (basestring,)
else: # Python 3
    string_types = (str,)
# Mapping of PROJ.4 projection id (the value of the +proj= parameter) to a
# human-readable description of that projection.
pj_list={
'aea': "Albers Equal Area",
'aeqd': "Azimuthal Equidistant",
'airy': "Airy",
'aitoff': "Aitoff",
'alsk': "Mod. Stererographics of Alaska",
'apian': "Apian Globular I",
'august': "August Epicycloidal",
'bacon': "Bacon Globular",
'bipc': "Bipolar conic of western hemisphere",
'boggs': "Boggs Eumorphic",
'bonne': "Bonne (Werner lat_1=90)",
'cass': "Cassini",
'cc': "Central Cylindrical",
'cea': "Equal Area Cylindrical",
'chamb': "Chamberlin Trimetric",
'collg': "Collignon",
'crast': "Craster Parabolic (Putnins P4)",
'denoy': "Denoyer Semi-Elliptical",
'eck1': "Eckert I",
'eck2': "Eckert II",
'eck3': "Eckert III",
'eck4': "Eckert IV",
'eck5': "Eckert V",
'eck6': "Eckert VI",
'eqc': "Equidistant Cylindrical (Plate Caree)",
'eqdc': "Equidistant Conic",
'etmerc': "Extended Transverse Mercator" ,
'euler': "Euler",
'fahey': "Fahey",
'fouc': "Foucaut",
'fouc_s': "Foucaut Sinusoidal",
'gall': "Gall (Gall Stereographic)",
'geocent': "Geocentric",
'geos': "Geostationary Satellite View",
'gins8': "Ginsburg VIII (TsNIIGAiK)",
'gn_sinu': "General Sinusoidal Series",
'gnom': "Gnomonic",
'goode': "Goode Homolosine",
'gs48': "Mod. Stererographics of 48 U.S.",
'gs50': "Mod. Stererographics of 50 U.S.",
'hammer': "Hammer & Eckert-Greifendorff",
'hatano': "Hatano Asymmetrical Equal Area",
'healpix': "HEALPix",
'rhealpix': "rHEALPix",
'igh': "Interrupted Goode Homolosine",
'imw_p': "Internation Map of the World Polyconic",
'isea': "Icosahedral Snyder Equal Area",
'kav5': "Kavraisky V",
'kav7': "Kavraisky VII",
'krovak': "Krovak",
'labrd': "Laborde",
'laea': "Lambert Azimuthal Equal Area",
'lagrng': "Lagrange",
'larr': "Larrivee",
'lask': "Laskowski",
'lonlat': "Lat/long (Geodetic)",
'latlon': "Lat/long (Geodetic alias)",
'latlong': "Lat/long (Geodetic alias)",
'longlat': "Lat/long (Geodetic alias)",
'lcc': "Lambert Conformal Conic",
'lcca': "Lambert Conformal Conic Alternative",
'leac': "Lambert Equal Area Conic",
'lee_os': "Lee Oblated Stereographic",
'loxim': "Loximuthal",
'lsat': "Space oblique for LANDSAT",
'mbt_s': "McBryde-Thomas Flat-Polar Sine",
'mbt_fps': "McBryde-Thomas Flat-Pole Sine (No. 2)",
'mbtfpp': "McBride-Thomas Flat-Polar Parabolic",
'mbtfpq': "McBryde-Thomas Flat-Polar Quartic",
'mbtfps': "McBryde-Thomas Flat-Polar Sinusoidal",
'merc': "Mercator",
'mil_os': "Miller Oblated Stereographic",
'mill': "Miller Cylindrical",
'moll': "Mollweide",
'murd1': "Murdoch I",
'murd2': "Murdoch II",
'murd3': "Murdoch III",
'natearth': "Natural Earth",
'nell': "Nell",
'nell_h': "Nell-Hammer",
'nicol': "Nicolosi Globular",
'nsper': "Near-sided perspective",
'nzmg': "New Zealand Map Grid",
'ob_tran': "General Oblique Transformation",
'ocea': "Oblique Cylindrical Equal Area",
'oea': "Oblated Equal Area",
'omerc': "Oblique Mercator",
'ortel': "Ortelius Oval",
'ortho': "Orthographic",
'pconic': "Perspective Conic",
'poly': "Polyconic (American)",
'putp1': "Putnins P1",
'putp2': "Putnins P2",
'putp3': "Putnins P3",
'putp3p': "Putnins P3'",
'putp4p': "Putnins P4'",
'putp5': "Putnins P5",
'putp5p': "Putnins P5'",
'putp6': "Putnins P6",
'putp6p': "Putnins P6'",
'qua_aut': "Quartic Authalic",
'robin': "Robinson",
'rouss': "Roussilhe Stereographic",
'rpoly': "Rectangular Polyconic",
'sinu': "Sinusoidal (Sanson-Flamsteed)",
'somerc': "Swiss. Obl. Mercator",
'stere': "Stereographic",
'sterea': "Oblique Stereographic Alternative",
'gstmerc': "Gauss-Schreiber Transverse Mercator (aka Gauss-Laborde Reunion)",
'tcc': "Transverse Central Cylindrical",
'tcea': "Transverse Cylindrical Equal Area",
'tissot': "Tissot Conic",
'tmerc': "Transverse Mercator",
'tpeqd': "Two Point Equidistant",
'tpers': "Tilted perspective",
'ups': "Universal Polar Stereographic",
'urm5': "Urmaev V",
'urmfps': "Urmaev Flat-Polar Sinusoidal",
'utm': "Universal Transverse Mercator (UTM)",
'vandg': "van der Grinten (I)",
'vandg2': "van der Grinten II",
'vandg3': "van der Grinten III",
'vandg4': "van der Grinten IV",
'vitk1': "Vitkovsky I",
'wag1': "Wagner I (Kavraisky VI)",
'wag2': "Wagner II",
'wag3': "Wagner III",
'wag4': "Wagner IV",
'wag5': "Wagner V",
'wag6': "Wagner VI",
'wag7': "Wagner VII",
'weren': "Werenskiold I",
'wink1': "Winkel I",
'wink2': "Winkel II",
'wintri': "Winkel Tripel"}
# Mapping of ellipsoid name (the +ellps= parameter) to its defining
# parameters: 'a' is the semi-major axis in meters, plus either 'b'
# (semi-minor axis, meters) or 'rf' (reciprocal flattening), and a
# human-readable 'description'.  Consumed by Geod.__new__ below.
pj_ellps={
"MERIT": {'a':6378137.0,'rf':298.257,'description':"MERIT 1983"},
"SGS85": {'a':6378136.0,'rf':298.257,'description':"Soviet Geodetic System 85"},
"GRS80": {'a':6378137.0,'rf':298.257222101,'description':"GRS 1980(IUGG, 1980)"},
"IAU76": {'a':6378140.0,'rf':298.257,'description':"IAU 1976"},
"airy": {'a':6377563.396,'b':6356256.910,'description':"Airy 1830"},
"APL4.9": {'a':6378137.0,'rf':298.25,'description':"Appl. Physics. 1965"},
"NWL9D": {'a':6378145.0,'rf':298.25,'description':" Naval Weapons Lab., 1965"},
"mod_airy": {'a':6377340.189,'b':6356034.446,'description':"Modified Airy"},
"andrae": {'a':6377104.43,'rf':300.0,'description':"Andrae 1876 (Den., Iclnd.)"},
"aust_SA": {'a':6378160.0,'rf':298.25,'description':"Australian Natl & S. Amer. 1969"},
"GRS67": {'a':6378160.0,'rf':298.2471674270,'description':"GRS 67(IUGG 1967)"},
"bessel": {'a':6377397.155,'rf':299.1528128,'description':"Bessel 1841"},
"bess_nam": {'a':6377483.865,'rf':299.1528128,'description':"Bessel 1841 (Namibia)"},
"clrk66": {'a':6378206.4,'b':6356583.8,'description':"Clarke 1866"},
"clrk80": {'a':6378249.145,'rf':293.4663,'description':"Clarke 1880 mod."},
"CPM": {'a':6375738.7,'rf':334.29,'description':"Comm. des Poids et Mesures 1799"},
"delmbr": {'a':6376428.,'rf':311.5,'description':"Delambre 1810 (Belgium)"},
"engelis": {'a':6378136.05,'rf':298.2566,'description':"Engelis 1985"},
"evrst30": {'a':6377276.345,'rf':300.8017,'description':"Everest 1830"},
"evrst48": {'a':6377304.063,'rf':300.8017,'description':"Everest 1948"},
"evrst56": {'a':6377301.243,'rf':300.8017,'description':"Everest 1956"},
"evrst69": {'a':6377295.664,'rf':300.8017,'description':"Everest 1969"},
"evrstSS": {'a':6377298.556,'rf':300.8017,'description':"Everest (Sabah & Sarawak)"},
"fschr60": {'a':6378166.,'rf':298.3,'description':"Fischer (Mercury Datum) 1960"},
"fschr60m": {'a':6378155.,'rf':298.3,'description':"Modified Fischer 1960"},
"fschr68": {'a':6378150.,'rf':298.3,'description':"Fischer 1968"},
"helmert": {'a':6378200.,'rf':298.3,'description':"Helmert 1906"},
"hough": {'a':6378270.0,'rf':297.,'description':"Hough"},
"intl": {'a':6378388.0,'rf':297.,'description':"International 1909 (Hayford)"},
"krass": {'a':6378245.0,'rf':298.3,'description':"Krassovsky, 1942"},
"kaula": {'a':6378163.,'rf':298.24,'description':"Kaula 1961"},
"lerch": {'a':6378139.,'rf':298.257,'description':"Lerch 1979"},
"mprts": {'a':6397300.,'rf':191.,'description':"Maupertius 1738"},
"new_intl": {'a':6378157.5,'b':6356772.2,'description':"New International 1967"},
"plessis": {'a':6376523.,'b':6355863.,'description':"Plessis 1817 (France)"},
"SEasia": {'a':6378155.0,'b':6356773.3205,'description':"Southeast Asia"},
"walbeck": {'a':6376896.0,'b':6355834.8467,'description':"Walbeck"},
"WGS60": {'a':6378165.0,'rf':298.3,'description':"WGS 60"},
"WGS66": {'a':6378145.0,'rf':298.25,'description':"WGS 66"},
"WGS72": {'a':6378135.0,'rf':298.26,'description':"WGS 72"},
"WGS84": {'a':6378137.0,'rf':298.257223563,'description':"WGS 84"},
"sphere": {'a':6370997.0,'b':6370997.0,'description':"Normal Sphere"},
}
# Point the PROJ.4 library at the bundled data directory.  The existence
# check below was deliberately disabled (kept for reference).
#if not os.path.isdir(pyproj_datadir):
# msg="proj data directory not found. Expecting it at: %s"%pyproj_datadir
# raise IOError(msg)
set_datapath(pyproj_datadir)
class Proj(_proj.Proj):
    """
    performs cartographic transformations (converts from
    longitude,latitude to native map projection x,y coordinates and
    vice versa) using proj (http://trac.osgeo.org/proj/).
    A Proj class instance is initialized with proj map projection
    control parameter key/value pairs. The key/value pairs can
    either be passed in a dictionary, or as keyword arguments,
    or as a proj4 string (compatible with the proj command). See
    http://www.remotesensing.org/geotiff/proj_list for examples of
    key/value pairs defining different map projections.
    Calling a Proj class instance with the arguments lon, lat will
    convert lon/lat (in degrees) to x/y native map projection
    coordinates (in meters). If optional keyword 'inverse' is True
    (default is False), the inverse transformation from x/y to
    lon/lat is performed. If optional keyword 'radians' is True
    (default is False) lon/lat are interpreted as radians instead of
    degrees. If optional keyword 'errcheck' is True (default is
    False) an exception is raised if the transformation is invalid.
    If errcheck=False and the transformation is invalid, no
    exception is raised and 1.e30 is returned. If the optional keyword
    'preserve_units' is True, the units in map projection coordinates
    are not forced to be meters.
    Works with numpy and regular python array objects, python
    sequences and scalars.
    """

    def __new__(self, projparams=None, preserve_units=False, **kwargs):
        """
        initialize a Proj class instance.
        Proj4 projection control parameters must either be given in a
        dictionary 'projparams' or as keyword arguments. See the proj
        documentation (http://trac.osgeo.org/proj/) for more information
        about specifying projection parameters.
        Example usage:
        >>> from pyproj import Proj
        >>> p = Proj(proj='utm',zone=10,ellps='WGS84') # use kwargs
        >>> x,y = p(-120.108, 34.36116666)
        >>> 'x=%9.3f y=%11.3f' % (x,y)
        'x=765975.641 y=3805993.134'
        >>> 'lon=%8.3f lat=%5.3f' % p(x,y,inverse=True)
        'lon=-120.108 lat=34.361'
        >>> # do 3 cities at a time in a tuple (Fresno, LA, SF)
        >>> lons = (-119.72,-118.40,-122.38)
        >>> lats = (36.77, 33.93, 37.62 )
        >>> x,y = p(lons, lats)
        >>> 'x: %9.3f %9.3f %9.3f' % x
        'x: 792763.863 925321.537 554714.301'
        >>> 'y: %9.3f %9.3f %9.3f' % y
        'y: 4074377.617 3763936.941 4163835.303'
        >>> lons, lats = p(x, y, inverse=True) # inverse transform
        >>> 'lons: %8.3f %8.3f %8.3f' % lons
        'lons: -119.720 -118.400 -122.380'
        >>> 'lats: %8.3f %8.3f %8.3f' % lats
        'lats: 36.770 33.930 37.620'
        >>> p2 = Proj('+proj=utm +zone=10 +ellps=WGS84') # use proj4 string
        >>> x,y = p2(-120.108, 34.36116666)
        >>> 'x=%9.3f y=%11.3f' % (x,y)
        'x=765975.641 y=3805993.134'
        >>> p = Proj(init="epsg:32667")
        >>> 'x=%12.3f y=%12.3f (meters)' % p(-114.057222, 51.045)
        'x=-1783486.760 y= 6193833.196 (meters)'
        >>> p = Proj("+init=epsg:32667",preserve_units=True)
        >>> 'x=%12.3f y=%12.3f (feet)' % p(-114.057222, 51.045)
        'x=-5851322.810 y=20320934.409 (feet)'
        >>> p = Proj(proj='hammer') # hammer proj and inverse
        >>> x,y = p(-30,40)
        >>> 'x=%12.3f y=%12.3f' % (x,y)
        'x=-2711575.083 y= 4395506.619'
        >>> lon,lat = p(x,y,inverse=True)
        >>> 'lon=%9.3f lat=%9.3f (degrees)' % (lon,lat)
        'lon= -30.000 lat= 40.000 (degrees)'
        """
        # if projparams is None, use kwargs.
        if projparams is None:
            if len(kwargs) == 0:
                raise RuntimeError('no projection control parameters specified')
            else:
                projstring = _dict2string(kwargs)
        elif isinstance(projparams, string_types):
            # if projparams is a string or a unicode string, interpret as a proj4 init string.
            projstring = projparams
        else: # projparams a dict
            projstring = _dict2string(projparams)
        # make sure units are meters if preserve_units is False.
        if not projstring.count('+units=') and not preserve_units:
            projstring = '+units=m '+projstring
        else:
            # a '+units=' parameter is already present (or units must be
            # preserved): rewrite it to meters only when preserve_units
            # is False, leaving everything else untouched.
            kvpairs = []
            for kvpair in projstring.split():
                if kvpair.startswith('+units') and not preserve_units:
                    k, v = kvpair.split('=')
                    kvpairs.append(k+'=m ')
                else:
                    kvpairs.append(kvpair+' ')
            projstring = ''.join(kvpairs)
        # look for EPSG, replace with epsg (EPSG only works
        # on case-insensitive filesystems).
        projstring = projstring.replace('EPSG', 'epsg')
        return _proj.Proj.__new__(self, projstring)

    def __call__(self, *args, **kw):
        #,lon,lat,inverse=False,radians=False,errcheck=False):
        """
        Calling a Proj class instance with the arguments lon, lat will
        convert lon/lat (in degrees) to x/y native map projection
        coordinates (in meters). If optional keyword 'inverse' is True
        (default is False), the inverse transformation from x/y to
        lon/lat is performed. If optional keyword 'radians' is True
        (default is False) the units of lon/lat are radians instead of
        degrees. If optional keyword 'errcheck' is True (default is
        False) an exception is raised if the transformation is invalid.
        If errcheck=False and the transformation is invalid, no
        exception is raised and 1.e30 is returned.
        Inputs should be doubles (they will be cast to doubles if they
        are not, causing a slight performance hit).
        Works with numpy and regular python array objects, python
        sequences and scalars, but is fastest for array objects.
        """
        inverse = kw.get('inverse', False)
        radians = kw.get('radians', False)
        errcheck = kw.get('errcheck', False)
        lon, lat = args
        # process inputs, making copies that support buffer API.
        inx, xisfloat, xislist, xistuple = _copytobuffer(lon)
        iny, yisfloat, yislist, yistuple = _copytobuffer(lat)
        # call proj4 functions. inx and iny modified in place.
        if inverse:
            _proj.Proj._inv(self, inx, iny, radians=radians, errcheck=errcheck)
        else:
            _proj.Proj._fwd(self, inx, iny, radians=radians, errcheck=errcheck)
        # if inputs were lists, tuples or floats, convert back.
        outx = _convertback(xisfloat, xislist, xistuple, inx)
        # bug fix: previously passed xistuple here, so lat was converted
        # back using lon's tuple flag; each output must use its own flags.
        outy = _convertback(yisfloat, yislist, yistuple, iny)
        return outx, outy

    def to_latlong(self):
        """returns an equivalent Proj in the corresponding lon/lat
        coordinates. (see pj_latlong_from_proj() in the Proj.4 C API)"""
        return _proj.Proj.to_latlong(self)

    def is_latlong(self):
        """returns True if projection in geographic (lon/lat) coordinates"""
        return _proj.Proj.is_latlong(self)

    def is_geocent(self):
        """returns True if projection in geocentric (x/y) coordinates"""
        return _proj.Proj.is_geocent(self)
def transform(p1, p2, x, y, z=None, radians=False):
    """
    x2, y2, z2 = transform(p1, p2, x1, y1, z1, radians=False)
    Transform points between two coordinate systems defined by the
    Proj instances p1 and p2.
    The points x1,y1,z1 in the coordinate system defined by p1 are
    transformed to x2,y2,z2 in the coordinate system defined by p2.
    z1 is optional, if it is not set it is assumed to be zero (and
    only x2 and y2 are returned).
    In addition to converting between cartographic and geographic
    projection coordinates, this function can take care of datum
    shifts (which cannot be done using the __call__ method of the
    Proj instances). It also allows for one of the coordinate
    systems to be geographic (proj = 'latlong').
    If optional keyword 'radians' is True (default is False) and p1
    is defined in geographic coordinate (pj.is_latlong() is True),
    x1,y1 is interpreted as radians instead of the default degrees.
    Similarly, if p2 is defined in geographic coordinates and
    radians=True, x2, y2 are returned in radians instead of degrees.
    if p1.is_latlong() and p2.is_latlong() both are False, the
    radians keyword has no effect.
    x,y and z can be numpy or regular python arrays, python
    lists/tuples or scalars. Arrays are fastest. For projections in
    geocentric coordinates, values of x and y are given in meters.
    z is always meters.
    Example usage:
    >>> # projection 1: UTM zone 15, grs80 ellipse, NAD83 datum
    >>> # (defined by epsg code 26915)
    >>> p1 = Proj(init='epsg:26915')
    >>> # projection 2: UTM zone 15, clrk66 ellipse, NAD27 datum
    >>> p2 = Proj(init='epsg:26715')
    >>> # find x,y of Jefferson City, MO.
    >>> x1, y1 = p1(-92.199881,38.56694)
    >>> # transform this point to projection 2 coordinates.
    >>> x2, y2 = transform(p1,p2,x1,y1)
    >>> '%9.3f %11.3f' % (x1,y1)
    '569704.566 4269024.671'
    >>> '%9.3f %11.3f' % (x2,y2)
    '569722.342 4268814.027'
    >>> '%8.3f %5.3f' % p2(x2,y2,inverse=True)
    ' -92.200 38.567'
    >>> # process 3 points at a time in a tuple
    >>> lats = (38.83,39.32,38.75) # Columbia, KC and StL Missouri
    >>> lons = (-92.22,-94.72,-90.37)
    >>> x1, y1 = p1(lons,lats)
    >>> x2, y2 = transform(p1,p2,x1,y1)
    >>> xy = x1+y1
    >>> '%9.3f %9.3f %9.3f %11.3f %11.3f %11.3f' % xy
    '567703.344 351730.944 728553.093 4298200.739 4353698.725 4292319.005'
    >>> xy = x2+y2
    >>> '%9.3f %9.3f %9.3f %11.3f %11.3f %11.3f' % xy
    '567721.149 351747.558 728569.133 4297989.112 4353489.644 4292106.305'
    >>> lons, lats = p2(x2,y2,inverse=True)
    >>> xy = lons+lats
    >>> '%8.3f %8.3f %8.3f %5.3f %5.3f %5.3f' % xy
    ' -92.220 -94.720 -90.370 38.830 39.320 38.750'
    >>> # test datum shifting, installation of extra datum grid files.
    >>> p1 = Proj(proj='latlong',datum='WGS84')
    >>> x1 = -111.5; y1 = 45.25919444444
    >>> p2 = Proj(proj="utm",zone=10,datum='NAD27')
    >>> x2, y2 = transform(p1, p2, x1, y1)
    >>> "%s %s" % (str(x2)[:9],str(y2)[:9])
    '1402285.9 5076292.4'
    """
    # check that p1 and p2 are from the Proj class
    if not isinstance(p1, Proj):
        raise TypeError("p1 must be a Proj class")
    if not isinstance(p2, Proj):
        raise TypeError("p2 must be a Proj class")
    # process inputs, making copies that support buffer API.
    inx, xisfloat, xislist, xistuple = _copytobuffer(x)
    iny, yisfloat, yislist, yistuple = _copytobuffer(y)
    if z is not None:
        inz, zisfloat, zislist, zistuple = _copytobuffer(z)
    else:
        inz = None
    # call pj_transform. inx,iny,inz buffers modified in place.
    _proj._transform(p1, p2, inx, iny, inz, radians)
    # if inputs were lists, tuples or floats, convert back.
    outx = _convertback(xisfloat, xislist, xistuple, inx)
    # bug fix: previously passed xistuple here; y must be converted back
    # with its own tuple flag, not x's.
    outy = _convertback(yisfloat, yislist, yistuple, iny)
    if inz is not None:
        outz = _convertback(zisfloat, zislist, zistuple, inz)
        return outx, outy, outz
    else:
        return outx, outy
def _copytobuffer_return_scalar(x):
try:
# inx,isfloat,islist,istuple
return array('d',(float(x),)),True,False,False
except:
raise TypeError('input must be an array, list, tuple or scalar')
def _copytobuffer(x):
"""
return a copy of x as an object that supports the python Buffer
API (python array if input is float, list or tuple, numpy array
if input is a numpy array). returns copyofx, isfloat, islist,
istuple (islist is True if input is a list, istuple is true if
input is a tuple, isfloat is true if input is a float).
"""
# make sure x supports Buffer API and contains doubles.
isfloat = False; islist = False; istuple = False
# first, if it's a numpy array scalar convert to float
# (array scalars don't support buffer API)
if hasattr(x,'shape'):
if x.shape == ():
return _copytobuffer_return_scalar(x)
else:
try:
# typecast numpy arrays to double.
# (this makes a copy - which is crucial
# since buffer is modified in place)
x.dtype.char
# Basemap issue
# https://github.com/matplotlib/basemap/pull/223/files
# (deal with input array in fortran order)
inx = x.copy(order="C").astype('d')
# inx,isfloat,islist,istuple
return inx,False,False,False
except:
try: # perhaps they are Numeric/numarrays?
# sorry, not tested yet.
# i don't know Numeric/numarrays has `shape'.
x.typecode()
inx = x.astype('d')
# inx,isfloat,islist,istuple
return inx,False,False,False
except:
raise TypeError('input must be an array, list, tuple or scalar')
else:
# perhaps they are regular python arrays?
if hasattr(x, 'typecode'):
#x.typecode
inx = array('d',x)
# try to convert to python array
# a list.
elif type(x) == list:
inx = array('d',x)
islist = True
# a tuple.
elif type(x) == tuple:
inx = array('d',x)
istuple = True
# a scalar?
else:
return _copytobuffer_return_scalar(x)
return inx,isfloat,islist,istuple
def _convertback(isfloat,islist,istuple,inx):
# if inputs were lists, tuples or floats, convert back to original type.
if isfloat:
return inx[0]
elif islist:
return inx.tolist()
elif istuple:
return tuple(inx)
else:
return inx
def _dict2string(projparams):
# convert a dict to a proj4 string.
pjargs = []
for key,value in projparams.items():
pjargs.append('+'+key+"="+str(value)+' ')
return ''.join(pjargs)
class Geod(_proj.Geod):
    """
    performs forward and inverse geodetic, or Great Circle,
    computations. The forward computation (using the 'fwd' method)
    involves determining latitude, longitude and back azimuth of a
    terminus point given the latitude and longitude of an initial
    point, plus azimuth and distance. The inverse computation (using
    the 'inv' method) involves determining the forward and back
    azimuths and distance given the latitudes and longitudes of an
    initial and terminus point.
    """

    def __new__(self, initstring=None, **kwargs):
        """
        initialize a Geod class instance.
        Geodetic parameters for specifying the ellipsoid
        can be given in a dictionary 'initparams', as keyword arguments,
        or as a proj4 geod initialization string.
        Following is a list of the ellipsoids that may be defined using the
        'ellps' keyword (these are stored in the model variable pj_ellps)::
           MERIT a=6378137.0 rf=298.257 MERIT 1983
           SGS85 a=6378136.0 rf=298.257 Soviet Geodetic System 85
           GRS80 a=6378137.0 rf=298.257222101 GRS 1980(IUGG, 1980)
           IAU76 a=6378140.0 rf=298.257 IAU 1976
           airy a=6377563.396 b=6356256.910 Airy 1830
           APL4.9 a=6378137.0. rf=298.25 Appl. Physics. 1965
           NWL9D a=6378145.0. rf=298.25 Naval Weapons Lab., 1965
           mod_airy a=6377340.189 b=6356034.446 Modified Airy
           andrae a=6377104.43 rf=300.0 Andrae 1876 (Den., Iclnd.)
           aust_SA a=6378160.0 rf=298.25 Australian Natl & S. Amer. 1969
           GRS67 a=6378160.0 rf=298.247167427 GRS 67(IUGG 1967)
           bessel a=6377397.155 rf=299.1528128 Bessel 1841
           bess_nam a=6377483.865 rf=299.1528128 Bessel 1841 (Namibia)
           clrk66 a=6378206.4 b=6356583.8 Clarke 1866
           clrk80 a=6378249.145 rf=293.4663 Clarke 1880 mod.
           CPM a=6375738.7 rf=334.29 Comm. des Poids et Mesures 1799
           delmbr a=6376428. rf=311.5 Delambre 1810 (Belgium)
           engelis a=6378136.05 rf=298.2566 Engelis 1985
           evrst30 a=6377276.345 rf=300.8017 Everest 1830
           evrst48 a=6377304.063 rf=300.8017 Everest 1948
           evrst56 a=6377301.243 rf=300.8017 Everest 1956
           evrst69 a=6377295.664 rf=300.8017 Everest 1969
           evrstSS a=6377298.556 rf=300.8017 Everest (Sabah & Sarawak)
           fschr60 a=6378166. rf=298.3 Fischer (Mercury Datum) 1960
           fschr60m a=6378155. rf=298.3 Modified Fischer 1960
           fschr68 a=6378150. rf=298.3 Fischer 1968
           helmert a=6378200. rf=298.3 Helmert 1906
           hough a=6378270.0 rf=297. Hough
           intl a=6378388.0 rf=297. International 1909 (Hayford)
           krass a=6378245.0 rf=298.3 Krassovsky, 1942
           kaula a=6378163. rf=298.24 Kaula 1961
           lerch a=6378139. rf=298.257 Lerch 1979
           mprts a=6397300. rf=191. Maupertius 1738
           new_intl a=6378157.5 b=6356772.2 New International 1967
           plessis a=6376523. b=6355863. Plessis 1817 (France)
           SEasia a=6378155.0 b=6356773.3205 Southeast Asia
           walbeck a=6376896.0 b=6355834.8467 Walbeck
           WGS60 a=6378165.0 rf=298.3 WGS 60
           WGS66 a=6378145.0 rf=298.25 WGS 66
           WGS72 a=6378135.0 rf=298.26 WGS 72
           WGS84 a=6378137.0 rf=298.257223563 WGS 84
           sphere a=6370997.0 b=6370997.0 Normal Sphere (r=6370997)
        The parameters of the ellipsoid may also be set directly using
        the 'a' (semi-major or equatorial axis radius) keyword, and
        any one of the following keywords: 'b' (semi-minor,
        or polar axis radius), 'e' (eccentricity), 'es' (eccentricity
        squared), 'f' (flattening), or 'rf' (reciprocal flattening).
        See the proj documentation (http://trac.osgeo.org/proj/) for more
        information about specifying ellipsoid parameters (specifically,
        the chapter 'Specifying the Earth's figure' in the main Proj
        users manual).
        Example usage:
        >>> from pyproj import Geod
        >>> g = Geod(ellps='clrk66') # Use Clarke 1966 ellipsoid.
        >>> # specify the lat/lons of some cities.
        >>> boston_lat = 42.+(15./60.); boston_lon = -71.-(7./60.)
        >>> portland_lat = 45.+(31./60.); portland_lon = -123.-(41./60.)
        >>> newyork_lat = 40.+(47./60.); newyork_lon = -73.-(58./60.)
        >>> london_lat = 51.+(32./60.); london_lon = -(5./60.)
        >>> # compute forward and back azimuths, plus distance
        >>> # between Boston and Portland.
        >>> az12,az21,dist = g.inv(boston_lon,boston_lat,portland_lon,portland_lat)
        >>> "%7.3f %6.3f %12.3f" % (az12,az21,dist)
        '-66.531 75.654 4164192.708'
        >>> # compute latitude, longitude and back azimuth of Portland,
        >>> # given Boston lat/lon, forward azimuth and distance to Portland.
        >>> endlon, endlat, backaz = g.fwd(boston_lon, boston_lat, az12, dist)
        >>> "%6.3f %6.3f %13.3f" % (endlat,endlon,backaz)
        '45.517 -123.683 75.654'
        >>> # compute the azimuths, distances from New York to several
        >>> # cities (pass a list)
        >>> lons1 = 3*[newyork_lon]; lats1 = 3*[newyork_lat]
        >>> lons2 = [boston_lon, portland_lon, london_lon]
        >>> lats2 = [boston_lat, portland_lat, london_lat]
        >>> az12,az21,dist = g.inv(lons1,lats1,lons2,lats2)
        >>> for faz,baz,d in list(zip(az12,az21,dist)): "%7.3f %7.3f %9.3f" % (faz,baz,d)
        ' 54.663 -123.448 288303.720'
        '-65.463 79.342 4013037.318'
        ' 51.254 -71.576 5579916.651'
        >>> g2 = Geod('+ellps=clrk66') # use proj4 style initialization string
        >>> az12,az21,dist = g2.inv(boston_lon,boston_lat,portland_lon,portland_lat)
        >>> "%7.3f %6.3f %12.3f" % (az12,az21,dist)
        '-66.531 75.654 4164192.708'
        """
        # if initparams is a proj-type init string,
        # convert to dict.
        ellpsd = {}
        if initstring is not None:
            for kvpair in initstring.split():
                # Actually only +a and +b are needed
                # We can ignore safely any parameter that doesn't have a value
                if kvpair.find('=') == -1:
                    continue
                k, v = kvpair.split('=')
                k = k.lstrip('+')
                if k in ['a', 'b', 'rf', 'f', 'es', 'e']:
                    v = float(v)
                ellpsd[k] = v
        # merge this dict with kwargs dict.
        kwargs = dict(list(kwargs.items()) + list(ellpsd.items()))
        # NOTE(review): in __new__ the first argument is really the class,
        # so sphere/a/b/f/es below become class-level attributes; kept
        # as-is to preserve the original behavior.
        self.sphere = False
        if 'ellps' in kwargs:
            # ellipse name given, look up in pj_ellps dict
            ellps_dict = pj_ellps[kwargs['ellps']]
            a = ellps_dict['a']
            if ellps_dict['description'] == 'Normal Sphere':
                self.sphere = True
            if 'b' in ellps_dict:
                b = ellps_dict['b']
                es = 1. - (b * b) / (a * a)
                f = (a - b)/a
            elif 'rf' in ellps_dict:
                f = 1./ellps_dict['rf']
                b = a*(1. - f)
                es = 1. - (b * b) / (a * a)
        else:
            # a (semi-major axis) and one of
            # b the semi-minor axis
            # rf the reciprocal flattening
            # f flattening
            # es eccentricity squared
            # must be given.
            a = kwargs['a']
            if 'b' in kwargs:
                b = kwargs['b']
                es = 1. - (b * b) / (a * a)
                f = (a - b)/a
            elif 'rf' in kwargs:
                f = 1./kwargs['rf']
                b = a*(1. - f)
                es = 1. - (b * b) / (a * a)
            elif 'f' in kwargs:
                f = kwargs['f']
                b = a*(1. - f)
                es = 1. - (b/a)**2
            elif 'es' in kwargs:
                es = kwargs['es']
                b = math.sqrt(a**2 - es*a**2)
                f = (a - b)/a
            elif 'e' in kwargs:
                es = kwargs['e']**2
                b = math.sqrt(a**2 - es*a**2)
                f = (a - b)/a
            else:
                # no shape parameter given: fall back to a sphere of
                # radius a rather than raising (historical behavior).
                b = a
                f = 0.
                es = 0.
                #msg='ellipse name or a, plus one of f,es,b must be given'
                #raise ValueError(msg)
        if math.fabs(f) < 1.e-8:
            self.sphere = True
        self.a = a
        self.b = b
        self.f = f
        self.es = es
        return _proj.Geod.__new__(self, a, f)

    def fwd(self, lons, lats, az, dist, radians=False):
        """
        forward transformation - Returns longitudes, latitudes and back
        azimuths of terminus points given longitudes (lons) and
        latitudes (lats) of initial points, plus forward azimuths (az)
        and distances (dist).
        Works with numpy and regular python array objects, python
        sequences and scalars.
        if radians=True, lons/lats and azimuths are radians instead of
        degrees. Distances are in meters.
        """
        # process inputs, making copies that support buffer API.
        inx, xisfloat, xislist, xistuple = _copytobuffer(lons)
        iny, yisfloat, yislist, yistuple = _copytobuffer(lats)
        inz, zisfloat, zislist, zistuple = _copytobuffer(az)
        ind, disfloat, dislist, distuple = _copytobuffer(dist)
        _proj.Geod._fwd(self, inx, iny, inz, ind, radians=radians)
        # if inputs were lists, tuples or floats, convert back.
        outx = _convertback(xisfloat, xislist, xistuple, inx)
        # bug fix: previously passed xistuple here; lats must convert
        # back with their own tuple flag.
        outy = _convertback(yisfloat, yislist, yistuple, iny)
        outz = _convertback(zisfloat, zislist, zistuple, inz)
        return outx, outy, outz

    def inv(self, lons1, lats1, lons2, lats2, radians=False):
        """
        inverse transformation - Returns forward and back azimuths, plus
        distances between initial points (specified by lons1, lats1) and
        terminus points (specified by lons2, lats2).
        Works with numpy and regular python array objects, python
        sequences and scalars.
        if radians=True, lons/lats and azimuths are radians instead of
        degrees. Distances are in meters.
        """
        # process inputs, making copies that support buffer API.
        inx, xisfloat, xislist, xistuple = _copytobuffer(lons1)
        iny, yisfloat, yislist, yistuple = _copytobuffer(lats1)
        inz, zisfloat, zislist, zistuple = _copytobuffer(lons2)
        ind, disfloat, dislist, distuple = _copytobuffer(lats2)
        _proj.Geod._inv(self, inx, iny, inz, ind, radians=radians)
        # if inputs were lists, tuples or floats, convert back.
        outx = _convertback(xisfloat, xislist, xistuple, inx)
        # bug fix: previously passed xistuple here; lats1 must convert
        # back with their own tuple flag.
        outy = _convertback(yisfloat, yislist, yistuple, iny)
        outz = _convertback(zisfloat, zislist, zistuple, inz)
        return outx, outy, outz

    def npts(self, lon1, lat1, lon2, lat2, npts, radians=False):
        """
        Given a single initial point and terminus point (specified by
        python floats lon1,lat1 and lon2,lat2), returns a list of
        longitude/latitude pairs describing npts equally spaced
        intermediate points along the geodesic between the initial and
        terminus points.
        if radians=True, lons/lats are radians instead of degrees.
        Example usage:
        >>> from pyproj import Geod
        >>> g = Geod(ellps='clrk66') # Use Clarke 1966 ellipsoid.
        >>> # specify the lat/lons of Boston and Portland.
        >>> boston_lat = 42.+(15./60.); boston_lon = -71.-(7./60.)
        >>> portland_lat = 45.+(31./60.); portland_lon = -123.-(41./60.)
        >>> # find ten equally spaced points between Boston and Portland.
        >>> lonlats = g.npts(boston_lon,boston_lat,portland_lon,portland_lat,10)
        >>> for lon,lat in lonlats: '%6.3f %7.3f' % (lat, lon)
        '43.528 -75.414'
        '44.637 -79.883'
        '45.565 -84.512'
        '46.299 -89.279'
        '46.830 -94.156'
        '47.149 -99.112'
        '47.251 -104.106'
        '47.136 -109.100'
        '46.805 -114.051'
        '46.262 -118.924'
        >>> # test with radians=True (inputs/outputs in radians, not degrees)
        >>> import math
        >>> dg2rad = math.radians(1.)
        >>> rad2dg = math.degrees(1.)
        >>> lonlats = g.npts(dg2rad*boston_lon,dg2rad*boston_lat,dg2rad*portland_lon,dg2rad*portland_lat,10,radians=True)
        >>> for lon,lat in lonlats: '%6.3f %7.3f' % (rad2dg*lat, rad2dg*lon)
        '43.528 -75.414'
        '44.637 -79.883'
        '45.565 -84.512'
        '46.299 -89.279'
        '46.830 -94.156'
        '47.149 -99.112'
        '47.251 -104.106'
        '47.136 -109.100'
        '46.805 -114.051'
        '46.262 -118.924'
        """
        lons, lats = _proj.Geod._npts(self, lon1, lat1, lon2, lat2, npts, radians=radians)
        return list(zip(lons, lats))
def test():
    """run the examples in the docstrings using the doctest module"""
    # verbose=True echoes every example as it runs.
    import doctest, pyproj
    doctest.testmod(pyproj,verbose=True)
# Running this module directly executes the doctest suite.
if __name__ == "__main__": test()
| {
"alphanum_fraction": 0.6144168299,
"author": null,
"avg_line_length": 43.4356103024,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "40f79d858742495e21cb7ab1ade7f299bcdb616e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 607,
"max_forks_repo_forks_event_max_datetime": "2022-01-05T14:57:09.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-07-20T03:34:04.000Z",
"max_forks_repo_head_hexsha": "b23a8464abdd88050b83310e1d0e99c54dac28ab",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Con-Mi/lambda-packs",
"max_forks_repo_path": "Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/pyproj/__init__.py",
"max_issues_count": 1360,
"max_issues_repo_head_hexsha": "b23a8464abdd88050b83310e1d0e99c54dac28ab",
"max_issues_repo_issues_event_max_datetime": "2021-07-27T12:46:40.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-07-20T02:06:42.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Con-Mi/lambda-packs",
"max_issues_repo_path": "Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/pyproj/__init__.py",
"max_line_length": 121,
"max_stars_count": 2557,
"max_stars_repo_head_hexsha": "b23a8464abdd88050b83310e1d0e99c54dac28ab",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Con-Mi/lambda-packs",
"max_stars_repo_path": "Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/pyproj/__init__.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-25T10:53:35.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-07-19T22:20:45.000Z",
"num_tokens": 11981,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 38788
} |
"""
Unified place for determining if external dependencies are installed or not.
You should import all external modules using the import_module() function.
For example
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
If the resulting library is not installed, or if the installed version
is less than a given minimum version, the function will return None.
Otherwise, it will return the library. See the docstring of
import_module() for more information.
"""
from sympy.external.importtools import import_module
__all__ = ['import_module']
| {
"alphanum_fraction": 0.7871972318,
"author": null,
"avg_line_length": 27.5238095238,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "549b4b96cdce0ee4d31960e89cb9dc26af0e105d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4490,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T17:24:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-01T17:48:07.000Z",
"max_forks_repo_head_hexsha": "dcdf59bbc6b13ddbc329431adf72fcee294b6389",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "utkarshdeorah/sympy",
"max_forks_repo_path": "sympy/external/__init__.py",
"max_issues_count": 15102,
"max_issues_repo_head_hexsha": "dcdf59bbc6b13ddbc329431adf72fcee294b6389",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T22:53:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-01T01:33:17.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "utkarshdeorah/sympy",
"max_issues_repo_path": "sympy/external/__init__.py",
"max_line_length": 76,
"max_stars_count": 8323,
"max_stars_repo_head_hexsha": "dcdf59bbc6b13ddbc329431adf72fcee294b6389",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "utkarshdeorah/sympy",
"max_stars_repo_path": "sympy/external/__init__.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T13:13:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-02T15:51:43.000Z",
"num_tokens": 114,
"path": null,
"reason": "from sympy",
"repo": null,
"save_path": null,
"sha": null,
"size": 578
} |
import glob
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras import models
from matplotlib import image as mpimg
if __name__ == '__main__':
    # Visualizing intermediate activation in Convolutional Neural Networks with Keras
    # https://github.com/gabrielpierobon/cnnshapes/blob/master/README.md
    model = models.load_model('model.h5')
    model.summary()
    images_per_row = 2
    # Only layers 3 and 4 of the loaded model are visualised.
    layer_names = [layer.name for layer in model.layers[3:5]]
    layer_outputs = [layer.output for layer in model.layers[3:5]]
    # Auxiliary model that maps the original input to those layers' activations.
    activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
    fdir = 'writeup-img/'
    files = glob.glob(fdir + 'act*.jpg')
    for fname in files:
        # Add a leading batch axis; assumes the jpg decodes to (H, W, C) -- TODO confirm
        activations = activation_model.predict(mpimg.imread(fname)[None, :, :, :])
        for layer_name, layer_activation in zip(layer_names, activations):  # Displays the feature maps
            n_features = layer_activation.shape[-1]  # Number of features in the feature map
            size_h = layer_activation.shape[1]  # The feature map has shape (1, size, size, n_features).
            size_w = layer_activation.shape[2]
            n_cols = n_features // images_per_row  # Tiles the activation channels in this matrix
            display_grid = np.zeros((size_h * n_cols, images_per_row * size_w))
            for col in range(n_cols):  # Tiles each filter into a big horizontal grid
                for row in range(images_per_row):
                    channel_image = layer_activation[0, :, :, col * images_per_row + row]
                    channel_image -= channel_image.mean()  # Post-processes the feature to make it visually palatable
                    if channel_image.std() != 0:
                        channel_image /= channel_image.std()
                    # Rescale the standardised activation into the 0..255 byte range.
                    channel_image *= 64
                    channel_image += 128
                    channel_image = np.clip(channel_image, 0, 255).astype('uint8')
                    display_grid[col * size_h: (col + 1) * size_h, row * size_w: (row + 1) * size_w] = channel_image
            # Upscale with nearest-neighbour so individual activations stay blocky.
            img = cv2.resize(display_grid, (480, int(480 / display_grid.shape[1] * display_grid.shape[0])),
                             interpolation=cv2.INTER_NEAREST)
            fpath = Path(fname)
            fpath = fdir + 'out_' + fpath.stem + '_' + layer_name + fpath.suffix
            plt.imsave(fpath, img, cmap='viridis')
| {
"alphanum_fraction": 0.6346710799,
"author": null,
"avg_line_length": 49.3265306122,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d43bd431e64fe4d124b8a846b2e046776d5c40de",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1249e4a6124ff085ffbca2d59f6aa85348ad2002",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "stremetskyi-o/CarND-Behavioral-Cloning-P3",
"max_forks_repo_path": "visualize_activations.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1249e4a6124ff085ffbca2d59f6aa85348ad2002",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "stremetskyi-o/CarND-Behavioral-Cloning-P3",
"max_issues_repo_path": "visualize_activations.py",
"max_line_length": 117,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "1249e4a6124ff085ffbca2d59f6aa85348ad2002",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "stremetskyi-o/CarND-Behavioral-Cloning-P3",
"max_stars_repo_path": "visualize_activations.py",
"max_stars_repo_stars_event_max_datetime": "2019-09-19T09:50:48.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-09-19T09:50:48.000Z",
"num_tokens": 558,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2417
} |
import tensorflow as tf
import numpy as np
import logging.config
import functions
import json
from datetime import datetime
np.set_printoptions(suppress=True)

# 1 - logging: one timestamped TensorBoard run directory per invocation,
# plus a dictConfig-driven application logger.
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "tf_logs"
logdir = "{}/run-{}/".format(root_logdir, now)
with open('./logging_config.json') as f:
    config = json.load(f)
logging.config.dictConfig(config)
logger = logging.getLogger('base')

# 2 - hyperparameters
logger.info("setting metaparameters")
n_epochs = 10
learning_rate = 0.01
batch_size = 100
logger.info("n_epochs: {}, learning_rate: {}, batch_size: {}"
            .format(n_epochs, learning_rate, batch_size))

# 3 - data preparation (helpers live in the project-local `functions` module)
logger.info("data preparation")
X_np, y_np = functions.get_data()
m, n = X_np.shape
X_split, y_split, n_batches = functions.split_data(X_np, y_np, batch_size, m)

# 4 - construction phase: linear model y_pred = X @ theta
logger.info("starting construction phase")
X = tf.placeholder(tf.float32, shape=(None, n), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")
theta = tf.Variable(tf.random_uniform([n, 1], -1.0, 1.0), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")

# 5 - MSE loss and plain gradient-descent training op
with tf.name_scope("loss") as scope:
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name="mse")
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)

# 6 - variable init + checkpointing
init = tf.global_variables_initializer()
saver = tf.train.Saver()

# 7 - TensorBoard scalar summary for the loss
mse_summary = tf.summary.scalar('MSE', mse)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
logger.info("ending construction phase")

# 8 - execution phase
logger.info("starting execution")
with tf.Session() as sess:
    # 6 - uncomment to resume from a previous checkpoint instead of init
    # saver.restore(sess, "/tmp/my_model_final.ckpt")
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            # 9 - mini-batch for this step
            X_batch, y_batch = X_split[batch_index], y_split[batch_index]
            if batch_index % 10 == 0:
                summary_str = mse_summary.eval(feed_dict={X: X_batch,
                                                          y: y_batch})
                step = epoch * n_batches + batch_index
                # 10 - record the MSE summary every 10th batch
                file_writer.add_summary(summary_str, step)
            # 11 - one gradient-descent update
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # NOTE(review): the per-epoch MSE below is evaluated on the last
        # batch only, not on the whole training set.
        logger.info("Epoch {}, MSE = {}"
                    .format(epoch, mse.eval(feed_dict={X: X_batch,
                                                       y: y_batch})))
    # 12 - final fitted parameters
    best_theta = theta.eval()
    # 13 - checkpoint the trained model
    save_path = saver.save(sess, "/tmp/my_model_final.ckpt")
file_writer.close()
logger.info("execution ended")
pd_comp = functions.compare_scores(X_np, y_np, best_theta.ravel())
logger.info(pd_comp)
| {
"alphanum_fraction": 0.642883413,
"author": null,
"avg_line_length": 28.0309278351,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a796c9164229f5cd94632ebdc1f6917345736469",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fd61d6062d050d3aabb7bca5c7826b9ffe34c1cd",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tomis9/cookbook",
"max_forks_repo_path": "scratchpad/tensorflow_lm/network/lm_sgd.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "fd61d6062d050d3aabb7bca5c7826b9ffe34c1cd",
"max_issues_repo_issues_event_max_datetime": "2020-02-08T00:26:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-12-31T13:58:48.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tomis9/cookbook",
"max_issues_repo_path": "scratchpad/tensorflow_lm/network/lm_sgd.py",
"max_line_length": 77,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fd61d6062d050d3aabb7bca5c7826b9ffe34c1cd",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tomis9/cookbook",
"max_stars_repo_path": "scratchpad/tensorflow_lm/network/lm_sgd.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 685,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2719
} |
"""
Profile groupfitter
See where the bulk of computation occurs.
Examples on how to profile with python
https://docs.python.org/2/library/profile.html
"""
import cProfile
import logging
import numpy as np
import pstats
import sys
sys.path.insert(0, '..')
from chronostar.synthdata import SynthData
from chronostar import tabletool
from chronostar import compfitter
if __name__ == '__main__':
    # Profile a single compfitter.fit_comp() run on synthetic data and
    # report the cumulative-time top 10% of the resulting stats.
    logging.basicConfig(level=logging.INFO, filename='temp_logs/groupfitter.log')
    save_dir = 'temp_data/'
    group_savefile = save_dir + 'origins_stat.npy'
    xyzuvw_init_savefile = save_dir + 'xyzuvw_init_stat.npy'
    astro_savefile = save_dir + 'astro_table_stat.txt'
    xyzuvw_conv_savefile = save_dir + 'xyzuvw_conv_stat.fits'

    # Synthetic association parameters; presumably (x, y, z, u, v, w,
    # dx, dv, age) -- TODO confirm against SynthData's expected layout.
    pars = np.array([0., 0., 0., 0., 0., 0., 5., 2., 1e-8])
    starcount = 100
    error_frac = 1.

    synth_data = SynthData(pars=pars, starcounts=starcount)
    synth_data.synthesise_everything()
    tabletool.convert_table_astro2cart(synth_data.table)
    data = tabletool.build_data_dict_from_table(synth_data.table)

    stat_file = 'stat_dumps/groupfitter.stat'
    # best_fit, chain, lnprob = \
    # BUG FIX: the profiled snippet previously called `groupfitter.fit_comp`,
    # but the module is imported above as `compfitter`, so cProfile.run()
    # would raise NameError before profiling anything.
    cProfile.run(
        "compfitter.fit_comp(data=data, plot_it=True,"
        "convergence_tol=2., burnin_steps=400, plot_dir='temp_plots/',"
        "save_dir='temp_data/')",
        stat_file,
    )
    stat = pstats.Stats(stat_file)
    stat.sort_stats('cumtime')
    stat.print_stats(0.1)
| {
"alphanum_fraction": 0.7144837145,
"author": null,
"avg_line_length": 27.2264150943,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5faf00e1a924a81cafefcb619893fa56f61d7dd1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "bdb5cd965e862ba5cc21bee75d5c8620e106c0cc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tcrundall/chronostar",
"max_forks_repo_path": "profiling/profile_groupfitter.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bdb5cd965e862ba5cc21bee75d5c8620e106c0cc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tcrundall/chronostar",
"max_issues_repo_path": "profiling/profile_groupfitter.py",
"max_line_length": 81,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "bdb5cd965e862ba5cc21bee75d5c8620e106c0cc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tcrundall/chronostar",
"max_stars_repo_path": "profiling/profile_groupfitter.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 395,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1443
} |
from Attribute import Attribute
from Utility import Utility
from Observation import Observation
from HyperParameters import HyperParameters
from Memory import Memory
import numpy
import random
import math
class CapsuleMemory(Memory):
    """Observation store for a capsule, plus training-batch generation.

    Keeps a working set of current Observations and a saved set, and
    builds (x, y) training batches either from known generator/inferer
    lambdas (fictive data) or by randomly transforming saved real
    observations (translation, rotation, scaling).
    """

    def __init__(self):
        self._observations : list = list()          # Observation
        self._savedObservations : list = list()     # Observation
        self._lambdaXMapping : dict = dict()        # Column Index - Attribute
        self._lambdaYMapping : dict = dict()        # Column Index - Attribute
        self._lambdaYGenerator = None               # -> Rand Y
        self._lambdaXInferer = None                 # Y -> X
        self._lambdaYInferer = None                 # X -> Y
        self._lambdaY = None                        # X {Attribute - Value} - Y {Attribute - Value}
        self._indexInEpoch : int = 0                # cursor into _savedObservations for batching
        self._epochsCompleted : int = 0
        self._scrambled : list = None               # Column Index

    def getJSONMain(self):
        """Return JSON-serialisable input observations of the first saved observation."""
        obsList = []
        for obs in self._savedObservations[0].getInputObservations():
            obsList.append(obs.getJSONOutput())
        return obsList

    def getJSONMemory(self):
        """Return JSON-serialisable inputs of every saved observation except the first."""
        memoryList = []
        # All memory entries apart from the first
        for idx in range(1, len(self._savedObservations)):
            obsList = []
            for obs in self._savedObservations[idx].getInputObservations():
                obsList.append(obs.getJSONOutput())
            memoryList.append(obsList)
        return memoryList

    def setLambdaKnownG(self, lambdaYGenerator, lambdaXInferer, xMapping : dict, yMapping : dict):
        """Install the known generator/inferer lambdas and their column mappings."""
        # xMapping # Column Index - Attribute
        # yMapping # Column Index - Attribute
        self._lambdaYGenerator = lambdaYGenerator
        self._lambdaXInferer = lambdaXInferer
        self._lambdaXMapping = xMapping
        self._lambdaYMapping = yMapping

    def setLambdaKnownGamma(self, lambdaY):
        """Install the known gamma lambda mapping inputs X to outputs Y."""
        self._lambdaY = lambdaY

    def addObservations(self, observation : list):
        """Extend the current working set with a list of Observations."""
        self._observations.extend(observation)

    def addSavedObservations(self, observation : list):
        """Extend the saved set directly with a list of Observations."""
        self._savedObservations.extend(observation)

    def clearObservations(self):
        """Archive all current observations into the saved set and reset the working set."""
        self._savedObservations.extend(self._observations)
        self._observations = []

    def getObservations(self):
        return self._observations

    def getObservation(self, index : int):
        return self._observations[index]

    def getNumObservations(self):
        return len(self._observations)

    def getMeanProbability(self):
        """Return the mean probability over saved observations (1.0 when there are none)."""
        if not self._savedObservations:
            return 1.0
        meanProb = 0.0
        for obs in self._savedObservations:
            meanProb = meanProb + obs.getProbability()
        return meanProb / float(len(self._savedObservations))

    def getBestObservationAttributes(self):
        """Return the outputs of the highest-probability saved observation.

        With no saved observations, fabricate a map assigning 0.5 to every
        attribute in the Y mapping (one entry per mapped column).
        """
        bestObs = None
        for obs in self._savedObservations:
            if bestObs == None or obs.getProbability() > bestObs.getProbability():
                bestObs = obs
        if bestObs is None:
            attrVals = {}
            if len(self._lambdaYMapping) > 0:
                for idx, attr in self._lambdaYMapping.items():
                    # We invent one with all Attributes set to 0.5 (mainly for primitive capsules)
                    if attr not in attrVals:
                        attrVals[attr] = [0.5]
                    else:
                        attrVals[attr].append(0.5)
            return attrVals
        return bestObs.getOutputsList() # Attribute - List of Values

    def cleanupObservations(self, applySymmetries, offsetLabelX : str, offsetLabelY : str, offsetLabelRatio : str, targetLabelX : str, targetLabelY : str, targetLabelSize : str):
        """Offset, de-duplicate and size-filter the current working set.

        Observations are scanned in descending probability order; zero
        observations, near-duplicates of a higher-probability observation,
        and too-small detections are removed. Finally symmetry cleanup is
        applied to the survivors.
        """
        if offsetLabelX is not None and offsetLabelY is not None and offsetLabelRatio is not None:
            for observation in self._observations:
                observation.offset(offsetLabelX, offsetLabelY, offsetLabelRatio, targetLabelX, targetLabelY, targetLabelSize)
        sortedObs = sorted(self._observations, reverse = True, key = (lambda x : x.getProbability()))
        for index, obs in enumerate(sortedObs):
            for index2 in range(index, len(sortedObs)):
                # Skip entries already removed by an earlier pass.
                if sortedObs[index2] in self._observations:
                    if sortedObs[index2].isZeroObservation():
                        self.removeObservation(sortedObs[index2])
                    elif index2 > index and CapsuleMemory.checkSimilarObservations(obs.getOutputs(), sortedObs[index2].getOutputs()) > HyperParameters.SimilarObservationsCutOff:
                        self.removeObservation(sortedObs[index2])
                    # We remove detections that are too small
                    elif sortedObs[index2].getOutput(attributeName = "Size") * sortedObs[index2].getOutput(attributeName = "Aspect-Ratio") < HyperParameters.SizeCutoff:
                        self.removeObservation(sortedObs[index2])
        for observation in self._observations:
            observation.cleanupSymmetries(applySymmetries)

    def removeObservation(self, observation : Observation):
        """Remove one observation from the working set; return True when it was present."""
        if observation in self._observations:
            self._observations.remove(observation)
            return True
        return False

    def rescaleAttribute(self, attribute : Attribute, scale : float):
        """Rescale one attribute by `scale` across all saved observations."""
        for obs in self._savedObservations:
            obs.rescaleAttribute(attribute, scale)

    def transformDataPoint(self, initialObservation : Observation, mainSymmetry : float, symmetries : dict):
        """Randomly translate/rotate/scale one observation's inputs for augmentation.

        Returns (inputs, outputs) where outputs are recomputed via the gamma
        lambda after the transformation.
        """
        # symmetries # Capsule - Symmetry
        inputs = {} # Attribute - List of Values
        outputs = {} # Attribute - List of Values
        # TODO: Do Preposition transformations
        # TODO: Do Adjective transformations
        inputs = initialObservation.getInputs()
        outputs = self._lambdaY(inputs)
        # Pivot of the transformation: the observation's own pose attributes.
        centerX = [valueList for (key, valueList) in outputs.items() if key.getName() == "Position-X"][0][0]
        centerY = [valueList for (key, valueList) in outputs.items() if key.getName() == "Position-Y"][0][0]
        centerR = [valueList for (key, valueList) in outputs.items() if key.getName() == "Rotation"][0][0]
        centerS = [valueList for (key, valueList) in outputs.items() if key.getName() == "Size"][0][0]
        # Random deltas, wrapped so the transformed pose stays in [0, 1)
        # (rotation is wrapped modulo the main symmetry instead).
        deltaX = ((centerX + (random.random() - 0.5) * 2.0) % 1.0) - centerX
        deltaY = ((centerY + (random.random() - 0.5) * 2.0) % 1.0) - centerY
        deltaSize = ((centerS + (random.random() - 0.5) * 2.0) % 1.0) - centerS
        deltaRotate = ((centerR + (random.random() - 0.5) * 2.0) % mainSymmetry) - centerR
        capsIdx = {} # Capsule - Count
        # Rotation
        for observation in initialObservation.getInputObservations():
            currentCaps = observation.getCapsule()
            xAttr = currentCaps.getAttributeByName("Position-X")
            yAttr = currentCaps.getAttributeByName("Position-Y")
            rotAttr = currentCaps.getAttributeByName("Rotation")
            sizeAttr = currentCaps.getAttributeByName("Size")
            # Hacky... Wacky... (track per-capsule occurrence index into the value lists)
            if currentCaps in capsIdx:
                capsIdx[currentCaps] = capsIdx[currentCaps] + 1
            else:
                capsIdx[currentCaps] = 0
            idx = capsIdx[currentCaps]
            # Move to Origin
            inputs[xAttr][idx] = inputs[xAttr][idx] - centerX
            inputs[yAttr][idx] = inputs[yAttr][idx] - centerY
            # Apply Rotations To Coordinates (rotation unit appears to be
            # full turns, hence the * 2*pi -- TODO confirm)
            inputs[rotAttr][idx] = (inputs[rotAttr][idx] + deltaRotate) # % symmetries[currentCaps]
            newX = inputs[xAttr][idx] * math.cos(deltaRotate * math.pi * 2.0) - inputs[yAttr][idx] * math.sin(deltaRotate * math.pi * 2.0)
            newY = inputs[xAttr][idx] * math.sin(deltaRotate * math.pi * 2.0) + inputs[yAttr][idx] * math.cos(deltaRotate * math.pi * 2.0)
            # Apply Size
            # TODO: Once gamma treats size as in the paper, this must be adjusted as well:
            relSize = (1 + (deltaSize / centerS))
            newX = newX * relSize
            newY = newY * relSize
            inputs[sizeAttr][idx] = inputs[sizeAttr][idx] * relSize
            # Move away back from Origin and translate
            inputs[xAttr][idx] = newX + centerX + deltaX
            inputs[yAttr][idx] = newY + centerY + deltaY
        return inputs, self._lambdaY(inputs) # Attribute - List of Values , Attribute - List of Values

    def runXInferer(self, attributes : list, isTraining : bool):
        """Run the X inferer lambda on a flat list of attribute values."""
        # attributes # Values
        return self._lambdaXInferer(attributes, isTraining) # Values

    def nextBatch(self, batchSize : int, inputMap : dict, outputMap : dict):
        """Produce one (xData, yData) training batch of `batchSize` rows.

        Uses the generator/inferer lambdas when available (fictive data);
        otherwise cycles through saved observations, applying random
        transformations, and advances the epoch cursor.
        """
        # inputMap : dict # Attribute - List of Indices
        # outputMap : dict # Attribute - List of Indices
        yData = [[]] * batchSize
        xData = [[]] * batchSize
        # Fill Symmetries
        # TODO: This is not complete, as only one Symmetry (ie one Route) is filled
        mainSymmetries = 1.0
        symmetries = {}
        for savedObs in self._savedObservations:
            mainSymmetries = savedObs.getCapsule().getSymmetryInverse(savedObs.getInputs())
            for obs in savedObs.getInputObservations():
                symmetries[obs.getCapsule()] = obs.getCapsule().getSymmetry(obs.getOutputsList())
        if self._lambdaXInferer is not None and self._lambdaYGenerator is not None:
            # Only create Fictive Data
            for idx in range(batchSize):
                lyData = self._lambdaYGenerator()
                lxData = self._lambdaXInferer(lyData, True)
                yData[idx] = Utility.mapData(lyData, self._lambdaYMapping, outputMap)
                xData[idx] = Utility.mapData(lxData, self._lambdaXMapping, inputMap)
        else:
            # Total row widths implied by the column-index maps.
            lenInputMap = 0
            lenOutputMap = 0
            for idxList in inputMap.values():
                lenInputMap = lenInputMap + len(idxList)
            for idxList in outputMap.values():
                lenOutputMap = lenOutputMap + len(idxList)
            # Only create True Data + Transformations
            for idx in range(batchSize):
                xData[idx] = [0.0] * lenInputMap
                yData[idx] = [0.0] * lenOutputMap
                xVals, yVals = self.transformDataPoint(self._savedObservations[self._indexInEpoch], mainSymmetries, symmetries)
                # xVals > Attribute - List of Values
                # yVals > Attribute - List of Values
                for key, idxList in inputMap.items():
                    if key in xVals:
                        for idxidx, colIdx in enumerate(idxList):
                            xData[idx][colIdx] = xVals[key][idxidx]
                for key, idxList in outputMap.items():
                    if key in yVals:
                        for idxidx, colIdx in enumerate(idxList):
                            yData[idx][colIdx] = yVals[key][idxidx]
                self._indexInEpoch = self._indexInEpoch + 1
                if self._indexInEpoch >= len(self._savedObservations):
                    self._indexInEpoch = 0
                    self._epochsCompleted = self._epochsCompleted + 1
        return (xData, yData)

    @staticmethod
    def checkSimilarObservations(attributes1 : dict, attributes2 : dict):
        """Return the mean per-attribute agreement between two attribute maps (0.0..1.0)."""
        # attributes1 # Attribute - Value
        # attributes2 # Attribute - Value
        agreement = {}
        for attribute, value in attributes1.items():
            agreement[attribute] = Utility.windowFunction(value - attributes2[attribute], 0.1, 0.1)
        if len(agreement) == 0:
            return 0.0
        total = 0.0
        for value in agreement.values():
            total = total + value
        total = total / float(len(agreement))
        return total
"alphanum_fraction": 0.5979895323,
"author": null,
"avg_line_length": 41.3642611684,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7e757deb680eb72ab25bb477738215f80d3bf63d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2021-09-10T07:47:13.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-09-04T20:33:01.000Z",
"max_forks_repo_head_hexsha": "acee3fcd9a06c91e07652398e9eecdca595b3e69",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Kayzaks/VividNet",
"max_forks_repo_path": "CapsuleMemory.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "acee3fcd9a06c91e07652398e9eecdca595b3e69",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Kayzaks/VividNet",
"max_issues_repo_path": "CapsuleMemory.py",
"max_line_length": 178,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "acee3fcd9a06c91e07652398e9eecdca595b3e69",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Kayzaks/VividNet",
"max_stars_repo_path": "CapsuleMemory.py",
"max_stars_repo_stars_event_max_datetime": "2020-10-21T20:27:20.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-05-24T03:22:17.000Z",
"num_tokens": 2775,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12037
} |
import io
import numpy as np
import sys
import tensorflow as tf
import matplotlib
# Select a non-interactive backend on (typically headless) Linux hosts and
# TkAgg elsewhere; must run before pyplot is imported below.
backend = 'Agg' if sys.platform == 'linux' else 'TkAgg'
matplotlib.use(backend)
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def _build_network(
        name,
        inputs,
        hidden_layer_dims,
        output_dim,
        activation_fn=tf.nn.relu,
        bias_initializer=tf.constant_initializer(0.1),
        output_nonlinearity=None,
        dropout_keep_prob=1.,
        weights_regularizer=None,
        reuse=False):
    """Build a fully connected MLP inside variable scope `name`.

    Each hidden layer is a fully connected layer followed by dropout;
    the final layer has `output_dim` units, zero-initialised biases and
    an optional `output_nonlinearity`. Returns the output tensor.
    """
    with tf.variable_scope(name, reuse=reuse):
        net = inputs
        for layer_dim in hidden_layer_dims:
            net = tf.contrib.layers.fully_connected(
                net,
                layer_dim,
                activation_fn=activation_fn,
                biases_initializer=bias_initializer,
                weights_regularizer=weights_regularizer)
            net = tf.nn.dropout(net, dropout_keep_prob)
        return tf.contrib.layers.fully_connected(
            net,
            output_dim,
            activation_fn=output_nonlinearity,
            biases_initializer=tf.constant_initializer(0.))
def _build_train_op(loss, learning_rate, var_list, grad_scale, scope, global_step=None):
    """Create an Adam train op with global-norm gradient clipping.

    Returns (train_op, summaries) where summaries report the raw and
    clipped gradient norms plus the variable norm, tagged under `scope`.
    """
    with tf.variable_scope(scope):
        adam = tf.train.AdamOptimizer(learning_rate)
        raw_grads = tf.gradients(loss, var_list)
        clipped_grads, _ = tf.clip_by_global_norm(raw_grads, grad_scale)
        train_op = adam.apply_gradients(
            list(zip(clipped_grads, var_list)), global_step=global_step)
    # summaries (tags carry the scope prefix explicitly)
    summaries = [
        tf.summary.scalar('{}/global_grad_norm'.format(scope), tf.global_norm(raw_grads)),
        tf.summary.scalar('{}/global_scaled_grad_norm'.format(scope), tf.global_norm(clipped_grads)),
        tf.summary.scalar('{}/global_var_norm'.format(scope), tf.global_norm(var_list)),
    ]
    return train_op, summaries
def plot_grid_summary(xs, name='', side=28):
    """Render a 2-D grid of square images as a TensorBoard image summary.

    :param xs: array-like of shape (nrow, ncol, ...) where each entry
        reshapes to a (side, side) image
    :param name: tag suffix; the summary tag becomes 'gen/<name>'
    :param side: pixel edge length of each square image
    :return: a tf.Summary containing the rendered PNG
    """
    n_rows, n_cols = np.shape(xs)[:2]
    plt.figure(figsize=(n_cols + 1, n_rows + 1))
    # Pack the cells edge-to-edge with half-cell margins around the grid.
    grid = gridspec.GridSpec(n_rows, n_cols,
                             wspace=0.0, hspace=0.0,
                             top=1. - 0.5 / (n_rows + 1), bottom=0.5 / (n_rows + 1),
                             left=0.5 / (n_cols + 1), right=1 - 0.5 / (n_cols + 1)
                             )
    for row_idx, row_imgs in enumerate(xs):
        for col_idx, img in enumerate(row_imgs):
            cell = plt.subplot(grid[row_idx, col_idx])
            cell.imshow(img.reshape(side, side))
            cell.axis('off')
    # Serialise the figure to PNG bytes and wrap them in an image summary.
    png_buffer = io.BytesIO()
    plt.savefig(png_buffer, format='png')
    png_buffer.seek(0)
    image_summary = tf.Summary.Image(
        encoded_image_string=png_buffer.getvalue(),
        height=side * n_rows,
        width=side * n_cols
    )
    summary = tf.Summary(value=[
        tf.Summary.Value(tag='gen/{}'.format(name), image=image_summary)
    ])
    plt.close()
    return summary
"alphanum_fraction": 0.6171016484,
"author": null,
"avg_line_length": 33.0909090909,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8db4be03684395d11e02f9e73e58436c6188ece0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "98ca54845c87a8cfbdc63c839961d1b4e54db317",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "wulfebw/gans",
"max_forks_repo_path": "gans/tf_utils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "98ca54845c87a8cfbdc63c839961d1b4e54db317",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "wulfebw/gans",
"max_issues_repo_path": "gans/tf_utils.py",
"max_line_length": 110,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "98ca54845c87a8cfbdc63c839961d1b4e54db317",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "wulfebw/gans",
"max_stars_repo_path": "gans/tf_utils.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 688,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2912
} |
import numpy as np
def make_sphere(ball_shape, radius, position):
    """
    Build a boolean voxel mask of a sphere inside a 3-D volume.

    Assumes shape and position are both a 3-tuple of int or float;
    the units are pixels / voxels (px for short).
    radius is an int or float in px.
    :param tuple(int) ball_shape:
    :param float radius:
    :param tuple(int) position:
    :return: boolean array of shape `ball_shape`, True inside the sphere
    :rtype: np.ndarray
    """
    # Open grid of coordinates, shifted so `position` sits at the origin.
    axes = np.ogrid[[slice(-c, dim - c) for c, dim in zip(position, ball_shape)]]
    # Normalised squared distance from the centre; broadcasting expands
    # the three open-grid axes to the full volume.
    radii = (radius,) * 3
    normed_dist = sum(np.abs(axis / r) ** 2 for axis, r in zip(axes, radii))
    # Voxels whose scaled distance is at most 1 lie inside the sphere.
    return normed_dist <= 1.0
def four_connected_kernel():
    """Return the 3x3 boolean 4-connectivity (von Neumann) kernel."""
    # FIX: the deprecated `np.bool` alias was removed in NumPy 1.24 and
    # raises AttributeError there; the builtin `bool` is the supported
    # spelling and yields the identical boolean dtype.
    return np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)
| {
"alphanum_fraction": 0.6506378803,
"author": null,
"avg_line_length": 29.9705882353,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "cef24ad247257fe88a937e75674c249a4f19f86b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 12,
"max_forks_repo_forks_event_max_datetime": "2022-03-06T13:03:18.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-06-18T09:57:24.000Z",
"max_forks_repo_head_hexsha": "7a86a7d2c879c94da529ec6140f7e5c3f02bf288",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "npeschke/cellfinder-core",
"max_forks_repo_path": "cellfinder_core/tools/geometry.py",
"max_issues_count": 38,
"max_issues_repo_head_hexsha": "7a86a7d2c879c94da529ec6140f7e5c3f02bf288",
"max_issues_repo_issues_event_max_datetime": "2022-03-11T11:04:06.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-01-22T11:50:29.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "npeschke/cellfinder-core",
"max_issues_repo_path": "cellfinder_core/tools/geometry.py",
"max_line_length": 74,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "7a86a7d2c879c94da529ec6140f7e5c3f02bf288",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "npeschke/cellfinder-core",
"max_stars_repo_path": "cellfinder_core/tools/geometry.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-10T07:16:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-01-22T11:40:01.000Z",
"num_tokens": 285,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1019
} |
#!/bin/python
import numpy as np
import torch
import matplotlib.pyplot as plt
from spiking import SpikingLGN
from torchvision.datasets import MNIST
from torchvision import transforms
if __name__ == "__main__":
    # Run on the GPU when one is available, otherwise fall back to the CPU.
    dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {dev}")

    # Build the spiking retina -> LGN model.
    model = SpikingLGN(num_retina_layers=1, num_lgn_layers=1,
                       num_neurons_retina=1500, num_neurons_lgn=400,
                       square_size=28, neighbourhood_size=(3, 5),
                       num_timesteps=1200, device=dev).to(dev)

    # Use the first MNIST training digit as the input stimulus.
    dataset = MNIST('./data', train=True, download=True,
                    transform=transforms.Compose([transforms.ToTensor()]))
    sample = dataset[0][0]                     # dataset yields (image, label)
    stimulus = sample[None, :, :, :].to(dev)   # add a batch dimension
    print(stimulus.shape)

    # Forward-propagate and convert the spike record to a numpy array.
    spikes = np.array(model(stimulus))

    # Raster plot: neuron activations over time.
    plt.figure(figsize=(10, 10))
    plt.imshow(spikes)
    plt.title('Raster Plot')
    plt.xlabel('Neuron ID')
    plt.ylabel('Activation over time')
    plt.tight_layout()
    plt.show()
    print(spikes.shape)
| {
"alphanum_fraction": 0.6736596737,
"author": null,
"avg_line_length": 30.6428571429,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d8e7d6894ec7cdccd5629f684b1d97f51d05bbf9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "307baa4900180a42791f8717c6fe2612abac89b5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "shoaibahmed/torchbrain",
"max_forks_repo_path": "test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "307baa4900180a42791f8717c6fe2612abac89b5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "shoaibahmed/torchbrain",
"max_issues_repo_path": "test.py",
"max_line_length": 117,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "307baa4900180a42791f8717c6fe2612abac89b5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "shoaibahmed/torchbrain",
"max_stars_repo_path": "test.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-10T09:21:44.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-08-12T04:33:11.000Z",
"num_tokens": 348,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1287
} |
# -*- coding: utf-8 -*-
"""Console script for star_pairs."""
import click
import readline
import numpy as np
import matplotlib.pyplot as plt
import time
import datetime
from time import localtime
from time import strftime
import math
import os
import pkg_resources
# _Define constants:
# Observatory latitude as a fixed-width sexagesimal string; parsed by
# format_decimal() below (the banner in main() names Gemini South).
LATITUDE = '-30d14m26.700s'  # WGS84
# _Define functions:
def format_decimal(s):
    """Convert a fixed-width sexagesimal string to a decimal float.

    Two layouts are recognised by their exact length:
      * 14 chars, e.g. '-30d14m26.700s' -- degrees (latitude/longitude);
        minutes and seconds are subtracted, matching a negative latitude.
      * 10 chars, e.g. 'HH:MM:SS.S' -- hours (LST); components are added.

    Raises:
        ValueError: if *s* matches neither layout.  (Previously a message
        was printed and None was returned, which crashed later arithmetic.)
    """
    if len(s) == 14:  # For latitude and longitude (degress)
        return float(s[0:3]) - float(s[4:6]) / 60. - float(s[7:12]) / 3600.
    elif len(s) == 10:  # For LST (hours)
        return float(s[0:2]) + float(s[3:5]) / 60. + float(s[6:10]) / 3600.
    else:
        raise ValueError('Wrong len(string), see format_decimal function')
def format_Dec(s):
    """Convert Dec strings 'DD:MM:SS.S' from pairs.txt to decimal degrees.

    The list is updated in place and also returned, matching the original
    call pattern ``Dec = format_Dec(Dec)``.  Minutes and seconds are
    subtracted (southern-declination convention used by this catalogue).
    """
    for idx, text in enumerate(s):
        degrees = float(text[:3])
        minutes = float(text[4:6]) / 60.
        seconds = float(text[7:11]) / 3600.
        s[idx] = degrees - minutes - seconds
    return s
def format_RA(s):
    """Convert RA strings 'HH:MM:SS.S' from pairs.txt to decimal hours.

    Operates in place and returns the same list object, so both
    ``format_RA(RA)`` and ``RA = format_RA(RA)`` work.
    """
    for idx, text in enumerate(s):
        s[idx] = (float(text[:2])
                  + float(text[3:5]) / 60.
                  + float(text[6:11]) / 3600.)
    return s
def fill_list(l):
    """Parse the pairs.txt line list into parallel per-pair columns.

    Records come in groups of three lines: the first line of a group holds
    the acquisition-camera star, the second the P1/P2 star (the third is a
    separator).  Reading (len(l) // 3) + 1 groups implies the file ends
    after the second line of the last group (len(l) == 3*k + 2).

    Returns:
        Tuple of lists (ID, ID_P1P2, RA, Dec, Vmag_AcqCam, Vmag_P1P2,
        Sep, Pangle), one element per pair.
    """
    ID = []
    ID_P1P2 = []
    RA = []
    Dec = []
    Vmag_AcqCam = []
    Vmag_P1P2 = []
    Sep = []
    Pangle = []
    # '//' keeps integer (floor) division under Python 3, where '/' on ints
    # yields a float and would make range() fail; identical result in py2.
    for i in range((len(l) // 3) + 1):
        acq = l[3 * i].split()       # acquisition-camera line
        pwfs = l[3 * i + 1].split()  # P1/P2 wavefront-sensor line
        ID.append(acq[0])
        ID_P1P2.append(pwfs[0])
        RA.append(acq[1])
        Dec.append(acq[2])
        Vmag_AcqCam.append(acq[3])
        Vmag_P1P2.append(pwfs[3])
        Sep.append(acq[4])
        Pangle.append(pwfs[4])
    return ID, ID_P1P2, RA, Dec, Vmag_AcqCam, Vmag_P1P2, Sep, Pangle
def cal_HA(LST, RA):
    """Return the hour angle (LST - RA, in hours) for every RA value."""
    return [LST - ra for ra in RA]
def cal_altaz(RA, Dec, HA, latitude):
    """Convert equatorial coordinates to altazimuthal ones (radians).

    Standard spherical-astronomy transform: elevation from the sine rule,
    azimuth from the cosine rule.  The azimuth is reflected into the
    [pi, 2*pi) range whenever sin(HA) > 0, i.e. when the object lies west
    of the meridian.  Dec and latitude are in degrees, HA in hours.
    """
    lat_rad = np.radians(latitude)
    El = []
    Az = []
    for i in range(len(RA)):
        dec_rad = np.radians(Dec[i])
        ha_rad = np.radians(HA[i] * 15.)  # hours -> degrees -> radians
        el = np.arcsin(np.sin(dec_rad) * np.sin(lat_rad)
                       + np.cos(dec_rad) * np.cos(lat_rad) * np.cos(ha_rad))
        az = np.arccos((np.sin(dec_rad) - np.sin(el) * np.sin(lat_rad))
                       / (np.cos(el) * np.cos(lat_rad)))
        if np.sin(ha_rad) > 0.:
            az = 2. * np.pi - az
        El.append(el)
        Az.append(az)
    return Az, El
def set_altaz(El, Az, ID, ID_P1P2, Vmag_AcqCam, Vmag_P1P2, Sep, Pangle):
    """Format altazimuthal coordinates for display on the polar plot.

    Only pairs higher than 30 degrees in elevation are kept.  Elevation is
    inverted (r = 90 - El in degrees) so zenith sits at the plot centre on
    the polar projection, and every per-pair metadata list is filtered in
    step with the coordinates.

    Note: the original version carried two stray string literals after the
    docstring (no-op statements) and three unused accumulator lists
    (size, color_star, alpha_star); both removed, behavior unchanged.

    Returns:
        Tuple (r, theta, ID_f, ID_P1P2_f, Vmag_AcqCam_f, Vmag_P1P2_f,
        Sep_f, Pangle_f) containing only the visible pairs.
    """
    r = []
    theta = []
    ID_f = []
    ID_P1P2_f = []
    Vmag_AcqCam_f = []
    Vmag_P1P2_f = []
    Sep_f = []
    Pangle_f = []
    for i in range(len(El)):
        if np.degrees(El[i]) > 30.:
            r.append(90.0 - np.degrees(El[i]))
            theta.append(Az[i])
            ID_f.append(ID[i])
            ID_P1P2_f.append(ID_P1P2[i])
            Vmag_AcqCam_f.append(Vmag_AcqCam[i])
            Vmag_P1P2_f.append(Vmag_P1P2[i])
            Sep_f.append(Sep[i])
            Pangle_f.append(Pangle[i])
    return r, theta, ID_f, ID_P1P2_f, Vmag_AcqCam_f, Vmag_P1P2_f, Sep_f, Pangle_f
@click.command()
def main(args=None):
    """Console script for star_pairs.

    Reads the pairs.txt catalogue shipped with the package, converts every
    pair from equatorial (J2000) to altazimuthal coordinates at the current
    LST, and shows the pairs above 30 degrees elevation on an interactive
    polar sky map.  Clicking a star prints its details to the terminal.

    NOTE(review): this function uses Python 2 print statements and the
    long-removed Matplotlib APIs ``set_axis_bgcolor`` and
    ``canvas.set_window_title`` -- it requires Python 2 and an old
    Matplotlib release.
    """
    # _Extract LST from computer, format LST
    # Fall back to a fixed midnight LST when EPICS channel access is absent.
    try:
        import epics
        LST_epics = epics.caget("tcs:LST")
    except ImportError:
        LST_epics = "00:00:00.0"
    LST = format_decimal(LST_epics)
    # _Format latitude
    latitude = format_decimal(LATITUDE)
    # _Pairs from equatorial coordinates (J2000) to altazimuthal
    # ___Read from file (shipped in the package's data directory)
    file = pkg_resources.resource_stream(__name__, "data/pairs.txt")
    lines = file.readlines()
    file.close()
    # ___Fill lists with data from pairs.txt
    ID, ID_P1P2, RA, Dec, Vmag_AcqCam, Vmag_P1P2, Sep, Pangle = fill_list(lines)
    # ___Format RA and Dec: so it could be used in the code tuning
    RA = format_RA(RA)
    Dec = format_Dec(Dec)
    # ___Calculate HA
    HA = cal_HA(LST, RA)
    #__Calculate altazimuthal coordinates
    Az, El = cal_altaz(RA, Dec, HA, latitude)
    #__Plotting in polar coordiantes
    #____Format altazimuthal coordinates (keeps only El > 30 degrees)
    r, theta, ID_f, ID_P1P2_f, Vmag_AcqCam_f, Vmag_P1P2_f, Sep_f, Pangle_f = set_altaz(El, Az, ID, ID_P1P2, Vmag_AcqCam, Vmag_P1P2, Sep, Pangle)
    #____Plot, in a polar projection, the coordinates
    fig = plt.figure(figsize=(11, 11))
    fig.set_facecolor((0.8, 0.8, 0.8))
    ax = plt.subplot(111, polar=True)
    # Dark navy background; set_axis_bgcolor is the pre-2.0 Matplotlib name.
    ax.set_axis_bgcolor((0.0, 0.0, 0.3))
    ax.set_alpha(0.9)
    ax.plot(
        theta,
        r,
        linestyle='None',
        label='Stars pairs',
        marker='o',
        color='yellow',
        markeredgecolor=(
            (0,
             0,
             0.3)),
        markersize=6,
        alpha=1,
        markeredgewidth=0.1,
        picker=3)  # theta (radians),radii (degrees)
    ax.set_title(
        'LST ' +
        LST_epics,
        verticalalignment='bottom',
        horizontalalignment='center',
        weight='bold')
    ax.set_rmax(90.0)
    # Angular grid every 15 degrees.
    # NOTE(review): under Python 2 the comprehension variable rebinds the
    # outer name 'theta'; harmless here only because theta is not used again.
    plt.thetagrids([theta * 15 for theta in range(360 // 15)])
    # Compass labels every 90 degrees; the in-between grid lines stay blank.
    ax.set_xticklabels(['N',
                        '',
                        '',
                        '',
                        '',
                        '',
                        'E',
                        '',
                        '',
                        '',
                        '',
                        '',
                        'S',
                        '',
                        '',
                        '',
                        '',
                        '',
                        'W',
                        ''],
                       verticalalignment='top')
    ax.set_rgrids([0.01, 10, 20, 30, 40, 50, 60, 70, 80, 90],
                  angle=67.5, color='grey', alpha='0.7')  # Display options
    # Radial labels run from zenith (90 deg) at the centre outwards.
    ax.set_yticklabels(['90$^\circ$',
                        '80$^\circ$',
                        '70$^\circ$',
                        '60$^\circ$',
                        '50$^\circ$',
                        '40$^\circ$',
                        '30$^\circ$',
                        '20$^\circ$',
                        '10$^\circ$'])
    ax.set_theta_zero_location('N')
    ax.set_theta_direction(-1)  # azimuth grows clockwise (N -> E -> S -> W)
    ax.grid(color='white', linestyle='-', linewidth=0.6, alpha=0.3)
    ax.grid(True)
    ax.legend(loc='lower right')
    # ___Some display
    os.system('clear')
    print '__________________________________________________________________________'
    print ''
    print ' TUNING STARS MAP, GEMINI SOUTH OBSERVATORY '
    print ' ', 'Actual LST:', LST_epics, ' '
    print '__________________________________________________________________________'
    print ''
    print 'Pairs library file in: perm/staff/mgomez/project_tuning/pairs.txt '
    print ''
    print 'Click on any star to display the information...'
    # ___Events with mouse click
    def onpick(event):
        # Echo the clicked pair's catalogue data to the terminal.
        # NOTE(review): event.ind is a sequence of picked indices; indexing
        # the plain Python lists with it presumably relies on single-element
        # picks -- confirm against the Matplotlib version in use.
        ind = event.ind
        print '__________________________________________________________________________'
        print ''
        print ' AcqCam star ', 'ID:', ID_f[ind], 'Vmag:', Vmag_AcqCam_f[ind][0:3]
        print ' PWFS star ', 'ID:', ID_P1P2_f[ind], 'Vmag:', Vmag_P1P2_f[ind][0:3]
        print ' Separation (arcmin): ', Sep_f[ind][0:4], 'PA (degrees):', Pangle_f[ind][0:5]
        print '__________________________________________________________________________'
    fig.canvas.mpl_connect('pick_event', onpick)
    fig.canvas.set_window_title('tuning.py')
    plt.show()
    print ''
    print '*** Please send any comment to mgomez@gemini.edu'
    print ''
    return 0
if __name__ == "__main__":
import sys
sys.exit(main())
| {
"alphanum_fraction": 0.5140797477,
"author": null,
"avg_line_length": 31.150877193,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "805e9a4f1cb45038076d619bb3a96148dcba5677",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "95739d48edc35a7aaf8b724792a7e0e4936cd658",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "mgomezjimenez/stars_pairs",
"max_forks_repo_path": "star_pairs/cli.py",
"max_issues_count": 11,
"max_issues_repo_head_hexsha": "95739d48edc35a7aaf8b724792a7e0e4936cd658",
"max_issues_repo_issues_event_max_datetime": "2018-01-31T04:06:02.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-09-20T05:32:17.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "mgomezjimenez/pairs",
"max_issues_repo_path": "star_pairs/cli.py",
"max_line_length": 144,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "95739d48edc35a7aaf8b724792a7e0e4936cd658",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "mgomezjimenez/stars_pairs",
"max_stars_repo_path": "star_pairs/cli.py",
"max_stars_repo_stars_event_max_datetime": "2017-09-20T05:41:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-09-20T05:41:05.000Z",
"num_tokens": 2393,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8878
} |
      subroutine exco_read_salt
      !! Read the salt export-coefficient input file (in_exco%salt):
      !! count the records, allocate the salt arrays, read each record,
      !! crosswalk the record names with the export-coefficient database,
      !! and set the salt values on each export object's outflow hydrograph.

      use hydrograph_module
      use input_file_module
      use organic_mineral_mass_module
      use maximum_data_module
      use exco_module
      use constituent_mass_module

      character (len=80) :: titldum, header    !none       |scratch title/header lines
      integer :: eof, imax, ob1, ob2           !none       |eof flag, record count, object index range
      logical :: i_exist              !none       |check to determine if file exists

      eof = 0
      imax = 0

      !read all export coefficient data
      inquire (file=in_exco%salt, exist=i_exist)
      !NOTE(review): with .or., a named-but-missing file still reaches
      !open(); presumably mirrors the other exco readers -- confirm.
      if (i_exist .or. in_exco%salt /= "null") then
      do
        open (107,file=in_exco%salt)
        !first pass: skip title and header, then count the data records
        read (107,*,iostat=eof) titldum
        if (eof < 0) exit
        read (107,*,iostat=eof) header
        if (eof < 0) exit
        imax = 0
        do while (eof == 0)
          read (107,*,iostat=eof) titldum
          if (eof < 0) exit
          imax = imax + 1
        end do

        db_mx%exco_salt = imax

        allocate (exco_salt(imax))
        do iexco_salt = 1, imax
          !one value per simulated salt constituent
          allocate (exco_salt(iexco_salt)%salt(cs_db%num_salts))
        end do
        allocate (exco_salt_num(imax))
        allocate (exco_salt_name(imax))

        !second pass: rewind and read the actual data records
        rewind (107)
        read (107,*,iostat=eof) titldum
        if (eof < 0) exit
        read (107,*,iostat=eof) header
        if (eof < 0) exit

        !read all export coefficient data
        do ii = 1, db_mx%exco_salt
          read (107,*,iostat=eof) titldum
          if (eof < 0) exit
          !step back and re-read the same line into name + salt values
          backspace (107)
          read (107,*,iostat=eof) exco_salt_name(ii), (exco_salt(ii)%salt(isalt), isalt = 1, cs_db%num_salts)
          if (eof < 0) exit
        end do

        close (107)
        exit
      end do
      end if

      ! xwalk with exco file to get sequential number
      do iexco = 1, db_mx%exco
        do iexco_salt = 1, db_mx%exco_salt
          if (exco_db(iexco)%salts_file == exco_salt_name(iexco_salt)) then
            exco_salt_num(iexco) = iexco_salt
            exit
          end if
        end do
      end do

      !set exco object hydrograph
      ob1 = sp_ob1%exco
      ob2 = sp_ob1%exco + sp_ob%exco - 1
      do iob = ob1, ob2
        iexco = ob(iob)%props
        if (exco_db(iexco)%salts_file == "null") then
          !no salt file assigned to this object -> zero the concentrations
          obcs(iob)%hd(1)%salt = 0.
        else
          iexco_salt = exco_salt_num(iexco)
          obcs(iob)%hd(1)%salt = exco_salt(iexco_salt)%salt
        end if
      end do

      return
      end subroutine exco_read_salt
"alphanum_fraction": 0.5304449649,
"author": null,
"avg_line_length": 30.8674698795,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "a5af59c9301e914e3b93cc48b0e1ca7c05a72c30",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 19,
"max_forks_repo_forks_event_max_datetime": "2021-09-29T01:04:49.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-03-20T01:08:11.000Z",
"max_forks_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "mikiec84/delphi",
"max_forks_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/exco_read_salt.f90",
"max_issues_count": 385,
"max_issues_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d",
"max_issues_repo_issues_event_max_datetime": "2022-02-17T07:44:56.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-02-21T16:52:06.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "mikiec84/delphi",
"max_issues_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/exco_read_salt.f90",
"max_line_length": 114,
"max_stars_count": 25,
"max_stars_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "mikiec84/delphi",
"max_stars_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/exco_read_salt.f90",
"max_stars_repo_stars_event_max_datetime": "2022-01-16T21:19:54.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-03-03T11:57:57.000Z",
"num_tokens": 776,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2562
} |
#! /usr/bin/env python
import rospy
import cv2
import sys
import numpy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import os
from cv_bridge import CvBridge
import shlex
import subprocess
import time
from sensor_msgs.msg import Image, CameraInfo, PointCloud2
# Switch controller server client libs
from controller_manager_msgs.srv import SwitchController, SwitchControllerRequest , SwitchControllerResponse
from controller_manager_msgs.srv import ListControllers, ListControllersRequest , ListControllersResponse
class BerryDataCollection:
    """Keyboard-driven data-collection node for a Franka ("panda") arm.

    Shows the live colour camera stream in an OpenCV window and maps key
    presses to actions: moving to a named pose, switching between the
    position and zero-torque controllers, recording rosbag files, and
    recovering from robot errors.
    """

    def __init__(self, experiment_path):
        """Initialise ROS/MoveIt, subscribe to the camera, and start loop().

        Args:
            experiment_path: directory where the rosbag files are written.

        NOTE(review): the constructor does not return under normal use --
        self.loop() at the end blocks until ROS shuts down or ESC is hit,
        so the commented-out point-cloud setup below it never runs.
        """
        rospy.init_node('berry_data_collection')
        moveit_commander.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.RobotCommander()
        group_name = "panda_arm"
        self.group = moveit_commander.MoveGroupCommander(group_name)
        self.scene = moveit_commander.PlanningSceneInterface()
        self.display_trajectory_publisher = rospy.Publisher( "/move_group/display_planned_path", moveit_msgs.msg.DisplayTrajectory, queue_size=20)
        self.experiment_count = 0 # increment this number
        self.experiment_path = experiment_path
        self.bridge = CvBridge()
        # Latest frames delivered by the subscriber callbacks.
        self.color_img = None
        self.color_img_received = False
        self.depth_img = None
        self.depth_img_received = False
        self.img_sub = rospy.Subscriber('/color/image_raw', Image, self.color_image_cb)
        # self.depth_sub = rospy.Subscriber('/depth/image_rect_raw', Image, self.depth_image_cb)
        # Which controller is active; toggled by go_to_position_mode /
        # go_to_zero_mode ('position' or 'torque').
        self.current_controller = 'position'
        self.group.set_max_velocity_scaling_factor(0.15)
        self.rosbag_proc = None
        self.loop()
        # get_pointcloud data
        # self.do_acquire_pc = False
        # self.pointcloud = np.array([])
        # self.pointcloud_sub = rospy.Subscriber('/depth/color/points', PointCloud2, self.pointcloud_cb)

    def color_image_cb(self, msg) :
        """Store the latest colour frame (runs on the subscriber thread)."""
        self.color_img_received = True
        self.color_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
        self.img_shown = self.color_img #cv2.cvtColor(self.color_img, cv2.COLOR_RGB2BGR )

    def depth_image_cb(self, msg):
        """Store the latest depth frame (subscriber currently disabled)."""
        self.depth_img_received = True
        self.depth_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")

    def go_to_pose(self, pose = "ready"):
        """Move the arm to a named MoveIt target pose, if it exists."""
        if pose in self.group.get_named_targets():
            self.group.set_named_target(pose)
            rospy.loginfo("Moving to {} target pose".format(pose))
            self.group.go()

    def go_to_position_mode(self):
        """Switch from the zero-torque to the position controller.

        Returns True if already in position mode or the switch succeeded,
        False when the controller-manager service call failed.
        """
        if self.current_controller == "torque":
            rospy.wait_for_service("/controller_manager/switch_controller")
            try:
                service = rospy.ServiceProxy("/controller_manager/switch_controller", SwitchController)
                request = SwitchControllerRequest()
                request.start_controllers.append( "position_joint_trajectory_controller" )
                request.stop_controllers.append( "franka_zero_torque_controller" )
                # strictness 2 == STRICT: fail the whole switch on any error.
                request.strictness = 2
                response = service(request)
                self.current_controller = "position"
                return response.ok
            except :
                rospy.logerr("Switch controller server is down. Unable to switch contoller")
                return False
        else :
            return True

    def go_to_zero_mode(self):
        """Switch from the position to the zero-torque (free-move) controller.

        Mirror image of go_to_position_mode(); returns True on success or
        when already in torque mode.
        """
        print(self.current_controller)
        if self.current_controller == "position":
            print("Switching to torque mode")
            rospy.wait_for_service("/controller_manager/switch_controller")
            try:
                service = rospy.ServiceProxy("/controller_manager/switch_controller", SwitchController)
                request = SwitchControllerRequest()
                request.start_controllers.append( "franka_zero_torque_controller" )
                request.stop_controllers.append( "position_joint_trajectory_controller" )
                # strictness 2 == STRICT: fail the whole switch on any error.
                request.strictness = 2
                response = service(request)
                self.current_controller = "torque"
                return response.ok
            except :
                rospy.logerr("Switch controller server is down. Unable to switch contoller")
                return False
        else:
            print("Already in torque mode")
            return True

    def record_bag_file(self):
        """Start recording /tf and /joint_states into a numbered rosbag.

        The rosbag process handle is kept in self.rosbag_proc so loop() can
        stop it later with SIGINT.
        """
        topics_names = " /tf /joint_states "
        self.experiment_count += 1
        name = os.path.join(self.experiment_path , "berry_data_sample_" + str(self.experiment_count) )
        print("Recording bag", name)
        command = "rosrun rosbag record /tf /joint_states -O " + name
        command = shlex.split(command)
        self.rosbag_proc = subprocess.Popen(command)
        print("Recording bag file. Press 'r' to stop the recording: \n")
        return True

    def recover_error(self):
        """Publish a one-shot franka error-recovery goal via rostopic."""
        command = "rostopic pub -1 /franka_control/error_recovery/goal franka_control/ErrorRecoveryActionGoal '{}'"
        command = shlex.split(command)
        subprocess.Popen(command)

    def loop(self):
        """Show the camera stream and dispatch keyboard commands until exit.

        Key map: ESC quit; h -> named pose "poseA"; e -> error recovery;
        p -> position mode; t -> torque mode; r -> toggle rosbag recording;
        s -> torque mode and start recording; f -> stop recording, back to
        position mode and "poseA".
        """
        while not rospy.is_shutdown():
            if self.color_img_received:
                cv2.imshow('image', self.color_img)
                k = cv2.waitKey(1)
                if k == 27:
                    cv2.destroyAllWindows()
                    break
                elif k==-1: # normally -1 returned,so don't print it
                    continue
                else:
                    # print(k)
                    if k == ord('h'):
                        print("H: Go to home position.")
                        self.go_to_pose(pose = "poseA")
                    if k == ord('e'):
                        self.recover_error()
                    if k == ord('p'):
                        self.go_to_position_mode()
                    if k == ord('t'):
                        print("Trying to enter torque mode")
                        is_torque_mode = self.go_to_zero_mode()
                    if k == ord('r'):
                        # save image here.
                        # Toggle: start a new bag, or SIGINT the running one
                        # so rosbag closes the file cleanly.
                        if self.rosbag_proc is None:
                            self.record_bag_file()
                        else:
                            self.rosbag_proc.send_signal(subprocess.signal.SIGINT)
                            self.rosbag_proc = None
                            print("Finished recording")
                    if k == ord('s'):
                        is_torque_mode = self.go_to_zero_mode()
                        if self.rosbag_proc is None:
                            self.record_bag_file()
                    if k == ord('f'):
                        if self.rosbag_proc is not None:
                            self.rosbag_proc.send_signal(subprocess.signal.SIGINT)
                            self.rosbag_proc = None
                            print("Finished recording")
                        self.go_to_position_mode()
                        time.sleep(1)
                        self.go_to_pose(pose = "poseA")
if __name__ == '__main__':
    # The constructor blocks inside loop() until ESC or ROS shutdown.
    berryData = BerryDataCollection(experiment_path = "/home/arshad/Desktop/data/7")
"alphanum_fraction": 0.5892393828,
"author": null,
"avg_line_length": 37.7474226804,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "904b9a2f1b1560594f803a78e7e69f241be27225",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-11-17T13:24:23.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-17T13:24:23.000Z",
"max_forks_repo_head_hexsha": "b6211125436849d5c7def8ad96a384cc34f2f121",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "arsh09/franka_ros_lcas",
"max_forks_repo_path": "franka_lcas_experiments/script/berry_data_collection.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b6211125436849d5c7def8ad96a384cc34f2f121",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "arsh09/franka_ros_lcas",
"max_issues_repo_path": "franka_lcas_experiments/script/berry_data_collection.py",
"max_line_length": 146,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "b6211125436849d5c7def8ad96a384cc34f2f121",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "arsh09/franka_ros_lcas",
"max_stars_repo_path": "franka_lcas_experiments/script/berry_data_collection.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-15T09:50:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-09T00:50:43.000Z",
"num_tokens": 1434,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7323
} |
function r=rot4y(theta)
% r = rot4y(theta)
%
% Build the 4x4 homogeneous transformation matrix representing a
% rotation of theta radians about the y axis.
%
% Argument definitions:
%
%   theta = rotation angle in radians
ct = cos(theta);
st = sin(theta);
r = [ ct, 0, st, 0;
       0, 1,  0, 0;
     -st, 0, ct, 0;
       0, 0,  0, 1 ];
"alphanum_fraction": null,
"author": "CelsoReyes",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/CelsoReyes-zmap7/zmap7-3895fcb3ca3073608abe22ca71960eb082fd0d9a/zmap_deprecated/eztool/rot4y.m",
"reason": null,
"repo": "zmap7",
"save_path": "github-repos/MATLAB/CelsoReyes-zmap7",
"sha": "3895fcb3ca3073608abe22ca71960eb082fd0d9a",
"size": null
} |
module LakeTributaryModule
  ! Container type describing one lake tributary, plus helper procedures
  ! for storing and retrieving tributaries in a generic ListType.
  use ConstantsModule, only: DZERO, LENPACKAGENAME
  use ListModule, only: ListType

  private
  public :: LakeTributaryType, ConstructLakeTributary, &
            CastAsLakeTributaryType, AddLakeTributaryToList, &
            GetTributaryFromList

  type LakeTributaryType
    integer :: iTribNum = 0    ! tributary number (from name; verify use at callers)
    integer :: LakeOut = 0
    integer :: iSegnum = 0     ! segment number (from name; verify use at callers)
    integer :: IgridProv = 0
    character(len=LENPACKAGENAME) :: provPkgName = ''  ! providing package name
  end type LakeTributaryType

contains

  ! Non-type-bound procedures

  subroutine ConstructLakeTributary(newLakeTrib)
    ! Allocate a new, default-initialized LakeTributaryType.
    ! dummy
    type(LakeTributaryType), pointer :: newLakeTrib
    !
    allocate(newLakeTrib)
    !
    return
  end subroutine ConstructLakeTributary

  function CastAsLakeTributaryType(obj) result (res)
    ! Downcast an unlimited-polymorphic list item; returns null() when the
    ! item is not a LakeTributaryType.
    ! dummy
    class(*), pointer, intent(inout) :: obj
    type(LakeTributaryType), pointer :: res
    !
    res => null()
    select type (obj)
    type is (LakeTributaryType)
      res => obj
    end select
    !
    return
  end function CastAsLakeTributaryType

  subroutine AddLakeTributaryToList(list, lakeTrib)
    ! Append a tributary to the generic list (stored as class(*)).
    implicit none
    ! dummy
    type(ListType), pointer :: list
    type(LakeTributaryType), pointer :: lakeTrib
    ! local
    ! NOTE(review): pointer initialization (=> null()) gives this local the
    ! implicit SAVE attribute; harmless here since obj is always assigned
    ! before use, but worth confirming against project conventions.
    class(*), pointer :: obj => null()
    !
    obj => lakeTrib
    call list%Add(obj)
    !
    return
  end subroutine AddLakeTributaryToList

  function GetTributaryFromList(list, idx) result (res)
    ! Fetch item idx from the list and downcast it to LakeTributaryType.
    implicit none
    ! dummy
    type(ListType), pointer :: list
    integer, intent(in) :: idx
    type(LakeTributaryType), pointer :: res
    ! local
    class(*), pointer :: obj => null()
    !
    obj => list%GetItem(idx)
    res => CastAsLakeTributaryType(obj)
    !
    return
  end function GetTributaryFromList

end module LakeTributaryModule
| {
"alphanum_fraction": 0.6400634249,
"author": null,
"avg_line_length": 24.8947368421,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "c1a51dce361a6014ec79880ab01f027552d48b80",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 87,
"max_forks_repo_forks_event_max_datetime": "2022-03-30T05:31:40.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-12-13T21:40:39.000Z",
"max_forks_repo_head_hexsha": "83ac72ee3b6f580aaffef6352cf15c1697d3ce66",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "scharlton2/modflow6",
"max_forks_repo_path": "utils/mf5to6/src/LakeTributary.f90",
"max_issues_count": 331,
"max_issues_repo_head_hexsha": "83ac72ee3b6f580aaffef6352cf15c1697d3ce66",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T05:57:00.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-01-10T21:22:48.000Z",
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "scharlton2/modflow6",
"max_issues_repo_path": "utils/mf5to6/src/LakeTributary.f90",
"max_line_length": 62,
"max_stars_count": 102,
"max_stars_repo_head_hexsha": "83ac72ee3b6f580aaffef6352cf15c1697d3ce66",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "scharlton2/modflow6",
"max_stars_repo_path": "utils/mf5to6/src/LakeTributary.f90",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T01:47:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-12-19T09:56:38.000Z",
"num_tokens": 538,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1892
} |
[STATEMENT]
lemma nsqn_update_other [simp]:
fixes dsn dsk flag hops dip nhip pre rt ip
assumes "dip \<noteq> ip"
shows "nsqn (update rt ip (dsn, dsk, flag, hops, nhip)) dip = nsqn rt dip"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nsqn (update rt ip (dsn, dsk, flag, hops, nhip)) dip = nsqn rt dip
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
dip \<noteq> ip
goal (1 subgoal):
1. nsqn (update rt ip (dsn, dsk, flag, hops, nhip)) dip = nsqn rt dip
[PROOF STEP]
unfolding nsqn_def
[PROOF STATE]
proof (prove)
using this:
dip \<noteq> ip
goal (1 subgoal):
1. (case update rt ip (dsn, dsk, flag, hops, nhip) dip of None \<Rightarrow> 0 | Some x \<Rightarrow> nsqn\<^sub>r x) = (case rt dip of None \<Rightarrow> 0 | Some x \<Rightarrow> nsqn\<^sub>r x)
[PROOF STEP]
by (clarsimp split: option.split) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "AODV_variants_e_all_abcd_E_Fresher",
"hexsha": null,
"include": null,
"lang": null,
"length": 3,
"llama_tokens": 362,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
from torch.utils.data import DataLoader, Dataset
from torchvision.datasets import MNIST
import warnings
from typing import Dict, IO, Union
import os
import numpy as np
import torch
import codecs
import gzip
import lzma
from torchvision.datasets.utils import download_and_extract_archive
import cv2
from examples.mnist.gendata import get_projection_grid, rand_rotation_matrix, rotate_grid, rotate_map_given_R
from utils import show_spheres
# Map from the IDX/SN3 magic-number type code to a triple of
# (torch dtype, big-endian numpy dtype used to parse the raw buffer,
#  target numpy dtype the parsed values are converted to).
SN3_PASCALVINCENT_TYPEMAP = {
    8: (torch.uint8, np.uint8, np.uint8),
    9: (torch.int8, np.int8, np.int8),
    11: (torch.int16, np.dtype('>i2'), 'i2'),
    12: (torch.int32, np.dtype('>i4'), 'i4'),
    13: (torch.float32, np.dtype('>f4'), 'f4'),
    14: (torch.float64, np.dtype('>f8'), 'f8')
}
def get_int(b: bytes) -> int:
    """Interpret *b* as a big-endian unsigned integer.

    Equivalent to the previous ``int(codecs.encode(b, 'hex'), 16)`` without
    the detour through a hex string (and additionally returning 0 for empty
    input instead of raising).
    """
    return int.from_bytes(b, byteorder='big')
def open_maybe_compressed_file(path: Union[str, IO]) -> Union[IO, gzip.GzipFile]:
    """Return a file object that possibly decompresses 'path' on the fly.

    Decompression occurs when argument `path` is a string and ends with
    '.gz' or '.xz'.  Anything that is not a string (e.g. an already-open
    file object) is returned unchanged.
    """
    # torch._six was removed in PyTorch 2.0; on Python 3 the old
    # string_classes check is exactly isinstance(path, str).
    if not isinstance(path, str):
        return path
    if path.endswith('.gz'):
        return gzip.open(path, 'rb')
    if path.endswith('.xz'):
        return lzma.open(path, 'rb')
    return open(path, 'rb')
def read_sn3_pascalvincent_tensor(path: Union[str, IO], strict: bool = True) -> torch.Tensor:
    """Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').

    Argument may be a filename, compressed filename, or file object.
    """
    # Pull the whole (possibly decompressed) payload into memory.
    with open_maybe_compressed_file(path) as f:
        raw = f.read()
    # The first 4-byte word encodes dimensionality (low byte) and the
    # element type code (remaining bytes).
    magic = get_int(raw[0:4])
    ndim = magic % 256
    type_code = magic // 256
    assert 1 <= ndim <= 3
    assert 8 <= type_code <= 14
    _torch_dtype, parse_dtype, out_dtype = SN3_PASCALVINCENT_TYPEMAP[type_code]
    # One big-endian 4-byte size per dimension follows the magic word.
    shape = [get_int(raw[4 * (i + 1): 4 * (i + 2)]) for i in range(ndim)]
    flat = np.frombuffer(raw, dtype=parse_dtype, offset=(4 * (ndim + 1)))
    assert flat.shape[0] == np.prod(shape) or not strict
    return torch.from_numpy(flat.astype(out_dtype, copy=False)).view(*shape)
def read_label_file(path: str) -> torch.Tensor:
    """Load an IDX label file and return the labels as a 1-D int64 tensor."""
    with open(path, 'rb') as fh:
        raw = read_sn3_pascalvincent_tensor(fh, strict=False)
    assert(raw.dtype == torch.uint8)
    assert(raw.ndimension() == 1)
    return raw.long()
def read_image_file(path: str) -> torch.Tensor:
    """Load an IDX image file and return the raw uint8 image stack (3-D)."""
    with open(path, 'rb') as fh:
        raw = read_sn3_pascalvincent_tensor(fh, strict=False)
    assert(raw.dtype == torch.uint8)
    assert(raw.ndimension() == 3)
    return raw
class MNIST360Dataset(Dataset):
# set for mnist
resources = [
("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"),
("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"),
("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c")
]
training_file = 'training.pt'
test_file = 'test.pt'
classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',
'5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']
    @property
    def train_labels(self):
        """Deprecated alias for ``targets`` (torchvision-MNIST compatibility)."""
        warnings.warn("train_labels has been renamed targets")
        return self.targets

    @property
    def test_labels(self):
        """Deprecated alias for ``targets`` (torchvision-MNIST compatibility)."""
        warnings.warn("test_labels has been renamed targets")
        return self.targets

    @property
    def train_data(self):
        """Deprecated alias for ``data`` (torchvision-MNIST compatibility)."""
        warnings.warn("train_data has been renamed data")
        return self.data

    @property
    def test_data(self):
        """Deprecated alias for ``data`` (torchvision-MNIST compatibility)."""
        warnings.warn("test_data has been renamed data")
        return self.data
def __init__(
self,
root: str,
split: str,
download: bool = False,
rotate: bool = True,
vis: bool = False,
bandwidth: int = 30
) -> None:
super().__init__()
self.root = root
self.split = split # training set or test set
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.split == 'train':
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.bandwidth = bandwidth
self.rotate = rotate
self.vis = vis
super().__init__()
def __getitem__(self, idx):
img = self.data[idx] # tensor
img_np = img.numpy()
# img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB) # RGB
img_np = cv2.resize(img_np, (self.bandwidth * 2, self.bandwidth * 2))
img_np = img_np[:, :, np.newaxis] # [self.bandwidth * 2, self.bandwidth * 2, 1]
grid = get_projection_grid(b=self.bandwidth)
if self.rotate:
rot = rand_rotation_matrix()
rotated_grid = rotate_grid(rot, grid)
map_x, map_y = rotate_map_given_R(rot, self.bandwidth * 2, self.bandwidth * 2)
img_np = cv2.remap(img_np, map_x, map_y, cv2.INTER_CUBIC, borderMode=cv2.BORDER_TRANSPARENT)
img_np = img_np[:, :, np.newaxis]
else:
rotated_grid = grid
if self.vis:
img_np_vis = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) # RGB
cv2.imshow('rotated_img', img_np_vis)
cv2.waitKey(0)
img_np_ = np.transpose(img_np_vis, (2, 0, 1))
show_spheres(scale=2, points=rotated_grid, rgb=img_np_)
# R = calculate_Rmatrix_from_phi_theta(0, 0)
img_np = np.transpose(img_np, (2, 0, 1)) # [3, 224, 224]
img_torch = torch.FloatTensor(img_np) # [3, 224, 224]
label = int(self.targets[idx])
return img_torch, label
def __len__(self):
return len(self.data)
@property
def raw_folder(self) -> str:
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self) -> str:
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self) -> Dict[str, int]:
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self) -> bool:
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self) -> None:
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
if __name__ == '__main__':
    # Quick smoke test: build the non-rotated test split with visualisation
    # enabled, print its size, then iterate it once printing the labels.
    data_root = "MNIST_data"
    dataset = MNIST360Dataset(data_root, 'test', download=True, rotate=False, vis=True, bandwidth=30)
    print(len(dataset))
    loader = DataLoader(
        dataset=dataset,
        batch_size=1,
        shuffle=True,
        pin_memory=True,
        num_workers=0,
    )
    for img, label in loader:
        print(label)
| {
"alphanum_fraction": 0.6032526033,
"author": null,
"avg_line_length": 34.743902439,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "40823b37bb3516639126ed553e80b43d522f06bd",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "09652af9811357c4bf6f7a6d3e912a06d7826f70",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "csm-kr/s2cnn",
"max_forks_repo_path": "examples/mnist/mnist360_dataset.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "09652af9811357c4bf6f7a6d3e912a06d7826f70",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "csm-kr/s2cnn",
"max_issues_repo_path": "examples/mnist/mnist360_dataset.py",
"max_line_length": 109,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "09652af9811357c4bf6f7a6d3e912a06d7826f70",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "csm-kr/s2cnn",
"max_stars_repo_path": "examples/mnist/mnist360_dataset.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2207,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8547
} |
# Author: Hiroharu Sugawara <hsugawa@gmail.com>
# Copyright: (C) 2020 Hiroharu Sugawara
# Author: Eric P. Hanson
# Copyright: (C) 2018? Eric P. Hanson
# Author: Martin Vuk <martin.vuk@fri.uni-lj.si>
# Copyright: (C) 2016 Martin Vuk
# License: BSD 3-clause
"""
PandocFiltersLiveJuliaCode
Package to aid writing Julia scripts that process the pandoc
AST serialized as JSON.
"""
# Top-level package module: pulls in the generic pandoc-filter helpers and
# the live-Julia-code filter. Include order may matter if the second file
# uses definitions from the first — TODO confirm.
module PandocFiltersLiveJuliaCode
# JSON is used to (de)serialize the pandoc AST.
using JSON
include("./PandocFilters.jl")
include("./PandocLiveJuliaCodeFilter.jl")
end # module
| {
"alphanum_fraction": 0.7403846154,
"author": null,
"avg_line_length": 18.5714285714,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "63c83c28d5635e18b8caba1aeb45075438fdeced",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "63f9cd1fc81f5435dabee5450063ffd2c6d7b6bc",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "hsugawa8651/PandocFiltersLiveJuliaCode.jl",
"max_forks_repo_path": "src/PandocFiltersLiveJuliaCode.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "63f9cd1fc81f5435dabee5450063ffd2c6d7b6bc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "hsugawa8651/PandocFiltersLiveJuliaCode.jl",
"max_issues_repo_path": "src/PandocFiltersLiveJuliaCode.jl",
"max_line_length": 60,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "63f9cd1fc81f5435dabee5450063ffd2c6d7b6bc",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "hsugawa8651/PandocFiltersLiveJuliaCode.jl",
"max_stars_repo_path": "src/PandocFiltersLiveJuliaCode.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 163,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 520
} |
# Smoke tests for GNNChain construction and gradient checking.
@testset "basic" begin
    @testset "GNNChain" begin
        # n nodes; din/d/dout = input, hidden and output feature sizes.
        n, din, d, dout = 10, 3, 4, 2
        g = GNNGraph(random_regular_graph(n, 4),
                     graph_type=GRAPH_T,
                     ndata= randn(Float32, din, n))
        gnn = GNNChain(GCNConv(din => d),
                       BatchNorm(d),
                       x -> tanh.(x),
                       GraphConv(d => d, tanh),
                       Dropout(0.5),
                       Dense(d, dout))
        # Switch to eval mode so Dropout/BatchNorm behave deterministically
        # during the gradient test.
        testmode!(gnn)
        test_layer(gnn, g, rtol=1e-5)
        @testset "Parallel" begin
            # Residual connection: y = x + l(x).
            AddResidual(l) = Parallel(+, identity, l)
            gnn = GNNChain(ResGatedGraphConv(din => d, tanh),
                           BatchNorm(d),
                           AddResidual(ResGatedGraphConv(d => d, tanh)),
                           BatchNorm(d),
                           Dense(d, dout))
            testmode!(gnn)
            test_layer(gnn, g, rtol=1e-5)
        end
    end
end
| {
"alphanum_fraction": 0.3992285439,
"author": null,
"avg_line_length": 28.027027027,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "3ba63e51e8eb4401287f2ed28f507585df5d1ba8",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "de5bb6012ec9e8e02e024487bfba0ec812048184",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "oysteinsolheim/GraphNeuralNetworks.jl",
"max_forks_repo_path": "test/layers/basic.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "de5bb6012ec9e8e02e024487bfba0ec812048184",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "oysteinsolheim/GraphNeuralNetworks.jl",
"max_issues_repo_path": "test/layers/basic.jl",
"max_line_length": 72,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "de5bb6012ec9e8e02e024487bfba0ec812048184",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "oysteinsolheim/GraphNeuralNetworks.jl",
"max_stars_repo_path": "test/layers/basic.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 260,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1037
} |
import py, os, sys
from pytest import raises
import numpy as np
sys.path = [os.pardir] + sys.path
class TestOPTIMIZERS:
    """Regression tests for the scikit-quant optimizers (imfil, snobfit)."""

    def setup_class(cls):
        pass

    def reset(self):
        import SQSnobFit
        # reset the random state for each method to get predictable results
        SQSnobFit._gen_utils._randstate = np.random.RandomState(6)

    def setup_method(self, method):
        # Re-seed before every test so each one is reproducible in isolation.
        self.reset()

    def test_issue2(self):
        """Errors with imfil for univariate functions"""
        from skquant.opt import minimize

        def f(a):
            return a**2 - a

        # BUGFIX: np.float (a deprecated alias for the builtin float) was
        # removed in NumPy 1.24; use float directly — identical dtype.
        bounds = np.array([[0, 2]], dtype=float)
        init = np.array([1.])
        res, hist = minimize(f, init, bounds, method='imfil')

    def test_issue3(self):
        """error with snobfit for univariate function"""
        from skquant.opt import minimize

        def f(a):
            return a[0]**2 - a[0]

        bounds = np.array([[0, 2]], dtype=float)
        init = np.array([1.])
        res, hist = minimize(f, init, bounds, method='snobfit')

    def test_issue4(self):
        """error in imfil with multivariate function"""
        from skquant.opt import minimize

        def g(a):
            return a[0]**2 - a[0] + a[1]**3 - 4*a[1]

        bounds = np.array([[0, 2], [-2, 2]], dtype=float)
        init = np.array([1., 0.])
        res, hist = minimize(g, init, bounds, method='imfil')

    def test_issue10(self):
        """SNOBFIT error for initialization with nreq=1"""
        from skquant.opt import minimize

        def f(a):
            return a[0]**2 - a[0]

        bounds = np.array([[0, 2]], dtype=float)
        init = np.array([1.])
        res, hist = minimize(f, init, bounds, method='snobfit', options={'maxmp': 1})
| {
"alphanum_fraction": 0.5712656784,
"author": null,
"avg_line_length": 25.7941176471,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "94a7a56d364ab3e85b4cf6da1bdf733de0b4ba87",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2021-03-10T04:12:34.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-04-21T17:43:47.000Z",
"max_forks_repo_head_hexsha": "307ee504904fe616413944469c286fb8d6666933",
"max_forks_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_forks_repo_name": "sustainable-processes/scikit-quant",
"max_forks_repo_path": "test/test_regression.py",
"max_issues_count": 20,
"max_issues_repo_head_hexsha": "307ee504904fe616413944469c286fb8d6666933",
"max_issues_repo_issues_event_max_datetime": "2021-08-16T16:14:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-13T09:22:53.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_issues_repo_name": "sustainable-processes/scikit-quant",
"max_issues_repo_path": "test/test_regression.py",
"max_line_length": 86,
"max_stars_count": 31,
"max_stars_repo_head_hexsha": "307ee504904fe616413944469c286fb8d6666933",
"max_stars_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_stars_repo_name": "sustainable-processes/scikit-quant",
"max_stars_repo_path": "test/test_regression.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-11T23:14:11.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-05T16:39:18.000Z",
"num_tokens": 465,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1754
} |
#!/usr/bin/env python
"""
Hackable script to find threshold values.
NOTE(danny): Saved because I've needed code like this so many times
"""
import cv2
import numpy as np

# NOTE(danny): camera id goes here (or video file path)
cap = cv2.VideoCapture(1)


def nothing(x):
    # No-op callback: cv2.createTrackbar requires one.
    pass


# Creating a window for later use
cv2.namedWindow('result')

# Starting with 100's to prevent error while masking
h, s, v = 100, 100, 100

# Creating track bar
cv2.createTrackbar('h', 'result', 0, 84, nothing)
cv2.createTrackbar('s', 'result', 0, 255, nothing)
cv2.createTrackbar('v', 'result', 0, 255, nothing)

while True:
    # BUGFIX: check the success flag — a missing camera or an exhausted
    # video file yields frame=None and cvtColor would crash on it.
    ok, frame = cap.read()
    if not ok:
        break
    # converting to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # get info from track bar and appy to result
    h = cv2.getTrackbarPos('h', 'result')
    s = cv2.getTrackbarPos('s', 'result')
    v = cv2.getTrackbarPos('v', 'result')
    # Normal masking algorithm: hue window centered on 84, with the
    # trackbar values capping saturation and value.
    lower_blue = np.array([84 - h, 0, 0])
    upper_blue = np.array([84 + h, s, v])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('result', result)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # ESC quits
        break

cap.release()
cv2.destroyAllWindows()
| {
"alphanum_fraction": 0.6624102155,
"author": null,
"avg_line_length": 21.9824561404,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2bd4af8b4dc835a2f46eed3d19adc00cb2ba1096",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "acd8ce90902eea010419b3ec0021b5572a9f1769",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "olinrobotics/pathfinder",
"max_forks_repo_path": "scripts/hsv_threshold_range_finder.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "acd8ce90902eea010419b3ec0021b5572a9f1769",
"max_issues_repo_issues_event_max_datetime": "2020-07-27T21:36:44.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-27T21:35:09.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "olinrobotics/pathfinder",
"max_issues_repo_path": "scripts/hsv_threshold_range_finder.py",
"max_line_length": 67,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "acd8ce90902eea010419b3ec0021b5572a9f1769",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "olinrobotics/pathfinder",
"max_stars_repo_path": "scripts/hsv_threshold_range_finder.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 373,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1253
} |
from pathlib import Path
import json
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from ..abs_model_controller import ControllerBase
from .make_data_loader import get_loader
from . import trainer
from . import predictor
from . import saver
from ...models.retrain_clf.model import ModelMonitor
from ...evaluation.metrics import MCC
class ModelController(ControllerBase):
    """Template controller wiring a model to its data, training loop and I/O.

    Every method below is a stub documenting the intended contract; concrete
    controllers are expected to override them.
    """

    def __init__(self, train_x: np.ndarray, train_y: np.ndarray, valid_x: np.ndarray = None, valid_y: np.ndarray = None) -> None:
        """Store the training/validation arrays and default bookkeeping state.

        :param train_x: training inputs
        :param train_y: training targets
        :param valid_x: optional validation inputs
        :param valid_y: optional validation targets
        """
        self.train_x = train_x
        self.train_y = train_y
        self.valid_x = valid_x
        self.valid_y = valid_y
        self.model = None          # built lazily by build()
        self.train_loader = None   # data loaders created during compile/train
        self.valid_loader = None
        self.train_loss = []       # per-epoch loss histories
        self.valid_loss = []
        # BUGFIX: fixed the typo "dafault_model_name" in the default name.
        self.model_name = "default_model_name"
        self.model_config = {}
        self.compile_config = {}

    def read_config(self, config_name: str):
        """Read config for model and compiling (stub). :return: None"""
        return None

    def build(self, **kwargs):
        """Build the model with respect to model config (stub). :return: None"""
        return None

    def load_weight(self, model_path: str):
        """Load existing model weights (stub). :return: None"""
        return None

    def compile(self, **kwargs):
        """Load the training configuration (stub). :return: None"""
        return None

    def train(self, epochs=1, verbose=1, period_show=1):
        """Run the training process (stub). :return: None"""
        return None

    def predict(self, x: np.ndarray, batch_size=1):
        """Predict outputs (stub). :return: y_pred (np.ndarray)"""
        pass

    def save(self):
        """Persist the model (stub). :return: None"""
        return None

    def evaluate(self, y_true: np.ndarray, y_pred: np.ndarray):
        """Calculate metrics (stub). :return: the metric's score"""
        pass
| {
"alphanum_fraction": 0.6342616921,
"author": null,
"avg_line_length": 26.4305555556,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "acd8f1967d521f529b5e710d0a77943fb56b050f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b3d7b9d5b8555e438ee4a932971a15f0618d6c0c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dongqifong/model_control",
"max_forks_repo_path": "model_control/models/template/controller.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b3d7b9d5b8555e438ee4a932971a15f0618d6c0c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dongqifong/model_control",
"max_issues_repo_path": "model_control/models/template/controller.py",
"max_line_length": 129,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b3d7b9d5b8555e438ee4a932971a15f0618d6c0c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dongqifong/model_control",
"max_stars_repo_path": "model_control/models/template/controller.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 439,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1903
} |
# Databricks notebook source
# K-means clustering of hourly tweet-sentiment vs. stock features.
import matplotlib.pyplot as plt
from pyspark.sql import functions as F
import matplotlib.mlab as mlab
from matplotlib.ticker import MaxNLocator
from pyspark.ml.feature import VectorAssembler
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
from pyspark.ml.feature import MaxAbsScaler
from pyspark.ml.clustering import KMeans
# COMMAND ----------
# Load raw tweet JSON and every stock CSV from DBFS.
dfTwitter = spark.read.json('/FileStore/tables/TwitterData.json')
dfStock = spark.read.format("csv").option("header", "true").load("/FileStore/tables/*.csv")
# Hourly per-company averages of sentiment and follower counts.
dfAvgSent = dfTwitter.groupby('tweet_hour', 'company').agg(F.mean('sentiment'), F.mean('followers_count'))
# Rename to avoid an ambiguous 'company' column in the join below.
dfAvgSent = dfAvgSent.withColumnRenamed('company', 'comp')
dfAvgStock = dfStock.groupby('stock_hour', 'company').agg(F.mean('close'), F.mean('volume'))
# Join tweet hours to stock hours; the +5 shifts the stock clock
# (presumably a timezone offset between sources — TODO confirm).
dfJoin = dfAvgSent.join(dfAvgStock, (dfAvgSent.comp == dfAvgStock.company) & (dfAvgSent.tweet_hour == dfAvgStock.stock_hour+5))
dfJoin = dfJoin.withColumnRenamed("avg(sentiment)","avg-sentiment")
dfJoin = dfJoin.withColumnRenamed("avg(close)","avg-close")
dfJoin = dfJoin.withColumnRenamed("avg(volume)","avg-volume")
dfJoin = dfJoin.withColumnRenamed("avg(followers_count)","avg-followers")
dfJoin.show()
# COMMAND ----------
from pyspark.ml.feature import VectorAssembler
# Assemble the three clustering features into a single vector column.
dfJoin1 = dfJoin.select("avg-sentiment","avg-followers","avg-volume")
inputFeatures = ["avg-sentiment","avg-followers","avg-volume"]
assembler = VectorAssembler(inputCols=inputFeatures, outputCol="features")
dfJoin2 = assembler.transform(dfJoin1)
# COMMAND ----------
# Scaling features
# MaxAbsScaler rescales each feature to [-1, 1] without shifting/centering.
scaler = MaxAbsScaler(inputCol="features", outputCol="scaledFeatures")
scalerModel = scaler.fit(dfJoin2)
scaledData = scalerModel.transform(dfJoin2)
scaledData.select("features", "scaledFeatures").show()
# COMMAND ----------
#Elbow method
import numpy as np
# cost[0] and cost[1] stay zero; only k = 2..9 is evaluated and plotted.
cost = np.zeros(10)
for k in range(2,10):
    kmeans = KMeans().setK(k).setFeaturesCol("scaledFeatures").setPredictionCol("prediction").setMaxIter(1).setSeed(1)
    model = kmeans.fit(scaledData)
    # NOTE(review): computeCost is deprecated in newer Spark versions
    # (replaced by summary.trainingCost) — confirm the cluster's version.
    cost[k] = model.computeCost(scaledData)
# COMMAND ----------
#Plot of elbow method
fig, ax = plt.subplots(1,1, figsize =(8,6))
ax.plot(range(2,10),cost[2:10])
ax.set(title='Elbow method to predict number of clusters')
ax.set_xlabel('k')
ax.set_ylabel('cost')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
display(fig)
# COMMAND ----------
#Kmeans algorithm
# Final model with k=8 (chosen from the elbow plot above).
kmeans = KMeans().setK(8).setFeaturesCol("scaledFeatures").setPredictionCol("prediction").setMaxIter(10).setSeed(1)
model = kmeans.fit(scaledData)
output_df = model.transform(scaledData)
display(output_df.take(5))
# Cluster sizes.
groupedByRegion = output_df.groupby(output_df['prediction']).count()
display(groupedByRegion)
# COMMAND ----------
# Pull predictions to the driver for a 3-D matplotlib scatter plot.
df_pred = output_df.select('avg-sentiment','avg-followers','avg-volume','prediction')
df_pandas = df_pred.toPandas()
# COMMAND ----------
image = plt.figure(figsize=(12,10)).gca(projection='3d')
image.scatter(df_pandas["avg-sentiment"], df_pandas["avg-followers"], df_pandas["avg-volume"], c=df_pandas.prediction)
image.set_xlabel('avg-sentiment')
image.set_ylabel('avg-followers')
image.set_zlabel('avg-volume')
plt.show()
display()
| {
"alphanum_fraction": 0.7463813719,
"author": null,
"avg_line_length": 35.3111111111,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ce73de5dfddd9eb9df75294cda82581aed7d9dba",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2021-01-05T03:16:13.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-09-28T17:55:53.000Z",
"max_forks_repo_head_hexsha": "d950f79bff43adbfe69f2f727e6cd2f644e21a58",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "VinamraPalaiya/Tweet-Stock-Analysis",
"max_forks_repo_path": "SparkML/clustering.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "d950f79bff43adbfe69f2f727e6cd2f644e21a58",
"max_issues_repo_issues_event_max_datetime": "2021-02-02T21:51:42.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-03-24T16:24:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "VinamraPalaiya/Tweet-Stock-Analysis",
"max_issues_repo_path": "SparkML/clustering.py",
"max_line_length": 127,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "d950f79bff43adbfe69f2f727e6cd2f644e21a58",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "VinamraPalaiya/Tweet-Stock-Analysis",
"max_stars_repo_path": "SparkML/clustering.py",
"max_stars_repo_stars_event_max_datetime": "2020-12-11T06:06:04.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-10-16T19:04:38.000Z",
"num_tokens": 820,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3178
} |
[STATEMENT]
lemma (in trace_top) LNil_safety: "safety A {LNil}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. safety A {LNil}
[PROOF STEP]
proof (unfold safety_def, clarify)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>t. \<lbrakk>t \<in> A\<^sup>\<infinity>; \<forall>r\<in>finpref A t. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> {LNil}; t \<notin> {}\<rbrakk> \<Longrightarrow> t = LNil
[PROOF STEP]
fix t
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>t. \<lbrakk>t \<in> A\<^sup>\<infinity>; \<forall>r\<in>finpref A t. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> {LNil}; t \<notin> {}\<rbrakk> \<Longrightarrow> t = LNil
[PROOF STEP]
assume adh: "t \<in> A\<^sup>\<infinity>" "\<forall>r\<in>finpref A t. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> {LNil}"
[PROOF STATE]
proof (state)
this:
t \<in> A\<^sup>\<infinity>
\<forall>r\<in>finpref A t. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> {LNil}
goal (1 subgoal):
1. \<And>t. \<lbrakk>t \<in> A\<^sup>\<infinity>; \<forall>r\<in>finpref A t. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> {LNil}; t \<notin> {}\<rbrakk> \<Longrightarrow> t = LNil
[PROOF STEP]
thus "t = LNil"
[PROOF STATE]
proof (prove)
using this:
t \<in> A\<^sup>\<infinity>
\<forall>r\<in>finpref A t. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> {LNil}
goal (1 subgoal):
1. t = LNil
[PROOF STEP]
by (cases t)(auto simp: finpref_def)
[PROOF STATE]
proof (state)
this:
t = LNil
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Topology_LList_Topology",
"hexsha": null,
"include": null,
"lang": null,
"length": 6,
"llama_tokens": 684,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def make_data(initial_data):
    """Simulate 1440 one-minute OHLC candles by a random walk.

    Starting from ``initial_data['test'][0]``, each minute consists of 60
    multiplicative steps drawn uniformly from [0.9995, 1.0005]; the minute's
    open/high/low/close are recorded.

    :param initial_data: DataFrame with the start price at ['test'][0]
    :return: list of 1440 ``[open, high, low, close]`` lists
    """
    prev_data = initial_data['test'][0]
    # BUGFIX: removed the unused `datas = pd.DataFrame(initial_data)` local.
    data = []
    for i in range(1440):
        arr = []
        for j in range(60):
            # One tick: multiply the last price by a factor near 1.
            temp = prev_data * np.random.uniform(0.9995, 1.0005, 1)[0]
            arr.append(temp)
            prev_data = temp
        data.append([arr[0], max(arr), min(arr), arr[-1]])
    return data
# Simulate one day of minute candles starting at 3421.6 and plot them.
data = [3421.6]
initial_data = pd.DataFrame(data,columns=['test'])
t = make_data(initial_data)
df = pd.DataFrame(t,columns=['open','high','low','close'])
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(8, 5))
fig.set_facecolor('w')
# Two rows: the top (3x taller) for price, the bottom reserved for volume.
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axes = []
axes.append(plt.subplot(gs[0]))
from mpl_finance import candlestick_ohlc
x = np.arange(len(df.index))
# candlestick_ohlc expects rows of (x, open, high, low, close).
ohlc = df[['open', 'high', 'low', 'close']].astype(int).values
dohlc = np.hstack((np.reshape(x, (-1, 1)), ohlc))
# Candlestick chart
candlestick_ohlc(axes[0], dohlc, width=0.5, colorup='r', colordown='b')
# Volume chart (placeholder — the bottom subplot is never populated)
plt.tight_layout()
plt.show()
| {
"alphanum_fraction": 0.6461937716,
"author": null,
"avg_line_length": 24.5957446809,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9fbc1f30b0c43c0373532f09a52df31ea953daea",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2769104b5ff153b6b6db240757f6422fbb95da58",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "whdgusdl48/CoinPrediction-Contest",
"max_forks_repo_path": "skeleton_code.py",
"max_issues_count": 9,
"max_issues_repo_head_hexsha": "2769104b5ff153b6b6db240757f6422fbb95da58",
"max_issues_repo_issues_event_max_datetime": "2021-08-19T07:49:49.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-07-29T06:07:11.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "whdgusdl48/CoinPrediction-Contest",
"max_issues_repo_path": "skeleton_code.py",
"max_line_length": 71,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2769104b5ff153b6b6db240757f6422fbb95da58",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "whdgusdl48/CoinPrediction-Contest",
"max_stars_repo_path": "skeleton_code.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 349,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1156
} |
import numpy as np
def base_fun(key):
    """Return one of five reference 1-D base functions selected by *key*.

    :param key: integer in 0..4
    :raises AttributeError: if *key* matches no function
    """
    dispatch = {
        0: lambda x: x,
        1: lambda x: np.sin(x * 20),
        2: lambda x: np.exp((x - 0.5) * 50) / (np.exp((x - 0.5) * 50) + 1),
        3: lambda x: (np.arctan(x * 10) + np.sin(x * 10)) / 2,
        4: lambda x: 2.0 / (x + 1),
    }
    try:
        return dispatch[key]
    except KeyError:
        raise AttributeError("Key not associated to any function")
def base_fun_inter(x, y):
    """Pairwise interaction term: a Gaussian-style bump peaking at (0.5, 0.5)."""
    scale = 1 / (np.sqrt(2 * np.pi))
    sq_dist = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return 2 * np.exp(-scale * (sq_dist / 2))
def fun_without_interaction(x: np.array, rnd_gen: np.random.Generator):
    r"""Evaluate the interaction-free synthetic target at *x*.

    Sums the five base functions :func:`base_fun` applied component-wise to
    ``x[0]..x[4]`` and adds Gaussian noise :math:`\mathcal{N}(0, 0.1^2)`
    drawn from ``rnd_gen``.

    :param x: A numpy array of 5 elements.
    :type x: np.array
    :param rnd_gen: the numpy random number generator to be used
    :type rnd_gen: numpy.random.Generator
    :return: The value representing :math:`f(\bm{x})`.
    :rtype: float
    """
    noise = rnd_gen.normal(0, 0.1, size=1)[0]
    signal = sum(base_fun(k)(x[k]) for k in range(5))
    return signal + noise
def fun_interaction(x: np.array, rnd_gen: np.random.Generator, real_interactions: [tuple]):
    r"""Evaluate the synthetic target with pairwise interactions at *x*.

    Adds, on top of :func:`fun_without_interaction`, one
    :func:`base_fun_inter` bump per feature pair listed in
    ``real_interactions``.

    :param x: A numpy array of 6 elements.
    :type x: np.array
    :param rnd_gen: the numpy random number generator to be used
    :type rnd_gen: numpy.random.Generator
    :param real_interactions: list of (i, j) index pairs; e.g.
                              [(1, 2), (2, 3)] adds interactions between
                              features 1-2 and 2-3.
    :type real_interactions: [tuple]
    :return: The value representing :math:`f(\bm{x})`.
    :rtype: float
    """
    pair_total = sum(base_fun_inter(x[i], x[j]) for i, j in real_interactions)
    return fun_without_interaction(x, rnd_gen) + pair_total
| {
"alphanum_fraction": 0.5510805501,
"author": null,
"avg_line_length": 39.1538461538,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b62255d7a8fc030165b2a84c21bd0439f9492514",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "07912d01040ca0169977ddd49839050c81ec2349",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "veneres/gef",
"max_forks_repo_path": "examples/synthetic/synthetic_fun.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "07912d01040ca0169977ddd49839050c81ec2349",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "veneres/gef",
"max_issues_repo_path": "examples/synthetic/synthetic_fun.py",
"max_line_length": 115,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "07912d01040ca0169977ddd49839050c81ec2349",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "veneres/gef",
"max_stars_repo_path": "examples/synthetic/synthetic_fun.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-16T13:07:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-16T08:02:15.000Z",
"num_tokens": 977,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3054
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import json
import re
from bs4 import BeautifulSoup
from nltk.stem.porter import PorterStemmer
import numpy as np
from tqdm import trange
from . import HEATMAP_CSS_PATH
from .textutil import normalize_text
class ReconstructedHTML(object):
    """Rebuild parsed HTML with tf-idf scored terms wrapped in heat-map spans.

    Loads per-term tf-idf scores from a JSON results file, buckets the
    scores into quintiles, wraps each matched term in a <span> whose CSS
    class encodes its bucket, and injects the heat-map stylesheet into the
    document head.

    NOTE(review): uses ``xrange``/``unicode``, so this module is Python 2 only.
    """

    def __init__(self, results_path, parsed_html):
        # results_path: JSON file of the form {"tfidf_scores": {term: score}}.
        # parsed_html: iterable of HTML fragments joined into one document.
        tfidf_scores = {}
        with open(results_path) as tfidf_scores_file:
            tfidf_scores = json.load(tfidf_scores_file)
        self.tfidf_terms = tfidf_scores["tfidf_scores"]
        # Quintile boundaries (20th..80th percentiles) over the unique
        # scores; __span_class uses them to pick one of five CSS classes.
        scores = np.unique(self.tfidf_terms.values())
        self.percentiles = [np.percentile(scores, 20 * i) for i in xrange(1, 5)]
        # NOTE(review): this replace looks like a no-op; the pattern may
        # have been mangled (e.g. "&nbsp;" -> " ") — confirm against upstream.
        parsed_html = " ".join(parsed_html).replace(" ", " ")
        self.parsed_soup = BeautifulSoup(parsed_html, "html.parser")
        # Inject the heat-map stylesheet into <head> so the span classes
        # added later are colored when the document is rendered.
        extra_css = self.parsed_soup.new_tag("style", type="text/css")
        with open(HEATMAP_CSS_PATH) as css_file:
            extra_css.string = unicode(css_file.read())
        self.parsed_soup.head.append(extra_css)
        self.stemmer = PorterStemmer()
        # {score} is the CSS class name, {text} the original term text.
        self.new_span_format = "<span class=\"{score}\">{text}</span>"

    def __span_class(self, score):
        # Map a tf-idf score to one of five CSS classes via the quintiles.
        if score <= self.percentiles[0]:
            return "first"
        elif score <= self.percentiles[1]:
            return "second"
        elif score <= self.percentiles[2]:
            return "third"
        elif score <= self.percentiles[3]:
            return "fourth"
        else:
            return "fifth"

    def __find_index(self, word, normalized_sent, sentence, score):
        """Wrap the first occurrence of ``word`` in ``sentence`` in a span.

        ``normalized_sent`` is the normalized form of ``sentence`` in which
        the term was found.  Returns the re-tagged sentence, or None when
        the term cannot be located even after stemming.
        """
        # Fast path: equal lengths mean normalization did not shift
        # offsets, so indices from the normalized text apply directly.
        if len(sentence) == len(normalized_sent):
            start = normalized_sent.find(word)
            end = start + len(word)
            return sentence.replace(
                sentence[start:end],
                self.new_span_format.format(
                    score=self.__span_class(score),
                    text=sentence[start:end]
                )
            )
        # Otherwise search the raw sentence with a regex that tolerates
        # punctuation/suffix characters after the term.
        re_str = word.replace(" ", ".*") + "([.-]|[^\s]+|[^\s]*?)"
        regex = re.compile(re_str, re.I)
        matches = regex.search(sentence)
        try:
            return sentence.replace(
                matches.group(),
                self.new_span_format.format(
                    score=self.__span_class(score),
                    text=matches.group()
                )
            )
        except AttributeError:
            # No match: retry with the stemmed form of each word.
            try:
                re_str = " ".join(
                    self.stemmer.stem(i) for i in word.split()
                ).replace(" ", ".*") + "([.-]|[^\s]+|[^\s]*?)"
                regex = re.compile(re_str, re.I)
                matches = regex.search(sentence)
                return sentence.replace(
                    matches.group(),
                    self.new_span_format.format(
                        score=self.__span_class(score),
                        text=matches.group()
                    )
                )
            except AttributeError:
                # Still not found; caller counts this as a failure.
                pass

    @staticmethod
    def __merge_strings(final_str, version):
        # Merge one tagged version into final_str: replace ``version``'s
        # plain span text wherever final_str has not tagged it yet.
        soup = BeautifulSoup(final_str, "html.parser")
        for fixed_span in soup.find_all("span"):
            if not fixed_span.text == version.text:
                return final_str.replace(
                    version.text, unicode(version)
                )
        return final_str

    def __merge_versions(self, grouped_found_terms):
        """Collapse alternative taggings of the same sentence into one."""
        merged_spans = []
        for found_terms in grouped_found_terms:
            # list of pairs of the version and its span text
            found_terms = (
                (i, BeautifulSoup(i, "html.parser").find("span"))
                for i in found_terms
            )
            # sort on the length of the span text to avoid issues with
            # substrings
            found_terms = sorted(
                found_terms, key=lambda x: len(x[-1].text), reverse=True
            )
            # version with the largest span text
            current_span = found_terms[0][0]
            for i in xrange(1, len(found_terms)):
                current_span = self.__merge_strings(
                    current_span, found_terms[i][-1]
                )
            merged_spans.append(current_span)
        return merged_spans

    def recreate_doc(self):
        """Tag every scored term inside the document's <p> elements."""
        tagged_spans = []
        failure = 0
        p_tags = self.parsed_soup.find_all("p")
        p_tags_len = len(p_tags)
        # trange yields indices into p_tags while drawing a progress bar.
        progress = trange(p_tags_len, desc="Tagging document", leave=True)
        for p_tag in progress:
            for term in self.tfidf_terms:
                normalize_p_tag = normalize_text(p_tags[p_tag].text)
                if term in normalize_p_tag:
                    tagged_span = self.__find_index(
                        term, normalize_p_tag,
                        p_tags[p_tag].text, self.tfidf_terms[term]
                    )
                    if tagged_span:
                        tagged_spans.append(tagged_span)
                    else:
                        failure += 1
        # Group alternative taggings by their plain text, then merge each
        # group into a single version containing all of its spans.
        grouped_found_terms = {}
        for tagged_span in tagged_spans:
            key = BeautifulSoup(tagged_span, "html.parser").text
            grouped_found_terms.setdefault(key, []).append(tagged_span)
        grouped_found_terms = grouped_found_terms.values()
        final_spans = self.__merge_versions(grouped_found_terms)
        # NOTE(review): ``progress`` is iterated a second time here; this
        # relies on the tqdm iterator being re-iterable — confirm.
        for p_tag in progress:
            for final_span in final_spans:
                final_span = BeautifulSoup(final_span, "html.parser")
                if final_span.text == p_tags[p_tag].text:
                    p_tags[p_tag].string = ""
                    p_tags[p_tag].insert(0, final_span)
                    break
        print("%.2f%% content lost." % (float(failure) / p_tags_len * 100))

    def get_new_html(self):
        # NOTE(review): this replace looks like it should map "&gt;" back
        # to ">" after prettify escaping; the pattern may have been
        # mangled — confirm against the original source.
        return self.parsed_soup.prettify().replace(">", ">").encode("utf-8")
| {
"alphanum_fraction": 0.5482404445,
"author": null,
"avg_line_length": 32.9944444444,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0e2508060ea3af730131d7daa8d1332be2400789",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2dd45e4237709e0e41438d2fe682dc909f75dc03",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sabbirahm3d/Highlite",
"max_forks_repo_path": "highlite/recreate.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2dd45e4237709e0e41438d2fe682dc909f75dc03",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sabbirahm3d/Highlite",
"max_issues_repo_path": "highlite/recreate.py",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2dd45e4237709e0e41438d2fe682dc909f75dc03",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sabbirahm3d/Highlite",
"max_stars_repo_path": "highlite/recreate.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1221,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5939
} |
from itertools import product
import numpy as np
import param
from ...core import CompositeOverlay, Element
from ...core import traversal
from ...core.util import match_spec, max_range, unique_iterator, unique_array, is_nan
from ...element.raster import Image, Raster, RGB
from .element import ColorbarPlot, OverlayPlot
from .plot import MPLPlot, GridPlot, mpl_rc_context
from .util import get_raster_array
class RasterPlot(ColorbarPlot):
    """Base matplotlib plot for Raster-like elements drawn with imshow."""

    aspect = param.Parameter(default='equal', doc="""
        Raster elements respect the aspect ratio of the
        Images by default but may be set to an explicit
        aspect ratio or to 'square'.""")

    colorbar = param.Boolean(default=False, doc="""
        Whether to add a colorbar to the plot.""")

    situate_axes = param.Boolean(default=False, doc="""
        Whether to situate the image relative to other plots. """)

    style_opts = ['alpha', 'cmap', 'interpolation', 'visible',
                  'filterrad', 'clims', 'norm']

    _plot_methods = dict(single='imshow')

    def __init__(self, *args, **kwargs):
        super(RasterPlot, self).__init__(*args, **kwargs)
        # Raster's row/origin convention differs from Image's — presumably
        # why the y-axis inversion is toggled for Raster maps here.
        if self.hmap.type == Raster:
            self.invert_yaxis = not self.invert_yaxis

    def get_extents(self, element, ranges):
        """Return plot extents; the element's own bounds/extents unless
        situate_axes is enabled."""
        extents = super(RasterPlot, self).get_extents(element, ranges)
        if self.situate_axes:
            return extents
        else:
            if isinstance(element, Image):
                return element.bounds.lbrt()
            else:
                return element.extents

    def _compute_ticks(self, element, ranges):
        # No custom ticks for plain rasters; subclasses may override.
        return None, None

    def get_data(self, element, ranges, style):
        """Prepare the 2D array, style options and axis options for imshow."""
        xticks, yticks = self._compute_ticks(element, ranges)
        if isinstance(element, RGB):
            # RGB data carries its own colors; a colormap does not apply.
            style.pop('cmap', None)
        data = get_raster_array(element)
        if type(element) is Raster:
            l, b, r, t = element.extents
            if self.invert_axes:
                data = data[:, ::-1]
            else:
                data = data[::-1]
        else:
            l, b, r, t = element.bounds.lbrt()
            if self.invert_axes:
                data = data[::-1, ::-1]
        if self.invert_axes:
            # Swapping axes transposes the array (keeping RGB channels
            # last) and exchanges the horizontal/vertical bounds.
            data = data.transpose([1, 0, 2]) if isinstance(element, RGB) else data.T
            l, b, r, t = b, l, t, r
        vdim = element.vdims[0]
        self._norm_kwargs(element, ranges, style, vdim)
        style['extent'] = [l, r, b, t]
        return [data], style, {'xticks': xticks, 'yticks': yticks}

    def update_handles(self, key, axis, element, ranges, style):
        """Update the existing imshow artist in place for a new frame."""
        im = self.handles['artist']
        data, style, axis_kwargs = self.get_data(element, ranges, style)
        l, r, b, t = style['extent']
        im.set_data(data[0])
        im.set_extent((l, r, b, t))
        im.set_clim((style['vmin'], style['vmax']))
        if 'norm' in style:
            # A custom normalization overrides plain vmin/vmax scaling.
            im.norm = style['norm']
        return axis_kwargs
class HeatMapPlot(RasterPlot):
    """Draws HeatMap elements as an image with optional value annotations."""

    clipping_colors = param.Dict(default={'NaN': 'white'}, doc="""
        Dictionary to specify colors for clipped values, allows
        setting color for NaN values and for values above and below
        the min and max value. The min, max or NaN color may specify
        an RGB(A) color as a color hex string of the form #FFFFFF or
        #FFFFFFFF or a length 3 or length 4 tuple specifying values in
        the range 0-1 or a named HTML color.""")

    show_values = param.Boolean(default=False, doc="""
        Whether to annotate each pixel with its value.""")

    def _annotate_plot(self, ax, annotations):
        # Draw one centered text artist per heatmap cell, keyed by the
        # plot coordinate so update_handles can remove them later.
        handles = {}
        for plot_coord, text in annotations.items():
            handles[plot_coord] = ax.annotate(text, xy=plot_coord,
                                              xycoords='data',
                                              horizontalalignment='center',
                                              verticalalignment='center')
        return handles

    def _annotate_values(self, element):
        """Compute cell-center coordinates and formatted value text."""
        val_dim = element.vdims[0]
        vals = element.dimension_values(2, flat=False)
        d1uniq, d2uniq = [element.dimension_values(i, False) for i in range(2)]
        if self.invert_axes:
            d1uniq, d2uniq = d2uniq, d1uniq
        else:
            vals = vals.T
        # Mirror the value grid to match any axis inversions.
        if self.invert_xaxis: vals = vals[::-1]
        if self.invert_yaxis: vals = vals[:, ::-1]
        vals = vals.flatten()
        num_x, num_y = len(d1uniq), len(d2uniq)
        # Cell centers lie at half-integer positions on the pixel grid.
        xpos = np.linspace(0.5, num_x-0.5, num_x)
        ypos = np.linspace(0.5, num_y-0.5, num_y)
        plot_coords = product(xpos, ypos)
        annotations = {}
        for plot_coord, v in zip(plot_coords, vals):
            # NaN cells are annotated with a dash instead of a number.
            text = '-' if is_nan(v) else val_dim.pprint_value(v)
            annotations[plot_coord] = text
        return annotations

    def _compute_ticks(self, element, ranges):
        """Place one labelled tick at the center of each row/column."""
        xdim, ydim = element.dimensions()[:2]
        agg = element.gridded
        dim1_keys, dim2_keys = [unique_array(agg.dimension_values(i, False))
                                for i in range(2)]
        if self.invert_axes:
            dim1_keys, dim2_keys = dim2_keys, dim1_keys
        num_x, num_y = len(dim1_keys), len(dim2_keys)
        xpos = np.linspace(.5, num_x-0.5, num_x)
        ypos = np.linspace(.5, num_y-0.5, num_y)
        xlabels = [xdim.pprint_value(k) for k in dim1_keys]
        ylabels = [ydim.pprint_value(k) for k in dim2_keys]
        return list(zip(xpos, xlabels)), list(zip(ypos, ylabels))

    def init_artists(self, ax, plot_args, plot_kwargs):
        # aspect/annotations are consumed here; they are not imshow kwargs.
        ax.set_aspect(plot_kwargs.pop('aspect', 1))
        handles = {}
        annotations = plot_kwargs.pop('annotations', None)
        handles['artist'] = ax.imshow(*plot_args, **plot_kwargs)
        if self.show_values and annotations:
            handles['annotations'] = self._annotate_plot(ax, annotations)
        return handles

    def get_data(self, element, ranges, style):
        xticks, yticks = self._compute_ticks(element, ranges)
        # Flip vertically for imshow and mask non-finite cells.
        data = np.flipud(element.gridded.dimension_values(2, flat=False))
        data = np.ma.array(data, mask=np.logical_not(np.isfinite(data)))
        if self.invert_axes: data = data.T[::-1, ::-1]
        if self.invert_xaxis: data = data[:, ::-1]
        if self.invert_yaxis: data = data[::-1]
        shape = data.shape
        # NOTE(review): under Python 2 this is integer division when both
        # dimensions are ints — confirm a truncated aspect is intended.
        style['aspect'] = shape[0]/shape[1]
        # One image pixel per heatmap cell.
        style['extent'] = (0, shape[1], 0, shape[0])
        style['annotations'] = self._annotate_values(element.gridded)
        vdim = element.vdims[0]
        self._norm_kwargs(element, ranges, style, vdim)
        return [data], style, {'xticks': xticks, 'yticks': yticks}

    def update_handles(self, key, axis, element, ranges, style):
        """Refresh image data, limits and annotations for a new frame."""
        im = self.handles['artist']
        data, style, axis_kwargs = self.get_data(element, ranges, style)
        im.set_data(data[0])
        im.set_extent(style['extent'])
        im.set_clim((style['vmin'], style['vmax']))
        if 'norm' in style:
            im.norm = style['norm']
        if self.show_values:
            # Remove stale annotations before drawing the new ones.
            annotations = self.handles['annotations']
            for annotation in annotations.values():
                try:
                    annotation.remove()
                except:
                    pass
            annotations = self._annotate_plot(axis, style['annotations'])
            self.handles['annotations'] = annotations
        return axis_kwargs
class ImagePlot(RasterPlot):
    """Plots Image elements with imshow, masking non-finite values."""

    def get_data(self, element, ranges, style):
        # Flip vertically so row 0 maps to the top of the rendered image,
        # then mask NaN/inf cells so imshow leaves them to the cmap's bad color.
        flipped = np.flipud(element.dimension_values(2, flat=False))
        masked = np.ma.array(flipped, mask=~np.isfinite(flipped))
        left, bottom, right, top = element.bounds.lbrt()
        if self.invert_axes:
            # Swapping axes transposes the array and exchanges x/y bounds.
            masked = masked[::-1].T
            left, bottom, right, top = bottom, left, top, right
        self._norm_kwargs(element, ranges, style, element.vdims[0])
        style['extent'] = [left, right, bottom, top]
        return (masked,), style, {}

    def get_extents(self, element, ranges):
        """Return the element bounds unless situate_axes is enabled."""
        extents = super(ImagePlot, self).get_extents(element, ranges)
        if self.situate_axes:
            return extents
        return element.bounds.lbrt()
class QuadMeshPlot(ColorbarPlot):
    """Renders QuadMesh elements using matplotlib's pcolormesh."""

    style_opts = ['alpha', 'cmap', 'clims', 'edgecolors', 'norm', 'shading',
                  'linestyles', 'linewidths', 'hatch', 'visible']

    _plot_methods = dict(single='pcolormesh')

    def get_data(self, element, ranges, style):
        # Mask non-finite z-values so pcolormesh skips those cells.
        zvals = element.dimension_values(2, flat=False)
        masked = np.ma.array(zvals, mask=~np.isfinite(zvals))
        # Irregular meshes need fully expanded coordinate arrays;
        # gouraud shading expects centers rather than cell edges.
        expanded = element.interface.irregular(element, element.kdims[0])
        edges = style.get('shading') != 'gouraud'
        axes = [element.interface.coords(element, kd, ordered=True,
                                         expanded=expanded, edges=edges)
                for kd in element.kdims]
        if self.invert_axes:
            axes = axes[::-1]
            masked = masked.T
        if expanded:
            style['locs'] = np.concatenate(axes)
        self._norm_kwargs(element, ranges, style, element.vdims[0])
        return tuple(axes + [masked]), style, {}

    def init_artists(self, ax, plot_args, plot_kwargs):
        # 'locs' is consumed here rather than being passed to matplotlib.
        locs = plot_kwargs.pop('locs', None)
        return {'artist': ax.pcolormesh(*plot_args, **plot_kwargs),
                'locs': locs}
class RasterGridPlot(GridPlot, OverlayPlot):
    """
    RasterGridPlot evenly spaces out plots of individual projections on
    a grid, even when they differ in size. Since this class uses a single
    axis to generate all the individual plots it is much faster than the
    equivalent using subplots.
    """

    # Parameters inherited from OverlayPlot that are not part of the
    # GridPlot interface. Some of these may be enabled in future in
    # conjunction with GridPlot.
    apply_extents = param.Parameter(precedence=-1)
    apply_ranges = param.Parameter(precedence=-1)
    apply_ticks = param.Parameter(precedence=-1)
    batched = param.Parameter(precedence=-1)
    bgcolor = param.Parameter(precedence=-1)
    invert_axes = param.Parameter(precedence=-1)
    invert_xaxis = param.Parameter(precedence=-1)
    invert_yaxis = param.Parameter(precedence=-1)
    invert_zaxis = param.Parameter(precedence=-1)
    labelled = param.Parameter(precedence=-1)
    legend_cols = param.Parameter(precedence=-1)
    legend_position = param.Parameter(precedence=-1)
    legend_limit = param.Parameter(precedence=-1)
    logx = param.Parameter(precedence=-1)
    logy = param.Parameter(precedence=-1)
    logz = param.Parameter(precedence=-1)
    show_grid = param.Parameter(precedence=-1)
    style_grouping = param.Parameter(precedence=-1)
    xticks = param.Parameter(precedence=-1)
    yticks = param.Parameter(precedence=-1)
    zticks = param.Parameter(precedence=-1)
    zaxis = param.Parameter(precedence=-1)
    zrotation = param.Parameter(precedence=-1)

    def __init__(self, layout, keys=None, dimensions=None, create_axes=False, ranges=None,
                 layout_num=1, **params):
        # Top-level invocation (no keys supplied) derives the dimensions
        # and keys by traversing the layout.
        top_level = keys is None
        if top_level:
            dimensions, keys = traversal.unique_dimkeys(layout)
        MPLPlot.__init__(self, dimensions=dimensions, keys=keys, **params)
        if top_level:
            self.comm = self.init_comm()
        self.layout = layout
        self.cyclic_index = 0
        self.zorder = 0
        self.layout_num = layout_num
        self.overlaid = False
        self.hmap = layout
        # Grid coordinates along each layout dimension (y is None for 1D).
        if layout.ndims > 1:
            xkeys, ykeys = zip(*layout.data.keys())
        else:
            xkeys = layout.keys()
            ykeys = [None]
        self._xkeys = sorted(set(xkeys))
        self._ykeys = sorted(set(ykeys))
        self._xticks, self._yticks = [], []
        self.rows, self.cols = layout.shape
        self.fig_inches = self._get_size()
        _, _, self.layout = self._create_subplots(layout, None, ranges, create_axes=False)
        self.border_extents = self._compute_borders()
        width, height, _, _, _, _ = self.border_extents
        if self.aspect == 'equal':
            self.aspect = float(width/height)
        # Note that streams are not supported on RasterGridPlot
        # until that is implemented this stub is needed
        self.streams = []

    def _finalize_artist(self, key):
        # Nothing to finalize per-frame on the shared axis.
        pass

    def get_extents(self, view, ranges):
        # The shared axis spans the full padded grid.
        width, height, _, _, _, _ = self.border_extents
        return (0, 0, width, height)

    def _get_frame(self, key):
        # Delegate frame lookup to the GridPlot implementation.
        return GridPlot._get_frame(self, key)

    @mpl_rc_context
    def initialize_plot(self, ranges=None):
        """Draw every grid cell as an imshow artist on the shared axis."""
        _, _, b_w, b_h, widths, heights = self.border_extents
        key = self.keys[-1]
        ranges = self.compute_ranges(self.layout, key, ranges)
        self.handles['projs'] = {}
        # Walk the grid column by column, accumulating axis offsets.
        x, y = b_w, b_h
        for xidx, xkey in enumerate(self._xkeys):
            w = widths[xidx]
            for yidx, ykey in enumerate(self._ykeys):
                h = heights[yidx]
                if self.layout.ndims > 1:
                    vmap = self.layout.get((xkey, ykey), None)
                else:
                    vmap = self.layout.get(xkey, None)
                pane = vmap.select(**{d.name: val for d, val in zip(self.dimensions, key)
                                      if d in vmap.kdims})
                # For overlays, draw the last overlaid element.
                pane = vmap.last.values()[-1] if issubclass(vmap.type, CompositeOverlay) else vmap.last
                data = get_raster_array(pane) if pane else None
                ranges = self.compute_ranges(vmap, key, ranges)
                opts = self.lookup_options(pane, 'style')[self.cyclic_index]
                plot = self.handles['axis'].imshow(data, extent=(x,x+w, y, y+h), **opts)
                cdim = pane.vdims[0].name
                valrange = match_spec(pane, ranges).get(cdim, pane.range(cdim))
                plot.set_clim(valrange)
                if data is None:
                    # Empty cells keep an invisible placeholder artist.
                    plot.set_visible(False)
                self.handles['projs'][(xkey, ykey)] = plot
                y += h + b_h
                if xidx == 0:
                    # Record tick positions at cell centers (first column only).
                    self._yticks.append(y-b_h-h/2.)
            y = b_h
            x += w + b_w
            self._xticks.append(x-b_w-w/2.)
        kwargs = self._get_axis_kwargs()
        return self._finalize_axis(key, ranges=ranges, **kwargs)

    @mpl_rc_context
    def update_frame(self, key, ranges=None):
        """Swap the image data of each cell's artist for the given key."""
        grid = self._get_frame(key)
        ranges = self.compute_ranges(self.layout, key, ranges)
        for xkey in self._xkeys:
            for ykey in self._ykeys:
                plot = self.handles['projs'][(xkey, ykey)]
                grid_key = (xkey, ykey) if self.layout.ndims > 1 else (xkey,)
                element = grid.data.get(grid_key, None)
                if element:
                    plot.set_visible(True)
                    img = element.values()[0] if isinstance(element, CompositeOverlay) else element
                    data = get_raster_array(img)
                    plot.set_data(data)
                else:
                    plot.set_visible(False)
        kwargs = self._get_axis_kwargs()
        return self._finalize_axis(key, ranges=ranges, **kwargs)

    def _get_axis_kwargs(self):
        # Label the axes and place ticks labelled with the layout key
        # values at the recorded cell centers.
        xdim = self.layout.kdims[0]
        ydim = self.layout.kdims[1] if self.layout.ndims > 1 else None
        xticks = (self._xticks, [xdim.pprint_value(l) for l in self._xkeys])
        yticks = (self._yticks, [ydim.pprint_value(l) if ydim else ''
                                 for l in self._ykeys])
        return dict(xlabel=xdim.pprint_label, ylabel=ydim.pprint_label if ydim else '',
                    xticks=xticks, yticks=yticks)

    def _compute_borders(self):
        """Compute total grid size, border sizes and per-cell extents."""
        ndims = self.layout.ndims
        width_fn = lambda x: x.range(0)
        height_fn = lambda x: x.range(1)
        width_extents = [max_range(self.layout[x, :].traverse(width_fn, [Element]))
                         for x in unique_iterator(self.layout.dimension_values(0))]
        if ndims > 1:
            height_extents = [max_range(self.layout[:, y].traverse(height_fn, [Element]))
                              for y in unique_iterator(self.layout.dimension_values(1))]
        else:
            height_extents = [max_range(self.layout.traverse(height_fn, [Element]))]
        # NOTE(review): extent[0]-extent[1] is (lower - upper); if max_range
        # returns (min, max) these widths/heights come out negative — confirm.
        widths = [extent[0]-extent[1] for extent in width_extents]
        heights = [extent[0]-extent[1] for extent in height_extents]
        width, height = np.sum(widths), np.sum(heights)
        # Borders share a fixed fraction (self.padding) of the total size.
        border_width = (width*self.padding)/(len(widths)+1)
        border_height = (height*self.padding)/(len(heights)+1)
        width += width*self.padding
        height += height*self.padding
        return width, height, border_width, border_height, widths, heights

    def __len__(self):
        # Number of frames; at least one even when there are no keys.
        return max([len(self.keys), 1])
| {
"alphanum_fraction": 0.5946696608,
"author": null,
"avg_line_length": 38.9011494253,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "368e3e9766eb11b01e2476bf4b8185062d4a8985",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c83b96ed0842d6f7addf0129c01d65232e05f131",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "laleph/holoviews",
"max_forks_repo_path": "holoviews/plotting/mpl/raster.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c83b96ed0842d6f7addf0129c01d65232e05f131",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "laleph/holoviews",
"max_issues_repo_path": "holoviews/plotting/mpl/raster.py",
"max_line_length": 103,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c83b96ed0842d6f7addf0129c01d65232e05f131",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "laleph/holoviews",
"max_stars_repo_path": "holoviews/plotting/mpl/raster.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4117,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 16922
} |
import torch
import torch.nn.functional as F
import numpy as np
from utils.processing import BoundingBox
import cv2
def train(model, train_loader, optimizer, criterion, epoch, device, log_interval=175):
    """ Function to train the model
    Args:
        model (nn.model object): Model to be trained
        train_loader (utils.dataloader object): Dataloader for training data
        optimizer (nn.optim object): Optimizer to be used
        criterion (nn.loss object): loss object to calculate MSE loss
        epoch (int): The current epoch
        device (torch.device object): device to load data on
        log_interval (int): interval at which to print batch metrics [Default: 175]
    Return:
        train_loss (double): Training loss over one epoch
    """
    model.train()
    train_loss = 0.0
    prev_frame = None
    for batchIdx, data in enumerate(train_loader):
        data['image'], data['bbox'] = data['image'].to(
            device), data['bbox'].to(device)
        b, _, h, w = data['image'].shape
        c = 1  # heat maps are single-channel
        # Build the target heat map for every sample, then concatenate
        # once (repeated torch.cat inside the loop is quadratic in b).
        heat_maps = []
        for idx in range(b):
            bounding_box = BoundingBox(device)
            heat_map, _ = bounding_box.pre_process(
                data['bbox'][idx], (c, h, w), (c, int(h/4), int(w/4)))
            heat_maps.append(heat_map[None, ...])
        ground_truth = torch.cat(heat_maps)
        optimizer.zero_grad()
        output, prev_frame = model(data['image'], prev_frame)
        loss = criterion(ground_truth, output)
        # Accumulate the Python float, not the tensor: accumulating the
        # tensor would retain every batch's autograd graph and leak memory.
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
        if batchIdx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch+1, batchIdx * train_loader.batch_size, len(
                train_loader.dataset), 100. * batchIdx / len(train_loader), loss.item()/b))
    # Average over the dataset, as before; already a float so no .item().
    train_loss /= len(train_loader.dataset)
    return train_loss
def __freeze_SweatyNet__(model, requires_grad=False):
    """
    Freeze or unfreeze the weights of the SweatyNet part of the model.

    Args:
        model (nn.model object): Model whose SweatyNet weights are toggled
        requires_grad (boolean): value assigned to the weights. If False, weights are frozen [Default: False]
    """
    # The SweatyNet portion corresponds to the first 11 child modules.
    sweaty_children = (
        child for position, child in enumerate(model.children()) if position < 11
    )
    for child in sweaty_children:
        for weight in child.parameters():
            weight.requires_grad = requires_grad
| {
"alphanum_fraction": 0.6015655577,
"author": null,
"avg_line_length": 41.2096774194,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "dd353e8b6025c8c07a57385a6c084f1ababd8446",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "41758736a4f7fd29981f4954578e520a2f592099",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tridivb/Soccer_Ball_Detector_with_FCNN_and_ConvLSTM",
"max_forks_repo_path": "train.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "41758736a4f7fd29981f4954578e520a2f592099",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tridivb/Soccer_Ball_Detector_with_FCNN_and_ConvLSTM",
"max_issues_repo_path": "train.py",
"max_line_length": 125,
"max_stars_count": 13,
"max_stars_repo_head_hexsha": "41758736a4f7fd29981f4954578e520a2f592099",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tridivb/Soccer_Ball_Detector_with_FCNN_and_ConvLSTM",
"max_stars_repo_path": "train.py",
"max_stars_repo_stars_event_max_datetime": "2020-12-02T14:06:59.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-03-08T15:34:46.000Z",
"num_tokens": 584,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2555
} |
\documentclass[a4paper, 11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[lmargin=3cm]{geometry}
\usepackage{amssymb}
\usepackage{verbatim}
\hyphenation{FORTRAN NEKBONE}
%Include common settings
\input{../BPG/deliverables-config.tex}
\newenvironment{code}%
{
\addtolength{\leftskip}{0.5cm}}%
{
}
\begin{document}
\title{\includegraphics[width=4cm]{./images/Ptf_LogoBlau}\\ \vspace{1cm}
\textsf{\bf \huge Best Practice Guide }\\
\normalsize PTF Version: 1.1\\
}
\author{AutoTune Partners}
\date{14.04.2015}
\maketitle
\newpage
\tableofcontents
\newpage
\section{Introduction}
\label{sec:intro}
\input{../BPG/chapter1_intro.tex}
\clearpage
\section{PTF in the Tuning Cycle}
\label{sec:chapter2}
\input{../BPG/chapter2_tuning_lifecycle.tex}
\clearpage
%------------------------- Subsection for each plugin -------------------------%
\section{Best Practice on how to use PTF: A Walkthrough}
\label{sec:chapter3}
\input{../BPG/chapter3_ptf_guidelines.tex}
\clearpage
\section{Best Practice on how to use the PTF Tuning Plugins}
\label{sec:chapter4}
\input{../BPG/chapter4_general_guidelines_plugins.tex}
\clearpage
\bibliography{References}{}
\bibliographystyle{plainnat}
\end{document}
| {
"alphanum_fraction": 0.7240527183,
"author": null,
"avg_line_length": 20.9310344828,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "f138c84d9fe62700f24479788d65467e0479571c",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-11-20T03:04:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-20T03:04:32.000Z",
"max_forks_repo_head_hexsha": "5514b0545721ef27de0426a7fa0116d2e0bb5eef",
"max_forks_repo_licenses": [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
],
"max_forks_repo_name": "robert-mijakovic/readex-ptf",
"max_forks_repo_path": "docs/BPG/PTF_Best_Practices_Guide.tex",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "5514b0545721ef27de0426a7fa0116d2e0bb5eef",
"max_issues_repo_issues_event_max_datetime": "2020-10-14T08:05:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-21T07:57:32.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
],
"max_issues_repo_name": "robert-mijakovic/readex-ptf",
"max_issues_repo_path": "docs/BPG/PTF_Best_Practices_Guide.tex",
"max_line_length": 80,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "5514b0545721ef27de0426a7fa0116d2e0bb5eef",
"max_stars_repo_licenses": [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
],
"max_stars_repo_name": "robert-mijakovic/readex-ptf",
"max_stars_repo_path": "docs/BPG/PTF_Best_Practices_Guide.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-10T09:59:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-03-10T09:59:37.000Z",
"num_tokens": 387,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1214
} |
# -*- coding: utf-8 -*-
"""
Created on Thu May 26 12:25:24 2016
@author: kbefus
"""
import os,sys
import numpy as np
kbpath = r'C:/Research/Coastalgw/Model_develop/'
sys.path.insert(1,kbpath)
from cgw_model.prep import prep_utils as cprep
#%%
class CRM(object):
    '''Locate and describe a Coastal Relief Model (CRM) raster for a region.
    '''
    def __init__(self, work_dir=None, region=None, crm_version=1):
        self.work_dir = work_dir
        self.region = region
        self.crm_version = crm_version
        # Only build the raster path when every component is supplied;
        # otherwise self.fname is intentionally left unset.
        if self.work_dir is not None and self.region is not None \
                and self.crm_version is not None:
            raster_name = '{}_crm_v{}.tif'.format(self.region.lower(),
                                                  self.crm_version)
            self.fname = os.path.join(self.work_dir, raster_name)

    def load(self):
        # Read the raster extent via arcpy; no-op when fname was never set.
        if hasattr(self, 'fname'):
            ext = cprep.arcpy.Raster(self.fname).extent
            self.extent = [ext.XMin, ext.YMin, ext.XMax, ext.YMax]
class NED(object):
'''
'''
def __init__(self,work_dir=None,out_dir=None,crop_extent=None,
CRM=None):
self.work_dir = work_dir
self.out_dir = out_dir
self.crop_extent = crop_extent
self.CRM = CRM
def load(self,ned_spacing = [1.,1.],extent_pad = 2.,
buffer_size = 0.,mosaic_fname=None,mosaic=True,overwrite_flag=False):
# Create top left corners of NED datasets
Y,X = np.mgrid[np.floor(self.crop_extent[1])-extent_pad:np.ceil(self.crop_extent[3])+extent_pad:ned_spacing[1],
np.floor(self.crop_extent[0])-extent_pad:np.ceil(self.crop_extent[2])+extent_pad:ned_spacing[0]]
self.in_pts = (X<np.ceil(self.crop_extent[2])+buffer_size) & \
(X>=np.floor(self.crop_extent[0])-buffer_size) & \
(Y<=np.ceil(self.crop_extent[3])+buffer_size) & \
(Y>np.ceil(self.crop_extent[1])-buffer_size)
self.filename = os.path.join(self.out_dir,'{}_DEM.tif'.format(self.CRM.region.lower()))
self.fnames = cprep.collect_NED(ned_dir=self.work_dir,XY=[X,Y],internal_pts=self.in_pts)
if mosaic_fname is None:
self.mosaic_fname = os.path.join(self.out_dir,'{}_NED.tif'.format(self.CRM.region.lower()))
if mosaic:
if not os.path.isfile(self.mosaic_fname) or overwrite_flag:
cprep.mosaic_NED(self.fnames,self.mosaic_fname)
else:
print 'Mosaic file already exists: {}'.format(self.mosaic_fname)
class DEM_merge(object):
    '''Join, patch, and re-join NED/CRM DEM rasters via cgw prep utilities.
    '''
    def __init__(self, NED=None, fix_shp=None, other_dem=None, shp_not_use=None,
                 shp_use_other=None):
        self.NED = NED
        self.fix_shp = fix_shp
        self.other_dem = other_dem
        self.shp_not_use = shp_not_use
        self.shp_use_other = shp_use_other

    def join_main(self, out_fname=None, CRM_fname=None,
                  NED_fname=None, max_elev_from_bathy=8.):
        # Default the output path and input rasters from the NED/CRM objects.
        if out_fname is not None:
            self.join_fname = out_fname
        else:
            self.join_fname = os.path.join(
                self.NED.out_dir,
                '{}_joined.tif'.format(self.NED.CRM.region.lower()))
        CRM_fname = self.NED.CRM.fname if CRM_fname is None else CRM_fname
        NED_fname = self.NED.mosaic_fname if NED_fname is None else NED_fname
        cprep.join_NED_CRM(NED_fname, CRM_fname, self.join_fname,
                           max_elev_from_bathy=max_elev_from_bathy)

    def fix(self, dem_fname=None, max_elev_to_fix=None):
        # Patch low-lying cells using the fix shapefile.
        if max_elev_to_fix is None:
            max_elev_to_fix = 0.
        source = self.join_fname if dem_fname is None else dem_fname
        self.fix_fname = cprep.fix_NED_CRM(source, self.fix_shp,
                                           max_elev_to_fix=max_elev_to_fix)

    def join_fix(self, elev_threshold=None):
        # Merge in the auxiliary bathymetry DEM, honoring the use/skip shapes.
        if elev_threshold is None:
            elev_threshold = 0.
        self.fix2_fname = cprep.join_DEM_bathy(self.fix_fname, self.other_dem,
                                               self.shp_not_use, self.shp_use_other,
                                               elev_threshold=elev_threshold)

    def run(self, fix=False, second_join=False, max_elev_from_bathy=8.):
        # Pipeline driver: join, then optionally patch and re-join.
        self.join_main(max_elev_from_bathy=max_elev_from_bathy)
        if fix:
            self.fix()
        if second_join:
            self.join_fix()
| {
"alphanum_fraction": 0.5525672372,
"author": null,
"avg_line_length": 38.0465116279,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a056ae949248470a8369eff36ef9eb2bd8882a8f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7a793351be5c135ea6b39a8f59d95508fcc00c7b",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "kbefus/ca_gw_slr",
"max_forks_repo_path": "cgw_model/prep/dem_prep.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7a793351be5c135ea6b39a8f59d95508fcc00c7b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "kbefus/ca_gw_slr",
"max_issues_repo_path": "cgw_model/prep/dem_prep.py",
"max_line_length": 120,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "7a793351be5c135ea6b39a8f59d95508fcc00c7b",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "kbefus/ca_gw_slr",
"max_stars_repo_path": "cgw_model/prep/dem_prep.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-23T21:20:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-29T22:09:48.000Z",
"num_tokens": 1103,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4908
} |
import pandas as pd
import numpy as np
from .utils.numerical_utils import gaussian_kde
from ._configs import *
import sys
__all__ = ["identify_metastable_states", "approximate_FES"]
def identify_metastable_states(
    colvar,
    selected_cvs,
    kBT,
    bandwidth,
    logweights=None,
    fes_cutoff=None,
    gradient_descent_iterates=0,
    sort_minima_by='cvs_grid',
    optimizer_kwargs=None,
):
    """Label configurations based on free energy.

    Parameters
    ----------
    colvar : pandas.DataFrame
        Sampled collective variables; must contain the columns in ``selected_cvs``.
    selected_cvs : list of str
        Names of the collective variables used for clustering.
    kBT : scalar
        Temperature (energy units); used to adimensionalize ``fes_cutoff``.
    bandwidth : scalar
        Bandwidth for the KDE-based FES estimate.
    logweights : numpy.ndarray, optional
        Logweights used for FES calculation, by default None.
    fes_cutoff : float, optional
        Cutoff (energy units) used to select only low free-energy
        configurations; if None, a cutoff of 2 kBT is used.
    gradient_descent_iterates : int, optional
        Number of gradient-descent steps used during basin assignment.
    sort_minima_by : str, optional
        Sort labels based on 'energy', 'cvs', or 'cvs_grid' values,
        by default 'cvs_grid'.
    optimizer_kwargs : dict, optional
        Arguments for optimizer, by default None (treated as empty dict).
        Possible kwargs are:
        (int) num_init: number of initialization points,
        (int) decimals_tolerance: number of decimals to retain to identify unique minima,
        (str) sampling: sampling scheme. Accepted strings are 'data_driven' or 'uniform'.

    Returns
    -------
    pandas.DataFrame
        Columns 'labels' (basin index per configuration) and 'selection'
        (True where the configuration lies within the FES cutoff).
    """
    # Fix: avoid a shared mutable default argument (dict() in the signature).
    if optimizer_kwargs is None:
        optimizer_kwargs = {}
    # Adimensional fes_cutoff (expressed in units of kBT)
    if fes_cutoff is None:
        fes_cutoff = 2  # kBT
    else:
        fes_cutoff = fes_cutoff / kBT
    # Retrieve logweights
    if logweights is not None:
        assert isinstance(logweights, np.ndarray), 'Logweights must be a numpy array.'
    # Compute KDE of the selected CVs (FES up to a -log factor)
    empirical_centers = colvar[selected_cvs].to_numpy()
    KDE = gaussian_kde(empirical_centers, bandwidth=bandwidth, logweights=logweights)
    if __DEV__:
        print("DEV >>> Finding Local Minima")
    minima = KDE.local_minima(**optimizer_kwargs)
    # Sort minima so that basin labels are deterministic across runs.
    if sort_minima_by == 'energy':
        f_min = np.asarray([-KDE.logpdf(x) for x in minima])
        sortperm = np.argsort(f_min, axis=0)
        minima = minima[sortperm]
    elif sort_minima_by == 'cvs':
        # sort first by 1st cv, then 2nd, ...
        x = minima
        minima = x[np.lexsort(np.round(np.flipud(x.T), 2))]
    elif sort_minima_by == 'cvs_grid':
        bounds = [(x.min(), x.max()) for x in KDE.dataset.T]
        # sort based on a binning of the cvs (10 bins per each direction),
        # along 1st cv, then 2nd, ...
        x = minima
        y = (x - [bound[0] for bound in bounds])
        y /= np.asarray([bound[1] - bound[0] for bound in bounds]) / 10
        minima = x[np.lexsort(np.round(np.flipud(y.T), 0))]
    else:
        raise KeyError(f'Key {sort_minima_by} not allowed. Valid values: "energy","cvs","cvs_grid".')
    # Assign basins and select based on FES cutoff
    basins = _basin_selection(KDE, minima, fes_cutoff, gradient_descent_iterates)
    n_basins = len(basins['labels'].unique())
    print(f"Found {n_basins} local minima with selected populations:")
    for idx in range(n_basins):
        l = len(basins.loc[(basins['labels'] == idx) & (basins['selection'] == True)])
        print(f"\tBasin {idx} -> {l} configurations.")
    return basins
def _basin_selection(
    KDE, minima, fes_cutoff, gradient_descent_iterates
):
    # Assign each KDE sample to its nearest free-energy minimum and flag
    # the samples whose FES lies within `fes_cutoff` (kBT units) of that
    # minimum's FES. Returns a DataFrame with 'labels' and 'selection'.
    if __DEV__:
        print("DEV >>> Basin Assignment")
    pts = np.copy(KDE.dataset)
    v = np.zeros_like(pts)
    beta = 0.9  # Default momentum coefficient
    # Per-dimension step size derived from the inverse KDE bandwidth.
    learning_rate = np.diag(np.diag(KDE.inv_bwidth)**-1)*0.5
    # Momentum gradient descent on the FES (= ascent on log-pdf) to move
    # points toward their basins before the nearest-minimum assignment.
    for _ in range(gradient_descent_iterates):
        v *= beta
        v += -(1 - beta)*KDE.grad(pts, logpdf=True)
        pts -= np.dot(v,learning_rate)
    # Hard assignment: Euclidean nearest minimum in CV space.
    norms = np.linalg.norm((pts[:,np.newaxis,:] - minima), axis=2)
    classes = np.argmin(norms, axis=1)
    fes_at_minima = - KDE.logpdf(minima)
    if len(minima) == 1:
        # Single basin: the scalar reference broadcasts over all samples.
        ref_fes = fes_at_minima
    else:
        ref_fes = np.asarray([fes_at_minima[idx] for idx in classes])
    # Note: FES is evaluated at the ORIGINAL samples, not the descended points.
    fes_pts = - KDE.logpdf(KDE.dataset)
    mask = (fes_pts - ref_fes) < fes_cutoff
    df = pd.DataFrame(data=classes, columns=["labels"])
    df["selection"] = mask
    return df
def approximate_FES(
    colvar, bandwidth, selected_cvs=None, kBT=2.5, logweights=None
):
    """Approximate the Free Energy Surface (FES) in the space of ``selected_cvs``
    through Gaussian kernel density estimation.

    Parameters
    ----------
    colvar : pandas.DataFrame
        Sampled collective variables with dimensions [num_timesteps, num_CVs].
    bandwidth : scalar, vector or matrix
        KDE bandwidth.
    selected_cvs : list of str, optional
        Columns of ``colvar`` to use; if None, all columns are used.
    kBT : scalar, optional
        Temperature (energy units), by default 2.5.
    logweights : numpy.ndarray, optional
        Logarithm of the weights. Defaults to None (uniform weights).

    Returns
    -------
    function
        Function approximating the FES: ``x -> -kBT * log p(x)``.
    """
    if __DEV__:
        print("DEV >>> Approximating FES")
    if logweights is not None:
        assert ( isinstance(logweights,np.ndarray) ), 'Logweights must be a numpy array.'
    empirical_centers = colvar[selected_cvs] if selected_cvs is not None else colvar
    empirical_centers = empirical_centers.to_numpy()
    KDE = gaussian_kde(empirical_centers, bandwidth,logweights=logweights)
    return lambda x: -kBT*KDE.logpdf(x)
| {
"alphanum_fraction": 0.6232215001,
"author": null,
"avg_line_length": 39.5347222222,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "042c765104456478a2303f27d418baa078819bf3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ad49ffdeaef236aeebcec781eb0de22bc840c113",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "luigibonati/md-stateinterpreter",
"max_forks_repo_path": "stateinterpreter/metastable.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "ad49ffdeaef236aeebcec781eb0de22bc840c113",
"max_issues_repo_issues_event_max_datetime": "2022-01-27T11:36:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-10-01T11:17:52.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "luigibonati/md-stateinterpreter",
"max_issues_repo_path": "stateinterpreter/metastable.py",
"max_line_length": 131,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "ad49ffdeaef236aeebcec781eb0de22bc840c113",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "luigibonati/md-stateinterpreter",
"max_stars_repo_path": "stateinterpreter/metastable.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-14T10:06:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-14T10:06:45.000Z",
"num_tokens": 1424,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5693
} |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import contextlib
from detectron2.data import DatasetCatalog, MetadataCatalog
from fvcore.common.timer import Timer
from fvcore.common.file_io import PathManager
import io
import logging
from detectron2.data.datasets.cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from detectron2.data.datasets.cityscapes_panoptic import register_all_cityscapes_panoptic
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from detectron2.structures import BoxMode
import numpy as np
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union
from pathlib import Path
import glob
import cv2
logger = logging.getLogger(__name__)

IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
JSON_ANNOTATIONS_DIR = ""
# (dataset key, image directory relative to the dataset root) pairs for the
# unlabeled "destroy" splits registered below.
_SPLITS_COCO_FORMAT = [
    ("destroy_201005069_unlable", "/test_tif/201005069"),
    ("destroy_0013_unlable", "/test_tif/0013")
]
def register_destroy_unlabel(root):
    """Register every predefined unlabeled "destroy" split found under ``root``."""
    for split_name, rel_dir in _SPLITS_COCO_FORMAT:
        register_destroy_unlabel_instances(split_name, {}, root + rel_dir)
def register_destroy_unlabel_instances(name, metadata, image_root):
    """
    Register an unlabeled image-folder dataset in Detectron2's catalogs.

    This is an example of how to register a new dataset.
    You can do something similar to this function, to register new datasets.

    Args:
        name (str): the name that identifies a dataset, e.g. "coco_2014_train".
        metadata (dict): extra metadata associated with this dataset. You can
            leave it as an empty dict.
        image_root (str or path-like): directory which contains all the images.
    """
    assert isinstance(name, str), name
    assert isinstance(image_root, (str, os.PathLike)), image_root
    # 1. register a function which returns dicts
    DatasetCatalog.register(
        name, lambda: load_destroy_unlabel_file(image_root)
    )
    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(
        image_root=image_root, evaluator_type="destroy_voc", **metadata
    )
def load_destroy_unlabel_file(image_root):
    """Build Detectron2-style dataset dicts for a folder/glob of unlabeled images.

    Args:
        image_root (str or path-like): a directory, a single image file, or a
            glob pattern (containing '*').

    Returns:
        list[dict]: one record per image with "file_name", "height", "width"
        and "image_id" (file stem). No annotations are attached.

    Raises:
        Exception: if ``image_root`` does not exist.
        IOError: if an image file cannot be decoded.
    """
    p = str(Path(image_root).absolute())
    if '*' in p:
        files = sorted(glob.glob(p, recursive=True))  # glob
    elif os.path.isdir(p):
        files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
    elif os.path.isfile(p):
        files = [p]  # files
    else:
        raise Exception(f'ERROR: {p} does not exist')
    images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
    dataset_dicts = []
    for path in images:
        img_id = os.path.splitext(os.path.split(path)[1])[0]
        img = cv2.imread(path)
        if img is None:
            # cv2.imread silently returns None on unreadable/corrupt files;
            # fail loudly here instead of crashing below with an opaque
            # AttributeError on .shape.
            raise IOError(f'ERROR: failed to read image {path}')
        height, width = img.shape[:2]
        record = {
            "file_name": path,
            "height": height,
            "width": width,
            "image_id": img_id,
        }
        dataset_dicts.append(record)
    return dataset_dicts
# ==== Predefined splits for raw cityscapes images ===========
# Maps a name template (with a {task} placeholder filled in by
# register_all_cityscapes) to (foggy image dir, ground-truth dir).
_RAW_CITYSCAPES_SPLITS = {
    "cityscapes_foggy_fine_{task}_train": ("cityscapes/leftImg8bit_foggy/train/", "cityscapes/gtFine/train/"),
    "cityscapes_foggy_fine_{task}_val": ("cityscapes/leftImg8bit_foggy/val/", "cityscapes/gtFine/val/"),
    "cityscapes_foggy_fine_{task}_test": ("cityscapes/leftImg8bit_foggy/test/", "cityscapes/gtFine/test/"),
}
def register_all_cityscapes(root):
    """Register foggy-Cityscapes instance-seg and sem-seg splits under ``root``."""
    for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
        meta = _get_builtin_metadata("cityscapes")
        image_dir = os.path.join(root, image_dir)
        gt_dir = os.path.join(root, gt_dir)
        inst_key = key.format(task="instance_seg")
        # The x=image_dir / y=gt_dir defaults bind the CURRENT loop values;
        # a plain closure would late-bind and register every split with the
        # directories of the last iteration.
        DatasetCatalog.register(
            inst_key,
            lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
                x, y, from_json=True, to_polygons=True
            ),
        )
        MetadataCatalog.get(inst_key).set(
            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
        )
        sem_key = key.format(task="sem_seg")
        DatasetCatalog.register(
            sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
        )
        MetadataCatalog.get(sem_key).set(
            image_dir=image_dir,
            gt_dir=gt_dir,
            evaluator_type="cityscapes_sem_seg",
            ignore_label=255,
            **meta,
        )
# ==== Predefined splits for PASCAL VOC ===========\
# Single foreground class for the "destroy" detection task.
CLASS_NAMES = ["1"]
def register_all_destroy_voc(root):
    """Register the "destroy" PASCAL-VOC splits and tag their evaluator type."""
    splits = (
        ("voc_destroy_trainval", "taining_data_2021-08-19", "trainval"),
        ("voc_destroy_train", "taining_data_2021-08-19", "train"),
        ("voc_destroy_val", "taining_data_2021-08-19", "val"),
        ("voc_destroy_test", "taining_data_2021-08-19", "test"),
    )
    for ds_name, sub_dir, ds_split in splits:
        register_pascal_voc(ds_name, os.path.join(root, sub_dir), ds_split)
        MetadataCatalog.get(ds_name).evaluator_type = "destroy_voc"
def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
    """
    Load Pascal VOC detection annotations to Detectron2 format.

    Args:
        dirname: Contain "Annotations", "ImageSets", "JPEGImages"
        split (str): one of "train", "test", "val", "trainval"
        class_names: list or tuple of class names

    Returns:
        list[dict]: one record per image with "file_name", "image_id",
        "height", "width" and "annotations" (XYXY_ABS boxes).
    """
    with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        # Fix: np.str was removed in NumPy 1.20+; the builtin str is the
        # documented replacement. ndmin=1 keeps a 1-D array even when the
        # split file contains a single line (loadtxt would return 0-d).
        fileids = np.loadtxt(f, dtype=str, ndmin=1)
    # Needs to read many small annotation files. Makes sense at local
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, fileid + ".xml")
        # Note: the images of this dataset are GeoTIFFs, not JPEGs.
        jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".tif")
        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)
        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []
        for obj in tree.findall("object"):
            cls = obj.find("name").text
            # We include "difficult" samples in training.
            # Based on limited experiments, they don't hurt accuracy.
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
            # Original annotations are integers in the range [1, W or H]
            # Assuming they mean 1-based pixel indices (inclusive),
            # a box with annotation (xmin=1, xmax=W) covers the whole image.
            # In coordinate space this is represented by (xmin=0, xmax=W)
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append(
                {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts
def register_pascal_voc(name, dirname, split, class_names=CLASS_NAMES):
    """Register one PASCAL-VOC split plus its metadata in the catalogs."""
    # VOC year is inferred from the dataset name (defaults to 2012).
    year = 2012
    if "2007" in name:
        year = 2007
    DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, split=split, year=year,
    )
# Hard-coded dataset root for this machine; the env var also acts as the
# default for any other module that reads DETECTRON2_DATASETS.
os.environ["DETECTRON2_DATASETS"] = "/home/msi/Documents/Datasets"
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
# Register all builtin datasets at import time (side effect of importing
# this module).
register_all_cityscapes(_root)
register_all_destroy_voc(_root)
register_destroy_unlabel(_root)
| {
"alphanum_fraction": 0.6581186193,
"author": null,
"avg_line_length": 38.6291079812,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f76a36ec0b1830bcfe6367075758e4b822f7f1ba",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "df8085c61e334abb04bab5e8192de8cb4ce2b2af",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "WdBlink/Teacher-Student-Faster-Rcnn",
"max_forks_repo_path": "ubteacher/data/datasets/builtin.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "df8085c61e334abb04bab5e8192de8cb4ce2b2af",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "WdBlink/Teacher-Student-Faster-Rcnn",
"max_issues_repo_path": "ubteacher/data/datasets/builtin.py",
"max_line_length": 110,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "df8085c61e334abb04bab5e8192de8cb4ce2b2af",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "WdBlink/Teacher-Student-Faster-Rcnn",
"max_stars_repo_path": "ubteacher/data/datasets/builtin.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2067,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8228
} |
#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include "entity.hpp"
namespace cuauv {
namespace fishbowl {
// Construct a simulated entity.
//   m:       mass, must be > 0
//   r:       radius, must be > 0
//   I:       inertia tensor; its diagonal must be fully non-zero
//   btom_rq: body-to-model rotation quaternion
// Throws std::invalid_argument when m, r or I is invalid.
entity::entity(double m, double r, const inertia_tensor& I, const Eigen::Quaterniond& btom_rq)
    : m(m)
    , r(r)
    , I(I)
    , btom_rq(btom_rq)
    , btom_rm(btom_rq.matrix())             // body -> model rotation matrix
    , mtob_rm(btom_rq.conjugate().matrix()) // model -> body rotation matrix
{
    if (m <= 0) throw std::invalid_argument("expected m > 0.");
    if (r <= 0) throw std::invalid_argument("expected r > 0.");
    Eigen::Vector3d diag = I.diagonal();
    if (diag[0] == 0 || diag[1] == 0 || diag[2] == 0) throw std::invalid_argument("expected I fully non-zero");
    // Precompute the inverse inertia tensor Ir by inverting the diagonal
    // element-wise. NOTE(review): this assumes inertia_tensor is a diagonal
    // type whose .diagonal() returns a mutable lvalue reference — confirm
    // against the inertia_tensor typedef in the header.
    Eigen::Vector3d& diagr = Ir.diagonal();
    for (int i = 0; i < 3; i++)
        diagr[i] = 1.0 / diag[i];
}
// Trivial accessors: all values except q are fixed at construction.
double entity::get_m() const { return m; }
double entity::get_r() const { return r; }
inertia_tensor entity::get_I() const { return I; }
inertia_tensor entity::get_Ir() const { return Ir; } // inverse inertia tensor
Eigen::Matrix3d entity::get_btom_rm() const { return btom_rm; }
Eigen::Matrix3d entity::get_mtob_rm() const { return mtob_rm; }
// Orientation in the model frame: q composed with the inverse body-to-model
// rotation, normalized to guard against floating-point drift.
Eigen::Quaterniond entity::get_model_q() const { return Eigen::Quaterniond(q * btom_rq.conjugate()).normalized(); }
} // namespace fishbowl
} // namespace cuauv
| {
"alphanum_fraction": 0.652173913,
"author": null,
"avg_line_length": 30.2926829268,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "325a54c91f138080e7ea19f8e2c29faebd6c9914",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 34,
"max_forks_repo_forks_event_max_datetime": "2021-11-18T14:15:12.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-12-15T17:29:23.000Z",
"max_forks_repo_head_hexsha": "5ad4d52d603f81a7f254f365d9b0fe636d03a260",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "cuauv/software",
"max_forks_repo_path": "fishbowl/entity.cpp",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "5ad4d52d603f81a7f254f365d9b0fe636d03a260",
"max_issues_repo_issues_event_max_datetime": "2016-08-03T06:19:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-08-03T05:13:19.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "cuauv/software",
"max_issues_repo_path": "fishbowl/entity.cpp",
"max_line_length": 115,
"max_stars_count": 70,
"max_stars_repo_head_hexsha": "5ad4d52d603f81a7f254f365d9b0fe636d03a260",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "cuauv/software",
"max_stars_repo_path": "fishbowl/entity.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-03-05T09:04:02.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-11-16T18:04:01.000Z",
"num_tokens": 378,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1242
} |
"""Base classes for creating GUI objects to create manually selected points.
The definition of X,Y axis is the following:
xmin,ymin o---------o xmax,ymin
| |
| |
| |
| |
xmin,ymax o---------o xmax,ymax
"""
from __future__ import absolute_import
from __future__ import division
from collections import namedtuple
import logging
import sys
import matplotlib as mpl
import numpy as np
mpl.use('Qt5Agg')
from PyQt5 import QtCore, QtGui, QtWidgets
logger = logging.getLogger(__name__)
# Immutable 3-D voxel coordinate used for canvas navigation.
Position = namedtuple('Position', ('x', 'y', 'z'))
class AnatomicalParams(object):
    """The base parameter object for GUI configuration"""

    def __init__(self,
                 cmap='gray',
                 interp='nearest',
                 perc_min=5.,
                 perc_max=95.,
                 vmode='percentile',
                 alpha=1.0):
        """
        Parameters
        ----------
        cmap : str
            Matplotlib colormap name used to render the image.
        interp : str
            Matplotlib interpolation mode.
        perc_min : float
            Low percentile threshold for intensity adjustment.
        perc_max : float
            High percentile threshold for intensity adjustment.
        vmode : str
            Intensity adjustment mode: "percentile" (based on vmin/vmax
            percentiles), "mean-std" (mean/std based), or "clahe"
            (CLAHE; not implemented yet).
        alpha : float
            Overlay transparency.
        """
        self.cmap = cmap
        self.interp = interp
        self.perc_min = perc_min
        self.perc_max = perc_max
        self.vmode = vmode
        self.alpha = alpha
        self.start_vertebrae = 50  # overwritten by the `vertebraes` setter
        self.end_vertebrae = -1    # overwritten by the `vertebraes` setter
        self.num_points = 0
        self._title = ''  # figure title (lazily built by dialog_title)
        self.subtitle = ''  # subplot title (will be displayed above the image)
        self._vertebraes = []
        self.input_file_name = ""
        self.starting_slice = 'top'  # used in centerline.py canvas and corresponds to the location of
        # the first axial slice for labeling. Possible values are: 'top': top slice; 'midfovminusinterval': mid-FOV
        # minus the interval.
        self.interval_in_mm = 15  # superior-inferior distance between two consecutive labels in AUTO mode

    @property
    def dialog_title(self):
        # Window title derived from the input file name, computed once.
        if not self._title:
            self._title = '{}: manual labeling'.format(self.input_file_name)
        return self._title

    @property
    def vertebraes(self):
        return self._vertebraes

    @vertebraes.setter
    def vertebraes(self, values):
        # Setting the list also updates the start/end vertebra bounds;
        # an empty or None value is ignored (previous bounds are kept).
        if not values:
            return
        self._vertebraes = values
        self.start_vertebrae = values[0]
        self.end_vertebrae = values[-1]
class BaseDialog(QtWidgets.QWidget):
    """Abstract base class for an anatomical labeling GUI.

    Subclasses must implement ``_init_canvas`` and ``_init_controls``;
    the navigation methods are optional hooks bound to the arrow keys.
    """
    # Widgets created by the _init_* helpers below.
    lb_status = None
    lb_warning = None
    btn_ok = None
    btn_undo = None

    def __init__(self, controller):
        """Initialize the UI parameters

        Parameters
        ----------
        controller : BaseController
            The logical object that controls the state of the UI
        """
        super(BaseDialog, self).__init__()
        self.params = controller.params
        self._controller = controller
        self.image = controller.image
        # Back-reference so the controller can push status/warning updates.
        self._controller._dialog = self
        self._init_ui()

    def _init_ui(self):
        # Build header, canvas, controls and footer top-to-bottom, then bind
        # keyboard shortcuts to the undo/save/navigation actions.
        self.resize(1200, 800)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        layout = QtWidgets.QVBoxLayout(self)
        self._init_header(layout)
        self._init_canvas(layout)
        self._init_controls(layout)
        self._init_footer(layout)
        events = (
            (QtGui.QKeySequence.Undo, self.on_undo),
            (QtGui.QKeySequence.Save, self.on_save_quit),
            (QtGui.QKeySequence.Quit, self.close),
            (QtGui.QKeySequence.MoveToNextChar, self.increment_vertical_nav),
            (QtGui.QKeySequence.MoveToPreviousChar, self.decrement_vertical_nav),
            (QtGui.QKeySequence.MoveToNextLine, self.increment_horizontal_nav),
            (QtGui.QKeySequence.MoveToPreviousLine, self.decrement_horizontal_nav)
        )
        for event, action in events:
            QtWidgets.QShortcut(event, self, action)
        self.setWindowTitle(self.params.dialog_title)

    def increment_vertical_nav(self):
        """Action to increment the anatomical viewing position.

        The common case is when the right arrow key is pressed. Ignore implementing
        this function if no navigation functionality is required
        """
        pass

    def decrement_vertical_nav(self):
        """Action to decrement the anatomical viewing position.

        The common case is when the left arrow key is pressed. Ignore implementing
        this function if no navigation functionality is required
        """
        pass

    def increment_horizontal_nav(self):
        """Action to increment the anatomical viewing position.

        The common case is when the down arrow key is pressed. Ignore implementing
        this function if no navigation functionality is required
        """
        pass

    def decrement_horizontal_nav(self):
        """Action to decrement the anatomical viewing position.

        The common case is when the up arrow key is pressed. Ignore implementing
        this function if no navigation functionality is required
        """
        pass

    def _init_canvas(self, parent):
        """
        Parameters
        ----------
        parent : QtGui.QWidget
            The widget / dialog that will host the canvas layout
        """
        raise NotImplementedError('Include _init_canvas in your class declaration')

    def _init_controls(self, parent):
        """
        Parameters
        ----------
        parent : QtGui.QWidget
            The widget / dialog that will host the control layout
        """
        raise NotImplementedError('Include _init_controls in your class declaration')

    def _init_header(self, parent):
        # Status line (black), warning line (red) and an optional left-aligned
        # message pulled from the params object.
        self.lb_status = QtWidgets.QLabel('Label Status')
        self.lb_status.setStyleSheet("color:black")
        self.lb_status.setAlignment(QtCore.Qt.AlignCenter)
        self.lb_warning = QtWidgets.QLabel()
        self.lb_warning.setStyleSheet('color:red')
        self.lb_warning.setAlignment(QtCore.Qt.AlignCenter)
        message_label = getattr(self.params, 'message_warn', '')
        self.Label = QtWidgets.QLabel(message_label)
        self.Label.setAlignment(QtCore.Qt.AlignLeft)
        parent.addWidget(self.lb_status)
        parent.addWidget(self.lb_warning)
        parent.addWidget(self.Label)
        parent.addStretch()
        message = getattr(self.params, 'init_message', '')
        self.update_status(message)

    def _init_footer(self, parent):
        """
        Parameters
        ----------
        parent : QtGui.QWidget
            The widget / dialog that will host the footer layout

        Returns
        -------
        The footer layout created
        """
        ctrl_layout = QtWidgets.QHBoxLayout()
        # Show the platform-appropriate modifier key in the button labels.
        if sys.platform.lower() == 'darwin':
            cmd_key = 'Cmd'
        else:
            cmd_key = 'Ctrl'
        self.btn_ok = QtWidgets.QPushButton('Save and Quit [%s+S]' % cmd_key)
        self.btn_undo = QtWidgets.QPushButton('Undo [%s+Z]' % cmd_key)
        ctrl_layout.addStretch()
        ctrl_layout.addWidget(self.btn_undo)
        ctrl_layout.addWidget(self.btn_ok)
        self.btn_undo.clicked.connect(self.on_undo)
        self.btn_ok.clicked.connect(self.on_save_quit)
        parent.addLayout(ctrl_layout)
        return ctrl_layout

    def on_save_quit(self):
        # Persist the controller state, then close the dialog.
        self._controller.save()
        self.close()

    def on_undo(self):
        # Undo the last point; surface an invalid undo as a warning message.
        try:
            self._controller.undo()
        except InvalidActionWarning as err:
            self.update_warning(str(err))

    def show(self):
        """Override the base class show to fix a bug found in MAC"""
        super(BaseDialog, self).show()
        self.activateWindow()
        self.raise_()

    def update_status(self, msg):
        """Print the message into the dialog's status widget and clear the warning widget

        Parameters
        ----------
        msg : str
            The message to display in the header of dialog
        """
        self.lb_status.setText(msg)
        self.lb_warning.setText('')

    def update_warning(self, msg):
        """Print the message into the dialog's warning widget and clear the status widget

        Parameters
        ----------
        msg : str
            The message to display in the header of dialog
        """
        self.lb_warning.setText(msg)
        self.lb_status.setText('')
class BaseController(object):
    """Base logic shared by the manual-labeling GUIs.

    Holds the anatomical image, the points selected by the user, and the
    overlay image that labels are written to on save.
    """
    orientation = None     # original image orientation, restored by save()
    _overlay_image = None  # label image; set from init_values in __init__
    _dialog = None         # back-reference set by BaseDialog.__init__
    default_position = ()  # canvas center, computed by reformat_image()
    position = ()          # current canvas position
    saved = False          # True once save() has written the points

    def __init__(self, image, params, init_values=None):
        self.image = image
        self.params = params
        self.points = []
        self._overlay_image = init_values
        self.setup_intensity()

    def setup_intensity(self):
        # Derive the display intensity limits according to params.vmode.
        if self.params.vmode == 'percentile':
            self.params.vmin, self.params.vmax = np.percentile(self.image.data,
                                                               (self.params.perc_min, self.params.perc_max))
        elif self.params.vmode == 'mean-std':
            # TODO: update this
            # NOTE(review): this branch reads params.vmin/vmax, which are only
            # assigned by the 'percentile' branch — confirm they are set
            # elsewhere before 'mean-std' is used.
            self.mean_intensity = (self.params.vmax + self.params.vmin) / 2.0
            self.std_intensity = (self.params.vmax - self.params.vmin) / 2.0
        elif self.params.vmode == 'clahe':
            # TODO: implement
            logger.warning("CLAHE is not implemented yet.")

    def reformat_image(self):
        """Set the camera position and increase contrast.

        The image orientation is set to SAL. And set the default contrast, and
        axes position for all canvases. Need to run before displaying the GUI
        with the image.
        """
        logger.debug('Image orientation {}'.format(self.image.orientation))
        self.orientation = self.image.orientation  # remembered so save() can restore it
        self.image.change_orientation('SAL')
        if self._overlay_image:
            self._overlay_image.change_orientation('SAL')
        x, y, z, t, dx, dy, dz, dt = self.image.dim
        self.params.aspect = dx / dy
        self.params.offset = x * dx
        self.default_position = Position(x // 2, y // 2, z // 2)
        self.setup_intensity()
        self.reset_position()

    def reset_position(self):
        """Set the canvas position to the center of the image"""
        self.position = self.default_position

    def valid_point(self, x, y, z):
        # True when the integer coordinate (x, y, z) lies inside the image.
        dim = self.image.dim
        if -1 < x < dim[0] and -1 < y < dim[1] and -1 < z < dim[2]:
            return True
        return False

    def save(self):
        """Write the selected points into the overlay and restore its orientation."""
        logger.debug('Overlay shape {}'.format(self._overlay_image.data.shape))
        for point in self.points:
            x, y, z, label = [int(i) for i in point]
            self._overlay_image.data[x, y, z] = label
        if self.orientation != self._overlay_image.orientation:
            self._overlay_image.change_orientation(self.orientation)
        self.saved = True

    def undo(self):
        """Remove the last point selected and refresh the UI"""
        if self.points:
            x, y, z, label = self.points[-1]
            self.position = Position(x, y, z)  # jump back to the removed point
            self.points = self.points[:-1]
            self.label = label
            logger.debug('Point removed {}'.format(self.position))
        else:
            raise InvalidActionWarning('There is no points selected to undo')

    def as_string(self):
        """Serialize all non-zero overlay voxels as 'x,y,z,label' joined by ':'."""
        if self._overlay_image is None:
            logger.warning('There is no information to save')
            return ''
        output = []
        data = self._overlay_image.data
        xs, ys, zs = np.where(data)
        for x, y, z in zip(xs, ys, zs):
            output.append('{},{},{},{}'.format(x, y, z, int(data[x, y, z])))
        return ':'.join(output)

    def as_niftii(self, file_name=None):
        """Save the overlay image to disk (optionally under ``file_name``).

        Raises
        ------
        IOError
            If there is no overlay, or the output path equals the input path.
        """
        if not self._overlay_image:
            logger.warning('There is no information to save')
            raise IOError('There is no information to save')
        if file_name:
            self._overlay_image.absolutepath = file_name
        if self._overlay_image.absolutepath == self.image.absolutepath:
            raise IOError('Aborting: the original file and the labeled file are the same', self._overlay_image.absolutepath)
        logger.debug('Data: {}'.format(np.where(self._overlay_image.data)))
        self._overlay_image.save()
class TooManyPointsWarning(StopIteration):
    """Raised when the user tries to select more points than allowed."""
    message = 'Reached the maximum number of points'
class InvalidActionWarning(ValueError):
    """Raised when a UI action is not valid (e.g. undo with no points)."""
class MissingLabelWarning(ValueError):
    """Raised when an operation requires a label that was not provided."""
def launch_dialog(controller, dialog_class):
    """Create a Qt application, run ``dialog_class`` until closed, and
    return the (possibly mutated) controller."""
    qt_app = QtWidgets.QApplication([])
    ui = dialog_class(controller)
    ui.show()
    qt_app.exec_()
    return controller
| {
"alphanum_fraction": 0.6125514717,
"author": null,
"avg_line_length": 31.6,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "58ce4affc73da1e7ea9f5adba86ca3f047924db8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc88d6eb6e96a2c2f1ec88c2e185c6f88e09",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "valosekj/spinalcordtoolbox",
"max_forks_repo_path": "spinalcordtoolbox/gui/base.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "266bfc88d6eb6e96a2c2f1ec88c2e185c6f88e09",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "valosekj/spinalcordtoolbox",
"max_issues_repo_path": "spinalcordtoolbox/gui/base.py",
"max_line_length": 124,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "266bfc88d6eb6e96a2c2f1ec88c2e185c6f88e09",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "valosekj/spinalcordtoolbox",
"max_stars_repo_path": "spinalcordtoolbox/gui/base.py",
"max_stars_repo_stars_event_max_datetime": "2020-05-17T00:39:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-17T00:39:47.000Z",
"num_tokens": 2752,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13114
} |
import argparse
import datetime
import os
from supervised_model.sup_model import Frontend
from utils import config as cfg
import time
import numpy as np
import torch
import wandb
from torch.utils.tensorboard import SummaryWriter
from tianshou.data import Collector, PrioritizedVectorReplayBuffer, VectorReplayBuffer
from tianshou.env import ShmemVectorEnv, DummyVectorEnv, SubprocVectorEnv
from tianshou.policy import DQNPolicy
from tianshou.utils import TensorboardLogger, WandbLogger
from rl import tianshou_rl_model, tianshou_env
from sklearn.model_selection import train_test_split
from tqdm import tqdm, trange
# Default compute device for the whole script: GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def state_to(state, device, args):
    """Convert one environment observation dict into batched tensors on ``device``.

    Every array gets a leading batch axis; ``lens`` stays a numpy array.
    With ``args.freeze_frontend`` the precomputed embedding is forwarded,
    otherwise the raw chunk is forwarded for the trainable frontend.
    """
    def _batched(key):
        # Add a batch dimension and move the tensor to the target device.
        return torch.as_tensor(state[key][np.newaxis, ...]).to(device)

    if args.freeze_frontend:
        return {
            'embedding_space': _batched('embedding_space'),
            'cur_embedding': _batched('cur_embedding'),
            'centroids': _batched('centroids'),
            'lens': state['lens'][np.newaxis, ...],
        }
    return {
        'embedding_space': _batched('embedding_space'),
        'cur_chunk': _batched('cur_chunk'),
        'centroids': _batched('centroids'),
        'lens': state['lens'][np.newaxis, ...],
    }
def get_args():
    """Parse command-line arguments for RL music-segmentation training.

    Returns
    -------
    argparse.Namespace
        Experiment, environment, model and DQN hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    # experiment set up
    parser.add_argument('--name', type=str)
    parser.add_argument('--seed', type=int, default=8)
    parser.add_argument("--resume-path", type=str, default=None)
    parser.add_argument('--pretrained', type=str, default=None)
    parser.add_argument(
        "--logger",
        type=str,
        default=None
    )
    # ----------------RL------------------
    # embedding model
    parser.add_argument('--freeze_frontend', action='store_true')
    # env
    parser.add_argument('--final_punish', type=float, default=-2.)
    parser.add_argument('--knowing_cluster_num', action='store_true')
    # backend
    parser.add_argument('--cluster_encode', action='store_true')
    parser.add_argument('--hidden_size', type=int, default=128) #*
    parser.add_argument('--num_layers', type=int, default=1) #*
    parser.add_argument('--num_heads', type=int, default=1) # *
    parser.add_argument('--seq_max_len', type=int, default=128)
    parser.add_argument('--num_clusters', type=int, default=5) # *
    parser.add_argument('--use_rnn', action='store_true')
    # training
    parser.add_argument("--epoch_num", type=int, default=100)
    parser.add_argument('--train_env_batch_size', type=int, default=4)
    parser.add_argument("--scale-obs", type=int, default=0) # TODO
    # epsilon-greedy exploration schedule
    parser.add_argument("--eps-test", type=float, default=0.)
    parser.add_argument("--eps-train", type=float, default=1.)
    parser.add_argument("--eps-train-final", type=float, default=0.05)
    parser.add_argument('--eps_decay', type=float, default=1/1e6)
    parser.add_argument("--buffer-size", type=int, default=10000)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--lr", type=float, default=0.0000625)
    parser.add_argument("--gamma", type=float, default=0.99)
    # priority buffer
    parser.add_argument("--no-priority", action="store_true", default=False)
    parser.add_argument("--alpha", type=float, default=0.5)
    parser.add_argument("--beta", type=float, default=0.4)
    parser.add_argument("--beta-final", type=float, default=1.)
    parser.add_argument("--beta-anneal-step", type=int, default=1000000)
    parser.add_argument("--no-weight-norm", action="store_true", default=False)
    parser.add_argument("--n-step", type=int, default=3)
    # dqn
    parser.add_argument("--target-update-freq", type=int, default=500)
    parser.add_argument("--update-per-step", type=float, default=0.1)
    return parser.parse_args()
def rm_invalid_mel_fp(files):
    """Filter a list of file names down to valid mel-spectrogram files.

    :param files: iterable of file names
    :return: list of names that are not hidden (no leading '.') and end with 'npy'
    """
    # NOTE(review): endswith('npy') also matches names like 'xnpy' with no
    # dot before the extension — kept as-is to preserve behaviour.
    return [f for f in files if not f.startswith('.') and f.endswith('npy')]
def omss_train_val_test_split(val_pct, test_pct, test_idxs, args):
    """Split SALAMI mel-spectrogram paths into train / val / test sets.

    :param val_pct: fraction of the train+val pool held out for validation
    :param test_pct: fraction held out for test when no fixed indices are given
    :param test_idxs: optional fixed indices of the test files
    :param args: parsed CLI args (only `seed` is used)
    :return: (train, val, test) numpy arrays of absolute file paths
    """
    mel_dir = os.path.join(cfg.SALAMI_DIR, 'internet_melspecs')
    names = rm_invalid_mel_fp(os.listdir(mel_dir))
    fps = np.array([os.path.join(mel_dir, name) for name in names])
    if test_idxs:
        # Honour an externally supplied, fixed test split.
        test_dataset = fps[test_idxs]
        remaining = np.setdiff1d(np.arange(len(names)), test_idxs)
        train_val_dataset = fps[remaining]
    else:
        train_val_dataset, test_dataset = train_test_split(
            fps, test_size=test_pct, random_state=args.seed)
    train_dataset, val_dataset = train_test_split(
        train_val_dataset, test_size=val_pct, random_state=args.seed)
    return train_dataset, val_dataset, test_dataset
def omss_train_val_split(val_pct, val_files, args):
    """Split the configured dataset's mel-spectrogram files into train/val paths.

    :param val_pct: validation fraction used when `val_files` is None
    :param val_files: optional explicit list of validation file names
    :param args: parsed CLI args (only `seed` is used)
    :return: (train_paths, val_paths) numpy arrays of absolute file paths
    :raises ValueError: if cfg.dataset is not 'salami' or 'harmonix'
    """
    if cfg.dataset == 'salami':
        mel_dir = os.path.join(cfg.SALAMI_DIR, 'internet_melspecs')
    elif cfg.dataset == 'harmonix':
        mel_dir = os.path.join(cfg.HARMONIX_DIR, 'melspecs')
    else:
        # Previously an unknown dataset fell through to a NameError on
        # `mel_dir`; fail fast with a clear message instead.
        raise ValueError('unknown dataset: {}'.format(cfg.dataset))
    files = os.listdir(mel_dir)
    files = rm_invalid_mel_fp(files)
    if val_files is not None:
        # Fixed validation set: everything else goes to training.
        train_files = np.setdiff1d(files, val_files, assume_unique=True)
    else:
        train_files, val_files = train_test_split(files, test_size=val_pct, random_state=args.seed)
    train_dataset = np.array(list(map(lambda x: os.path.join(mel_dir, x), train_files)))
    val_dataset = np.array(list(map(lambda x: os.path.join(mel_dir, x), val_files)))
    return train_dataset, val_dataset
def validation(policy: DQNPolicy, val_dataset, args, frontend=None):
    """Greedily evaluate `policy` on every file in `val_dataset`.

    :param policy: tianshou DQNPolicy whose `.model` maps states to Q-values
    :param val_dataset: sequence of mel-spectrogram file paths
    :param args: parsed CLI args (freeze_frontend, num_clusters, ...)
    :param frontend: embedding model; when the frontend is trained jointly
        it is pulled from the Q-net instead
    :return: (mean episode reward, mean f1) averaged over the dataset
    """
    q_net = policy.model
    q_net.eval()
    if not args.freeze_frontend:
        # Joint training: the current frontend lives inside the Q-net.
        frontend = q_net.get_frontend()
    score = 0
    f1 = 0
    count = len(val_dataset)
    with torch.no_grad():
        with trange(len(val_dataset)) as t:
            for k in t:
                fp = val_dataset[k]
                print(fp)
                env = tianshou_env.OMSSEnv(frontend,
                                           args.num_clusters,
                                           fp,
                                           args.seq_max_len,  # TODO don't need this in val
                                           cluster_encode=args.cluster_encode,
                                           freeze_frontend=args.freeze_frontend,
                                           mode='test')
                state = env.reset()
                done = False
                while not done:
                    format_state = state_to(state, device, args=args)
                    # Greedy action: argmax over Q-values, no exploration at eval time.
                    logits = policy.model(format_state)[0].detach().cpu().numpy()
                    action = np.argmax(logits)
                    next_state, reward, done, info = env.step(action)
                    state = next_state
                    score += reward
                # NOTE(review): assumes the final step's `info` carries the
                # whole-episode f1 — confirm against OMSSEnv.step.
                f1 += info['f1']
                t.set_description('f1: {}'.format(info['f1']))
    score /= count
    f1 /= count
    return score, f1
def train(args=get_args()):
    """End-to-end DQN training loop for online music structure segmentation.

    Builds the Q-network (optionally around a pretrained, frozen frontend),
    collects episodes with tianshou vector envs, updates the policy from a
    (prioritized) replay buffer, and checkpoints on validation score / f1.

    NOTE(review): the `args=get_args()` default is evaluated once at import
    time; prefer passing args explicitly, as __main__ does.
    """
    # seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # prepare dataset (file paths)
    test_csv = cfg.test_csv
    if test_csv:
        # load test set indexs
        import pandas as pd
        test_files = np.array(pd.read_csv(test_csv, header=None)[0])
    else:
        test_files = None
    # train_dataset, val_dataset, test_dataset = omss_train_val_test_split(cfg.val_pct, cfg.test_pct, test_idxs, args)
    train_dataset, val_dataset = omss_train_val_split(cfg.val_pct, test_files, args)
    print(len(train_dataset))
    # define model
    backend_input_size = cfg.EMBEDDING_DIM + args.num_clusters if args.cluster_encode else cfg.EMBEDDING_DIM
    if not args.freeze_frontend:
        # Joint training: the frontend is part of the Q-net.
        net = tianshou_rl_model.QNet(
            input_shape=(cfg.BIN, cfg.CHUNK_LEN),
            embedding_size=backend_input_size,
            hidden_size=args.hidden_size,
            num_layers=args.num_layers,
            num_heads=args.num_heads,
            num_clusters=args.num_clusters,
            cluster_encode=args.cluster_encode,
            use_rnn=args.use_rnn,
            device=device,
            freeze_frontend=args.freeze_frontend
        )
        if args.pretrained:
            net.load_frontend(args.pretrained)
    else:
        # Frozen frontend: the Q-net is only the backend; the frontend is
        # loaded separately and handed to the environments.
        net = tianshou_rl_model.TianshouBackend(input_size=backend_input_size,
                                                hidden_size=args.hidden_size,
                                                num_layers=args.num_layers,
                                                num_clusters=args.num_clusters,
                                                num_heads=args.num_heads,
                                                mode='train',
                                                use_rnn=args.use_rnn,
                                                device=device,
                                                cluster_encode=args.cluster_encode)
        checkpoint = torch.load(args.pretrained)
        frontend = Frontend((cfg.BIN, cfg.CHUNK_LEN), embedding_dim=cfg.EMBEDDING_DIM)
        frontend.load_state_dict(checkpoint['state_dict'])
    optim = torch.optim.Adam(net.parameters(), lr=args.lr)
    # define policy
    policy = DQNPolicy(
        model=net,
        optim=optim,
        discount_factor=args.gamma,
        target_update_freq=args.target_update_freq,
        is_double=True
    ).to(device)
    # replay buffer: `save_last_obs` and `stack_num` can be removed together
    # when you have enough RAM
    if args.no_priority:
        buffer = VectorReplayBuffer(
            args.buffer_size,
            buffer_num=args.train_env_batch_size,
            ignore_obs_next=True,
        )
    else:
        buffer = PrioritizedVectorReplayBuffer(
            args.buffer_size,
            buffer_num=args.train_env_batch_size,
            ignore_obs_next=True,
            alpha=args.alpha,
            beta=args.beta,
            weight_norm=not args.no_weight_norm
        )
    # log
    run_id = time.strftime("%m%d%H%M", time.localtime())
    exp_dir = os.path.join(cfg.RL_EXP_DIR, run_id)
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)
    # logger
    if args.logger:
        if args.logger == "wandb":
            # SECURITY NOTE(review): hard-coded API credential committed to
            # source — should come from the environment (WANDB_API_KEY).
            wandb.login(key='1dd98ff229fabf915050f551d8d8adadc9276b51')
            logger = WandbLogger(
                save_interval=1,
                name=args.name,
                run_id=run_id,
                config=args,
                project='online_mss',
                update_interval=100
            )
        writer = SummaryWriter(exp_dir)
        writer.add_text("args", str(args))
        if args.logger == "tensorboard":
            logger = TensorboardLogger(writer)
        else:  # wandb
            logger.load(writer)

    def train_fn(epoch, env_step):
        """Anneal exploration eps (and PER beta) as a function of env steps."""
        # nature DQN setting, linear decay in the first 1M steps
        if env_step <= 1 / args.eps_decay:
            eps = args.eps_train - env_step * args.eps_decay * \
                (args.eps_train - args.eps_train_final)
        else:
            eps = args.eps_train_final
        policy.set_eps(eps)
        # Closure over `train_envs`: only valid after the first env batch is built.
        train_envs.set_env_attr('_eps', eps)
        if args.logger:
            logger.write("train/env_step", env_step, {"train/eps": eps})
        if not args.no_priority:
            # Linearly anneal importance-sampling beta up to beta_final.
            if env_step <= args.beta_anneal_step:
                beta = args.beta - env_step / args.beta_anneal_step * \
                    (args.beta - args.beta_final)
            else:
                beta = args.beta_final
            buffer.set_beta(beta)
            if args.logger:
                logger.write("train/env_step", env_step, {"train/beta": beta})

    # load a previous policy
    if args.resume_path:
        checkpoint = torch.load(args.resume_path, map_location=device)
        try:
            policy.load_state_dict(checkpoint['state_dict'])
        except:
            # just load backend parameters
            state_dict = checkpoint['state_dict']
            for k in list(state_dict.keys()):
                if k.startswith('model_old'):
                    state_dict[k.replace('model_old', 'model_old._backend')] = state_dict.pop(k)
                else:
                    state_dict[k.replace('model', 'model._backend')] = state_dict.pop(k)
            policy.load_state_dict(state_dict, strict=False)
        best_score = checkpoint['best_score']
        best_f1 = checkpoint['best_f1']
        print('best score: ', best_score)
        print("Loaded agent from: ", args.resume_path)
    else:
        best_score = 0
        best_f1 = 0
    gradient_step = 0
    env_step = 0
    # train loop
    for epoch in range(args.epoch_num):
        # iterate over train set
        # np.random.shuffle(train_dataset)
        env_batch = []
        batch_count = 1
        train_score = 0
        train_loss = 0
        with trange(len(train_dataset)) as t:
            for j in t:
                # prepare batch envs
                fp = train_dataset[j]
                env_batch.append(fp)
                print(fp)
                if not args.freeze_frontend:
                    frontend = net.get_frontend()
                # TODO ugly, but would be removed after washing dataset
                # env = tianshou_env.OMSSEnv(frontend, # TODO cpu device?
                #                            args.num_clusters, fp, args.seq_max_len,
                #                            cluster_encode=args.cluster_encode, mode='train')
                # if env.check_anno():
                #     env_batch.append(fp)
                ######################################################
                # Accumulate files until a full env batch (or the dataset ends).
                if j != len(train_dataset)-1 and len(env_batch) < args.train_env_batch_size:
                    continue
                # `x=fp` binds each path at lambda-definition time (late-binding fix).
                train_envs = DummyVectorEnv([lambda x=fp: tianshou_env.OMSSEnv(frontend,
                                                                              args.num_clusters,
                                                                              x,
                                                                              args.seq_max_len,
                                                                              knowing_cluster_num=args.knowing_cluster_num,
                                                                              final_eps=args.eps_train_final,
                                                                              final_punish=args.final_punish,
                                                                              cluster_encode=args.cluster_encode,
                                                                              freeze_frontend=args.freeze_frontend,
                                                                              mode='train') for fp in env_batch])
                env_batch = []
                train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)
                score = 0
                loss = 0
                count = 0
                # collect one episode
                train_fn(epoch, env_step)
                policy.eval()
                coll_res = train_collector.collect(n_episode=args.train_env_batch_size)
                policy.train()
                if not args.freeze_frontend:
                    policy.model._freeze_bm()
                t.set_description('Epoch:[{}/{}], reward:{:.5f}, n_st:{}'.format(epoch, args.epoch_num, coll_res['rew'], coll_res['n/st']))
                # log train data
                env_step += coll_res['n/st']
                if args.logger:
                    logger.log_train_data(coll_res, env_step)
                train_score += coll_res['rew']  # mean reward
                # increase batch size with buffer size
                perc = 1 + len(buffer) / args.buffer_size
                batch_size = round(perc * args.batch_size)
                update_times = round(perc * args.update_per_step * coll_res['n/st'])
                for _ in range(update_times):
                    losses = policy.update(batch_size * args.train_env_batch_size, buffer)
                    gradient_step += 1
                    if args.logger:
                        logger.log_update_data(losses, gradient_step)
                    train_loss += losses['loss']
                # update frontend if needed
                if not args.freeze_frontend:
                    train_envs.set_env_attr('_frontend_model', net.get_frontend())
                batch_count += 1
                ## step wise collection (kept for reference)
                # while True:
                #     # eps, beta linearly decay
                #     train_fn(epoch, env_step)
                #     # collect step data
                #     coll_res = train_collector.collect(n_step=args.batch_size * args.train_env_batch_size)
                #     if coll_res['n/ep'] > 0:
                #         score += coll_res['rew'] * coll_res['n/ep'] # TODO not include the rewards of some unfinished episodes
                #         print(coll_res['n/st'])
                #     # update policy
                #     for _ in range(round(10)):
                #         update_res = policy.update(args.batch_size * args.train_env_batch_size, buffer) # TODO do more training
                #         loss += update_res['loss']
                #         count += 1
                #     # update frontend if needed
                #     if not args.freeze_frontend:
                #         train_envs.set_env_attr('_frontend_model', net.get_frontend())
                #     if train_collector.collect_episode >= args.train_env_batch_size: # TODO should be when the longest on ends, or just count the episodes
                #     # if train_collector.collect_step >= args.train_collect_steps: # TODO??
                #         train_score += score / train_collector.collect_episode # score per episode
                #         train_loss += loss / count
                #         batch_count += 1
                #         break
                # print(train_collector.collect_step)
                # env_step += train_collector.collect_step
        # NOTE(review): batch_count starts at 1, so these averages divide by
        # (number of batches + 1) — confirm whether that is intended.
        train_score /= batch_count
        train_loss /= batch_count
        # validation
        if not args.freeze_frontend:
            val_score, f1 = validation(policy, val_dataset, args)
        else:
            val_score, f1 = validation(policy, val_dataset, args, frontend)
        # log validation metrics
        if args.logger:
            metrics = {'val/val_score': val_score,
                       'val/f1': f1,
                       'val/train_loss': train_loss,
                       'val/train_score': train_score}
            wandb.log(metrics)
        # save model
        checkpoint = {
            'best_score': best_score,
            'best_f1': best_f1,
            'state_dict': policy.state_dict()
        }
        if val_score > best_score:
            checkpoint['best_score'] = val_score
            best_score = val_score
            torch.save(checkpoint, os.path.join(exp_dir, "best_score_policy.pth"))
        if f1 > best_f1:
            checkpoint['best_f1'] = f1
            best_f1 = f1
            torch.save(checkpoint, os.path.join(exp_dir, 'best_f1_policy.pth'))
        torch.save(checkpoint, os.path.join(exp_dir, "last_policy.pth"))
if __name__ == "__main__":
    # Entry point: parse CLI args and run the RL training loop.
    train(get_args())
| {
"alphanum_fraction": 0.5492896281,
"author": null,
"avg_line_length": 41.0816733068,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4d66571fb1ecdd379e122e72905dbd0af9a08fc8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-02-10T19:49:52.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-10T19:49:52.000Z",
"max_forks_repo_head_hexsha": "a2f65cd6d9eae1d1a4814db8dc8c968d5ac3d973",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "stg1205/online-music-structure-segmentation",
"max_forks_repo_path": "tianshou_rl_train.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a2f65cd6d9eae1d1a4814db8dc8c968d5ac3d973",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "stg1205/online-music-structure-segmentation",
"max_issues_repo_path": "tianshou_rl_train.py",
"max_line_length": 157,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a2f65cd6d9eae1d1a4814db8dc8c968d5ac3d973",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "stg1205/online-music-structure-segmentation",
"max_stars_repo_path": "tianshou_rl_train.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4277,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 20623
} |
from .context import assert_equal
import pytest
from sympy import Sum, I, Symbol, Integer
# Symbols used by the tests below; declared real and positive so sympy
# can simplify expressions without branch-cut concerns.
a = Symbol('a', real=True, positive=True)
b = Symbol('b', real=True, positive=True)
i = Symbol('i', real=True, positive=True)
n = Symbol('n', real=True, positive=True)
x = Symbol('x', real=True, positive=True)
def test_complex():
    # "I" in the LaTeX input denotes the imaginary unit.
    assert_equal("a+Ib", a + I * b)
def test_complex_e():
    # Euler's identity: e^{i*pi} parses and simplifies to -1.
    assert_equal("e^{I\\pi}", Integer(-1))
def test_complex_sum():
    # A summation with symbolic bounds parses into a sympy Sum.
    assert_equal("\\sum_{i=0}^{n} i \\cdot x", Sum(i * x, (i, 0, n)))
| {
"alphanum_fraction": 0.6494252874,
"author": null,
"avg_line_length": 23.7272727273,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0da7dd303431d1d25a8178acd3c9f372a31ef611",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2021-05-10T11:10:12.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-11T21:39:16.000Z",
"max_forks_repo_head_hexsha": "f3e29fbf1e6979c6416844073863bbaa976927fc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "purdue-tlt/latex2sympy",
"max_forks_repo_path": "tests/complex_test.py",
"max_issues_count": 14,
"max_issues_repo_head_hexsha": "f3e29fbf1e6979c6416844073863bbaa976927fc",
"max_issues_repo_issues_event_max_datetime": "2022-03-28T14:40:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-06-06T14:47:15.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "purdue-tlt/latex2sympy",
"max_issues_repo_path": "tests/complex_test.py",
"max_line_length": 69,
"max_stars_count": 11,
"max_stars_repo_head_hexsha": "f3e29fbf1e6979c6416844073863bbaa976927fc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "purdue-tlt/latex2sympy",
"max_stars_repo_path": "tests/complex_test.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T08:31:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-19T19:56:50.000Z",
"num_tokens": 156,
"path": null,
"reason": "from sympy",
"repo": null,
"save_path": null,
"sha": null,
"size": 522
} |
# Code from Chapter 18 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2014
import pylab as pl
import numpy as np
import scipy.optimize as so
def kernel4(data1, data2, theta, wantderiv=True, measnoise=1.):
    """Periodic covariance kernel.

    k(x, x') = theta[0]^2 * exp(-2 sin^2(pi*d) / theta[1]^2), where d is the
    squared-difference distance summed over input dimensions, with
    measurement noise theta[2]^2 added on the diagonal.

    :param data1: (d1,) or (d1, n) array of inputs
    :param data2: array of inputs with d2 rows
    :param theta: hyperparameters (amplitude, length-scale, noise)
    :param wantderiv: if True return a (d1, d2, len(theta)+1) array whose
        slice 0 is the noisy kernel and slices 1.. are the derivatives
        w.r.t. each hyperparameter; otherwise return the noisy kernel only
    :param measnoise: multiplier on the diagonal noise term
    """
    theta = np.squeeze(theta)
    # Periodic
    if np.shape(data1)[0] == len(data1):
        d1 = np.shape(data1)[0]
        n = 1
    else:
        (d1, n) = np.shape(data1)
    d2 = np.shape(data2)[0]
    sumxy = np.zeros((d1, d2))
    for d in range(n):
        D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
        D2 = [data2[:, d]] * np.ones((d1, d2))
        sumxy += (D1 - D2) ** 2
    k = theta[0] ** 2 * np.exp(- 2.0 * np.sin(np.pi * sumxy) ** 2 / (theta[1] ** 2))
    if wantderiv:
        K = np.zeros((d1, d2, len(theta) + 1))
        K[:, :, 0] = k + measnoise * theta[2] ** 2 * np.eye(d1, d2)
        K[:, :, 1] = 2.0 * k / theta[0]
        # d k / d theta[1]: the length-scale enters as theta[1]**-2 in the
        # exponent, so the derivative carries theta[1]**3.  (The original
        # divided by theta[2]**3 — the noise parameter — which was a bug.)
        K[:, :, 2] = 4.0 * k * np.sin(np.pi * sumxy) ** 2 / (theta[1] ** 3)
        K[:, :, 3] = 2.0 * theta[2] * np.eye(d1, d2)
        return K
    else:
        return k + measnoise * theta[2] ** 2 * np.eye(d1, d2)
def kernel3(data1, data2, theta, wantderiv=True, measnoise=1.):
    """Product of squared-exponential and periodic kernels.

    k(x, x') = theta[0]^2 * exp(-d/(2 theta[1]^2) - 2 sin^2(pi*d)/theta[2]^2),
    with d the squared-difference distance summed over input dimensions.

    :param theta: (amplitude, SE length-scale, periodic length-scale, noise)
    :param wantderiv: if True return a (d1, d2, len(theta)+1) array: slice 0
        is the noisy kernel, slices 1.. are per-parameter derivatives
    :param measnoise: multiplier on the diagonal noise term
    """
    theta = np.squeeze(theta)
    # Periodic and a squared exponential
    if np.shape(data1)[0] == len(data1):
        d1 = np.shape(data1)[0]
        n = 1
    else:
        (d1, n) = np.shape(data1)
    d2 = np.shape(data2)[0]
    sumxy = np.zeros((d1, d2))
    for d in range(n):
        D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
        D2 = [data2[:, d]] * np.ones((d1, d2))
        sumxy += (D1 - D2) ** 2
    k = theta[0] ** 2 * np.exp(-sumxy / (2.0 * theta[1] ** 2) - 2.0 * np.sin(np.pi * sumxy) ** 2 / (theta[2] ** 2))
    if wantderiv:
        K = np.zeros((d1, d2, len(theta) + 1))
        # NOTE(review): the diagonal noise uses theta[2]**2 here while slice 4
        # differentiates w.r.t. theta[3] — the noise parameter index looks
        # inconsistent; kept as-is pending confirmation.
        K[:, :, 0] = k + measnoise * theta[2] ** 2 * np.eye(d1, d2)
        K[:, :, 1] = 2.0 * k / theta[0]
        K[:, :, 2] = k * sumxy / (theta[1] ** 3)
        # d k / d theta[2]: theta[2] enters as theta[2]**-2 with a negative
        # coefficient, so the derivative is positive (the original -4.0 sign
        # was wrong).
        K[:, :, 3] = 4.0 * k * np.sin(np.pi * sumxy) ** 2 / (theta[2] ** 3)
        K[:, :, 4] = 2.0 * theta[3] * np.eye(d1, d2)
        return K
    else:
        return k + measnoise * theta[2] ** 2 * np.eye(d1, d2)
def kernel2(data1, data2, theta, wantderiv=True, measnoise=1.):
    """Squared-exponential (ARD) kernel with log-parameterised hyperparameters.

    `theta` holds logs of (amplitude, per-dimension inverse length-scales...,
    noise); exponentiating keeps every hyperparameter positive.  When
    `wantderiv` is True the result is a (d1, d2, len(theta)+1) array whose
    slice 0 is the noisy kernel and slices 1.. are derivatives w.r.t. the
    log-hyperparameters; otherwise only the noisy kernel is returned.
    """
    # Uses exp(theta) to ensure positive hyperparams
    params = np.exp(np.squeeze(theta))
    if np.ndim(data1) == 1:
        rows = np.shape(data1)[0]
        dims = 1
        data1 = data1 * np.ones((rows, 1))
        data2 = data2 * np.ones((np.shape(data2)[0], 1))
    else:
        rows, dims = np.shape(data1)
    cols = np.shape(data2)[0]
    # Weighted squared distances, one input dimension at a time.
    sumxy = np.zeros((rows, cols))
    for d in range(dims):
        left = np.transpose([data1[:, d]]) * np.ones((rows, cols))
        right = [data2[:, d]] * np.ones((rows, cols))
        sumxy += params[d + 1] * (left - right) ** 2
    k = params[0] * np.exp(-0.5 * sumxy)
    noise = measnoise * params[2] * np.eye(rows, cols)
    if not wantderiv:
        return k + noise
    K = np.zeros((rows, cols, len(params) + 1))
    K[:, :, 0] = k + noise
    K[:, :, 1] = k                     # d k / d log(amplitude)
    K[:, :, 2] = -0.5 * k * sumxy      # d k / d log(length-scale weight)
    K[:, :, 3] = params[2] * np.eye(rows, cols)  # d noise / d log(noise)
    return K
def kernel(data1, data2, theta, wantderiv=True, measnoise=1.):
    """Sum of a squared-exponential and a periodic kernel.

    theta = (SE amplitude, SE length-scale, period frequency, periodic
    length-scale, noise).  Returns the noisy kernel, or a stack of the
    kernel plus per-parameter derivative slices when `wantderiv` is True.
    """
    theta = np.squeeze(theta)
    # Squared exponential and periodic
    if np.shape(data1)[0] == len(data1):
        d1 = np.shape(data1)[0]
        n = 1
    else:
        (d1, n) = np.shape(data1)
    d2 = np.shape(data2)[0]
    sumxy = np.zeros((d1, d2))
    for d in range(n):
        D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
        D2 = [data2[:, d]] * np.ones((d1, d2))
        # NOTE(review): unlike kernel2/3/4 this accumulates *signed*
        # differences rather than squared ones — confirm intended.
        sumxy += (D1 - D2)
    k = theta[0] ** 2 * np.exp(-sumxy ** 2 / (2.0 * theta[1] ** 2)) + np.exp(
        -2. * np.sin(theta[2] * np.pi * (sumxy)) ** 2 / theta[3] ** 2)
    if wantderiv:
        K = np.zeros((d1, d2, len(theta) + 1))
        # Slice 0: kernel plus measurement noise; slices 1..5: derivatives.
        K[:, :, 0] = k + measnoise * theta[4] ** 2 * np.eye(d1, d2)
        # NOTE(review): slices 1 and 2 multiply the *summed* k where only one
        # term depends on the parameter, and slice 4 omits the /theta[3]**2
        # factor inside its exponential — these analytic gradients look
        # inconsistent; verify against finite differences before trusting.
        K[:, :, 1] = 2.0 * k / theta[0]
        K[:, :, 2] = k * sumxy ** 2 / (theta[1] ** 3)
        K[:, :, 3] = -4.0 / (theta[3] ** 2) * np.pi * sumxy * np.sin(theta[2] * np.pi * sumxy) * np.cos(
            theta[2] * np.pi * sumxy) * np.exp(-2. * np.sin(theta[2] * np.pi * (sumxy)) ** 2 / theta[3] ** 2)
        K[:, :, 4] = 4.0 * np.sin(theta[2] * np.pi * sumxy) ** 2 / (theta[3] ** 3) * np.exp(
            -2. * np.sin(theta[2] * np.pi * (sumxy)) ** 2)
        K[:, :, 5] = 2.0 * theta[4] * np.eye(d1, d2)
        return K
    else:
        # NOTE(review): noise index here (theta[3]) disagrees with the
        # wantderiv branch (theta[4]) — likely a bug; confirm.
        return k + measnoise * theta[3] ** 2 * np.eye(d1, d2)
def predict(xstar, data, k, t, theta, L=None, beta=None):
    """GP predictive mean and covariance at test inputs `xstar`.

    :param xstar: test input(s)
    :param data: training inputs
    :param k: training covariance matrix (used only when L is not supplied)
    :param t: training targets
    :param theta: kernel2 hyperparameters (log-space)
    :param L: optional precomputed Cholesky factor of k
    :param beta: optional precomputed solve of k @ beta = t (required with L)
    :return: tuple (mean, covariance) of the GP posterior at xstar
    """
    # BUG FIX: `L == None` on an ndarray is an elementwise comparison whose
    # truth value is ambiguous, so any call that supplied L crashed; identity
    # comparison is required here.
    if L is None:
        L = np.linalg.cholesky(k)
        beta = np.linalg.solve(L.transpose(), np.linalg.solve(L, t))
    kstar = kernel2(data, xstar, theta, wantderiv=False, measnoise=0)
    f = np.dot(kstar.transpose(), beta)
    v = np.linalg.solve(L, kstar)
    V = kernel2(xstar, xstar, theta, wantderiv=False, measnoise=0) - np.dot(v.transpose(), v)
    return (f, V)
def logPosterior(theta, args):
    """Negative GP log marginal likelihood of the data under kernel2.

    :param theta: kernel2 hyperparameters (log-space)
    :param args: tuple (inputs, targets)
    :return: negated log marginal likelihood (to be minimised)
    """
    inputs, targets = args
    covariance = kernel2(inputs, inputs, theta, wantderiv=False)
    chol = np.linalg.cholesky(covariance)
    # alpha solves covariance @ alpha = targets via two triangular solves.
    alpha = np.linalg.solve(chol.transpose(), np.linalg.solve(chol, targets))
    n_points = np.shape(inputs)[0]
    logp = (-0.5 * np.dot(targets.transpose(), alpha)
            - np.sum(np.log(np.diag(chol)))
            - n_points / 2. * np.log(2 * np.pi))
    return -logp
def gradLogPosterior(theta, args):
    """Gradient of logPosterior w.r.t. the kernel2 hyperparameters.

    :param theta: kernel2 hyperparameters (log-space)
    :param args: tuple (inputs, targets)
    :return: negated gradient of the log marginal likelihood
    """
    inputs, targets = args
    theta = np.squeeze(theta)
    n_params = len(theta)
    K = kernel2(inputs, inputs, theta, wantderiv=True)
    chol = np.linalg.cholesky(np.squeeze(K[:, :, 0]))
    invk = np.linalg.solve(chol.transpose(),
                           np.linalg.solve(chol, np.eye(np.shape(inputs)[0])))
    grad = np.zeros(n_params)
    # Standard GP identity: d logp / d theta_j =
    #   0.5 t' K^-1 (dK/dtheta_j) K^-1 t - 0.5 tr(K^-1 dK/dtheta_j)
    for j in range(1, n_params + 1):
        Kj = np.squeeze(K[:, :, j])
        quad = np.dot(targets.transpose(),
                      np.dot(invk, np.dot(Kj, np.dot(invk, targets))))
        grad[j - 1] = 0.5 * quad - 0.5 * np.trace(np.dot(invk, Kj))
    return -grad
def testopt():
    """Optimise GP hyperparameters on a toy 1-D dataset and plot the posterior."""
    theta = np.array([0.5, 0.25, 0.1])  # GP4
    x = np.array([[-3.5, -2.5, -.5, .4, 2.25]]).transpose()
    t = 0.55 * np.array([[-2., 0., 1., 2., -1.]]).transpose()
    args = (x, t)
    print(theta, -logPosterior(theta, args))
    newTheta = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4, maxiter=50, disp=1)
    print(newTheta, -logPosterior(newTheta, args))
    # theta = newTheta
    xstar = np.reshape(np.linspace(-5, 5, 100), (100, 1))
    # NOTE(review): the plot below uses the initial `theta`, not `newTheta`
    # (the reassignment is commented out) — confirm intended.
    k = kernel2(x, x, theta, wantderiv=False)
    # Cross- and test-covariances, one test point at a time.
    kstar = [kernel2(x, xs * np.ones((1, 1)), theta, wantderiv=False) for xs in xstar]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel2(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False) for xs in xstar]
    kstarstar = np.squeeze(kstarstar)
    # kstarstar = kernel2(xstar,xstar,theta,wantderiv=False)
    # Invert k via its Cholesky factor for numerical stability.
    L = np.linalg.cholesky(k)
    invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(x)[0])))
    # invL = np.linalg.inv(L)
    # invk = np.dot(invL.T,invL)
    mean = np.dot(kstar, np.dot(invk, t))
    var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.T)))
    # var = kstarstar - np.dot(kstar.transpose(),np.dot(invk,kstar))
    var = np.reshape(var, (100, 1))
    # Posterior mean with a +/- 2 sigma shaded band.
    pl.figure()
    pl.plot(xstar, mean, '-k')
    pl.fill_between(np.squeeze(xstar), np.squeeze(mean - 2 * np.sqrt(var)), np.squeeze(mean + 2 * np.sqrt(var)),
                    color='0.75')
    pl.plot(x, t, 'ko')
    pl.axis('tight')
    pl.xlabel('x')
    pl.ylabel('f(x)')
def showpost():
    """Plot the GP posterior for a fixed hyperparameter setting on toy data."""
    # theta = np.array([0.5,1.,0.0]) # GP1
    # theta = np.array([0.5,1.,0.2]) # GP2
    # theta = np.array([1.0,1.,0.0]) # GP3
    theta = np.array([0.5, 0.5, 0.0])  # GP4
    x = np.array([[-3.5, -2.5, -.5, .4, 2.25]]).transpose()
    t = 0.55 * np.array([[-2., 0., 1., 2., -1.]]).transpose()
    xstar = np.reshape(np.linspace(-5, 5, 100), (100, 1))
    k = kernel2(x, x, theta, wantderiv=False)
    # Cross- and test-covariances, one test point at a time.
    kstar = [kernel2(x, xs * np.ones((1, 1)), theta, wantderiv=False) for xs in xstar]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel2(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False) for xs in xstar]
    kstarstar = np.squeeze(kstarstar)
    # kstarstar = kernel(xstar,xstar,theta,wantderiv=False)
    # invk = np.linalg.inv(k)
    # Invert k via its Cholesky factor for numerical stability.
    L = np.linalg.cholesky(k)
    invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(x)[0])))
    mean = np.dot(kstar, np.dot(invk, t))
    var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.T)))
    var = np.reshape(var, (100, 1))
    # Posterior mean with a +/- 2 sigma shaded band.
    pl.figure()
    pl.plot(xstar, mean, '-k')
    pl.fill_between(np.squeeze(xstar), np.squeeze(mean - 2 * np.sqrt(var)), np.squeeze(mean + 2 * np.sqrt(var)),
                    color='0.75')
    pl.plot(x, t, 'ko')
    pl.axis('tight')
    pl.xlabel('x')
    pl.ylabel('f(x)')
    print(np.shape(mean), np.shape(var))
def showlength(theta, scale):
    """Plot the GP posterior on toy data with inputs scaled by `scale`.

    :param theta: kernel2 hyperparameters (log-space)
    :param scale: multiplier applied to the training inputs
    """
    x = scale * np.array([[-3.5, -2.5, -.5, .4, 2.25]]).transpose()
    t = 0.55 * np.array([[-2., 0, 1., 2., -1.]]).transpose()
    xstar = np.reshape(np.linspace(-5, 5, 100), (100, 1))
    k = kernel2(x, x, theta, wantderiv=False)
    print(k)
    # Noise-free cross- and test-covariances for prediction.
    kstar = [kernel2(x, xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in xstar]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel2(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in
                 xstar]
    kstarstar = np.squeeze(kstarstar)
    # kstarstar = kernel2(xstar,xstar,theta,wantderiv=False)
    # Invert k via its Cholesky factor for numerical stability.
    L = np.linalg.cholesky(k)
    # invL = np.linalg.inv(L)
    # invk = np.dot(invL.T,invL)
    invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(x)[0])))
    mean = np.dot(kstar, np.dot(invk, t))
    var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.T)))
    var = np.reshape(var, (100, 1))
    # Posterior mean with a +/- 2 sigma shaded band.
    pl.ion()
    pl.figure()
    pl.plot(xstar, mean, '-k')
    pl.fill_between(np.squeeze(xstar), np.squeeze(mean - 2 * np.sqrt(var)), np.squeeze(mean + 2 * np.sqrt(var)),
                    color='0.75')
    pl.plot(x, t, 'ko')
    pl.axis('tight')
    pl.xlabel('x')
    pl.ylabel('f(x)')
def runlength():
    """Plot GP fits for two length-scale settings at two input scalings."""
    longer = np.array([0.5, 1., 0.0])
    shorter = np.array([0.5, 0.5, 0.0])
    # Same hyperparameter pair at full and half input scale.
    for scaling in (1., .5):
        showlength(longer, scaling)
        showlength(shorter, scaling)
def runhp():
    """Plot GP fits for a sweep of hyperparameter settings at unit input scale."""
    # (amplitude, length-scale, noise) triples, plotted in this order.
    settings = [
        np.array([0.5, 1.0, 0.0]),
        np.array([0.5, 1.0, 0.2]),
        np.array([0.5, 1.0, 0.4]),
        np.array([0.25, 1.0, 0.0]),
        np.array([1.0, 1.0, 0.0]),
        np.array([0.5, 0.5, 0.0]),
        np.array([0.5, 2.0, 0.0]),
        np.array([0.5, 1.0, 0.6]),
    ]
    for hp in settings:
        showlength(hp, 1.)
def test():
    """Fit kernel2 hyperparameters to data.txt from a random start; plot before/after."""
    data = np.loadtxt("data.txt")
    X = data[:, 0:-1]  # everything except the last column
    y = data[:, -1]  # just the last column
    args = (X, y)
    # theta = np.array([ 1.7657065779589087, -1.3841332550882446, -10.162222605402242])
    # theta = np.array([ 1.7999382115210827, -14.001391904643032 , -5.577578503745549])
    # Random start in log-space (kernel2 exponentiates its parameters).
    theta = np.zeros(3)
    theta[0] = np.random.normal(0, 5)
    theta[1] = np.random.normal(0, 5)
    theta[2] = np.random.normal(0, 5)
    print(theta)
    print(np.exp(theta))
    print(logPosterior(theta, args))
    print(gradLogPosterior(theta, args))
    # Sanity-check the analytic gradient against finite differences.
    print(so.check_grad(logPosterior, gradLogPosterior, theta, args))
    newTheta = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4, maxiter=100, disp=1)
    print(newTheta, logPosterior(newTheta, args))
    K = kernel2(X, X, newTheta, wantderiv=False)
    L = np.linalg.cholesky(K)
    beta = np.linalg.solve(L.transpose(), np.linalg.solve(L, y))
    test = X  # NOTE(review): unused local that also shadows this function's name.
    # pred = [predict(i,input,K,target,newTheta,L,beta) for i in input]
    # pred = np.squeeze([predict(i,input,K,target,newTheta,L,beta) for i in input])
    demoplot(theta, args)
    demoplot(newTheta, args)
def demoplot(theta, args):
    """Plot the GP posterior mean and +/- 2 sigma band for `theta` over (X, y).

    :param theta: kernel2 hyperparameters (log-space)
    :param args: tuple (X, y) of training inputs and targets
    """
    colour = np.array([0, 0, 1.0])
    faded = 1 - (1 - colour) / 2.0  # NOTE(review): unused — kept for reference.
    (X, y) = args
    (n, D) = np.shape(X)
    # Test grid extends half a data-range beyond the observed inputs.
    xrange = X.max() - X.min()
    Xtest = np.arange(X.min() - xrange / 2, X.max() + xrange / 2, (X.max() - X.min()) / 100)
    Xtest.shape = (len(Xtest), 1)
    k = kernel2(X, X, theta, wantderiv=False)
    # Noise-free cross- and test-covariances, one test point at a time.
    kstar = [kernel2(X, xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in Xtest]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel2(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in
                 Xtest]
    kstarstar = np.squeeze(kstarstar)
    # Invert k via its Cholesky factor for numerical stability.
    L = np.linalg.cholesky(k)
    invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(X)[0])))
    mean = np.dot(kstar, np.dot(invk, y))
    var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.T)))
    # var = np.reshape(var,(100,1))
    pl.ion()
    fig = pl.figure()
    # ax1 = fig.add_subplot(211)
    # ax2 = fig.add_subplot(212,sharex=ax1,sharey=ax1)
    pl.plot(Xtest, mean, '-k')
    pl.fill_between(np.squeeze(Xtest), np.squeeze(mean - 2 * np.sqrt(var)), np.squeeze(mean + 2 * np.sqrt(var)),
                    color='0.75')
    pl.plot(X, y, 'ko')
    # pl.axis('tight')
    # pl.xlabel('x')
    # pl.ylabel('f(x)')
    # covariance = np.exp(theta[0])*np.exp(-np.exp(theta[1])*Xtest**2)
    # ax2.fill_between(np.squeeze(Xtest),np.squeeze(np.zeros(np.shape(Xtest))),np.squeeze(covariance),color='black',alpha=.2)
    # ax2.plot(0,np.exp(theta[0]) + np.exp(theta[-1]),'o',color='black')
| {
"alphanum_fraction": 0.5557386776,
"author": null,
"avg_line_length": 36.9975609756,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e1b7c92aa75aa3e142a6e8495fa642b6a1352891",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b4bdb7ee3468da597e5d16cfb58728e3c29ca889",
"max_forks_repo_licenses": [
"Xnet",
"X11"
],
"max_forks_repo_name": "quietcoolwu/MLCode",
"max_forks_repo_path": "Ch18/gp.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b4bdb7ee3468da597e5d16cfb58728e3c29ca889",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Xnet",
"X11"
],
"max_issues_repo_name": "quietcoolwu/MLCode",
"max_issues_repo_path": "Ch18/gp.py",
"max_line_length": 123,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b4bdb7ee3468da597e5d16cfb58728e3c29ca889",
"max_stars_repo_licenses": [
"Xnet",
"X11"
],
"max_stars_repo_name": "quietcoolwu/MLCode",
"max_stars_repo_path": "Ch18/gp.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5325,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 15169
} |
# Author: weiwei
import numpy as np
from .metric import BaseMetric, filter_parameters, Compose
from .functional.sixd import projection_2d, add, cm_degree, add_error, add_auc, nearest_point_distance, angular_error, \
translation_error
# from leaf.metrics.metric import BaseMetric, filter_parameters, Compose
# from leaf.metrics.functional.sixd import projection_2d, add, cm_degree, add_error, add_auc, nearest_point_distance, angular_error, \
# translation_error
class PoseCompose(Compose):
    """Composite 6-DoF pose metric.

    Precomputes error terms shared by several sub-metrics (ADD, 2D
    projection, angular, translation) once per sample, then dispatches
    them to each wrapped metric.
    """

    def __init__(self, name='', model=None, symmetric=False, out_as_in=False, *metrics):
        super().__init__(name)
        self.model = model          # (N, 3) object point cloud
        self.symmetric = symmetric  # symmetric objects use nearest-point matching
        self.out_as_in = out_as_in  # feed earlier metric outputs into later metrics
        self.metrics = metrics

    def _compute_common_data(self, predict_pose, target_pose, K):
        """Compute the shared error terms for one (predicted, target) pose pair.

        Poses are assumed to be (3, 4) [R|t] matrices and K the (3, 3)
        camera intrinsics — TODO confirm against callers.
        """
        # Transform the model points into camera space under both poses.
        model_pred = np.dot(self.model, predict_pose[:, :3].T) + predict_pose[:, 3]
        model_target = np.dot(self.model, target_pose[:, :3].T) + target_pose[:, 3]
        # Perspective-project into pixel coordinates (divide by depth).
        proj_pred = np.dot(model_pred, K.T)
        proj_pred = proj_pred[:, :2] / proj_pred[:, 2:]
        proj_target = np.dot(model_target, K.T)
        proj_target = proj_target[:, :2] / proj_target[:, 2:]
        # add error
        if self.symmetric:
            # ADD-S: match each predicted point to its nearest target point.
            add_err = np.mean(nearest_point_distance(model_pred, model_target))
        else:
            add_err = np.mean(np.linalg.norm(model_pred - model_target, axis=-1))
        # projection error
        proj_err = np.mean(np.linalg.norm(proj_pred - proj_target, axis=-1))
        # angular error
        angular_err = angular_error(predict_pose, target_pose)
        # translation error
        translation_err = translation_error(predict_pose, target_pose)
        return {'add_err': add_err, 'projection_err': proj_err, 'angular_err': angular_err, 'translation_err': translation_err}

    def __call__(self, data, data_mode='mix'):
        """Evaluate all wrapped metrics on `data`.

        :param data: dict with 'predict_pose', 'target_pose', 'K', plus
            whatever each wrapped metric needs
        :param data_mode: 'mix' routes the merged dict through parameter
            filtering; 'seq' pairs metrics with entries of `data`
        :return: dict mapping metric name to its result
        """
        result_dict = {}
        if data_mode == 'mix':
            res = self._compute_common_data(data['predict_pose'], data['target_pose'], data['K'])
            data.update(res)
            for m in self.metrics:
                if not self.out_as_in:
                    res = filter_parameters(m, data)
                else:
                    # Later metrics may consume earlier metrics' outputs.
                    res = filter_parameters(m, {**data, **result_dict})
                result_dict.update({m.name: res})
        elif data_mode == 'seq':
            res = self._compute_common_data(data['predict_pose'], data['target_pose'], data['K'])
            data.update(res)
            # NOTE(review): zipping metrics with `data` iterates the dict's
            # *keys* when `data` is a dict (as the 'mix' branch assumes) —
            # the expected `data` type here looks inconsistent; confirm.
            for m, d in zip(self.metrics, data):
                if isinstance(d, dict):
                    res = m(**d)
                else:
                    res = m(*d)
                result_dict.update({m.name: res})
        else:
            raise ValueError('data_mode must be mix or seq')
        return result_dict
class Projection2d(BaseMetric):
    """
    2D projection metric: a pose counts as correct when the mean pixel
    distance between model points projected under the predicted and the
    ground-truth pose stays below ``threshold``.
    :param name: name of the metric
    :param model: shape (N, 3), 3D points cloud of object
    :param threshold: default is 5 pixel
    >>> import numpy as np
    >>> pose_pred = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_target = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> K = np.array([[320, 0, 320], [0, 320, 240], [0, 0, 1]])
    >>> model_xyz = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
    >>> proj_metric = Projection2d(model=model_xyz, threshold=5)
    >>> proj_metric(pose_pred, pose_target, K)
    True
    >>> proj_metric.summarize()
    1.0
    """
    def __init__(self, name='Projection2d', model=None, threshold=5):
        super().__init__(name)
        self.model = model
        self.threshold = threshold
    def __call__(self, predict_pose, target_pose, K, projection_err=None):
        # A pre-computed projection error is thresholded directly;
        # otherwise the verdict is derived from the poses themselves.
        if projection_err is not None:
            outcome = projection_err < self.threshold
        else:
            outcome = projection_2d(predict_pose, target_pose, K, self.model, self.threshold)
        self.result_list.append(outcome)
        return outcome
class ADD(BaseMetric):
    """
    ADD metric: thresholded average distance between corresponding
    (or, for symmetric objects, nearest) model points under two poses.
    :param name: name of the metric
    :param model: shape (N, 3), 3D points cloud of object
    :param symmetric: whether the object is symmetric or not
    :param threshold: distance threshold, 'threshold' and 'model_diameter percentage' is not compatible
    :param diameter: the diameter of object
    :param percentage: percentage of model diameter
    >>> import numpy as np
    >>> pose_pred = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_target = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> model_xyz = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
    >>> model_diameter = np.sqrt(3)
    >>> add_metric = ADD(model=model_xyz, symmetric=False, threshold=None, diameter=model_diameter, percentage=0.1)
    >>> add_metric(pose_pred, pose_target)
    True
    >>> add_metric.summarize()
    1.0
    """
    def __init__(self, name='ADD', model=None, symmetric=False, threshold=None, diameter=None, percentage=0.1):
        super().__init__(name)
        self.model = model
        self.symmetric = symmetric
        # An explicit threshold wins; otherwise derive one from the object
        # diameter (e.g. the common 10%-of-diameter criterion).
        if threshold is None:
            threshold = diameter * percentage
        self.threshold = threshold
    def __call__(self, predict_pose, target_pose, add_err=None):
        if add_err is not None:
            verdict = add_err < self.threshold
        else:
            verdict = add(predict_pose, target_pose, self.model, self.symmetric, self.threshold)
        self.result_list.append(verdict)
        return verdict
class MeanRotationError(BaseMetric):
    """
    Mean rotation (angular) error between predicted and target poses.
    :param name: name of the metric
    >>> import numpy as np
    >>> pose_pred = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_target = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_pred1 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 2]])
    >>> pose_target1 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> re_metric = MeanRotationError()
    >>> re_metric(pose_pred, pose_target)
    0.0
    >>> re_metric(pose_pred1, pose_target1)
    0.0
    >>> re_metric.summarize()
    0.0
    """
    def __init__(self, name='Re'):
        super().__init__(name)
    def __call__(self, predict_pose, target_pose, angular_err=None):
        # Compute the angle only when a pre-computed value was not supplied.
        err = angular_error(predict_pose, target_pose) if angular_err is None else angular_err
        self.result_list.append(err)
        return err
class MeanTranslationError(BaseMetric):
    """
    Mean translation error between predicted and target poses.
    :param name: name of the metric
    :param unit_scale: scale for meter
    >>> import numpy as np
    >>> pose_pred = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_target = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_pred1 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 2]])
    >>> pose_target1 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> te_metric = MeanTranslationError(unit_scale=1.0)
    >>> te_metric(pose_pred, pose_target)
    0.0
    >>> te_metric(pose_pred1, pose_target1)
    1.0
    >>> te_metric.summarize()
    0.5
    """
    def __init__(self, name='Te', unit_scale=1.0):
        super().__init__(name)
        self.unit_scale = unit_scale
    def __call__(self, predict_pose, target_pose, translation_err=None):
        # A pre-computed error is recorded as-is (no rescaling); otherwise
        # the error is computed here and converted via unit_scale.
        if translation_err is not None:
            err = translation_err
        else:
            err = translation_error(predict_pose, target_pose) * self.unit_scale
        self.result_list.append(err)
        return err
class Cmd(BaseMetric):
    """
    cm-and-degree criterion: correct when both the translation error (cm)
    and the rotation error (degree) fall below their thresholds.
    :param name: name of the metric
    :param cm_threshold: unit is centimeter
    :param degree_threshold:
    :param unit_scale: scale for meter
    >>> import numpy as np
    >>> pose_pred = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_target = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_pred1 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 2]])
    >>> pose_target1 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> cmd_metric = Cmd(cm_threshold=5, degree_threshold=5, unit_scale=1.0)
    >>> cmd_metric(pose_pred, pose_target)
    True
    >>> cmd_metric(pose_pred1, pose_target1)
    False
    >>> cmd_metric.summarize()
    0.5
    """
    def __init__(self, name='Cmd', cm_threshold=5, degree_threshold=5, unit_scale=1.0):
        super().__init__(name)
        self.cm_threshold = cm_threshold
        self.degree_threshold = degree_threshold
        self.unit_scale = unit_scale
    def __call__(self, predict_pose, target_pose, angular_err=None, translation_err=None):
        if angular_err is not None and translation_err is not None:
            # both errors pre-computed: compare against the thresholds
            # (cm_threshold converted to meters via the 0.01 factor)
            result = (translation_err * self.unit_scale < 0.01 * self.cm_threshold
                      and angular_err < self.degree_threshold)
        else:
            # scale the translation columns to meters on copies so the
            # caller's pose matrices are left untouched
            scaled_pred = predict_pose.copy()
            scaled_target = target_pose.copy()
            scaled_pred[:3, 3] = scaled_pred[:3, 3] * self.unit_scale
            scaled_target[:3, 3] = scaled_target[:3, 3] * self.unit_scale
            result = cm_degree(scaled_pred, scaled_target, self.cm_threshold, self.degree_threshold)
        self.result_list.append(result)
        return result
class ADDAUC(BaseMetric):
    """
    ADD AUC
    :param name: name of the metric
    :param model: shape (N, 3), 3D points cloud of object
    :param max_threshold: max error threshold, so threshold is [0, max]
    :param unit_scale: scale for meter unit
    :param symmetric: whether the object is symmetric or not
    >>> import numpy as np
    >>> model_xyz = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
    >>> pose_pred = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_target = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_pred1 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 2]])
    >>> pose_target1 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> pose_pred2 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 3]])
    >>> pose_target2 = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
    >>> add_auc_metric = ADDAUC(model=model_xyz, max_threshold=0.1, unit_scale=1.0, symmetric=False)
    >>> add_auc_metric(pose_pred, pose_target)
    >>> add_auc_metric(pose_pred1, pose_target1)
    >>> add_auc_metric(pose_pred2, pose_target2)
    >>> add_auc_metric.summarize()
    0.3333333333333333
    """
    def __init__(self, name='ADD_AUC', model=None, max_threshold=None, unit_scale=1.0, symmetric=False):
        super().__init__(name)
        self.model = model  # (N, 3) object point cloud
        self.max_threshold = max_threshold  # AUC integrates thresholds over [0, max_threshold]
        self.unit_scale = unit_scale  # scale factor to meters
        self.symmetric = symmetric  # use nearest-point ADD variant if True
    def __call__(self, predict_pose, target_pose, add_err=None):
        # Record the raw ADD error; the AUC over all recorded errors is
        # computed later in summarize().
        if add_err is None:
            result = add_error(predict_pose, target_pose, self.model, self.symmetric)
        else:
            result = add_err
        self.result_list.append(result)
        # NOTE(review): unlike the sibling metrics there is no `return result`
        # here, so callers (e.g. PoseCompose) receive None. The doctest above
        # expects no output, so this may be intentional — confirm.
    def summarize(self):
        # Area under the accuracy-vs-threshold curve across recorded errors.
        result = add_auc(self.result_list, self.max_threshold, self.unit_scale)
        return result
if __name__ == '__main__':
    # Run the doctest examples embedded in the metric class docstrings above.
    import doctest
    doctest.testmod()
| {
"alphanum_fraction": 0.5998205473,
"author": null,
"avg_line_length": 37.0265780731,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9134f4ce1fe11af69490f8cbde0dd86acc67c4e9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c07b768ed066780e11bbf5af97e45d5352724c6a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "weiwei11/leaf",
"max_forks_repo_path": "leaf/metrics/sixd.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c07b768ed066780e11bbf5af97e45d5352724c6a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "weiwei11/leaf",
"max_issues_repo_path": "leaf/metrics/sixd.py",
"max_line_length": 134,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c07b768ed066780e11bbf5af97e45d5352724c6a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "weiwei11/leaf",
"max_stars_repo_path": "leaf/metrics/sixd.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3326,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11145
} |
#Problem 20:
#n! means n × (n − 1) × ... × 3 × 2 × 1
#For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
#and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#Find the sum of the digits in the number 100!
import math
import sympy as sp
def main(num):
    """Print (and return) the sum of the decimal digits of num!.

    :param num: non-negative integer whose factorial's digits are summed
    :return: the digit sum (e.g. 27 for num=10, per the problem statement)
    """
    # math.factorial computes the exact arbitrary-precision integer,
    # so the third-party sympy dependency is not needed here.
    val = math.factorial(num)
    # Sum the digits via the decimal string representation.
    summa = sum(int(digit) for digit in str(val))
    # print() call form runs under both Python 2 and 3; the original
    # `print summa` statement is a SyntaxError on Python 3.
    print(summa)
    return summa
main(100)
| {
"alphanum_fraction": 0.5409836066,
"author": null,
"avg_line_length": 22.4736842105,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "92ba0605a3330afc5729f406312d700af0a2d69f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-03-06T00:36:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-03-06T00:36:29.000Z",
"max_forks_repo_head_hexsha": "50c630d2c3bcb537033519fc5d857749584aafa7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "KartikKannapur/HackerRank",
"max_forks_repo_path": "Project-Euler-Solutions/020.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "50c630d2c3bcb537033519fc5d857749584aafa7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "KartikKannapur/HackerRank",
"max_issues_repo_path": "Project-Euler-Solutions/020.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "50c630d2c3bcb537033519fc5d857749584aafa7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "KartikKannapur/HackerRank",
"max_stars_repo_path": "Project-Euler-Solutions/020.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 167,
"path": null,
"reason": "import sympy",
"repo": null,
"save_path": null,
"sha": null,
"size": 427
} |
%!TEX TS-program = lualatex
%!TEX encoding = UTF-8 Unicode
\documentclass[letterpaper]{tufte-handout}
%\geometry{showframe} % display margins for debugging page layout
\usepackage{fontspec}
\def\mainfont{Linux Libertine O}
\setmainfont[Ligatures={Common,TeX}, Contextuals={NoAlternate}, BoldFont={* Bold}, ItalicFont={* Italic}, Numbers={OldStyle}]{\mainfont}
\setsansfont[Scale=MatchLowercase]{Linux Biolinum O}
\usepackage{microtype}
\usepackage{graphicx} % allow embedded images
\setkeys{Gin}{width=\linewidth,totalheight=\textheight,keepaspectratio}
\graphicspath{{img/}} % set of paths to search for images
\usepackage{amsmath} % extended mathematics
\usepackage{booktabs} % book-quality tables
\usepackage{units} % non-stacked fractions and better unit spacing
\usepackage{siunitx}
\usepackage{multicol} % multiple column layout facilities
\usepackage{microtype} % duplicate of the earlier microtype load; harmless (the ``filler text'' comment belonged to a removed lipsum package)
\usepackage{hyperref}
%\usepackage{fancyvrb} % extended verbatim environments
% \fvset{fontsize=\normalsize}% default font size for fancy-verbatim environments
\makeatletter
% Paragraph indentation and separation for normal text
\renewcommand{\@tufte@reset@par}{%
\setlength{\RaggedRightParindent}{1.0pc}%
\setlength{\JustifyingParindent}{1.0pc}%
\setlength{\parindent}{1pc}%
\setlength{\parskip}{0pt}%
}
\@tufte@reset@par
% Paragraph indentation and separation for marginal text
\renewcommand{\@tufte@margin@par}{%
\setlength{\RaggedRightParindent}{0pt}%
\setlength{\JustifyingParindent}{0.5pc}%
\setlength{\parindent}{0.5pc}%
\setlength{\parskip}{0pt}%
}
\makeatother
% Set up the spacing using fontspec features
\renewcommand\allcapsspacing[1]{{\addfontfeatures{LetterSpace=15}#1}}
\renewcommand\smallcapsspacing[1]{{\addfontfeatures{LetterSpace=10}#1}}
\title{Study Guide 07\hfill}
\author{The Genetic Toolkit}
\date{} % without \date command, current date is supplied
\begin{document}
\maketitle % this prints the handout title, author, and date
%\printclassoptions
\section{Vocabulary}\marginnote{\textbf{Read:} 303--338; 142--143 (cis- and trans-acting elements). We won't cover all of Ch. 10 but I will highlight many of the ideas in the chapter. This will form the basis for the next several lectures so you should study this chapter carefully. Also, you may want to review chapter 5 if you don't recall the basics of DNA and mutations.\\
\noindent\textbf{Questions:} pgs 339--340, MC 1,2,4--9,11, SA 2--5,6,8,10.\\
\noindent\textbf{Note:}You must be able to recognize the different toolkit genes names used in class and discuss the examples of how the toolkit genes are used.}
\vspace{-1\baselineskip}
\begin{multicols}{2}
serial homology\\
hox genes\\
sonic hedgehog (\textit{shh})\\
bone morphogenetic\\\hspace{1em}protein (\textit{bmp})\\
co-option
\end{multicols}
\section{Concepts}
You should \emph{write} clear and concise answers to each question in the Concepts section. The questions are not necessarily independent. Think broadly across lectures to see ``the big picture.''
\begin{enumerate}
\item What is the genetic toolkit? What is the importance of the genetic toolkit to developmental biology? What is the importance of the genetic toolkit to evolutionary biology?
\item Can you briefly describe the examples of toolkit genes discussed in class that suggest why developmental toolkit genes are likely to be important to understanding the evolutionary history and diversification of life on earth?
\item How have we used phylogenetics to infer that the hox genes were very important to the evolutionary diversity of organismal form (body plans)?
\item What is the evolutionary importance of co-option?
\item Explain the relationship between the order of the hox genes on chromosomes and their relationship to segmentation order in developing animals.
\item What are regulatory genes? Why is the relative concentration gradient of some regulatory genes important to the developing organism? You may have to review the building of the fly slides.
\end{enumerate}
\end{document} | {
"alphanum_fraction": 0.7753641076,
"author": null,
"avg_line_length": 44.0326086957,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "dde7eafc95b75f393041dabfd87b9b9837808c5a",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2721d0e2f33333ca5337ccae56508143bfa481d8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mtaylor-semo/300",
"max_forks_repo_path": "study_guides/evol_StudyGuide07.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2721d0e2f33333ca5337ccae56508143bfa481d8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mtaylor-semo/300",
"max_issues_repo_path": "study_guides/evol_StudyGuide07.tex",
"max_line_length": 377,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "2721d0e2f33333ca5337ccae56508143bfa481d8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mtaylor-semo/300",
"max_stars_repo_path": "study_guides/evol_StudyGuide07.tex",
"max_stars_repo_stars_event_max_datetime": "2020-03-19T03:16:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-19T03:16:10.000Z",
"num_tokens": 1103,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4051
} |
import numpy as np
import matplotlib.pyplot as plt
def plot_bar_figure(
    k8s,
    nomad,
    swarm,
    ylabel,
    xlabel,
    xtick_labels,
    figure_path
):
    """
    Draw a grouped bar chart comparing k8s / nomad / swarm measurements
    and save it to '<figure_path>.png'.

    :param k8s: per-group values for the k8s bars (always drawn)
    :param nomad: per-group values for the nomad bars, or falsy to omit them
    :param swarm: per-group values for the swarm bars
    :param ylabel: label for the y axis
    :param xlabel: label for the x axis
    :param xtick_labels: one label per bar group
    :param figure_path: output file path without the '.png' suffix
    """
    ind = np.arange(len(xtick_labels))
    width = 0.27
    fig = plt.figure()
    ax = fig.add_subplot(111)
    k8s_rects = ax.bar(ind, k8s, width, color='r')
    if nomad:
        nomad_rects = ax.bar(ind + width, nomad, width, color='g')
        # swarm bars go to the right of the nomad bars
        bar_offset = ind + width * 2
    else:
        # no nomad data: swarm bars sit directly next to the k8s bars
        # (also ensures the offset is always defined, unlike before)
        bar_offset = ind + width
    swarm_rects = ax.bar(bar_offset, swarm, width, color='b')
    legend_title = ('k8s', 'nomad', 'swarm',) if nomad else ('k8s', 'swarm')
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xticks(ind + width)
    ax.set_xticklabels(xtick_labels)
    ax.legend(
        legend_title,
        bbox_to_anchor=(0, 1.05, 1, 0.2), loc="lower left",
        fancybox=True, shadow=True, ncol=5, mode="expand"
    )
    def _auto_label(rects):
        # annotate each bar with its (integer) height
        for rect in rects:
            h = rect.get_height()
            ax.text(
                rect.get_x() + rect.get_width() / 2.,
                1.05 * h, '%d' % int(h),
                ha='center', va='bottom'
            )
    _auto_label(k8s_rects)
    if nomad:
        _auto_label(nomad_rects)
    # bug fix: swarm labels were previously drawn only when nomad data
    # was present, leaving two-series (k8s vs swarm) charts unlabeled
    _auto_label(swarm_rects)
    plt.savefig('{0}.png'.format(figure_path))
    plt.close(fig)
| {
"alphanum_fraction": 0.5605413105,
"author": null,
"avg_line_length": 24.6315789474,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f04c490bf18e7f70ede4e0b7d217908fdb6e5553",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d2315e9f8f0700b3db967e385cb804f08902037e",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "mabuaisha/terraform-openstack-faas-nomad",
"max_forks_repo_path": "figures.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d2315e9f8f0700b3db967e385cb804f08902037e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "mabuaisha/terraform-openstack-faas-nomad",
"max_issues_repo_path": "figures.py",
"max_line_length": 66,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d2315e9f8f0700b3db967e385cb804f08902037e",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "mabuaisha/terraform-openstack-faas-nomad",
"max_stars_repo_path": "figures.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 406,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1404
} |
function banner()
    # Render the package name with font #286 from FIGlet's available fonts.
    chosen_font = FIGlet.availablefonts()[286]
    FIGlet.render("XtalsPyTools", chosen_font)
end
| {
"alphanum_fraction": 0.7191011236,
"author": null,
"avg_line_length": 22.25,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "22d6cfc1e9894cd2212086da60c85792b3541486",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2bdf60b7ebb6f159bbe9a7659034df133e4f773a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "SimonEnsemble/XtalsPyTools.jl",
"max_forks_repo_path": "src/misc.jl",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "2bdf60b7ebb6f159bbe9a7659034df133e4f773a",
"max_issues_repo_issues_event_max_datetime": "2022-03-25T20:47:43.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-12-02T00:09:23.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "SimonEnsemble/XtalsPyTools",
"max_issues_repo_path": "src/misc.jl",
"max_line_length": 64,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "2bdf60b7ebb6f159bbe9a7659034df133e4f773a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "SimonEnsemble/XtalsPyTools",
"max_stars_repo_path": "src/misc.jl",
"max_stars_repo_stars_event_max_datetime": "2022-02-22T23:48:41.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-06T04:37:47.000Z",
"num_tokens": 23,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 89
} |
[STATEMENT]
lemma preserves_cones:
fixes J :: "'j comp"
assumes "cone J A D a \<chi>"
shows "cone J B (F o D) (F a) (F o \<chi>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>B) (F \<circ> D) (F a) (F \<circ> \<chi>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>B) (F \<circ> D) (F a) (F \<circ> \<chi>)
[PROOF STEP]
interpret \<chi>: cone J A D a \<chi>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>A) D a \<chi>
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
cone J (\<cdot>\<^sub>A) D a \<chi>
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>A) D a \<chi>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>B) (F \<circ> D) (F a) (F \<circ> \<chi>)
[PROOF STEP]
interpret Fa: constant_functor J B \<open>F a\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. constant_functor J (\<cdot>\<^sub>B) (F a)
[PROOF STEP]
using \<chi>.ide_apex
[PROOF STATE]
proof (prove)
using this:
A.ide a
goal (1 subgoal):
1. constant_functor J (\<cdot>\<^sub>B) (F a)
[PROOF STEP]
by unfold_locales auto
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>B) (F \<circ> D) (F a) (F \<circ> \<chi>)
[PROOF STEP]
have 1: "F o \<chi>.A.map = Fa.map"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. F \<circ> \<chi>.A.map = Fa.map
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. (F \<circ> \<chi>.A.map) x = Fa.map x
[PROOF STEP]
fix f
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. (F \<circ> \<chi>.A.map) x = Fa.map x
[PROOF STEP]
show "(F \<circ> \<chi>.A.map) f = Fa.map f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (F \<circ> \<chi>.A.map) f = Fa.map f
[PROOF STEP]
using is_extensional Fa.is_extensional \<chi>.A.is_extensional
[PROOF STATE]
proof (prove)
using this:
\<not> A.arr ?f \<Longrightarrow> F ?f = B.null
\<not> \<chi>.J.arr ?f \<Longrightarrow> Fa.map ?f = B.null
\<not> \<chi>.J.arr ?f \<Longrightarrow> \<chi>.A.map ?f = A.null
goal (1 subgoal):
1. (F \<circ> \<chi>.A.map) f = Fa.map f
[PROOF STEP]
by (cases "\<chi>.J.arr f", simp_all)
[PROOF STATE]
proof (state)
this:
(F \<circ> \<chi>.A.map) f = Fa.map f
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
F \<circ> \<chi>.A.map = Fa.map
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>B) (F \<circ> D) (F a) (F \<circ> \<chi>)
[PROOF STEP]
interpret \<chi>': natural_transformation J B Fa.map \<open>F o D\<close> \<open>F o \<chi>\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. natural_transformation J (\<cdot>\<^sub>B) Fa.map (F \<circ> D) (F \<circ> \<chi>)
[PROOF STEP]
using 1 horizontal_composite \<chi>.natural_transformation_axioms
as_nat_trans.natural_transformation_axioms
[PROOF STATE]
proof (prove)
using this:
F \<circ> \<chi>.A.map = Fa.map
\<lbrakk>natural_transformation ?A ?B ?F ?G ?\<sigma>; natural_transformation ?B ?C ?H ?K ?\<tau>\<rbrakk> \<Longrightarrow> natural_transformation ?A ?C (?H \<circ> ?F) (?K \<circ> ?G) (?\<tau> \<circ> ?\<sigma>)
natural_transformation J (\<cdot>\<^sub>A) \<chi>.A.map D \<chi>
natural_transformation (\<cdot>\<^sub>A) (\<cdot>\<^sub>B) F F F
goal (1 subgoal):
1. natural_transformation J (\<cdot>\<^sub>B) Fa.map (F \<circ> D) (F \<circ> \<chi>)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>B) (F \<circ> D) (F a) (F \<circ> \<chi>)
[PROOF STEP]
show "cone J B (F o D) (F a) (F o \<chi>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cone J (\<cdot>\<^sub>B) (F \<circ> D) (F a) (F \<circ> \<chi>)
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
cone J (\<cdot>\<^sub>B) (F \<circ> D) (F a) (F \<circ> \<chi>)
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Category3_Limit",
"hexsha": null,
"include": null,
"lang": null,
"length": 20,
"llama_tokens": 1668,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
# based on: https://github.com/rlcode/per
import numpy
import random
import numpy as np
# stored as ( s, a, r, s_ ) in SumTree
class PrioritizedReplayBuffer:
    """
    Experience replay buffer with proportional prioritization, backed by
    a SumTree. Transitions are stored as (s, a, r, s_) tuples.
    """
    def __init__(self, capacity, alpha=0.6, beta=0.4, beta_increment_per_sampling=0.001, e=0.01):
        self.tree = SumTree(capacity)
        self.alpha = alpha  # 0 = uniform sampling, 1 = fully prioritized
        self.beta = beta  # importance-sampling exponent, annealed towards 1
        self.beta_increment_per_sampling = beta_increment_per_sampling
        self.e = e  # small offset so zero-error transitions stay sampleable
    def _get_priority(self, error):
        """Map a TD error to a sampling priority: (|error| + e) ** alpha."""
        return (np.abs(error) + self.e) ** self.alpha
    def push(self, error, transition):
        """Insert a transition with priority derived from its TD error."""
        self.tree.add(self._get_priority(error), transition)
    def sample(self, batch_size):
        """Draw batch_size transitions; return (batch, tree indices, IS weights)."""
        # anneal beta towards 1 once per sampling call
        self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])
        segment = self.tree.total / batch_size
        batch, idxs, priorities = [], [], []
        for k in range(batch_size):
            # stratified sampling: one uniform draw per equal-mass segment
            lo, hi = segment * k, segment * (k + 1)
            idx, priority, transition = self.tree.get(random.uniform(lo, hi))
            priorities.append(priority)
            batch.append(transition)
            idxs.append(idx)
        sampling_probabilities = priorities / self.tree.total
        # importance-sampling weights, normalized by the largest weight
        is_weight = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)
        is_weight /= is_weight.max()
        return batch, idxs, is_weight
    def update(self, idx, error):
        """Refresh the priority of the transition stored at tree index idx."""
        self.tree.update(idx, self._get_priority(error))
    def __len__(self):
        return self.tree.n_entries
# SumTree
# a binary tree data structure where the parent’s value is the sum of its children
class SumTree:
    """
    Binary sum-tree: every internal node holds the sum of its children, so
    priority-proportional sampling and updates are O(log n).

    Layout: internal nodes occupy tree[0 : capacity-1]; the leaf holding
    data slot i lives at tree index i + capacity - 1.
    """
    write = 0  # next data slot to (over)write; class attribute kept for API parity
    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = numpy.zeros(2 * capacity - 1)
        self.data = numpy.zeros(capacity, dtype=object)
        self.n_entries = 0
    # update to the root node
    def _propagate(self, idx, change):
        # Iterative form: the previous recursion computed parent = (idx-1)//2
        # and recursed whenever parent != 0, which for capacity == 1 (leaf at
        # index 0) produced parent == -1 and recursed forever, corrupting
        # tree[-1] on the way. Walking up while idx != 0 fixes that edge case
        # and avoids recursion depth limits for large trees.
        while idx != 0:
            idx = (idx - 1) // 2
            self.tree[idx] += change
    # find sample on leaf node
    def _retrieve(self, idx, s):
        # Descend until a leaf: go left when s falls inside the left subtree's
        # mass, otherwise subtract that mass and go right.
        while True:
            left = 2 * idx + 1
            if left >= len(self.tree):
                return idx
            if s <= self.tree[left]:
                idx = left
            else:
                s -= self.tree[left]
                idx = left + 1
    @property
    def total(self):
        """Sum of all stored priorities (the root value)."""
        return self.tree[0]
    # store priority and sample
    def add(self, p, data):
        """Store `data` with priority `p`, overwriting the oldest slot when full."""
        idx = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(idx, p)
        self.write += 1
        if self.write >= self.capacity:
            self.write = 0
        if self.n_entries < self.capacity:
            self.n_entries += 1
    # update priority
    def update(self, idx, p):
        """Set leaf `idx` to priority `p` and propagate the delta upward."""
        change = p - self.tree[idx]
        self.tree[idx] = p
        self._propagate(idx, change)
    # get priority and sample
    def get(self, s):
        """Return (tree_index, priority, data) for cumulative mass value `s`."""
        idx = self._retrieve(0, s)
        dataIdx = idx - self.capacity + 1
        return (idx, self.tree[idx], self.data[dataIdx])
"alphanum_fraction": 0.5834095761,
"author": null,
"avg_line_length": 27.7881355932,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d1fb7d49c476606ed9c646679e69c5635563930d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f2ca127119ffbfb3f7d2855eff7e7473e0bb3a80",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "abstractpaper/prop",
"max_forks_repo_path": "prop/buffers/priority_replay_buffer.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f2ca127119ffbfb3f7d2855eff7e7473e0bb3a80",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "abstractpaper/prop",
"max_issues_repo_path": "prop/buffers/priority_replay_buffer.py",
"max_line_length": 97,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f2ca127119ffbfb3f7d2855eff7e7473e0bb3a80",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "abstractpaper/prop",
"max_stars_repo_path": "prop/buffers/priority_replay_buffer.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 821,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3279
} |
import gym
import numpy as np
import sys
import matplotlib
from random import randint
if "../" not in sys.path:
sys.path.append("../")
from collections import defaultdict
from lib.envs.blackjack import BlackjackEnv
from lib import plotting
matplotlib.style.use('ggplot')
env = BlackjackEnv()
def make_epsilon_greedy_policy(Q, epsilon, nA):
    """
    Build an epsilon-greedy policy from an action-value mapping.

    :param Q: mapping from observation to an array of nA action values
    :param epsilon: exploration probability in [0, 1]
    :param nA: number of actions
    :return: function observation -> action index
    """
    def policy_fn(observation):
        # Explore with probability epsilon. The original decile trick
        # (randint(1,10) <= epsilon*10) only supported epsilon values that
        # are multiples of 0.1; a direct uniform draw works for any epsilon.
        if np.random.rand() < epsilon:
            # take random action
            return np.random.randint(nA)
        # take greedy action
        return np.argmax(Q[observation])
    return policy_fn
def mc_control_epsilon_greedy(env, num_episodes, epsilon, df=1.0):
    """
    On-policy first-visit Monte Carlo control with an epsilon-greedy policy.

    :param env: episodic environment exposing reset()/step() (gym-style)
    :param num_episodes: number of episodes to sample
    :param epsilon: exploration probability of the behavior policy
    :param df: discount factor
    :return: (Q, policy) — Q maps state -> action-value array, policy maps
        an observation to an action index
    """
    # visit counts per (state, action), for incremental-mean updates
    returns_count = defaultdict(float)
    # is visited dictionary for episodic book keeping (0 = not yet updated)
    isVisited = defaultdict(int)
    # action-values
    Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # policy to be learnt (epsilon-greedy w.r.t. the evolving Q)
    policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
    for ep in range(1, num_episodes + 1):
        # progress report; print() call form runs under Python 2 and 3
        # (the original `print ...` statement was Python-2 only)
        if ep % 1000 == 0:
            print("episode no: %d/%d" % (ep, num_episodes))
            sys.stdout.flush()
        # generate an episode (capped at 100 steps), accumulating the total
        # discounted return G_0 as we go
        episode = []
        G = 0
        state = env.reset()
        for t in range(100):
            action = policy(state)
            sa_pair = (state, action)
            isVisited[sa_pair] = 0
            next_state, reward, done, _ = env.step(action)
            episode.append((state, action, reward))
            G += (df ** t) * reward
            if done:
                break
            state = next_state
        # first-visit incremental update: walking the episode forward,
        # G always holds the return from the current timestep
        for state, action, reward in episode:
            sa_pair = (state, action)
            if isVisited[sa_pair] == 0:
                returns_count[sa_pair] += 1
                Q[state][action] += (G - Q[state][action]) / returns_count[sa_pair]
                isVisited[sa_pair] = 1
            # bug fix: peel G back to the next timestep's return on EVERY
            # step, not only on first visits — otherwise later pairs after
            # a repeated (state, action) would be updated with a stale G
            G = (G - reward) / df
    return Q, policy
# Learn Q and the epsilon-greedy policy on Blackjack (500k sampled episodes).
Q, policy = mc_control_epsilon_greedy(env, num_episodes=500000, epsilon=0.1)
# For plotting: create a state-value function from the action-value function
# by picking the best action at each state.
V = defaultdict(float)
for state, actions in Q.items():
    action_value = np.max(actions)
    V[state] = action_value
plotting.plot_value_function(V, title="Optimal Value Function")
| {
"alphanum_fraction": 0.6056511057,
"author": null,
"avg_line_length": 29.0714285714,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "debbb33fea2bbbe34bedcba38d40a7eebcddb9b4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "06681b1e680f43634fae209341de1d2dffc87d48",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "viv92/rl-implementations",
"max_forks_repo_path": "MC/MC_control_onPolicy_epsGreedy_incremental.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "06681b1e680f43634fae209341de1d2dffc87d48",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "viv92/rl-implementations",
"max_issues_repo_path": "MC/MC_control_onPolicy_epsGreedy_incremental.py",
"max_line_length": 83,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "06681b1e680f43634fae209341de1d2dffc87d48",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "viv92/wildML_RLimplementations",
"max_stars_repo_path": "MC/MC_control_onPolicy_epsGreedy_incremental.py",
"max_stars_repo_stars_event_max_datetime": "2019-12-10T07:02:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-12-10T07:02:15.000Z",
"num_tokens": 590,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2442
} |
"""
This function finds the hourly availability data for projects
to be passed to expected and actual market clearing modules.
"""
function find_project_availability_data(project::P,
availability_df::DataFrames.DataFrame,
num_invperiods::Int64,
num_hours::Int64) where P <: Project{<:BuildPhase}
type = get_type(get_tech(project))
zone = get_zone(get_tech(project))
if in(get_name(project), names(availability_df))
availability_raw = availability_df[:, Symbol(get_name(project))]
elseif in("$(type)_$(zone)", names(availability_df))
availability_raw = availability_df[:, Symbol("$(type)_$(zone)")]
else
error("project availiability data not found!")
end
data_years = unique(availability_df.Year)
data_hours = count(i -> i == data_years[1], availability_df.Year)
@assert data_hours == num_hours
availability_input = zeros(num_invperiods, data_hours)
# Fill raw availability data into yearly and hourly values
for y in 1:num_invperiods
availability_input[y, 1:data_hours] = availability_raw[1:data_hours]
end
return availability_input
end
"""
This function extracts the technical details for generators
to be passed to expected and actual market clearing modules.
"""
function get_technical_details(project::P) where P <: GeneratorEMIS{<: BuildPhase}
project_type = "generator"
min_input = 0.0
max_input = 0.0
efficiency_in = 0.0
efficiency_out = 0.0
min_storage = 0.0
max_storage = 0.0
init_storage = 0.0
return project_type, min_input, max_input, efficiency_in, efficiency_out, min_storage, max_storage, init_storage
end
"""
This function extracts the technical details for storage units
to be passed to expected and actual market clearing modules.
"""
function get_technical_details(project::P) where P <: StorageEMIS{<: BuildPhase}
tech = get_tech(project)
project_type = "storage"
input_power_limits = get_input_active_power_limits(tech)
min_input = input_power_limits[:min]
max_input = input_power_limits[:max]
efficiency = get_efficiency(tech)
efficiency_in = efficiency[:in]
efficiency_out = efficiency[:out]
storage_capacity = get_storage_capacity(tech)
min_storage = storage_capacity[:min]
max_storage = storage_capacity[:max]
init_storage = get_soc(tech)
return project_type, min_input, max_input, efficiency_in, efficiency_out, min_storage, max_storage, init_storage
end
# Fallback: products other than `Energy` have no energy marginal cost,
# signalled by returning `nothing`.
get_marginal_cost_energy(product::T) where T <: Product = nothing

# Energy products report their marginal cost directly.
get_marginal_cost_energy(product::Energy) = get_marginal_cost(product)
"""
This function returns the project's marginal cost of energy product to be passed to economic dispatch and CEM.
"""
function get_project_energy_cost(project::P) where P <: Project{<: BuildPhase}
marginal_cost_energy = 0.0
for product in find_operating_products(get_products(project))
marginal_cost_temp = get_marginal_cost_energy(product)
if !isnothing(marginal_cost_temp)
marginal_cost_energy = marginal_cost_temp
end
end
return marginal_cost_energy
end
# Fallback: products other than reserve-up have no reserve-up marginal cost.
get_marginal_cost_reserveup(product::T) where T <: Product = nothing

# Reserve-up products report their marginal cost directly.
get_marginal_cost_reserveup(product::OperatingReserve{ReserveUpEMIS}) = get_marginal_cost(product)
"""
This function returns the project's marginal cost of reserve up product to be passed to economic dispatch and CEM.
"""
function get_project_reserve_up_cost(project::P, voll::Float64) where P <: Project{<: BuildPhase}
marginal_cost_reserve_up = voll
for product in find_operating_products(get_products(project))
marginal_cost_temp = get_marginal_cost_reserveup(product)
if !isnothing(marginal_cost_temp)
marginal_cost_reserve_up = marginal_cost_temp
end
end
return marginal_cost_reserve_up
end
# Fallback: products other than reserve-up have no reserve-up limit.
get_max_reserveup(product::T) where T <: Product = nothing

# Reserve-up products report their participation limit directly.
get_max_reserveup(product::OperatingReserve{ReserveUpEMIS}) = get_max_limit(product)
"""
This function returns the project's maximum reserve up participation limit to be passed to economic dispatch and CEM.
"""
function get_project_max_reserveup(project::P) where P <: Project{<: BuildPhase}
max_reserveup = 0.
for product in find_operating_products(get_products(project))
max_reserveup_temp = get_max_reserveup(product)
if !isnothing(max_reserveup_temp)
max_reserveup = max_reserveup_temp * get_maxcap(project)
end
end
return max_reserveup
end
# Fallback: products other than reserve-down have no reserve-down marginal cost.
get_marginal_cost_reservedown(product::T) where T <: Product = nothing

# Reserve-down products report their marginal cost directly.
get_marginal_cost_reservedown(product::OperatingReserve{ReserveDownEMIS}) = get_marginal_cost(product)
"""
This function returns the project's marginal cost of reserve down product to be passed to economic dispatch and CEM.
"""
function get_project_reserve_down_cost(project::P, voll::Float64) where P <: Project{<: BuildPhase}
marginal_cost_reserve_down = voll
for product in find_operating_products(get_products(project))
marginal_cost_temp = get_marginal_cost_reservedown(product)
if !isnothing(marginal_cost_temp)
marginal_cost_reserve_down = marginal_cost_temp
end
end
return marginal_cost_reserve_down
end
# Fallback: products other than reserve-down have no reserve-down limit.
get_max_reservedown(product::T) where T <: Product = nothing

# Reserve-down products report their participation limit directly.
get_max_reservedown(product::OperatingReserve{ReserveDownEMIS}) = get_max_limit(product)
"""
This function returns the project's maximum reserve down participation limit to be passed to economic dispatch and CEM.
"""
function get_project_max_reservedown(project::P) where P <: Project{<: BuildPhase}
max_reservedown = 0.
for product in find_operating_products(get_products(project))
max_reservedown_temp = get_max_reservedown(product)
if !isnothing(max_reservedown_temp)
max_reservedown = max_reservedown_temp * get_maxcap(project)
end
end
return max_reservedown
end
"""
This function returns the project's derating factor to be passed to CEM and capacity market clearing module.
Returns 0 if there is no capacity market participation.
"""
function get_project_derating(project::P) where P <: Project{<: BuildPhase}
derating_factor = 0.
for product in get_products(project)
derating_temp = get_derating(product)
if !isnothing(derating_temp)
derating_factor = derating_temp
end
end
return derating_factor
end
"""
This function returns the project's capacity market bid to be passed to the capacity market clearing module.
Returns 0 if there is no capacity market participation.
"""
function get_project_capacity_market_bid(project::P) where P <: Project{<: BuildPhase}
capacity_bid = 0.
for product in get_products(project)
capacity_bid_temp = get_capacity_bid(product)
if !isnothing(capacity_bid_temp)
capacity_bid = capacity_bid_temp
end
end
return capacity_bid
end
"""
This function returns the project's REC energy output to be passed the REC market market clearing module.
Returns 0 if there is no REC market participation.
"""
function get_project_rec_output(project::P) where P <: Project{<: BuildPhase}
rec_output = 0.
for product in get_products(project)
rec_output_temp = get_rec_certificates(product)
if !isnothing(rec_output_temp)
rec_output = rec_output_temp
end
end
return rec_output
end
"""
This function returns the project's REC market bid to be passed the REC market market clearing module.
Returns 0 if there is no REC market participation.
"""
function get_project_rec_market_bid(project::P) where P <: Project{<: BuildPhase}
rec_bid = 0.
for product in get_products(project)
rec_bid_temp = get_rec_bid(product)
if !isnothing(rec_bid_temp)
rec_bid = rec_bid_temp
end
end
return rec_bid
end
"""
This function creates the MarketProject struct to be passed to CEM price projection and endogeneous Economic Dispatch models.
"""
function populate_market_project(project::P,
reserve_up_cost::Float64,
reserve_down_cost::Float64,
project_type::String,
min_input::Float64,
max_input::Float64,
efficiency_in::Float64,
efficiency_out::Float64,
min_storage::Float64,
max_storage::Float64,
init_storage::Float64,
availability_input::Array{Float64, 2},
existing_units::Int64,
units_inqueue::Vector{Float64},
remaining_lag_time::Int64,
remaining_life_time::Int64,
iteration_year::Int64,
num_invperiods::Int64) where P <: Project{<: BuildPhase}
finance_data = get_finance_data(project)
market_project = MarketProject(
get_name(project), # name
project_type, # is project of storage type
get_type(get_tech(project)), # technology type
get_fixed_OM_cost(finance_data), # annualfixed O&M costs
get_queue_cost(finance_data), # Queue cost
get_project_energy_cost(project), # marginal cost of energy
reserve_up_cost, # marginal cost of reserve down
reserve_down_cost, # marginal cost of reserve up
get_investment_cost(finance_data)[iteration_year:iteration_year + num_invperiods - 1], # yearly investment cost
get_discount_rate(finance_data), # discount rate
get_mincap(project), # minimum capacity
get_maxcap(project), # maximum capacity
min_input, # minimum input power
max_input, # maximum input power
efficiency_in, # input efficiency
efficiency_out, # output efficiency
min_storage, # minimum storage capacity
max_storage, # maximum storage capacity
init_storage, # initial storage level
availability_input, # Hourly availability
get_project_derating(project), # de-rating factor
get_ramp_limits(get_tech(project)), # ramp limits
get_project_max_reserveup(project), # maximum reserve up limit
get_project_max_reservedown(project), # maximum reserve down limit
existing_units, # existing units
units_inqueue, # units in queue
get_lag_time(finance_data), # construction lead time
remaining_lag_time, # remaining construction time
1, # maximum units
1, # base cost units
get_capex_years(finance_data), # capital cost recovery years
get_life_time(finance_data), # total life_time
remaining_life_time, # remaining life_time
in(:Capacity, get_name.(get_products(project))), # eligible for capacity markets
in(:REC, get_name.(get_products(project))), # eligible for rps compliance
get_zone(get_tech(project)), # project zone
[get_ownedby(finance_data)]) # owned by
return market_project
end
"""
This function processes data of Existing projects to create the MarketProject struct
passed to CEM price projection and endogeneous Economic Dispatch models.
"""
function create_market_project(project::P,
pricecap_energy::Float64,
pricecap_reserveup::Float64,
pricecap_reservedown::Float64,
max_peak_loads::AxisArrays.AxisArray{Float64, 1},
iteration_year::Int64,
num_hours::Int64,
num_invperiods::Int64,
availability_df::DataFrames.DataFrame) where P <: Project{Existing}
finance_data = get_finance_data(project)
queue_time = length(get_queue_cost(finance_data))
# Get technical characteristics based on project type
project_type,
min_input,
max_input,
efficiency_in,
efficiency_out,
min_storage,
max_storage,
init_storage = get_technical_details(project)
availability_input = find_project_availability_data(project, availability_df, num_invperiods, num_hours)
# Calculate remaining life_time
remaining_life_time = min(get_life_time(finance_data), get_end_life_year(project) - iteration_year + 1)
units_inqueue = zeros(queue_time)
# Existing project have completed their queue time
units_inqueue[queue_time] = 1.0
existing_units = 1
remaining_lag_time = 0
market_project = populate_market_project(project,
get_project_reserve_up_cost(project, pricecap_reserveup),
get_project_reserve_down_cost(project, pricecap_reservedown),
project_type,
min_input,
max_input,
efficiency_in,
efficiency_out,
min_storage,
max_storage,
init_storage,
availability_input,
existing_units,
units_inqueue,
remaining_lag_time,
remaining_life_time,
iteration_year,
num_invperiods)
return market_project
end
"""
This function processes data of Option projects to create the MarketProject struct
passed to CEM price projection and endogeneous Economic Dispatch models.
"""
function create_market_project(project::P,
pricecap_energy::Float64,
pricecap_reserveup::Float64,
pricecap_reservedown::Float64,
max_peak_loads::AxisArrays.AxisArray{Float64, 1},
iteration_year::Int64,
num_hours::Int64,
num_invperiods::Int64,
availability_df::DataFrames.DataFrame) where P <: Project{Option}
# Get technical characteristics based on project type
project_type,
min_input,
max_input,
efficiency_in,
efficiency_out,
min_storage,
max_storage,
init_storage = get_technical_details(project)
availability_input = find_project_availability_data(project, availability_df, num_invperiods, num_hours)
finance_data = get_finance_data(project)
queue_time = length(get_queue_cost(finance_data))
# End of life_time for option projects can be up to the end of horizon.
remaining_life_time = num_invperiods
units_inqueue = zeros(queue_time)
existing_units = 0
remaining_lag_time = get_lag_time(finance_data) + queue_time
market_project = populate_market_project(project,
get_project_reserve_up_cost(project, pricecap_reserveup),
get_project_reserve_down_cost(project, pricecap_reservedown),
project_type,
min_input,
max_input,
efficiency_in,
efficiency_out,
min_storage,
max_storage,
init_storage,
availability_input,
existing_units,
units_inqueue,
remaining_lag_time,
remaining_life_time,
iteration_year,
num_invperiods)
market_project.name = "option_$(market_project.tech_type)_$(market_project.zone)"
if market_project.derating_factor == 0.0
derating = 0.5 # Dummy derating for maximum capacity if capacity product doesn't exist
else
derating = market_project.derating_factor
end
modified_max_gen = max_peak_loads[market_project.zone] / derating
market_project.max_new_options = round(modified_max_gen / market_project.max_gen)
return market_project
end
"""
This function processes data of Planned projects to create the MarketProject struct
passed to CEM price projection and endogeneous Economic Dispatch models.
"""
function create_market_project(project::P,
pricecap_energy::Float64,
pricecap_reserveup::Float64,
pricecap_reservedown::Float64,
max_peak_loads::AxisArrays.AxisArray{Float64, 1},
iteration_year::Int64,
num_hours::Int64,
num_invperiods::Int64,
availability_df::DataFrames.DataFrame) where P <: Project{Planned}
# Get technical characteristics based on project type
project_type,
min_input,
max_input,
efficiency_in,
efficiency_out,
min_storage,
max_storage,
init_storage = get_technical_details(project)
availability_input = find_project_availability_data(project, availability_df, num_invperiods, num_hours)
finance_data = get_finance_data(project)
queue_time = length(get_queue_cost(finance_data))
# Number of construction years left
remaining_lag_time = get_construction_year(project) - iteration_year
units_inqueue = zeros(queue_time)
# Planned units have completed their queue time
units_inqueue[queue_time] = 1.0
# Calculate remaining life_time
remaining_life_time = get_life_time(finance_data) + remaining_lag_time
existing_units = 0
market_project = populate_market_project(project,
get_project_reserve_up_cost(project, pricecap_reserveup),
get_project_reserve_down_cost(project, pricecap_reservedown),
project_type,
min_input,
max_input,
efficiency_in,
efficiency_out,
min_storage,
max_storage,
init_storage,
availability_input,
existing_units,
units_inqueue,
remaining_lag_time,
remaining_life_time,
iteration_year,
num_invperiods)
return market_project
end
"""
This function processes data of Queue projects to create the MarketProject struct
passed to CEM price projection and endogeneous Economic Dispatch models.
"""
function create_market_project(project::P,
pricecap_energy::Float64,
pricecap_reserveup::Float64,
pricecap_reservedown::Float64,
max_peak_loads::AxisArrays.AxisArray{Float64, 1},
iteration_year::Int64,
num_hours::Int64,
num_invperiods::Int64,
availability_df::DataFrames.DataFrame) where P <: Project{Queue}
# Get technical characteristics based on project type
project_type,
min_input,
max_input,
efficiency_in,
efficiency_out,
min_storage,
max_storage,
init_storage = get_technical_details(project)
availability_input = find_project_availability_data(project, availability_df, num_invperiods, num_hours)
finance_data = get_finance_data(project)
queue_time = length(get_queue_cost(finance_data))
units_inqueue = zeros(queue_time)
queue_year = iteration_year - get_decision_year(project)
# Calculate number of years left in queue
if queue_year <= queue_time
units_inqueue[queue_year] = 1.0
end
remaining_lag_time = get_lag_time(finance_data) + queue_time - queue_year
# Calculate remaining life_time
remaining_life_time = get_life_time(finance_data) + get_lag_time(finance_data) + queue_time - queue_year
existing_units = 0
market_project = populate_market_project(project,
get_project_reserve_up_cost(project, pricecap_reserveup),
get_project_reserve_down_cost(project, pricecap_reservedown),
project_type,
min_input,
max_input,
efficiency_in,
efficiency_out,
min_storage,
max_storage,
init_storage,
availability_input,
existing_units,
units_inqueue,
remaining_lag_time,
remaining_life_time,
iteration_year,
num_invperiods)
return market_project
end
| {
"alphanum_fraction": 0.5492163009,
"author": null,
"avg_line_length": 43.3276740238,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "2a3c9604ce2aabddff396bf832e72ea9d4d9f011",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8217a2e8eb2abc9fc2a8dde3f3c09c22e3b0b332",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "NREL/EMISAgentSimulation.jl",
"max_forks_repo_path": "src/struct_creators/market_structs/market_project_creator.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8217a2e8eb2abc9fc2a8dde3f3c09c22e3b0b332",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "NREL/EMISAgentSimulation.jl",
"max_issues_repo_path": "src/struct_creators/market_structs/market_project_creator.jl",
"max_line_length": 128,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "8217a2e8eb2abc9fc2a8dde3f3c09c22e3b0b332",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "NREL/EMISAgentSimulation.jl",
"max_stars_repo_path": "src/struct_creators/market_structs/market_project_creator.jl",
"max_stars_repo_stars_event_max_datetime": "2021-11-10T04:33:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-14T11:02:26.000Z",
"num_tokens": 4298,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 25520
} |
# -*- coding: utf-8 -*-
import numpy as np
from numpy import testing
from sktime.classification.dictionary_based import BOSSEnsemble, BOSSIndividual
from sktime.datasets import load_gunpoint, load_italy_power_demand
def test_boss_on_gunpoint():
    """Regression test: BOSSEnsemble probabilities on gunpoint are stable."""
    # Load the gunpoint dataset and pick a deterministic subset of cases.
    X_train, y_train = load_gunpoint(split="train", return_X_y=True)
    X_test, y_test = load_gunpoint(split="test", return_X_y=True)
    indices = np.random.RandomState(0).permutation(10)

    # Fit the ensemble with a fixed seed so results are reproducible.
    clf = BOSSEnsemble(random_state=0)
    clf.fit(X_train.iloc[indices], y_train[indices])

    # Predicted probabilities must match the recorded reference values.
    testing.assert_array_equal(
        clf.predict_proba(X_test.iloc[indices]), boss_gunpoint_probas
    )
def test_individual_boss_on_gunpoint():
    """Regression test: BOSSIndividual probabilities on gunpoint are stable."""
    # Load the gunpoint dataset and pick a deterministic subset of cases.
    X_train, y_train = load_gunpoint(split="train", return_X_y=True)
    X_test, y_test = load_gunpoint(split="test", return_X_y=True)
    indices = np.random.RandomState(0).permutation(10)

    # Fit a single BOSS classifier with a fixed seed.
    clf = BOSSIndividual(random_state=0)
    clf.fit(X_train.iloc[indices], y_train[indices])

    # Predicted probabilities must match the recorded reference values.
    testing.assert_array_equal(
        clf.predict_proba(X_test.iloc[indices]), individual_boss_gunpoint_probas
    )
def test_boss_on_power_demand():
    """BOSSEnsemble should reach at least 80% accuracy on italy power demand."""
    X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
    X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
    indices = np.random.RandomState(0).permutation(100)

    clf = BOSSEnsemble(random_state=47)
    clf.fit(X_train, y_train)

    # Accuracy is measured on a fixed random subset of the test set.
    assert clf.score(X_test.iloc[indices], y_test[indices]) >= 0.80
# Reference class probabilities for BOSSEnsemble on the gunpoint subset
# (rows = test cases, columns = classes); regenerate via the commented-out
# print_array helper at the bottom of this file.
boss_gunpoint_probas = np.array(
    [
        [0.0, 1.0],
        [0.05263157894736842, 0.9473684210526315],
        [0.8421052631578947, 0.15789473684210525],
        [0.8947368421052632, 0.10526315789473684],
        [0.0, 1.0],
        [0.7368421052631579, 0.2631578947368421],
        [0.0, 1.0],
        [0.8947368421052632, 0.10526315789473684],
        [0.7368421052631579, 0.2631578947368421],
        [0.0, 1.0],
    ]
)
# Reference class probabilities for BOSSIndividual on the gunpoint subset;
# a single BOSS classifier votes 0/1 for each case.
individual_boss_gunpoint_probas = np.array(
    [
        [0.0, 1.0],
        [0.0, 1.0],
        [1.0, 0.0],
        [1.0, 0.0],
        [0.0, 1.0],
        [1.0, 0.0],
        [0.0, 1.0],
        [1.0, 0.0],
        [0.0, 1.0],
        [0.0, 1.0],
    ]
)
# def print_array(array):
# print('[')
# for sub_array in array:
# print('[', end='')
# for value in sub_array:
# print(value.astype(str), end='')
# print(', ', end='')
# print('],')
# print(']')
#
#
# if __name__ == "__main__":
# X_train, y_train = load_gunpoint(split='train', return_X_y=True)
# X_test, y_test = load_gunpoint(split='test', return_X_y=True)
# indices = np.random.RandomState(0).permutation(10)
#
# boss = BOSSEnsemble(random_state=0)
# indiv_boss = BOSSIndividual(random_state=0)
#
# boss.fit(X_train.iloc[indices], y_train[indices])
# probas = boss.predict_proba(X_test.iloc[indices])
# print_array(probas)
#
# indiv_boss.fit(X_train.iloc[indices], y_train[indices])
# probas = indiv_boss.predict_proba(X_test.iloc[indices])
# print_array(probas)
| {
"alphanum_fraction": 0.5403245943,
"author": null,
"avg_line_length": 23.5588235294,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f6b4c841405a82e058cad54908dbffb27995a850",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a6f17bd586df6bbc8e6c783f08eda4c30d2353f9",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "alwinw/sktime",
"max_forks_repo_path": "sktime/classification/dictionary_based/tests/test_boss.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a6f17bd586df6bbc8e6c783f08eda4c30d2353f9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "alwinw/sktime",
"max_issues_repo_path": "sktime/classification/dictionary_based/tests/test_boss.py",
"max_line_length": 79,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "a6f17bd586df6bbc8e6c783f08eda4c30d2353f9",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "alwinw/sktime",
"max_stars_repo_path": "sktime/classification/dictionary_based/tests/test_boss.py",
"max_stars_repo_stars_event_max_datetime": "2020-10-05T19:49:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-05T19:49:24.000Z",
"num_tokens": 1115,
"path": null,
"reason": "import numpy,from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4005
} |
#!/usr/bin/env python
# coding: utf-8
# ### This script was used to create a wikipedia linkfile for the entities in the FB15k and not the complete Freebase
import utils
import argparse
import logging
import os
import pickle
import string
import time
import numpy as np
import pandas as pd
# Paths to the FB15k data files and the trained DistMult weights.
data_repo_root = "../data/fb15k/"
model_weights = "dumps/fb15k237_distmult_dump_norm.pkl"

wiki_file = os.path.join(data_repo_root, "mid2wikipedia.tsv")  # mid -> wikipedia name/url
orig_file = os.path.join(data_repo_root, "entity_mid_name_type_typeid.txt")  # FB15k entity list
intersection_file = os.path.join(data_repo_root, "mid2wikipedia_cleaned.tsv")  # output file

# NOTE(review): loaded here but never used later in this script —
# presumably kept for interactive inspection; confirm before removing.
distmult_dump = utils.read_pkl(model_weights)
def read_data(path):
    """Read a mid2wikipedia TSV file.

    Each line has (at least) three tab-separated columns:
    mid, wikipedia name, wikipedia url.

    Returns:
        tuple[dict, dict]: mid -> name and mid -> url mappings.
    """
    mapping_name = {}
    mapping_url = {}
    # Use a context manager so the file is closed even on error.
    with open(path, "r") as f:
        for line in f:
            # Strip the trailing newline so the last column (the url) is
            # clean; the original version left "\n" attached to every url.
            line_arr = line.rstrip("\n").split("\t")
            mapping_name[line_arr[0]] = line_arr[1]
            mapping_url[line_arr[0]] = line_arr[2]
    return mapping_name, mapping_url
# Build the cleaned intersection file: for every FB15k entity keep its
# wikipedia name/url when available, otherwise fall back to the original
# entity name with an empty url column.
mapping_name, mapping_url = read_data(wiki_file)

# Context managers replace the manual open/close pair of the original.
with open(orig_file, "r") as reader, open(intersection_file, "w") as writer:
    for line in reader:
        line_arr = line.split("\t")
        mid = line_arr[0]
        # Renamed from `string`, which shadowed the stdlib module imported
        # at the top of this file.
        if mid in mapping_name:
            out_line = "\t".join([mid, mapping_name[mid], mapping_url[mid]])
        else:
            out_line = "\t".join([mid, line_arr[1], ""])
        print(out_line, file=writer)
| {
"alphanum_fraction": 0.6936744847,
"author": null,
"avg_line_length": 28.14,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1b04deee35cee0873abef3909bd56542883d04fb",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-06-07T01:46:44.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-06-07T01:46:44.000Z",
"max_forks_repo_head_hexsha": "e25ad0296137ed354593c74509b077a22f60425e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "NinaCalvi/OKBC",
"max_forks_repo_path": "scripts/entity_wiki_intersect.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "e25ad0296137ed354593c74509b077a22f60425e",
"max_issues_repo_issues_event_max_datetime": "2021-09-14T15:28:54.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-09-12T17:49:09.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "NinaCalvi/OKBC",
"max_issues_repo_path": "scripts/entity_wiki_intersect.py",
"max_line_length": 117,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "e25ad0296137ed354593c74509b077a22f60425e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "NinaCalvi/OKBC",
"max_stars_repo_path": "scripts/entity_wiki_intersect.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-13T10:15:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-06T14:31:18.000Z",
"num_tokens": 369,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1407
} |
source('covidmap/stage1.r')

# Wrapper script to recombine the results produced
# in parallel from a run of stage1_run.r.

# Parse command-line options, then merge the per-chunk stage 1 outputs.
opt = covidmap_stage1_get_cmdline_options()
covidmap_stage1_combine(opt)
| {
"alphanum_fraction": 0.8144329897,
"author": null,
"avg_line_length": 24.25,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "e494aa20aaf060a163005a5388d0a6af372ee20c",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5ae74e8b0e110cba578fe19159c0f87ea52fa495",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "oxcsml/Rmap",
"max_forks_repo_path": "covidmap/stage1_combine.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5ae74e8b0e110cba578fe19159c0f87ea52fa495",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "oxcsml/Rmap",
"max_issues_repo_path": "covidmap/stage1_combine.r",
"max_line_length": 50,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "5ae74e8b0e110cba578fe19159c0f87ea52fa495",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "oxcsml/Rmap",
"max_stars_repo_path": "covidmap/stage1_combine.r",
"max_stars_repo_stars_event_max_datetime": "2021-06-03T10:25:31.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-03T10:25:31.000Z",
"num_tokens": 51,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 194
} |
theory Variable
imports Main
begin

(* Variables are naturals wrapped in a single constructor. *)
datatype var = V nat

(* fresh' xs x walks downward from x; whenever V (Suc x) is already in xs
   it removes it and recurses on x, otherwise Suc x itself is returned. *)
primrec fresh' :: "var set \<Rightarrow> nat \<Rightarrow> nat" where
  "fresh' xs 0 = 0"
| "fresh' xs (Suc x) = (if V (Suc x) \<in> xs then fresh' (xs - {V (Suc x)}) x else Suc x)"

(* A variable not occurring in xs, obtained by searching down from card xs. *)
definition fresh :: "var set \<Rightarrow> var" where
  "fresh xs = V (fresh' xs (card xs))"

(* Extend a variable set with one fresh variable. *)
abbreviation extend_set :: "var set \<Rightarrow> var set" where
  "extend_set vs \<equiv> insert (fresh vs) vs"

(* fresh' never reaches its upper bound Suc x on a finite set. *)
lemma [simp]: "finite xs \<Longrightarrow> fresh' xs x \<noteq> Suc x"
proof -
  assume "finite xs"
  hence "fresh' xs x < Suc x" by simp
  thus ?thesis by simp
qed

(* Starting the search at card xs guarantees the result is not in xs. *)
lemma [simp]: "finite xs \<Longrightarrow> x = card xs \<Longrightarrow> V (fresh' xs x) \<notin> xs"
proof (induction x arbitrary: xs)
  case (Suc x)
  moreover hence "finite (xs - {V (Suc x)})" by simp
  moreover from Suc have "V (Suc x) \<in> xs \<Longrightarrow> x = card (xs - {V (Suc x)})" by simp
  ultimately have "V (Suc x) \<in> xs \<Longrightarrow> V (fresh' (xs - {V (Suc x)}) x) \<notin> xs - {V (Suc x)}" by metis
  moreover from Suc(2) have "fresh' (xs - {V (Suc x)}) x \<noteq> Suc x" by simp
  ultimately show ?case by simp
qed simp_all

(* Main freshness property used by clients of this theory. *)
lemma fresh_is_fresh [simp]: "finite xs \<Longrightarrow> fresh xs \<notin> xs"
  by (simp add: fresh_def)
end | {
"alphanum_fraction": null,
"author": "xtreme-james-cooper",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/isabelle/xtreme-james-cooper-Lambda-RAM-Compiler/Lambda-RAM-Compiler-24125435949fa71dfc5faafdb236d28a098beefc/00Utils/Variable.thy",
"reason": null,
"repo": "Lambda-RAM-Compiler",
"save_path": "github-repos/isabelle/xtreme-james-cooper-Lambda-RAM-Compiler",
"sha": "24125435949fa71dfc5faafdb236d28a098beefc",
"size": null
} |
function [fs, bmg] = slfiltersize(fs0)
%SLFILTERSIZE Extracts information from filtersize
%
% $ Syntax $
%   - [fs, bmg] = slfiltersize(fs0)
%
% $ Arguments $
%   - fs0:  the input filter size, in one of three forms:
%             [len]                      height = width = len
%             [height, width]            center computed as floor((1+sz)/2)
%             [height, width, cy, cx]    full form
%   - fs:   the full form [height, width, center_y, center_x]
%   - bmg:  the boundary margins [top, bottom, left, right]
%
% $ History $
%   - Created by Dahua Lin, on Sep 1st, 2006
%

%% verify input

if ~isvector(fs0)
    error('sltoolbox:invalidarg', ...
        'fs0 should be a vector');
end

%% restore the full [height, width, cy, cx] form

n = length(fs0);
if n == 1
    % single length: square filter, centered
    ht = fs0;
    wd = fs0;
    cy = floor((1 + ht) / 2);
    cx = floor((1 + wd) / 2);
elseif n == 2
    % size only: compute both center coordinates at once
    ht = fs0(1);
    wd = fs0(2);
    cc = floor((1 + fs0) / 2);
    cy = cc(1);
    cx = cc(2);
elseif n == 4
    % full form: take values as given
    ht = fs0(1);
    wd = fs0(2);
    cy = fs0(3);
    cx = fs0(4);
else
    error('sltoolbox:sizmismatch', ...
        'The length of fs0 is illegal');
end

fs = [ht, wd, cy, cx];

%% boundary margins (only when requested)

if nargout >= 2
    bmg = [cy - 1, ht - cy, cx - 1, wd - cx];
end
| {
"alphanum_fraction": null,
"author": "lmthang",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/lmthang-nmt.hybrid/nmt.hybrid-50d5c025f18ed280ff0fd2e2adce327f4170a2c3/code/wordsim/code/sltoolbox_r101/sltoolbox_r101/sltoolbox/imgproc/slfiltersize.m",
"reason": null,
"repo": "nmt.hybrid",
"save_path": "github-repos/MATLAB/lmthang-nmt.hybrid",
"sha": "50d5c025f18ed280ff0fd2e2adce327f4170a2c3",
"size": null
} |
# Normalize an arbitrary Colorant to 8-bit ARGB, then extract its channels.
sdl_colors(c::Colorant) =
    sdl_colors(convert(ARGB{Colors.FixedPointNumbers.Normed{UInt8,8}}, c))

# Return the (red, green, blue, alpha) channels of an 8-bit ARGB color as
# plain Ints (presumably for passing to the SDL C API — confirm at call site).
function sdl_colors(c::ARGB)
    channels = (red(c), green(c), blue(c), alpha(c))
    return Int.(reinterpret.(channels))
end
| {
"alphanum_fraction": 0.6715686275,
"author": null,
"avg_line_length": 25.5,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "d13cb8186ccb8b8eeba562f98ea038eb68ebcf39",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "85a5ac34538755400d31ddc563ae12078801c507",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "albinahlback/GameZero.jl",
"max_forks_repo_path": "src/utility.jl",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "85a5ac34538755400d31ddc563ae12078801c507",
"max_issues_repo_issues_event_max_datetime": "2021-01-17T23:33:45.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-01-17T23:33:00.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "albinahlback/GameZero.jl",
"max_issues_repo_path": "src/utility.jl",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "85a5ac34538755400d31ddc563ae12078801c507",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "albinahlback/GameZero.jl",
"max_stars_repo_path": "src/utility.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 63,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 204
} |
import sys
import numpy as np
import myface.face as face
import myface.utils.utils as utils

# Load a test image and run face detection + encoding on it.
image = utils.load_image('./fig/fig1.jpeg')
res = face.detect_face_and_encode(image)
# BUG FIX: `res['encoded_faces'].__len__` (no call parentheses) printed the
# bound method object, e.g. "<method-wrapper '__len__' ...>", instead of the
# number of detected faces. Use len() to print the actual count.
print(len(res['encoded_faces']))
| {
"alphanum_fraction": 0.7777777778,
"author": null,
"avg_line_length": 19.6363636364,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "539a9d9797481d758783cc1e67f76685ccd87f0e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2019-11-10T08:03:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-06-17T07:36:59.000Z",
"max_forks_repo_head_hexsha": "f4dd7b2a8859eb6abd961944367a3847dfc47bc8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lzane/one-shot-face-recognition",
"max_forks_repo_path": "src/main.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f4dd7b2a8859eb6abd961944367a3847dfc47bc8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lzane/one-shot-face-recognition",
"max_issues_repo_path": "src/main.py",
"max_line_length": 43,
"max_stars_count": 15,
"max_stars_repo_head_hexsha": "f4dd7b2a8859eb6abd961944367a3847dfc47bc8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lzane/one-shot-face-recognition",
"max_stars_repo_path": "src/main.py",
"max_stars_repo_stars_event_max_datetime": "2019-11-10T08:03:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-10-28T08:00:02.000Z",
"num_tokens": 53,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 216
} |
#!/usr/bin/env python
"""
Convnets for image classification (1)
=====================================

Trains a small two-conv-layer network on MNIST using deeppy, then plots
the misclassified test digits and the learned convolutional filters.
"""
import numpy as np
import deeppy as dp
import matplotlib
import matplotlib.pyplot as plt
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(dp_dtypes=True)
# Bring images to BCHW format (batch, channel, height, width)
x_train = x_train[:, np.newaxis, :, :]
x_test = x_test[:, np.newaxis, :, :]
# Normalize pixel intensities; statistics are fit on the training set only
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Prepare network inputs
batch_size = 128
train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
# NOTE(review): test_input is built but never used below; net.predict is
# called directly on x_test instead — confirm whether it can be dropped.
test_input = dp.Input(x_test)
# Setup network
def pool_layer():
    """Return a fresh 2x2 max-pooling layer with stride 2."""
    return dp.Pool(
        win_shape=(2, 2),
        strides=(2, 2),
        border_mode='valid',
        method='max',
    )
def conv_layer(n_filters):
    """Return a fresh 5x5 valid-convolution layer with n_filters filters.

    Weights use an auto-scaled initializer (gain 1.39) with L2 weight decay.
    """
    return dp.Convolution(
        n_filters=n_filters,
        filter_shape=(5, 5),
        border_mode='valid',
        weights=dp.Parameter(dp.AutoFiller(gain=1.39),
                             weight_decay=0.0005),
    )
# Fully connected layers get their own init gain and (stronger) weight decay
weight_gain_fc = 1.84
weight_decay_fc = 0.002
net = dp.NeuralNetwork(
    layers=[
        conv_layer(32),
        dp.ReLU(),
        pool_layer(),
        conv_layer(64),
        dp.ReLU(),
        pool_layer(),
        dp.Flatten(),
        dp.DropoutFullyConnected(
            n_out=512,
            dropout=0.5,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc),
                                 weight_decay=weight_decay_fc),
        ),
        dp.ReLU(),
        dp.FullyConnected(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc)),
        ),
    ],
    loss=dp.SoftmaxCrossEntropy(),
)
# Train network in three stages, dividing the learning rate by 10 each stage
n_epochs = [50, 15, 15]
learn_rate = 0.05
momentum = 0.88
for i, epochs in enumerate(n_epochs):
    trainer = dp.StochasticGradientDescent(
        max_epochs=epochs, learn_rule=dp.Momentum(learn_rate=learn_rate/10**i,
                                                  momentum=momentum),
    )
    trainer.train(net, train_input)
# Plot misclassified images.
def plot_img(img, title):
    """Show img in grayscale in a new titled figure with axes hidden."""
    plt.figure()
    plt.imshow(img, cmap='gray', interpolation='nearest')
    plt.title(title)
    plt.axis('off')
    plt.tight_layout()
errors = net.predict(x_test) != y_test
n_errors = np.sum(errors)
x_errors = np.squeeze(x_test[errors])
plot_img(dp.misc.img_tile(dp.misc.img_stretch(x_errors), aspect_ratio=0.6),
         'All %i misclassified digits' % n_errors)
# Plot convolutional filters, one subplot per conv layer.
filters = [l.weights.array for l in net.layers
           if isinstance(l, dp.Convolution)]
fig = plt.figure()
gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[1, 3])
for i, f in enumerate(filters):
    ax = plt.subplot(gs[i])
    ax.imshow(dp.misc.conv_filter_tile(f), cmap='gray',
              interpolation='nearest')
    ax.set_title('Conv layer %i' % i)
    ax.axis('off')
plt.tight_layout()
| {
"alphanum_fraction": 0.624497992,
"author": null,
"avg_line_length": 25.5384615385,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "82dc8dcf9a6b3f205d92a4d92541cff5e5029443",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-09-28T18:05:49.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-04-05T21:41:14.000Z",
"max_forks_repo_head_hexsha": "79cc7cb552f30bc70eeea9ee7ff4976b0899ea66",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rajat1994/deeppy",
"max_forks_repo_path": "examples/convnet_mnist.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "79cc7cb552f30bc70eeea9ee7ff4976b0899ea66",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rajat1994/deeppy",
"max_issues_repo_path": "examples/convnet_mnist.py",
"max_line_length": 78,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "c9644d348e22b78d32ea049fb0ac14bf3b750941",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DmitryUlyanov/deeppy",
"max_stars_repo_path": "examples/convnet_mnist.py",
"max_stars_repo_stars_event_max_datetime": "2015-09-16T08:01:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-09-16T08:01:21.000Z",
"num_tokens": 734,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2988
} |
import tensorrt as trt
import numpy as np
import os
import cv2
import torch
from efficientdet.scripts.utils import *
#from utils import *
import re
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
def get_engine(model_path: str):
    """Deserialize and return a TensorRT engine from `model_path`.

    Parameters
    ----------
    model_path : str
        Path to a serialized engine file; must end with 'trt'.

    Raises
    ------
    FileNotFoundError
        If the file does not exist or its extension is not supported.
        BUG FIX: previously the failure path only printed a message and
        implicitly returned None, which surfaced later as a confusing
        AttributeError when callers used the result as an engine or
        context manager. Fail fast with a clear exception instead.
    """
    if os.path.exists(model_path) and model_path.endswith('trt'):
        print(f"Reading engine from file {model_path}")
        with open(model_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    raise FileNotFoundError(
        f"FILE: {model_path} not found or extension not supported.")
def preprocess(img, img_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)):
    """Normalize and letterbox-resize an image for network input.

    Returns a contiguous (1, C, H, W) array plus the resize metadata from
    aspectaware_resize_padding (needed later to rescale detection boxes).
    NOTE(review): mean/std look like ImageNet statistics in reversed (BGR)
    channel order — confirm that input frames are BGR.
    """
    scaled = (img / 255 - mean) / std
    framed, *meta = aspectaware_resize_padding(scaled, img_size, img_size)
    # HWC -> CHW, then prepend the batch axis.
    chw = framed.transpose(2, 0, 1)
    return np.ascontiguousarray(chw[np.newaxis, ...]), meta
def postprocess_outputs(pred, anchors, img_size, image, original_img, regressBoxes, clipBoxes, threshold, iou_threshold, framed_meta):
    """Decode raw engine outputs into boxes and draw them on the original image.

    pred[0] holds the box regressions (4 values per anchor) and pred[1] the
    per-anchor class scores (90 classes). Returns the annotated image.
    """
    # Reshape the flat engine outputs back into batched tensors.
    reg = torch.from_numpy(pred[0].reshape(1, -1, 4))
    cls = torch.from_numpy(pred[1].reshape(1, -1, 90))
    detections = postprocess(image, anchors, reg, cls,
                             regressBoxes, clipBoxes,
                             threshold, iou_threshold)[0]
    # Map coordinates from the letterboxed frame back onto the original image.
    detections = scale_coords(framed_meta, detections)
    return plot_bbox(detections, original_img)
class EFFICIENTDET:
    """Thin wrapper around a serialized EfficientDet TensorRT engine."""
    def __init__(self, model_path='cfg/efficientdet-d0.trt'):
        # Infer the compound-scaling coefficient (d0..d7) from the file name.
        model_type = int(re.search(r'\d+', model_path).group())
        # NOTE(review): img_size is fixed at 512 regardless of model_type;
        # larger EfficientDet variants typically use larger inputs — confirm.
        self.img_size = 512
        self.threshold = 0.2
        self.iou_threshold = 0.2
        # Per-variant anchor scales; only d7 differs (5.0 vs 4.0).
        anchor_scale = [4., 4., 4., 4., 4., 4., 4., 5.]
        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()
        self.anchors = anchors_def(anchor_scale=anchor_scale[model_type])
        # Deserialize the engine and pre-allocate I/O buffers and the
        # stream once, so predict() only performs inference.
        engine = get_engine(model_path)
        self.context = engine.create_execution_context()
        self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(engine)
    def predict(self, frame):
        """Run detection on one camera frame and return the annotated image."""
        #frame = cv2.flip(frame, 0)
        image, framed_meta = preprocess(frame, self.img_size)
        # Copy the prepared image into the engine's host input buffer.
        self.inputs[0].host = image
        trt_outputs = do_inference_v2(self.context, self.bindings, self.inputs, self.outputs, self.stream)
        vis = postprocess_outputs(trt_outputs, self.anchors, self.img_size, image, frame, self.regressBoxes, self.clipBoxes, self.threshold, self.iou_threshold, framed_meta)
        return vis
def main():
    """Run webcam inference with the d0 engine and display annotated frames.

    Press 'q' in the display window to stop the stream and exit.
    """
    model_type = 0
    model_path = f'cfg/efficientdet-d{model_type}.trt'
    img_size = 512
    threshold = 0.2
    iou_threshold = 0.2
    # Per-variant anchor scales; only d7 differs (5.0 vs 4.0).
    anchor_scale = [4., 4., 4., 4., 4., 4., 4., 5.]
    webcam = WebcamStream()
    fps = FPS()
    regressBoxes = BBoxTransform()
    clipBoxes = ClipBoxes()
    anchors = anchors_def(anchor_scale=anchor_scale[model_type])
    with get_engine(model_path) as engine, engine.create_execution_context() as context:
        inputs, outputs, bindings, stream = allocate_buffers(engine)
        while True:
            fps.start()
            frame = webcam.read()
            image, framed_meta = preprocess(frame, img_size)
            inputs[0].host = image
            trt_outputs = do_inference_v2(context, bindings, inputs, outputs, stream)
            vis = postprocess_outputs(trt_outputs, anchors, img_size, image, frame, regressBoxes, clipBoxes, threshold, iou_threshold, framed_meta)
            fps.stop()
            print(fps.get_fps())
            cv2.imshow('frame', vis)
            if cv2.waitKey(1) == ord("q"):
                webcam.stop()
                # BUG FIX: the loop previously kept running after stopping the
                # webcam, so the next read() hit a stopped stream. Exit cleanly.
                break
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
| {
"alphanum_fraction": 0.6590419806,
"author": null,
"avg_line_length": 34.7289719626,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1a757d6822a0f02b1e4a5bd9d3e014ad5f99dff4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-07-06T18:39:55.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-06T18:39:55.000Z",
"max_forks_repo_head_hexsha": "babf46fac5924e06464e0b84e1e74c16c1f960fa",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sithu31296/PyTorch-ONNX-TRT",
"max_forks_repo_path": "examples/efficientdet/efficientdet/scripts/infer.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "babf46fac5924e06464e0b84e1e74c16c1f960fa",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sithu31296/PyTorch-ONNX-TRT",
"max_issues_repo_path": "examples/efficientdet/efficientdet/scripts/infer.py",
"max_line_length": 173,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "babf46fac5924e06464e0b84e1e74c16c1f960fa",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sithu31296/PyTorch-ONNX-TRT",
"max_stars_repo_path": "examples/efficientdet/efficientdet/scripts/infer.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T07:12:48.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-15T22:40:06.000Z",
"num_tokens": 946,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3716
} |
import sys
import os
import numpy as _np
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../src/")
import finoptions as fo
def test_monte_carlo():
    """Check MonteCarloOption prices against the closed-form GBSOption.

    Prices a plain-vanilla call and put by Monte Carlo (Sobol innovations,
    Wiener paths) and asserts the loop-averaged prices agree with the
    Black-Scholes price to within 1% relative tolerance, both with and
    without path standardization.
    """
    # Option and market parameters shared by both pricers.
    S = 100
    K = 100
    t = 1 / 12
    sigma = 0.4
    r = 0.10
    b = 0.1
    # Monte Carlo discretization: 30 steps per path, 5000 paths, 50 loops.
    path_length = 30
    mc_samples = 5000
    mc_loops = 50
    # NOTE(review): eps is loaded but never passed below (every eps=eps
    # argument is commented out), so this file read is currently dead
    # weight — confirm whether the scrambled-Sobol comparison is still wanted.
    eps = _np.genfromtxt(
        "./pytest/sobol_scrambled_path_test.csv", delimiter=","
    )  # load sobol paths from R since python version is slightly different in third path
    inno = fo.monte_carlo_options.NormalSobolInnovations
    path = fo.monte_carlo_options.WienerPath
    payoff = fo.monte_carlo_options.PlainVanillaPayoff
    mc = fo.monte_carlo_options.MonteCarloOption(
        mc_loops,
        path_length,
        mc_samples,
        S,
        K,
        t,
        r,
        b,
        sigma,
        inno,
        path,
        payoff,
        # eps=eps,
        trace=False,
        antithetic=True,
        standardization=False,
    )
    # Closed-form Black-Scholes reference price.
    opt = fo.GBSOption(S, K, t, r, b, sigma)
    assert _np.allclose(
        opt.call(), _np.mean(mc.call()), rtol=1e-2
    ), "Monte Carlo Plain Vanilla call failed"
    assert _np.allclose(
        opt.put(), _np.mean(mc.put()), rtol=1e-2
    ), "Monte Carlo Plain Vanilla put failed"
    # test standardization - seems to produce worse results for Plain Vanilla
    mc = fo.monte_carlo_options.MonteCarloOption(
        mc_loops,
        path_length,
        mc_samples,
        # dt,
        S,
        K,
        t,
        r,
        b,
        sigma,
        inno,
        path,
        payoff,
        # eps=eps,
        trace=False,
        antithetic=True,
        standardization=True,
    )
    assert _np.allclose(
        opt.call(), _np.mean(mc.call()), rtol=1e-2
    ), "Monte Carlo Plain Vanilla call with standardization failed"
    assert _np.allclose(
        opt.put(), _np.mean(mc.put()), rtol=1e-2
    ), "Monte Carlo Plain Vanilla put with standardization failed"
if __name__ == "__main__":
    test_monte_carlo()
| {
"alphanum_fraction": 0.5823849579,
"author": null,
"avg_line_length": 21.9673913043,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ce7831b944c8a4749e9a6831deb018ba8d38ed3e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "81365b6d93693b0b546be92448db858ccce44d5a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bbcho/energyderivatives",
"max_forks_repo_path": "pytest/test_mc.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "81365b6d93693b0b546be92448db858ccce44d5a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bbcho/energyderivatives",
"max_issues_repo_path": "pytest/test_mc.py",
"max_line_length": 88,
"max_stars_count": 9,
"max_stars_repo_head_hexsha": "81365b6d93693b0b546be92448db858ccce44d5a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bbcho/finoptions-dev",
"max_stars_repo_path": "pytest/test_mc.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-14T22:27:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-21T05:39:10.000Z",
"num_tokens": 568,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2021
} |
###### Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license (c)2014 L.A. Barba, C.D. Cooper, G.F. Forsyth.
# Spreading out
Welcome to the fifth, and last, notebook of Module 4 "_Spreading out: diffusion problems,"_ of our fabulous course **"Practical Numerical Methods with Python."**
In this course module, we have learned about explicit and implicit methods for parabolic equations in 1 and 2 dimensions. So far, all schemes have been first-order in time and second-order in space. _Can we do any better?_ We certainly can: this notebook presents the Crank-Nicolson scheme, which is a second-order method in both time and space! We will continue to use the heat equation to guide the discussion, as we've done throughout this module.
## Crank-Nicolson scheme
The [Crank Nicolson scheme](http://en.wikipedia.org/wiki/Crank–Nicolson_method) is a popular second-order, implicit method used with parabolic PDEs in particular. It was developed by John Crank and [Phyllis Nicolson](http://en.wikipedia.org/wiki/Phyllis_Nicolson). The main idea is to take the average between the solutions at $t^n$ and $t^{n+1}$ in the evaluation of the spatial derivative. Why bother doing that? Because the time derivative will then be discretized with a centered scheme, giving second-order accuracy!
Remember the 1D heat equation from the [first notebook](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_01_Heat_Equation_1D_Explicit.ipynb)? Just to refresh your memory, here it is:
$$
\begin{equation}
\frac{\partial T}{\partial t} = \alpha \frac{\partial^2 T}{\partial x^2}.
\end{equation}
$$
In this case, the Crank-Nicolson scheme leads to the following discretized equation:
$$
\begin{equation}
\begin{split}
& \frac{T^{n+1}_i - T^n_i}{\Delta t} = \\
& \quad \alpha \cdot \frac{1}{2} \left( \frac{T^{n+1}_{i+1} - 2 T^{n+1}_i + T^{n+1}_{i-1}}{\Delta x^2} + \frac{T^n_{i+1} - 2 T^n_i + T^n_{i-1}}{\Delta x^2} \right) \\
\end{split}
\end{equation}
$$
Notice how both time indices $n$ and $n+1$ appear on the right-hand side. You know we'll have to rearrange this equation, right? Now look at the stencil and notice that we are using more information than before in the update.
#### Figure 2. Stencil of the Crank-Nicolson scheme.
Rearranging terms so that everything that we don't know is on the left side and what we do know on the right side, we get
$$
\begin{equation}
\begin{split}
& -T^{n+1}_{i-1} + 2 \left( \frac{\Delta x^2}{\alpha \Delta t} + 1 \right) T^{n+1}_i - T^{n+1}_{i+1} \\
& \qquad = T^{n}_{i-1} + 2 \left( \frac{\Delta x^2}{\alpha \Delta t} - 1 \right) T^{n}_i + T^{n}_{i+1} \\
\end{split}
\end{equation}
$$
Again, we are left with a linear system of equations. Check out the left side of that equation: it looks a lot like the matrix from [notebook 2](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_02_Heat_Equation_1D_Implicit.ipynb), doesn't it? Apart from the slight modification in the $T_i^{n+1}$ term, the left side of the equation is pretty much the same. What about the right-hand side? Sure, it looks quite different, but that is not a problem, we know all those terms!
Things don't change much for boundary conditions, either. We've seen all the cases already. Say $T_0^{n+1}$ is a Dirichlet boundary. Then the equation for $i=1$ becomes
$$
\begin{equation}
\begin{split}
& 2 \left( \frac{\Delta x^2}{\alpha \Delta t} + 1 \right) T^{n+1}_1 - T^{n+1}_{2} \\
& \qquad = T^{n}_{0} + 2 \left( \frac{\Delta x^2}{\alpha \Delta t} - 1 \right) T^{n}_1 + T^{n}_{2} + T^{n+1}_{0} \\
\end{split}
\end{equation}
$$
And if we have a Neumann boundary $\left(\left.\frac{\partial T}{\partial x}\right|_{x=L} = q\right)$ at $T_{n_x-1}^{n+1}$? We know this stuff, right? For $i=n_x-2$ we get
$$
\begin{equation}
\begin{split}
& -T^{n+1}_{n_x-3} + \left( 2 \frac{\Delta x^2}{\alpha \Delta t} + 1 \right) T^{n+1}_{n_x-2} \\
& \qquad = T^{n}_{n_x-3} + 2 \left( \frac{\Delta x^2}{\alpha \Delta t} - 1 \right) T^{n}_{n_x-2} + T^{n}_{n_x-1} + q\Delta x \\
\end{split}
\end{equation}
$$
The code will look a lot like the implicit method from the [second notebook](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_02_Heat_Equation_1D_Implicit.ipynb). Only some terms of the matrix and right-hand-side vector will be different, which changes some of our custom functions.
### The linear system
Just like in [notebook 2](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_02_Heat_Equation_1D_Implicit.ipynb), we need to solve a linear system on every time step of the form:
$$
[A][T^{n+1}_\text{int}] = [b]+[b]_{b.c.}
$$
The coefficient matrix is very similar to the previous case, but the right-hand side changes a lot:
$$
\begin{align}
\left[
\begin{array}{cccccc}
2 \left( \frac{1}{\sigma} + 1 \right) & -1 & 0 & \cdots & & 0 \\
-1 & 2 \left( \frac{1}{\sigma} + 1\right) & -1 & 0 & \cdots & 0 \\
0 & & \ddots & & & \vdots \\
\vdots & & & & 2 \left( \frac{1}{\sigma} + 1\right) & \\
0 & \cdots & & & -1 & \left( 2 \frac{1}{\sigma} + 1\right) \\
\end{array}
\right] \cdot
\left[
\begin{array}{c}
T_1^{n+1} \\
T_2^{n+1} \\
\vdots \\
\\
T_{N-2}^{n+1} \\
\end{array}
\right] =
\left[
\begin{array}{c}
T_0^n + 2 \left( \frac{1}{\sigma} - 1 \right) T_1^n + T_2^n \\
T_1^n + 2 \left( \frac{1}{\sigma} - 1 \right) T_2^n + T_3^n \\
\vdots \\
\\
T_{n_x-3}^n + 2 \left( \frac{1}{\sigma} - 1 \right) T_{n_x-2}^n + T_{n_x-1}^n \\
\end{array}
\right] +
\begin{bmatrix}
T_0^{n+1} \\
0\\
\vdots \\
0 \\
q \Delta x \\
\end{bmatrix}
\end{align}
$$
Let's write a function that will create the coefficient matrix and right-hand-side vectors for the heat conduction problem from [notebook 2](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_02_Heat_Equation_1D_Implicit.ipynb): with Dirichlet boundary at $x=0$ and zero-flux boundary $(q=0)$ at $x=L$.
```python
import numpy
from scipy import linalg
```
```python
def lhs_operator(N, sigma):
    """
    Assemble the Crank-Nicolson implicit operator for the 1D diffusion
    equation, with a Dirichlet condition on the left boundary and a
    zero-gradient Neumann condition on the right boundary.

    Parameters
    ----------
    N : integer
        Number of interior points.
    sigma : float
        Value of alpha * dt / dx**2.

    Returns
    -------
    A : numpy.ndarray
        The implicit operator as a 2D array of floats
        of size N by N.
    """
    # Tridiagonal system: main diagonal 2 (1 + 1/sigma), off-diagonals -1.
    main_diag = numpy.full(N, 2.0 * (1.0 + 1.0 / sigma))
    # The zero-flux Neumann condition modifies the last diagonal entry.
    main_diag[-1] = 1.0 + 2.0 / sigma
    off_diag = -1.0 * numpy.ones(N - 1)
    A = (numpy.diag(main_diag) +
         numpy.diag(off_diag, k=1) +
         numpy.diag(off_diag, k=-1))
    return A
```
```python
def rhs_vector(T, sigma, qdx):
    """
    Build the Crank-Nicolson right-hand side for the 1D diffusion system,
    using a Dirichlet condition on the left side and a Neumann condition
    on the right side.

    Parameters
    ----------
    T : numpy.ndarray
        The temperature distribution as a 1D array of floats.
    sigma : float
        Value of alpha * dt / dx**2.
    qdx : float
        Value of the temperature flux at the right side.

    Returns
    -------
    b : numpy.ndarray
        The right-hand side of the system as a 1D array of floats.
    """
    left, center, right = T[:-2], T[1:-1], T[2:]
    b = left + 2.0 * (1.0 / sigma - 1.0) * center + right
    # Fold the known boundary values into the first and last equations.
    b[0] += T[0]    # Dirichlet value on the left
    b[-1] += qdx    # prescribed flux on the right
    return b
```
We will solve the linear system at every time step. Let's define a function to step in time:
```python
def crank_nicolson(T0, nt, dt, dx, alpha, q):
    """
    Advance the rod temperature `nt` time steps with the Crank-Nicolson
    scheme: central differencing in space, a Dirichlet condition on the
    left side, and a Neumann condition on the right side.

    Parameters
    ----------
    T0 : numpy.ndarray
        The initial temperature distribution as a 1D array of floats.
    nt : integer
        Number of time steps to compute.
    dt : float
        Time-step size.
    dx : float
        Distance between two consecutive locations.
    alpha : float
        Thermal diffusivity of the rod.
    q : float
        Value of the temperature gradient on the right side.

    Returns
    -------
    T : numpy.ndarray
        The temperature distribution as a 1D array of floats.
    """
    sigma = alpha * dt / dx**2
    # The operator does not change between steps; assemble it once.
    A = lhs_operator(len(T0) - 2, sigma)
    T = T0.copy()
    for _ in range(nt):
        rhs = rhs_vector(T, sigma, q * dx)
        # Update the interior points by solving the linear system.
        T[1:-1] = linalg.solve(A, rhs)
        # Enforce the Neumann condition at the right boundary.
        T[-1] = T[-2] + q * dx
    return T
```
And we are good to go! First, let's set up our initial conditions, and the matrix
```python
# Set parameters.
L = 1.0 # length of the rod
nx = 21 # number of points on the rod
dx = L / (nx - 1) # grid spacing
alpha = 1.22e-3 # thermal diffusivity of the rod
q = 0.0 # temperature gradient at the extremity
# Define the locations on the rod.
x = numpy.linspace(0.0, L, num=nx)
# Set the initial temperature distribution.
T0 = numpy.zeros(nx)
T0[0] = 100.0
```
Check the matrix...
```python
A = lhs_operator(nx - 1, 0.5)
print(A)
```
[[ 6. -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[-1. 6. -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. -1. 6. -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. -1. 6. -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. -1. 6. -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. -1. 6. -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. -1. 6. -1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. -1. 6. -1. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. -1. 6. -1. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. -1. 6. -1. 0. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. -1. 6. -1. 0. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -1. 6. -1. 0. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -1. 6. -1. 0. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -1. 6. -1. 0. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -1. 6. -1. 0. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -1. 6. -1. 0.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -1. 6. -1.
0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -1. 6.
-1. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -1.
6. -1.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
-1. 5.]]
Looks okay! Now, step in time
```python
# Set the time-step size based on CFL limit.
sigma = 0.5
dt = sigma * dx**2 / alpha # time-step size
nt = 10 # number of time steps to compute
# Compute the temperature distribution.
T = crank_nicolson(T0, nt, dt, dx, alpha, q)
```
And plot,
```python
from matplotlib import pyplot
%matplotlib inline
```
```python
# Set the font family and size to use for Matplotlib figures.
pyplot.rcParams['font.family'] = 'serif'
pyplot.rcParams['font.size'] = 16
```
```python
# Plot the temperature along the rod.
pyplot.figure(figsize=(6.0, 4.0))
pyplot.xlabel('Distance [m]')
pyplot.ylabel('Temperature [C]')
pyplot.grid()
pyplot.plot(x, T, color='C0', linestyle='-', linewidth=2)
pyplot.xlim(0.0, L)
pyplot.ylim(0.0, 100.0);
```
Works nicely. But wait! This method has elements of explicit and implicit discretizations. Is it *conditionally stable* like forward Euler, or *unconditionally stable* like backward Euler? Try out different values of `sigma`. You'll see Crank-Nicolson is an *unconditionally stable scheme* for the diffusion equation!
## Accuracy & convergence
Using some techniques you might have learned in your PDE class, such as separation of variables, you can get a closed expression for the rod problem. It looks like this:
$$
\begin{eqnarray}
T(x,t) = & \nonumber \\
100 - \sum_{n=1}^{\infty} & \frac{400}{(2n-1)\pi}\sin\left(\frac{(2n-1)\pi}{2L}x\right) \exp\left[-\alpha\left(\frac{(2n-1)\pi}{2L}\right)^2t\right]
\end{eqnarray}
$$
Unfortunately, the analytical solution is a bit messy, but at least it gives a good approximation if we evaluate it for large $n$. Let's define a function that will calculate this for us:
```python
def analytical_temperature(x, t, alpha, L, N):
    """
    Evaluate a truncated series approximation of the exact temperature
    distribution along the rod.

    Parameters
    ----------
    x : numpy.ndarray
        Locations at which to calculate the temperature
        as a 1D array of floats.
    t : float
        Time.
    alpha : float
        Thermal diffusivity of the rod.
    L : float
        Length of the rod.
    N : integer
        Number of terms to use in the expansion.

    Returns
    -------
    T : numpy.ndarray
        The truncated analytical temperature distribution
        as a 1D array of floats.
    """
    T = numpy.ones_like(x) * 100.0
    # Subtract the first N modes of the Fourier series solution.
    for n in range(1, N + 1):
        k = (2 * n - 1) * numpy.pi / (2.0 * L)
        mode = numpy.sin(k * x) * numpy.exp(-alpha * k**2 * t)
        T -= 400.0 / (2.0 * L * k) * mode
    return T
```
And let's see how that expression looks for the time where we left the numerical solution
```python
# Compute the analytical temperature distribution.
T_exact = analytical_temperature(x, nt * dt, alpha, L, 100)
# Plot the numerical and analytical temperatures.
pyplot.figure(figsize=(6.0, 4.0))
pyplot.xlabel('Distance [m]')
pyplot.ylabel('Temperature [C]')
pyplot.grid()
pyplot.plot(x, T, label='numerical',
color='C0', linestyle='-', linewidth=2)
pyplot.plot(x, T_exact, label='analytical',
color='C1', linestyle='--', linewidth=2)
pyplot.legend()
pyplot.xlim(0.0, L)
pyplot.ylim(0.0, 100.0);
```
```python
T1 = analytical_temperature(x, 0.2, alpha, L, 100)
T2 = analytical_temperature(x, 0.2, alpha, L, 200)
numpy.sqrt(numpy.sum((T1 - T2)**2) / numpy.sum(T2**2))
```
6.927917118260093e-13
That looks like it should. We'll now use this result to study the convergence of the Crank-Nicolson scheme.
### Time convergence
We said this method was second-order accurate in time, remember? That's in theory, but we should test that the numerical solution indeed behaves like the theory says.
Leaving $\Delta x$ constant, we'll run the code for different values of $\Delta t$ and compare the result at the same physical time, say $t=n_t\cdot\Delta t=10$, with the analytical expression above.
The initial condition of the rod problem has a very sharp gradient: it suddenly jumps from $0{\rm C}$ to $100{\rm C}$ at the boundary. To resolve that gradient to the point that it doesn't affect time convergence, we would need a very fine mesh, and computations would be very slow. To avoid this issue, we will start from $t=1$ rather than starting from $t=0$.
First, let's define a function that will compute the $L_2$-norm of the error:
```python
def l2_error(T, T_exact):
    """
    Return the relative L2-norm of the difference between the numerical
    solution and the exact solution.

    Parameters
    ----------
    T : numpy.ndarray
        The numerical solution as an array of floats.
    T_exact : numpy.ndarray
        The exact solution as an array of floats.

    Returns
    -------
    error : float
        The relative L2-norm of the difference.
    """
    diff = T - T_exact
    return numpy.sqrt(numpy.sum(diff**2) / numpy.sum(T_exact**2))
```
For fun, let's compare the Crank-Nicolson scheme with the implicit (a.k.a., backward) Euler scheme. We'll borrow some functions from [notebook 2](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_02_Heat_Equation_1D_Implicit.ipynb) to do this.
```python
def lhs_operator_btcs(N, sigma):
    """
    Assemble the backward-Euler (BTCS) implicit operator for the 1D
    diffusion equation, with a Dirichlet condition on the left boundary
    and a zero-gradient Neumann condition on the right boundary.

    Parameters
    ----------
    N : integer
        Number of interior points.
    sigma : float
        Value of alpha * dt / dx**2.

    Returns
    -------
    A : numpy.ndarray
        The implicit operator as a 2D array of floats
        of size N by N.
    """
    # Tridiagonal system: main diagonal 2 + 1/sigma, off-diagonals -1.
    main_diag = numpy.full(N, 2.0 + 1.0 / sigma)
    # The zero-flux Neumann condition modifies the last diagonal entry.
    main_diag[-1] = 1.0 + 1.0 / sigma
    off_diag = -1.0 * numpy.ones(N - 1)
    return (numpy.diag(main_diag) +
            numpy.diag(off_diag, k=1) +
            numpy.diag(off_diag, k=-1))
```
```python
def rhs_vector_btcs(T, sigma, qdx):
    """
    Compute and return the right-hand side of the system
    for the 1D diffusion equation, using a Dirichlet condition
    on the left side and a Neumann condition on the right side.

    Parameters
    ----------
    T : numpy.ndarray
        The temperature distribution as a 1D array of floats.
    sigma : float
        Value of alpha * dt / dx**2.
    qdx : float
        Value of the temperature flux at the right side,
        already multiplied by the grid spacing (q * dx).

    Returns
    -------
    b : numpy.ndarray
        The right-hand side of the system as a 1D array of floats.
    """
    # Interior points; the division creates a new array, so T is untouched.
    b = T[1:-1] / sigma
    # Set Dirichlet condition (known left-boundary temperature).
    b[0] += T[0]
    # Set Neumann condition (prescribed flux on the right).
    b[-1] += qdx
    return b
```
```python
def btcs_implicit(T0, nt, dt, dx, alpha, q):
    """
    Compute and return the temperature along the rod
    after a given number of time steps.
    The function uses Euler implicit in time,
    central differencing in space, a Dirichlet condition
    on the left side, and a Neumann condition on the
    right side.

    Parameters
    ----------
    T0 : numpy.ndarray
        The initial temperature distribution
        as a 1D array of floats.
    nt : integer
        Number of time steps to compute.
    dt : float
        Time-step size.
    dx : float
        Distance between two consecutive locations.
    alpha : float
        Thermal diffusivity of the rod.
    q : float
        Value of the temperature gradient on the right side.

    Returns
    -------
    T : numpy.ndarray
        The temperature distribution as a 1D array of floats.
    """
    sigma = alpha * dt / dx**2
    # Create the implicit operator of the system (constant in time,
    # so it is assembled only once, outside the loop).
    A = lhs_operator_btcs(len(T0) - 2, sigma)
    # Integrate in time, starting from a copy so T0 is not modified.
    T = T0.copy()
    for n in range(nt):
        # Generate the right-hand side of the system.
        b = rhs_vector_btcs(T, sigma, q * dx)
        # Solve the system with scipy.linalg.solve for interior points.
        T[1:-1] = linalg.solve(A, b)
        # Apply the Neumann boundary condition on the right side.
        T[-1] = T[-2] + q * dx
    return T
```
Now, let's do the runs!
```python
# Update parameters.
nx = 1001 # number of points on the rod
dx = L / (nx - 1) # grid spacing
# Define the locations on the rod.
x = numpy.linspace(0.0, L, num=nx)
# Create a list with the time-step sizes to use.
dt_values = [1.0, 0.5, 0.25, 0.125]
# Create empty lists to hold the errors for both schemes.
errors = []
errors_btcs = []
# Compute the initial temperature distribution at t=1.0.
t0 = 1.0
T0 = analytical_temperature(x, t0, alpha, L, 100)
# Compute the final analytical temperature at t=10.0.
t = 10.0
T_exact = analytical_temperature(x, t, alpha, L, 100)
# Compute the numerical solutions and errors.
for dt in dt_values:
nt = int((t - t0) / dt) # number of time steps
# Compute the solution using Crank-Nicolson scheme.
T = crank_nicolson(T0, nt, dt, dx, alpha, q)
# Compute and record the L2-norm of the error.
errors.append(l2_error(T, T_exact))
# Compute the solution using implicit BTCS scheme.
T = btcs_implicit(T0, nt, dt, dx, alpha, q)
# Compute and record the L2-norm of the error.
errors_btcs.append(l2_error(T, T_exact))
```
And plot,
```python
# Plot the error versus the time-step size.
pyplot.figure(figsize=(6.0, 6.0))
pyplot.grid()
pyplot.xlabel(r'$\Delta t$')
pyplot.ylabel('Relative $L_2$-norm\nof the error')
pyplot.loglog(dt_values, errors, label='Crank-Nicolson',
color='black', linestyle='--', linewidth=2, marker='o')
pyplot.loglog(dt_values, errors_btcs, label='BTCS (implicit)',
color='black', linestyle='--', linewidth=2, marker='s')
pyplot.legend()
pyplot.axis('equal');
```
```python
errors
```
[0.0005562525604218684,
0.0001374575644793469,
3.285170428405964e-05,
6.771647468538648e-06]
See how the error drops by a factor of four every time the time step is halved? This method is second order in time!
Clearly, Crank-Nicolson (circles) converges faster than backward Euler (squares)! Not only that, but also the error curve is shifted down: Crank-Nicolson is more accurate.
If you look closely, you'll realize that the error in Crank-Nicolson decays about twice as fast as that of backward Euler: it's a second-order versus a first-order method!
### Spatial convergence
To study spatial convergence, we will run the code for meshes with 21, 41, 81 and 161 points, and compare them at the same non-dimensional time, say $t=20$.
Let's start by defining a function that will do everything for us
```python
# Set parameters.
dt = 0.1 # time-step size
t = 20.0 # final time
nt = int(t / dt) # number of time steps to compute
# Create a list with the grid-spacing sizes to use.
nx_values = [11, 21, 41, 81, 161]
# Create an empty list to store the errors.
errors = []
# Compute the numerical solutions and errors.
for nx in nx_values:
dx = L / (nx - 1) # grid spacing
x = numpy.linspace(0.0, L, num=nx) # grid points
# Set the initial conditions for the grid.
T0 = numpy.zeros(nx)
T0[0] = 100.0
# Compute the solution using Crank-Nicolson scheme.
T = crank_nicolson(T0, nt, dt, dx, alpha, q)
# Compute the analytical solution.
T_exact = analytical_temperature(x, t, alpha, L, 100)
# Compute and record the L2-norm of the error.
errors.append(l2_error(T, T_exact))
```
And plot!
```python
# Plot the error versus the grid-spacing size.
pyplot.figure(figsize=(6.0, 6.0))
pyplot.grid()
pyplot.xlabel(r'$\Delta x$')
pyplot.ylabel('Relative $L_2$-norm\nof the error')
dx_values = L / (numpy.array(nx_values) - 1)
pyplot.loglog(dx_values, errors,
color='black', linestyle='--', linewidth=2, marker='o')
pyplot.axis('equal');
```
That looks good! See how for each decade we move right on the log-log plot, the error drops about two decades going down (and even a bit better!) — second-order convergence in space.
##### Dig deeper
Let's re-do the spatial convergence, but comparing at a much later time, say $t=1000$.
```python
# Set parameters.
dt = 0.1 # time-step size
t = 1000.0 # final time
nt = int(t / dt) # number of time steps to compute
# Create a list with the grid-spacing sizes to use.
nx_values = [11, 21, 41, 81, 161]
# Create an empty list to store the errors.
errors = []
# Compute the numerical solutions and errors.
for nx in nx_values:
dx = L / (nx - 1) # grid spacing
x = numpy.linspace(0.0, L, num=nx) # grid points
# Set the initial conditions for the grid.
T0 = numpy.zeros(nx)
T0[0] = 100.0
# Compute the solution using Crank-Nicolson scheme.
T = crank_nicolson(T0, nt, dt, dx, alpha, q)
# Compute the analytical solution.
T_exact = analytical_temperature(x, t, alpha, L, 100)
# Compute and record the L2-norm of the error.
errors.append(l2_error(T, T_exact))
```
```python
# Plot the error versus the grid-spacing size.
pyplot.figure(figsize=(6.0, 6.0))
pyplot.grid()
pyplot.xlabel(r'$\Delta x$')
pyplot.ylabel('Relative $L_2$-norm\nof the error')
dx_values = L / (numpy.array(nx_values) - 1)
pyplot.loglog(dx_values, errors,
color='black', linestyle='--', linewidth=2, marker='o')
pyplot.axis('equal');
```
```python
errors
```
[0.011922719076357474,
0.006181593859790544,
0.003142664307189285,
0.0015838621626866334,
0.0007950070915380142]
Wait, convergence is not that great now! It's not as good as second order, but not as bad as first order. *What is going on?*
Remember our implementation of the boundary conditions? We used
$$
\begin{equation}
\frac{T^{n}_{N-1} - T^{n}_{N-2}}{\Delta x} = q
\end{equation}
$$
Well, that is a **first-order** approximation!
But, why doesn't this affect our solution at an earlier time? Initially, temperature on the right side of the rod is zero and the gradient is very small in that region; at that point in time, errors there were negligible. Once temperature starts picking up, we start having problems.
**Boundary conditions can affect the convergence and accuracy of your solution!**
---
###### The cell below loads the style of the notebook
```python
from IPython.core.display import HTML
css_file = '../../styles/numericalmoocstyle.css'
HTML(open(css_file, 'r').read())
```
<link href='http://fonts.googleapis.com/css?family=Alegreya+Sans:100,300,400,500,700,800,900,100italic,300italic,400italic,500italic,700italic,800italic,900italic' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Arvo:400,700,400italic' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=PT+Mono' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Shadows+Into+Light' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Nixie+One' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Source+Code+Pro' rel='stylesheet' type='text/css'>
<style>
@font-face {
font-family: "Computer Modern";
src: url('http://mirrors.ctan.org/fonts/cm-unicode/fonts/otf/cmunss.otf');
}
#notebook_panel { /* main background */
background: rgb(245,245,245);
}
div.cell { /* set cell width */
width: 750px;
}
div #notebook { /* centre the content */
background: #fff; /* white background for content */
width: 1000px;
margin: auto;
padding-left: 0em;
}
#notebook li { /* More space between bullet points */
margin-top:0.8em;
}
/* draw border around running cells */
div.cell.border-box-sizing.code_cell.running {
border: 1px solid #111;
}
/* Put a solid color box around each cell and its output, visually linking them*/
div.cell.code_cell {
background-color: rgb(255,255,255); /* white; 255 is the maximum valid rgb() component */
border-radius: 0px;
padding: 0.5em;
margin-left:1em;
margin-top: 1em;
}
div.text_cell_render{
font-family: 'Alegreya Sans' sans-serif;
line-height: 140%;
font-size: 125%;
font-weight: 400;
width:600px;
margin-left:auto;
margin-right:auto;
}
/* Formatting for header cells */
.text_cell_render h1 {
font-family: 'Nixie One', serif;
font-style:regular;
font-weight: 400;
font-size: 45pt;
line-height: 100%;
color: rgb(0,51,102);
margin-bottom: 0.5em;
margin-top: 0.5em;
display: block;
}
.text_cell_render h2 {
font-family: 'Nixie One', serif;
font-weight: 400;
font-size: 30pt;
line-height: 100%;
color: rgb(0,51,102);
margin-bottom: 0.1em;
margin-top: 0.3em;
display: block;
}
.text_cell_render h3 {
font-family: 'Nixie One', serif;
margin-top:16px;
font-size: 22pt;
font-weight: 600;
margin-bottom: 3px;
font-style: regular;
color: rgb(102,102,0);
}
.text_cell_render h4 { /*Use this for captions*/
font-family: 'Nixie One', serif;
font-size: 14pt;
text-align: center;
margin-top: 0em;
margin-bottom: 2em;
font-style: regular;
}
.text_cell_render h5 { /*Use this for small titles*/
font-family: 'Nixie One', sans-serif;
font-weight: 400;
font-size: 16pt;
color: rgb(163,0,0);
font-style: italic;
margin-bottom: .1em;
margin-top: 0.8em;
display: block;
}
.text_cell_render h6 { /*use this for copyright note*/
font-family: 'PT Mono', sans-serif;
font-weight: 300;
font-size: 9pt;
line-height: 100%;
color: grey;
margin-bottom: 1px;
margin-top: 1px;
}
.CodeMirror{
font-family: "Source Code Pro";
font-size: 90%;
}
.alert-box {
padding:10px 10px 10px 36px;
margin:5px;
}
.success {
color:#666600;
background:rgb(240,242,229);
}
</style>
| {
"alphanum_fraction": 0.8233294974,
"author": null,
"avg_line_length": 109.6627306273,
"converted": true,
"ext": "ipynb",
"file": null,
"hexsha": "370470feccd28c49adeed4b492b7b46c822c8a3c",
"include": null,
"lang": "Jupyter Notebook",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1270,
"max_forks_repo_forks_event_max_datetime": "2022-02-27T01:02:44.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-02T19:19:52.000Z",
"max_forks_repo_head_hexsha": "62b3c14c2c56d85d65c6075f2d7eb44266b49c17",
"max_forks_repo_licenses": [
"CC-BY-3.0"
],
"max_forks_repo_name": "mcarpe/numerical-mooc",
"max_forks_repo_path": "lessons/04_spreadout/04_05_Crank-Nicolson.ipynb",
"max_issues_count": 62,
"max_issues_repo_head_hexsha": "62b3c14c2c56d85d65c6075f2d7eb44266b49c17",
"max_issues_repo_issues_event_max_datetime": "2020-11-09T12:27:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-02-02T01:06:07.000Z",
"max_issues_repo_licenses": [
"CC-BY-3.0"
],
"max_issues_repo_name": "mcarpe/numerical-mooc",
"max_issues_repo_path": "lessons/04_spreadout/04_05_Crank-Nicolson.ipynb",
"max_line_length": 25016,
"max_stars_count": 748,
"max_stars_repo_head_hexsha": "62b3c14c2c56d85d65c6075f2d7eb44266b49c17",
"max_stars_repo_licenses": [
"CC-BY-3.0"
],
"max_stars_repo_name": "mcarpe/numerical-mooc",
"max_stars_repo_path": "lessons/04_spreadout/04_05_Crank-Nicolson.ipynb",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T20:42:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-04T22:50:56.000Z",
"num_tokens": 9820,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 148593
} |
//==================================================================================================
/**
Copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_ARCH_COMMON_SCALAR_FUNCTION_ALIGNED_STORE_HPP_INCLUDED
#define BOOST_SIMD_ARCH_COMMON_SCALAR_FUNCTION_ALIGNED_STORE_HPP_INCLUDED
#include <boost/simd/detail/dispatch/function/overload.hpp>
#include <boost/simd/detail/dispatch/adapted/common/pointer.hpp>
#include <boost/simd/mask.hpp>
#include <boost/config.hpp>
namespace boost { namespace simd { namespace ext
{
  namespace bd = boost::dispatch;
  namespace bs = boost::simd;

  /// INTERNAL ONLY - Scalar aligned_store and store are equivalent (a single
  /// scalar carries no alignment requirement): writes a0 through pointer a1
  /// at integer offset a2.
  BOOST_DISPATCH_OVERLOAD ( aligned_store_
                          , (typename A0, typename A1, typename A2)
                          , bd::cpu_
                          , bd::scalar_< bd::unspecified_<A0> >
                          , bd::pointer_<bd::scalar_<bd::unspecified_<A1>>,1u>
                          , bd::scalar_<bd::integer_<A2>>
                          )
  {
    BOOST_FORCEINLINE void operator() ( A0 a0, A1 a1, A2 a2) const BOOST_NOEXCEPT
    {
      // Plain indexed store.
      *(a1+a2) = a0;
    }
  };

  /// INTERNAL ONLY - Scalar aligned_store without an offset: writes a0
  /// directly through pointer a1.
  BOOST_DISPATCH_OVERLOAD ( aligned_store_
                          , (typename A0, typename A1)
                          , bd::cpu_
                          , bd::scalar_< bd::unspecified_<A0> >
                          , bd::pointer_<bd::scalar_<bd::unspecified_<A1>>,1u>
                          )
  {
    BOOST_FORCEINLINE void operator() ( A0 a0, A1 a1) const BOOST_NOEXCEPT
    {
      *a1 = a0;
    }
  };

  /// INTERNAL ONLY - Masked scalar aligned_store: performs the write only
  /// when the masked pointer's predicate is true, otherwise a no-op.
  BOOST_DISPATCH_OVERLOAD ( aligned_store_
                          , (typename Src, typename Pointer, typename Zero)
                          , bd::cpu_
                          , bd::scalar_<bd::unspecified_<Src>>
                          , bd::masked_pointer_<bd::scalar_<bd::unspecified_<Pointer>>,Zero>
                          )
  {
    BOOST_FORCEINLINE void operator()(const Src& s, Pointer const& p) const
    {
      if(p.mask()) *p.get() = s;
    }
  };

  /// INTERNAL ONLY - Masked scalar aligned_store with an integer offset:
  /// conditional indexed write through the masked pointer.
  BOOST_DISPATCH_OVERLOAD ( aligned_store_
                          , (typename Src, typename Pointer, typename Zero, typename A2)
                          , bd::cpu_
                          , bd::scalar_<bd::unspecified_<Src>>
                          , bd::masked_pointer_<bd::scalar_<bd::unspecified_<Pointer>>,Zero>
                          , bd::scalar_<bd::integer_<A2>>
                          )
  {
    BOOST_FORCEINLINE void operator()(const Src& s, Pointer const& p, A2 idx) const
    {
      if(p.mask()) *(p.get()+idx) = s;
    }
  };
} } }
#endif
| {
"alphanum_fraction": 0.5186693822,
"author": null,
"avg_line_length": 36.825,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "4f888102d3cc312296179738cb5ed4c2188c3565",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z",
"max_forks_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "SylvainCorlay/pythran",
"max_forks_repo_path": "third_party/boost/simd/arch/common/scalar/function/aligned_store.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "SylvainCorlay/pythran",
"max_issues_repo_path": "third_party/boost/simd/arch/common/scalar/function/aligned_store.hpp",
"max_line_length": 100,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "SylvainCorlay/pythran",
"max_stars_repo_path": "third_party/boost/simd/arch/common/scalar/function/aligned_store.hpp",
"max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z",
"num_tokens": 645,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2946
} |
import sys
if sys.version_info.major < 3:
from customaxesimage import CustomAxesImage
else:
from lib.customaxesimage import CustomAxesImage
import numpy as np
import math
from kernel import setupkernel,setupintegratedkernel
import globals
if globals.debug > 0: from time import time
try:
from numba import cuda,float64
import math
has_jit = True
except ImportError:
has_jit = False
class IntegratedValuePlot(CustomAxesImage,object):
    """Axes image of a particle quantity integrated through the line of sight.

    Each particle contributes its weight m*A/(rho*h^3) times the integrated
    SPH kernel looked up from a precomputed table (wint), accumulated onto
    the pixel grid of the axes. Uses a numba CUDA kernel when available,
    otherwise a vectorized NumPy CPU path.
    """
    def __init__(self,ax,A,x,y,m,h,rho,physical_units,display_units,**kwargs):
        # ax: axes to draw into.
        # A, x, y, m, h, rho: per-particle quantity, positions, mass,
        #   smoothing length and density (equal-length 1D arrays).
        # physical_units / display_units: per-array conversion factors,
        #   indexed in the order [A, x, y, m, h, rho] -- TODO confirm callers.
        if globals.debug > 1: print("integratedvalueplot.__init__")
        self.ax = ax
        # C-contiguous double arrays are required for the CUDA copies below.
        self.A = np.ascontiguousarray(A,dtype=np.double)
        self.x = np.ascontiguousarray(x,dtype=np.double)
        self.y = np.ascontiguousarray(y,dtype=np.double)
        self.m = np.ascontiguousarray(m,dtype=np.double)
        self.h = np.ascontiguousarray(h,dtype=np.double)
        self.rho = np.ascontiguousarray(rho,dtype=np.double)
        self.physical_units = physical_units
        self.display_units = display_units
        # Integrated-kernel lookup table (wint) and index-scaling constant (ctab).
        self.wint, self.ctab = setupintegratedkernel()
        self.wint = np.ascontiguousarray(self.wint,dtype=np.double)
        # Make everything in physical units and then just set the
        # extent to display units later
        self.A *= self.physical_units[0]
        self.x *= self.physical_units[1]
        self.y *= self.physical_units[2]
        self.m *= self.physical_units[3]
        self.h *= self.physical_units[4]
        self.rho *= self.physical_units[5]
        # Kernel table scales with h^3 (physical_units[4] is the h factor).
        self.wint *= self.physical_units[4]**3
        self.h2 = self.h**2
        # Per-particle factor turning a squared distance into a wint index.
        self.ctabinvh2 = self.ctab/self.h2
        # Per-particle weight m*A/(rho*h^3) multiplying the kernel integral.
        self.quantity = np.ascontiguousarray(
            self.m*self.A/(self.rho*self.h2*self.h),
            dtype=np.double,
        )
        if has_jit:
            # Upload all per-particle arrays to the GPU once, up front,
            # so repeated calculate() calls only transfer the pixel buffer.
            self.stream = cuda.stream()
            N = len(self.x)
            self.device_x = cuda.device_array(N,dtype=np.double)
            self.device_y = cuda.device_array(N,dtype=np.double)
            self.device_quantity = cuda.device_array(N,dtype=np.double)
            self.device_h = cuda.device_array(N,dtype=np.double)
            self.device_h2 = cuda.device_array(N,dtype=np.double)
            self.device_ctabinvh2 = cuda.device_array(N,dtype=np.double)
            self.device_wint = cuda.device_array(len(self.wint),dtype=np.double)
            cuda.to_device(self.x,to=self.device_x,stream=self.stream)
            cuda.to_device(self.y,to=self.device_y,stream=self.stream)
            cuda.to_device(self.quantity,to=self.device_quantity,stream=self.stream)
            cuda.to_device(self.h,to=self.device_h,stream=self.stream)
            cuda.to_device(self.h2,to=self.device_h2,stream=self.stream)
            cuda.to_device(self.ctabinvh2,to=self.device_ctabinvh2,stream=self.stream)
            cuda.to_device(self.wint,to=self.device_wint,stream=self.stream)
            cuda.synchronize()
        # Start with a 1x1 buffer; the base class drives resizing/redraws.
        super(IntegratedValuePlot,self).__init__(
            self.ax,
            np.zeros((1,1),dtype=np.double),
            **kwargs
        )

    def calculate(self,*args,**kwargs):
        """Select visible particles and recompute the pixel data for the
        current axes limits (display units converted to physical)."""
        if globals.debug > 1: print("integratedvalueplot.calculate")
        if globals.debug > 0: start = time()
        xmin,xmax = self.ax.get_xlim()
        ymin,ymax = self.ax.get_ylim()
        # Particle bounding boxes (position +/- smoothing length).
        xpmin = self.x-self.h
        xpmax = self.x+self.h
        ypmin = self.y-self.h
        ypmax = self.y+self.h
        display_to_physical = [pu/du for pu,du in zip(self.physical_units,self.display_units)]
        # Keep only particles whose kernel support overlaps the view.
        idx = np.logical_and(
            np.logical_and(xpmax > xmin*display_to_physical[1], xpmin < xmax*display_to_physical[1]),
            np.logical_and(ypmax > ymin*display_to_physical[2], ypmin < ymax*display_to_physical[2]),
        )
        if any(idx):
            self._extent = [xmin,xmax,ymin,ymax]
            # Pixel sizes in physical units.
            self.dx = float(xmax-xmin)/float(self.xpixels) * display_to_physical[1]
            self.dy = float(ymax-ymin)/float(self.ypixels) * display_to_physical[2]
            # Reset the accumulation buffer before depositing particles.
            self._data = np.zeros(np.shape(self._data),dtype=np.double)
            self.calculate_data(idx)
        if globals.debug > 0: print("integratedvalueplot.calculate took %f seconds" % (time()-start))

    if has_jit:
        @staticmethod
        @cuda.jit('void(double[:,:], int64[:], double[:], double[:], double[:], double[:], double[:], double, double, double, double, int64, int64, double[:], double[:])') # Decorator parameters improve performance
        def calculate_gpu(data,idx,x,y,quantity,h,h2,dx,dy,xmin,ymin,xpixels,ypixels,ctabinvh2,wint):
            """CUDA kernel: one thread per selected particle; each thread
            deposits its particle's kernel-weighted quantity onto every
            pixel within the particle's smoothing circle."""
            p = cuda.grid(1)
            if p < idx.size:
                i = idx[p]
                hi = h[i]
                h2i = h2[i]
                xi = x[i]
                yi = y[i]
                quantityi = quantity[i]
                ctabinvh2i = ctabinvh2[i]
                # Pixel-index bounds of the particle's bounding box, clipped
                # to the image.
                imin = max(int((xi-hi-xmin)/dx),0)
                imax = min(int((xi+hi-xmin)/dx)+1,xpixels)
                jmin = max(int((yi-hi-ymin)/dy),0)
                jmax = min(int((yi+hi-ymin)/dy)+1,ypixels)
                for ix in range(imin,imax):
                    xpos = xmin + (ix+0.5)*dx
                    dx2 = (xpos-xi)*(xpos-xi)
                    for jy in range(jmin,jmax):
                        ypos = ymin + (jy+0.5)*dy
                        dr2 = dx2 + (ypos-yi)*(ypos-yi)
                        if dr2 < h2i:
                            # Atomic: multiple particles may hit one pixel.
                            cuda.atomic.add(data, (jy,ix), quantityi*wint[int(dr2*ctabinvh2i)])

        def calculate_data(self,idx): # On GPU
            """Launch calculate_gpu over the selected particle indices and
            copy the accumulated pixel buffer back to the host."""
            if globals.debug > 1: print("integratedvalueplot.calculate_data")
            device_idx = cuda.to_device(np.where(idx)[0])
            device_data = cuda.to_device(self._data)
            display_to_physical = [pu/du for pu,du in zip(self.physical_units,self.display_units)]
            xmin = self.ax.get_xlim()[0]*display_to_physical[1]
            ymin = self.ax.get_ylim()[0]*display_to_physical[2]
            threadsperblock = 512
            blockspergrid = len(idx) // threadsperblock + 1
            self.calculate_gpu[blockspergrid,threadsperblock](
                device_data,
                device_idx,
                self.device_x,
                self.device_y,
                self.device_quantity,
                self.device_h,
                self.device_h2,
                self.dx,
                self.dy,
                xmin,
                ymin,
                self.xpixels,
                self.ypixels,
                self.device_ctabinvh2,
                self.device_wint,
            )
            cuda.synchronize()
            self._data = device_data.copy_to_host()
    else:
        def calculate_data(self,idx): # On CPU
            """NumPy fallback: per pixel-column/row masking of nearby
            particles, then a table lookup to deposit the weighted kernel."""
            if globals.debug > 1: print("integratedvalueplot.calculate_data")
            xmin,xmax = self.ax.get_xlim()
            ymin,ymax = self.ax.get_ylim()
            display_to_physical = [pu/du for pu,du in zip(self.physical_units,self.display_units)]
            xmin *= display_to_physical[1]
            xmax *= display_to_physical[1]
            ymin *= display_to_physical[2]
            ymax *= display_to_physical[2]
            # Pixel-center coordinates.
            xpos = np.linspace(xmin,xmax,self.xpixels+1)[:-1] + 0.5*self.dx
            ypos = np.linspace(ymin,ymax,self.ypixels+1)[:-1] + 0.5*self.dy
            indexes = np.arange(len(self.x))[idx]
            x = self.x[idx]
            y = self.y[idx]
            h = self.h[idx]
            h2 = self.h2[idx]
            quantity = self.quantity[idx]
            ctabinvh2 = self.ctabinvh2[idx]
            # dx[i, p] = |pixel-column i - particle p| in x.
            dx = np.abs(xpos[:,None]-x)
            idx_xs = dx < h
            for i,(idx_x,dx_x) in enumerate(zip(idx_xs,dx)):
                if not any(idx_x): continue
                dx_x = dx_x[idx_x]
                dy = np.abs(ypos[:,None]-y[idx_x])
                idx_ys = dy < h[idx_x]
                for j,(idx_y,dy_y) in enumerate(zip(idx_ys,dy)):
                    if not any(idx_y): continue
                    dr2 = dx_x[idx_y]**2 + dy_y[idx_y]**2
                    # Keep only particles whose support reaches this pixel.
                    idx_r = dr2 < h2[idx_x][idx_y]
                    if not any(idx_r): continue
                    indices = (dr2[idx_r]*ctabinvh2[idx_x][idx_y][idx_r]).astype(int,copy=False)
                    self._data[j,i] = sum(quantity[idx_x][idx_y][idx_r]*self.wint[indices])
| {
"alphanum_fraction": 0.5633786178,
"author": null,
"avg_line_length": 38.8302752294,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "aaa2cc13aac297db4abafa5a782e0aca716c3c7c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d8bed1afd0a05d6bc50e6677d2733ffb3d1d5bcd",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hatfullr/pysplash",
"max_forks_repo_path": "lib/integratedvalueplot.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d8bed1afd0a05d6bc50e6677d2733ffb3d1d5bcd",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hatfullr/pysplash",
"max_issues_repo_path": "lib/integratedvalueplot.py",
"max_line_length": 214,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d8bed1afd0a05d6bc50e6677d2733ffb3d1d5bcd",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hatfullr/pysplash",
"max_stars_repo_path": "lib/integratedvalueplot.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2064,
"path": null,
"reason": "import numpy,from numba",
"repo": null,
"save_path": null,
"sha": null,
"size": 8465
} |
import csv
import logging
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import random
import statistics as stat
import glob
import os
import yaml
logger = logging.getLogger(__name__)
def set_size(w,h, ax=None):
    """Resize the parent figure so the axes *ax* occupies w-by-h inches."""
    if not ax: ax=plt.gca()
    pars = ax.figure.subplotpars
    # The axes box spans (right - left) x (top - bottom) of the figure,
    # so scale the figure dimensions up by those fractions.
    fig_w = float(w) / (pars.right - pars.left)
    fig_h = float(h) / (pars.top - pars.bottom)
    ax.figure.set_size_inches(fig_w, fig_h)
def main():
    """Aggregate per-run YAML results, print improvement statistics, and
    save two PDF plots (improvement and computation time vs. random seed).

    Reads files matching res_robots_*.yaml next to this script; filenames
    are assumed to encode robots/horizon/seed as underscore-separated
    fields counted from the end -- TODO confirm naming convention.
    Writes results.yaml and two PDFs under ./results/plots/.
    Returns 0 on completion.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    yaml_list = glob.glob(dir_path + "/res_robots_*.yaml")
    dict_results = {}
    for file in yaml_list:
        split_filename = file.split("_")
        # print(split_filename)
        seed = str(split_filename[-1].split(".")[0])
        horizon = str(split_filename[-3])
        robots = str(split_filename[-5])
        # EAFP: create the per-seed dict on first sight of this seed.
        try:
            print(dict_results[seed])
        except KeyError:
            dict_results[seed] = {}
        with open(file, "r") as stream:
            try:
                yaml_data = yaml.safe_load(stream)
                cumulative_time = yaml_data["results"]["total time"]
                max_time = yaml_data["results"]["comp time"]["max"]
                avg_time = yaml_data["results"]["comp time"]["avg"]
                print("seed: {} horizon: {} robots: {} -> makespan: {}; max: {} avg: {}".format(seed, horizon, robots, cumulative_time, max_time, avg_time))
                dict_results[seed][horizon] = {}
                dict_results[seed][horizon]["robots"] = robots
                dict_results[seed][horizon]["cum_time"] = cumulative_time
                dict_results[seed][horizon]["max"] = max_time
                dict_results[seed][horizon]["avg"] = avg_time
            except yaml.YAMLError as exc:
                print(exc)
    print(dict_results)
    # Persist the merged results for later inspection.
    with open(dir_path + "/results.yaml", "w") as outfile:
        yaml.safe_dump(dict_results, outfile, default_flow_style=False)
    improv_list_1 = []
    comp_max_list_1 = []
    comp_avg_list_1 = []
    improv_list_5 = []
    comp_max_list_5 = []
    comp_avg_list_5 = []
    seed_list = []
    # Compare horizons 1 and 5 against the baseline (horizon "0");
    # seeds missing any of the three horizons are skipped via KeyError.
    for exp in dict_results:
        try:
            # cumulative time results
            cost_0 = dict_results[exp]["0"]["cum_time"]
            cost_1 = dict_results[exp]["1"]["cum_time"]
            cost_5 = dict_results[exp]["5"]["cum_time"]
            improv_1 = round( 100*(int(cost_0)-int(cost_1))/int(cost_0) ,2)
            improv_5 = round( 100*(int(cost_0)-int(cost_5))/int(cost_0) ,2)
            improv_list_1.append(improv_1)
            improv_list_5.append(improv_5)
            # computation time results
            comp_max_1 = round(dict_results[exp]["1"]["max"], 3)
            comp_avg_1 = round(dict_results[exp]["1"]["avg"], 3)
            comp_max_5 = round(dict_results[exp]["5"]["max"], 3)
            comp_avg_5 = round(dict_results[exp]["5"]["avg"], 3)
            comp_max_list_1.append(comp_max_1)
            comp_avg_list_1.append(comp_avg_1)
            comp_max_list_5.append(comp_max_5)
            comp_avg_list_5.append(comp_avg_5)
            seed_list.append(int(exp))
            # print
            print("{} ->\t improv 1 {} ( {} | {} ) \t \t improv 5 {} ( {} | {} )".format(exp, improv_1, comp_avg_1, comp_max_1, improv_5, comp_avg_5, comp_max_5))
        except KeyError:
            pass
    print("avg improvement")
    print("   Horizon 1: {}".format(stat.mean(improv_list_1)))
    print("   Horizon 5: {}".format(stat.mean(improv_list_5)))
    print("avg comp. time [avg]")
    print("   Horizon 1: {}".format(stat.mean(comp_avg_list_1)))
    print("   Horizon 5: {}".format(stat.mean(comp_avg_list_5)))
    print("avg comp. time [max]")
    print("   Horizon 1: {}".format(stat.mean(comp_max_list_1)))
    print("   Horizon 5: {}".format(stat.mean(comp_max_list_5)))
    mpl.style.use('default')
    fig, ax1 = plt.subplots()
    # NOTE(review): seed_list is sorted in place here, but the improv_* and
    # comp_* lists keep dict insertion order; if iteration order above was
    # not already sorted by seed, the plotted (x, y) pairs are mismatched.
    seed_list.sort()
    color = 'tab:red'
    ax1.set_xlabel('Random seed')
    ax1.set_ylabel('Improvement [%]') #, color=color)
    ax1.plot(seed_list, improv_list_5, color="m", linestyle='-', marker='_', label="H = 5")
    ax1.plot(seed_list, improv_list_1, color="b", linestyle='-', marker='_', label="H = 1")
    ax1.plot(seed_list, [0]*len(seed_list), color="c", linestyle='-', marker='_', label="no MILP")
    ax1.grid()
    plt.legend(title="Horizon length")
    # color = 'tab:blue'
    # ax1.plot(horizon, improvement_2, color=color, linestyle='-', marker='*')
    ax1.tick_params(axis='y') #, labelcolor=color)
    # plt.ylim((-2, 20))set_size(w=4, h=3)
    set_size(w=4, h=3)
    plt.subplots_adjust(left=0.13, bottom=0.15, right=0.92, top=0.94, wspace=None, hspace=None)
    # plt.show()
    plt.savefig("./results/plots/10x15_improv.pdf", format="pdf", pad_inches=0.01, transparent=True)
    # plt.show()
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Random seed')
    ax1.set_ylabel('Computation time [s]') #, color=color)
    color = 'm'
    # NOTE(review): asymmetric error bars pass [lower, upper] = [avg, max],
    # so the upper bar reaches avg+max rather than max -- confirm intent.
    ax1.errorbar(seed_list, comp_avg_list_5, [comp_avg_list_5, comp_max_list_5], fmt='_m', ecolor='m', lw=1, capsize=6, capthick=1, label="H = 5")
    ax1.errorbar(seed_list, comp_avg_list_1, [comp_avg_list_1, comp_max_list_1], fmt='_b', ecolor='b', lw=1, capsize=6, capthick=1, label="H = 1")
    ax1.grid()
    plt.legend(title="Horizon length")
    set_size(w=4, h=3)
    plt.subplots_adjust(left=0.13, bottom=0.15, right=0.92, top=0.94, wspace=None, hspace=None)
    plt.savefig("./results/plots/10x15_comptime.pdf", format="pdf", pad_inches=0.01, transparent=True)
    plt.show()
    return 0
if __name__ == "__main__":
main()
| {
"alphanum_fraction": 0.608269096,
"author": null,
"avg_line_length": 36.1265822785,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8374e3132959583a9d7a8cc4c0390b12c57de3d4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-11-22T10:58:38.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-22T10:58:38.000Z",
"max_forks_repo_head_hexsha": "76b97fd5aa3898fd6cb6f74f8d87140555c92af5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "alexberndt/mobile-AGV-optimization",
"max_forks_repo_path": "python/results/old/delay_random_10x15/plot_results.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "76b97fd5aa3898fd6cb6f74f8d87140555c92af5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "alexberndt/mobile-AGV-optimization",
"max_issues_repo_path": "python/results/old/delay_random_10x15/plot_results.py",
"max_line_length": 162,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "76b97fd5aa3898fd6cb6f74f8d87140555c92af5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "alexberndt/mobile-AGV-optimization",
"max_stars_repo_path": "python/results/old/delay_random_10x15/plot_results.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-19T09:41:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-22T03:07:08.000Z",
"num_tokens": 1580,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5708
} |
'''
Created on Sep 23, 2021
@author: immanueltrummer
'''
import codexdb.code.generic
from contextlib import redirect_stdout
from io import StringIO
import pandas as pd
import sys
class PythonGenerator(codexdb.code.generic.Generator):
""" Generates Python code. """
    def execute(self, db_id, question, generated):
        """Execute model-generated code for a question against a database.

        Builds a data-loading prefix via self.generate (in execution mode),
        prunes the generated code, runs the concatenation with exec(), and
        returns the captured stdout.

        Parameters
        ----------
        db_id : database identifier used to build the loading prefix.
        question : natural-language question the code should answer.
        generated : raw code text produced by the model.

        Returns
        -------
        str : captured stdout of the executed code, or '' if any
            exception was raised during execution.
        """
        # Temporarily switch to execution mode so generate() emits real
        # file paths rather than prompt-style placeholders.
        self.for_prompt = False
        code_prefix = self.generate(db_id, question)
        self.for_prompt = True
        pruned = self._prune_code(generated)
        all_code = code_prefix + pruned
        print(f'Executing \n{all_code}\n---')
        # SECURITY: exec() runs model-generated code with full interpreter
        # privileges; only use in a trusted/sandboxed environment.
        try:
            f = StringIO()
            with redirect_stdout(f):
                exec(all_code)
            return f.getvalue()
        except Exception as e:
            sys.stderr.write(f'Exception: {e}\n')
            return ''
def _add_context(self):
snippets = []
snippets += [(0, 'import pandas as pd')]
snippets += [(1, 'import numpy as np')]
if not self.for_prompt:
snippets += [(2, 'pd.set_option("max_columns", None)')]
snippets += [(3, 'pd.set_option("max_colwidth", None)')]
snippets += [(4, 'pd.set_option("max_rows", None)')]
return snippets
def _add_data_constraints(self, db_json):
snippets = []
if self.for_prompt:
tables = db_json['table_names_original']
columns = db_json['column_names_original']
f_keys = db_json['foreign_keys']
for f_idx, f_key in enumerate(f_keys):
col_1_idx, col_2_idx = f_key
tbl_1_idx, col_1 = columns[col_1_idx]
tbl_2_idx, col_2 = columns[col_2_idx]
tbl_1 = tables[tbl_1_idx]
tbl_2 = tables[tbl_2_idx]
question = f'What are the entries from {tbl_1} and {tbl_2}?'
snippets += self._add_task(db_json, question, 5000 + f_idx * 100)
code = f"print({tbl_1}.merge({tbl_2}, left_on='{col_1}', right_on='{col_2}'))"
snippets += [(5010 + f_idx * 100, code)]
return snippets
def _add_data_load(self, db_json, tbl_idx):
db_id = db_json['db_id']
tables = db_json['table_names_original']
table = tables[tbl_idx]
path_prefix = '' if self.for_prompt else f'{self.spider_dir}/database/{db_id}/'
code = f"{table} = pd.read_csv('{path_prefix}{table}.csv')"
priority = 1000 + tbl_idx * self.tbl_p_step
return [(priority, code)]
def _add_data_samples(self, db_json, tbl_idx):
db_id = db_json['db_id']
tables = db_json['table_names_original']
table = tables[tbl_idx]
df = pd.read_csv(f'{self.spider_dir}/database/{db_id}/{table}.csv')
snippets = []
start_priority = 990 + tbl_idx * self.tbl_p_step
snippets += [(start_priority, f'# Sample data from {table}.csv:')]
for row_ctr, row in df.iloc[0:2,:].reset_index().iterrows():
priority = start_priority + row_ctr + 1
code = f'# {list(row)}'
snippets += [(priority, code)]
return snippets
def _add_data_schema(self, db_json, tbl_idx):
tables = db_json['table_names_original']
tbl_name = tables[tbl_idx]
all_columns = db_json['column_names_original']
tbl_columns = [c[1] for c in all_columns if c[0] == tbl_idx]
quoted_cols = [f"'{c}'" for c in tbl_columns]
col_list = ', '.join(quoted_cols)
code = f"{tbl_name}.columns = [{col_list}]"
priority = 1001 + tbl_idx * self.tbl_p_step
return [(priority, code)]
def _add_data_types(self, db_json, tbl_idx):
tables = db_json['table_names_original']
tbl_name = tables[tbl_idx]
col_info = db_json['column_names_original']
col_types = db_json['column_types']
t_items = []
for col_idx, col_info in enumerate(col_info):
col_tbl, col_name = col_info
if col_tbl == tbl_idx:
sql_type = col_types[col_idx]
d_type = self._sql_to_dtype(sql_type)
t_item = f"'{col_name}':{d_type}"
t_items.append(t_item)
priority = 1002 + tbl_idx * self.tbl_p_step
# snippet = f'{tbl_name} = {tbl_name}.astype({{{", ".join(t_items)}}})'
snippet = f'# Column types in {tbl_name}: {", ".join(t_items)}'
# return [(priority, snippet)]
return []
def _add_examples(self, db_json, question):
return []
def _add_task(self, db_json, question, first_priority):
q_requoted = question.replace("'", '"')
snippets = [(first_priority, f"# {q_requoted} Print answer.")]
snippets += [(first_priority+1, f"print('{q_requoted}')")]
return snippets
def _cmd_load(self, table, data_path):
return f"{table} = pd.read_csv('{data_path}')"
def _prune_code(self, generated):
""" Prune generated code.
Args:
generated: code generated by Codex
Returns:
code parts that likely answer query
"""
gen_lines = generated.split('\n')
gen_lines = [g for g in gen_lines if g]
if len(gen_lines) > 1:
gen_lines.pop()
if gen_lines:
first_line = gen_lines[0]
if not first_line.startswith('print('):
gen_lines[0] = 'print(' + first_line + ')'
pruned = []
for line in gen_lines:
if not line.startswith('print('):
break
else:
pruned.append(line)
return '\n'.join(pruned)
def _sql_to_dtype(self, sql_type):
""" Translates SQL type into dtype for pandas data frame.
Args:
sql_type: SQL column type
Returns:
dtype of Pandas data frame
"""
sql_type = sql_type.lower()
if sql_type == 'text':
return 'object'
elif sql_type == 'number':
return 'np.float64'
else:
return 'object' | {
"alphanum_fraction": 0.5561273721,
"author": null,
"avg_line_length": 35.7356321839,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3b0d59aa438ce0d78f34d2aaa97aa747099efe6a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "15ab6268c95e8a283b69e17d5fa4cb7589580a27",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "itrummer/CodexDB",
"max_forks_repo_path": "src/codexdb/deprecated/code/python.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "15ab6268c95e8a283b69e17d5fa4cb7589580a27",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "itrummer/CodexDB",
"max_issues_repo_path": "src/codexdb/deprecated/code/python.py",
"max_line_length": 94,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "15ab6268c95e8a283b69e17d5fa4cb7589580a27",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "itrummer/CodexDB",
"max_stars_repo_path": "src/codexdb/deprecated/code/python.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1486,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6218
} |
#=
This is a very crude first stab at the Tables.jl interface
https://github.com/JuliaData/Tables.jl
=#
using Tables

# Declare KeyedArray as a Tables.jl source with row-based access.
Tables.istable(::Type{<:KeyedArray}) = true
Tables.rowaccess(::Type{<:KeyedArray}) = true
function Tables.rows(A::Union{KeyedArray, NdaKa})
    # Column labels: one per dimension, plus :value for the array entries.
    # NOTE: should gensym() if :value already appears among the dim names.
    if hasnames(A)
        labels = (dimnames(A)..., :value)
    else
        labels = (ntuple(d -> Symbol(:dim_, d), ndims(A))..., :value)
    end
    keyvecs = keys_or_axes(A)
    row_at(I) = NamedTuple{labels}((map(getindex, keyvecs, I)..., A[I...]))
    # Vectorator hides the n-dimensional shape of Iterators.product, so
    # consumers (and Tables.Schema below) see a flat row iterator.
    return (row_at(I) for I in Vectorator(Iterators.product(axes(A)...)))
end
#=
rr = wrapdims(rand(2,3), 11:12, 21:23)
nn = wrapdims(rand(2,3), a=11:12, b=21:23)
Tables.rows(rr) |> collect |> vec
Tables.rows(nn) |> collect |> vec
Tables.Schema(nn) # define a struct? Now below...
# No error if Tables.rows's generator has size,
# it uses Vectorator mostly to give Tables.Schema something to find.
=#
# Column access is only offered when the parent array is linearly indexed,
# since building the :value column below relies on vec(parent(A)).
Tables.columnaccess(::Type{<:KeyedArray{T,N,AT}}) where {T,N,AT} =
    IndexStyle(AT) === IndexLinear()
function Tables.columns(A::Union{KeyedArray, NdaKa})
    # One label per dimension plus :value, same naming scheme as Tables.rows.
    labels = if hasnames(A)
        (dimnames(A)..., :value)
    else
        (ntuple(d -> Symbol(:dim_, d), ndims(A))..., :value)
    end
    keyvecs = keys_or_axes(A)
    # Materialise each key column by cycling its keys over the full product grid.
    keycols = ntuple(ndims(A)) do d
        vec([combo[d] for combo in Iterators.product(keyvecs...)])
    end
    return NamedTuple{labels}((keycols..., vec(parent(A))))
end
# Type piracy (🏴‍☠️): neither Schema nor NamedTuple is owned here.
# Infers each column's type from its first entry.
function Tables.Schema(nt::NamedTuple)
    coltypes = map(col -> typeof(first(col)), values(nt))
    return Tables.Schema(keys(nt), coltypes)
end
#=
Ah, iterators aren't allowed for columns, must be indexable:
https://github.com/JuliaData/Tables.jl/issues/101
They could be something like this, but seems overkill?
https://github.com/MichielStock/Kronecker.jl
https://github.com/JuliaArrays/LazyArrays.jl#kronecker-products
Tables.columns(nn)
map(collect, Tables.columns(nn))
using DataFrames
DataFrame(rand(2,3))
DataFrame(nn) # doesn't see Tables
dd1 = DataFrame(Tables.rows(nn))
dd2 = DataFrame(Tables.columns(nn))
=#
"""
Vectorator(iter)
Wrapper for iterators which ensures they do not have an n-dimensional size.
Tries to ensure that `collect(Vectorator(iter)) == vec(collect(iter))`.
"""
struct Vectorator{T}
iter::T
end
_vec(iter) = (x for x in Vectorator(iter))
Base.iterate(x::Vectorator, s...) = iterate(x.iter, s...)
Base.length(x::Vectorator) = length(x.iter)
Base.IteratorSize(::Type{Vectorator{T}}) where {T} =
Base.IteratorSize(T) isa Base.HasShape ? Base.HasLength() : IteratorSize(T)
Base.IteratorEltype(::Type{Vectorator{T}}) where {T} = Base.IteratorEltype(T)
Base.eltype(::Type{Vectorator{T}}) where {T} = eltype(T)
# Schema for the row iterator produced by Tables.rows above: peek at the
# first NamedTuple to obtain column names and concrete value types.
function Tables.Schema(rows::Base.Generator{<:Vectorator})
    sample = first(rows)
    return Tables.Schema(keys(sample), map(typeof, values(sample)))
end
# struct OneKron{T, AT} <: AbstractVector{T}
# data::AT
# inner::Int
# outer::Int
# end
# Tables.materializer(A::KeyedArray) = wrapdims
# function wrapdims(tab)
# sch = Tables.Schema(tab)
# for r in Tables.rows(tab)
# end
# end
"""
AxisKeys.populate!(A, table, value; force=false)
Populate `A` with the contents of the `value` column in a provided `table`, matching the
[Tables.jl](https://github.com/JuliaData/Tables.jl) API. The `table` must contain columns
corresponding to the keys in `A` and implements `Tables.rows`. If the keys in `A` do not
uniquely identify rows in the `table` then an `ArgumentError` is throw. If `force` is true
then the duplicate (non-unique) entries will be overwritten.
"""
function populate!(A, table, value::Symbol; force=false)
# Use a BitArray mask to detect duplicates and error instead of overwriting.
mask = force ? falses() : falses(size(A))
for r in Tables.rows(table)
vals = Tuple(Tables.getcolumn(r, c) for c in dimnames(A))
inds = map(findindex, vals, axiskeys(A))
# Handle duplicate error checking if applicable
if !force
# Error if mask already set.
mask[inds...] && throw(ArgumentError("Key $vals is not unique"))
# Set mask, marking that we've set this index
setindex!(mask, true, inds...)
end
# Insert our value into the data array
setindex!(A, Tables.getcolumn(r, value), inds...)
end
return A
end
"""
wrapdims(table, value, names...; default=undef, sort=false, force=false)
Construct `KeyedArray(NamedDimsArray(A,names),keys)` from a `table` matching
the [Tables.jl](https://github.com/JuliaData/Tables.jl) API.
(It must support both `Tables.columns` and `Tables.rows`.)
The contents of the array is taken from the column `value::Symbol` of the table.
Each symbol in `names` specifies a column whose unique entries
become the keys along a dimenension of the array.
If there is no row in the table matching a possible set of keys,
then this element of the array is undefined, unless you provide the `default` keyword.
If several rows share the same set of keys, then by default an `ArgumentError` is thrown.
Keyword `force=true` will instead cause these non-unique entries to be overwritten.
Setting `AxisKeys.nameouter() = false` will reverse the order of wrappers produced.
"""
function wrapdims(table, value::Symbol, names::Symbol...; kw...)
if nameouter() == false
_wrap_table(KeyedArray, identity, table, value, names...; kw...)
else
_wrap_table(NamedDimsArray, identity, table, value, names...; kw...)
end
end
"""
wrapdims(df, UniqueVector, :val, :x, :y)
Converts at Tables.jl table to a `KeyedArray` + `NamedDimsArray` pair,
using column `:val` for values, and columns `:x, :y` for names & keys.
Optional 2nd argument applies this type to all the key-vectors.
"""
function wrapdims(table, KT::Type, value::Symbol, names::Symbol...; kw...)
if nameouter() == false
_wrap_table(KeyedArray, KT, table, value, names...; kw...)
else
_wrap_table(NamedDimsArray, KT, table, value, names...; kw...)
end
end
# Shared implementation behind both wrapdims methods: build the key vectors,
# allocate the data array, then fill it row by row via populate!.
function _wrap_table(AT::Type, KT, table, value::Symbol, names::Symbol...; default=undef, sort::Bool=false, kwargs...)
    cols = Tables.columns(table)
    # One (name => key-vector) pair per requested dimension, keys de-duplicated.
    key_pairs = map(names) do dim
        keyvec = unique(Tables.getcolumn(cols, dim))
        sort && Base.sort!(keyvec)
        dim => KT(keyvec)
    end
    value_col = Tables.getcolumn(cols, value)
    dims = length.(last.(key_pairs))
    # Allocate the destination; widen the eltype if a `default` fill is wanted.
    if default === undef
        data = similar(value_col, dims)
    else
        data = similar(value_col, Union{eltype(value_col), typeof(default)}, dims)
        fill!(data, default)
    end
    out = AT(data; key_pairs...)
    populate!(out, table, value; kwargs...)
    return out
end
| {
"alphanum_fraction": 0.6658861408,
"author": null,
"avg_line_length": 31.200913242,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "6fb910fdc9289f2c0b8c9e14b6adbaa89150eff0",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ec293e172abfd832f2fa31ae190c4737b0a18f7d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jw3126/AxisKeys.jl",
"max_forks_repo_path": "src/tables.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ec293e172abfd832f2fa31ae190c4737b0a18f7d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jw3126/AxisKeys.jl",
"max_issues_repo_path": "src/tables.jl",
"max_line_length": 118,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ec293e172abfd832f2fa31ae190c4737b0a18f7d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jw3126/AxisKeys.jl",
"max_stars_repo_path": "src/tables.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1883,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 6833
} |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Created on Mon Aug 03 19:02:55 2015
@author: Erik Herrmann
"""
import numpy as np
from .global_transform_constraint import GlobalTransformConstraint
from .. import SPATIAL_CONSTRAINT_TYPE_KEYFRAME_RELATIVE_POSITION
class RelativeTransformConstraint(GlobalTransformConstraint):
    """Keyframe constraint on a position given relative to a joint.

    * constraint_desc: dict
        Contains joint, position, orientation and semantic Annotation,
        plus an "offset" -- presumably expressed in the joint's local
        frame (it is mapped through the joint's global matrix).
    """

    def __init__(self, skeleton, constraint_desc, precision, weight_factor=1.0):
        super(RelativeTransformConstraint, self).__init__(
            skeleton, constraint_desc, precision, weight_factor
        )
        self.constraint_type = SPATIAL_CONSTRAINT_TYPE_KEYFRAME_RELATIVE_POSITION
        self.offset = constraint_desc["offset"]

    def _evaluate_joint_position(self, frame):
        # Map the local offset into world space via the joint's global
        # transform, then measure the distance to the target position.
        joint_matrix = self.skeleton.nodes[self.joint_name].get_global_matrix(frame)
        world_pos = np.dot(joint_matrix, self.offset)[:3]
        return np.linalg.norm(self.position - world_pos)
| {
"alphanum_fraction": 0.7628915663,
"author": null,
"avg_line_length": 39.9038461538,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ac28a04a55678876fd62ce33f43b481d9bfca8a5",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-07-20T06:57:08.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-20T06:57:08.000Z",
"max_forks_repo_head_hexsha": "02c77aab72aa4b58f4067c720f5d124f0be3ea80",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dfki-asr/morphablegraphs",
"max_forks_repo_path": "morphablegraphs/constraints/spatial_constraints/keyframe_constraints/relative_transform_constraint.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "02c77aab72aa4b58f4067c720f5d124f0be3ea80",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dfki-asr/morphablegraphs",
"max_issues_repo_path": "morphablegraphs/constraints/spatial_constraints/keyframe_constraints/relative_transform_constraint.py",
"max_line_length": 110,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "02c77aab72aa4b58f4067c720f5d124f0be3ea80",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dfki-asr/morphablegraphs",
"max_stars_repo_path": "morphablegraphs/constraints/spatial_constraints/keyframe_constraints/relative_transform_constraint.py",
"max_stars_repo_stars_event_max_datetime": "2021-05-12T16:59:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-03T21:07:01.000Z",
"num_tokens": 448,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2075
} |
import inspect
import sys
import itertools
import random
from abc import ABC, abstractproperty
from distutils.version import LooseVersion
import base64
import hashlib
import logging
import os
from typing import Union
import cv2
import numpy as np
from ipso_phen.ipapi.base.ip_abstract import BaseImageProcessor
from ipso_phen.ipapi.tools.common_functions import make_safe_name
import ipso_phen.ipapi.base.ip_common as ipc
# Dictionary keys used when (de)serializing tool descriptions.
CLASS_NAME_KEY = "class__name__"
MODULE_NAME_KEY = "module__name__"
PARAMS_NAME_KEY = "params"
GRID_SEARCH_PARAMS_NAME_KEY = "grid_search_params"

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class IptParam(object):
    """A single parameter of an image-processing tool.

    Wraps the parameter's metadata (name, description, default, allowed
    values), its current value, and the UI widgets bound to it.  The shape
    of ``allowed_values`` selects the widget kind: dict -> combo box,
    ``(0, 1)`` -> checkbox, other 2-tuple -> slider range, str -> text
    input/output mode.  UI interaction goes through injected callbacks
    (``ui_update_callbacks``), so this class has no direct GUI dependency.
    """

    def __init__(self, **kwargs):
        self.name = kwargs.get("name", "no_name")
        self.desc = kwargs.get("desc", "no desc")
        self.default_value = kwargs.get("default_value", "no default")
        self.allowed_values = kwargs.get("allowed_values", None)
        # Normalize lists to tuples so (0, 1) comparisons below work.
        if self.allowed_values is not None and isinstance(self.allowed_values, list):
            self.allowed_values = tuple(self.allowed_values)
        self.hint = kwargs.get("hint", "no clue")
        self.widget_type = kwargs.get("widget_type", "unk_wt")
        self.kind = kwargs.get("kind", "unk_k")
        self.options = kwargs.get("options", {})
        self._value = kwargs.get("_value", self.default_value)
        self.on_change = None  # optional callable fired when value changes
        self._widgets = {}  # role name -> widget instance
        self._grid_search_options = kwargs.get(
            "_grid_search_options", str(self.default_value)
        )
        self.grid_search_mode = False
        self.ui_update_callbacks = {}  # callback name -> callable

    def __str__(self):
        return f"[{self.name}:{self.value}]"

    def __repr__(self):
        return (
            f"{repr(self.name)}_"
            f"{repr(self.desc)}_"
            f"{repr(self.default_value)}_"
            f"{repr(self.allowed_values)}_"
            f"{repr(self.value)}"
        )

    def __eq__(self, other):
        # Identity of a parameter is its name only, not its current value.
        return self.name == other.name

    def __ne__(self, other):
        return self.name != other.name

    def __copy__(self):
        # NOTE: intentionally copies metadata only -- current value, widgets
        # and callbacks are reset to their defaults in the copy.
        new = type(self)(
            name=self.name,
            desc=self.desc,
            default_value=self.default_value,
            allowed_values=self.allowed_values,
            hint=self.hint,
            widget_type=self.widget_type,
            kind=self.kind,
            options=self.options,
        )
        return new

    def clear_widgets(self):
        """Drop all widget bindings (e.g. when the UI is rebuilt)."""
        self._widgets = {}

    def update_ui(self, callback: str, **kwargs):
        """Invoke a named UI callback if one was registered; no-op otherwise."""
        callback = self.ui_update_callbacks.get(callback, None)
        if callback is None:
            return
        callback(**kwargs)

    def init(self, tool_name, label, widget, grid_search_mode: bool = False, **kwargs):
        """Bind this parameter to its label/widget and initialize their state.

        Args:
            tool_name: name of the owning tool, used to build widget names
            label: label widget (may be None)
            widget: input or output widget (may be None)
            grid_search_mode: if True, initialize the grid-search text instead
            **kwargs: UI callbacks, stored in ui_update_callbacks

        Returns:
            True on success, False if the widget is missing or unsupported.
        """
        self.ui_update_callbacks = dict(**kwargs)
        self.update_ui(
            callback="set_name",
            widget=widget,
            new_name=f"ipt_param_{tool_name}_{self.name}",
        )
        self.update_ui(
            callback="set_name",
            widget=label,
            new_name=f"ipt_param_label_{tool_name}_{self.name}",
        )
        self.label = label
        self.update_label()
        self.grid_search_mode = grid_search_mode
        if self.is_input:
            self.input = widget
            if widget is None:
                return False
            elif grid_search_mode:
                self.update_ui(
                    callback="set_text",
                    widget=self.gs_input,
                    text=self.grid_search_options,
                )
            elif isinstance(self.allowed_values, dict):
                # Combo box: fill with allowed items.
                self.update_ui(
                    callback="add_items",
                    widget=widget,
                    items=self.allowed_values,
                    default=self.value,
                )
            elif isinstance(self.allowed_values, tuple):
                if self.allowed_values == (0, 1):
                    # Checkbox
                    self.update_ui(
                        callback="set_checked",
                        widget=widget,
                        new_check_state=self.value == 1,
                    )
                    self.update_ui(callback="set_text", widget=widget, text=self.desc)
                elif len(self.allowed_values) == 2:
                    # Slider with (min, max) range
                    self.update_ui(
                        callback="set_range",
                        widget=widget,
                        min_val=self.allowed_values[0],
                        max_val=self.allowed_values[1],
                        default_val=int(self.value),
                    )
                else:
                    return False
            elif isinstance(self.allowed_values, str):
                # Text-like widgets, detected by their Qt-style attributes.
                if hasattr(widget, "textEdited"):
                    self.update_ui(callback="set_text", widget=widget, text=self.value)
                elif hasattr(widget, "clicked"):
                    self.update_ui(callback="set_text", widget=widget, text=self.desc)
                elif hasattr(widget, "insertPlainText"):
                    self.update_ui(callback="set_text", widget=widget, text=self.value)
                else:
                    return False
        if self.is_output:
            self.output = widget
            self.update_output(label_text=self.desc, output_value=self.value)
        self.update_ui(callback="set_tool_tip", widget=widget, tool_tip=self.hint)
        self.update_ui(callback="set_tool_tip", widget=label, tool_tip=self.hint)
        self.update_ui(callback="connect_call_back", widget=widget, param=self)
        return True

    def update_label(self):
        """Refresh the label text (sliders show the current value in it)."""
        lbl = self.label
        if lbl is None:
            return False
        if (
            isinstance(self.allowed_values, dict)
            or isinstance(self.allowed_values, str)
            or (self.widget_type == "spin_box")
        ):
            self.update_ui(callback="set_text", widget=lbl, text=self.desc)
        elif isinstance(self.allowed_values, tuple) and (len(self.allowed_values) == 2):
            self.update_ui(
                callback="set_text", widget=lbl, text=f"{self.desc}: {self.value}"
            )
        else:
            return False
        self.update_ui(callback="set_tool_tip", widget=lbl, tool_tip=self.hint)
        return True

    def update_input(self, new_values=None):
        """Push the current value (and optionally new allowed values) to the widget.

        Args:
            new_values: replacement allowed values (dict or tuple), optional

        Returns:
            True on success, False if there is no usable input widget.
        """
        if not self.is_input:
            return False
        widget = self.input
        if widget is None:
            return False
        if self.kind == "button":
            return True
        elif isinstance(self.allowed_values, dict):
            # NOTE(review): `... != {}` compares a set with an empty dict and
            # is therefore always True, so any dict in new_values triggers a
            # rebuild; left as-is to preserve existing UI behavior.
            if (
                (new_values is not None)
                and isinstance(new_values, dict)
                and (self.allowed_values.keys() - new_values.keys() != {})
            ):
                if self.options.get("enable_none", False) is True:
                    self.allowed_values = {**{"none": "none"}, **new_values}
                else:
                    self.allowed_values = new_values
                # Repopulate the combo box, keeping the current value.
                bck_value = self.value
                self.update_ui(callback="clear", widget=widget)
                self.update_ui(
                    callback="add_items",
                    widget=widget,
                    items=self.allowed_values,
                    default=bck_value,
                )
                self._value = bck_value
            else:
                # Just select the entry matching the current value.
                for i, key in enumerate(self.allowed_values):
                    if self.value == key:
                        self.update_ui(
                            callback="set_current_index", widget=widget, index=i
                        )
                        break
        elif isinstance(self.allowed_values, tuple):
            if self.allowed_values == (0, 1):
                self.update_ui(
                    callback="set_checked", widget=widget, new_check_state=self.value == 1
                )
            elif len(self.allowed_values) == 2:
                if (
                    (new_values is not None)
                    and isinstance(new_values, tuple)
                    and (self.allowed_values != new_values)
                ):
                    # Adjust the slider range before setting the value.
                    self.allowed_values = new_values
                    self.update_ui(
                        callback="set_range",
                        widget=widget,
                        min_val=self.allowed_values[0],
                        max_val=self.allowed_values[1],
                        default_val=None,
                    )
                self.update_ui(callback="set_value", widget=widget, value=int(self.value))
            else:
                return False
        elif isinstance(self.allowed_values, str):
            self.update_ui(callback="set_text", widget=widget, text=self.value)
        else:
            return False
        self.update_ui(callback="set_tool_tip", widget=widget, tool_tip=self.hint)
        return True

    def update_output(
        self, label_text: str = "", output_value=None, ignore_list=(), invert=False
    ):
        """Store output_value and render it in the bound output widget.

        Args:
            label_text: new description for the label, if non-empty
            output_value: value to store and display
            ignore_list: keys to skip when rendering a table output
            invert: reverse row order when rendering a table output

        Returns:
            True on success (including when no widget is bound), else False.
        """
        if not self.is_output:
            return False
        self._value = output_value
        if label_text and isinstance(label_text, str):
            self.desc = label_text
            self.update_label()
        widget = self.output
        if widget is None:
            return True
        elif self.allowed_values == "single_line_text_output":
            self.update_ui(callback="set_text", widget=widget, text=self.value)
        elif self.allowed_values == "multi_line_text_output":
            self.update_ui(callback="clear", widget=widget)
            self.update_ui(callback="set_text", widget=widget, text=self.value)
        elif self.allowed_values == "table_output":
            self.update_ui(callback="clear", widget=widget)
            self.update_ui(
                callback="update_table",
                widget=widget,
                items=self._value,
                ignore_list=ignore_list,
                invert_order=invert,
            )
        else:
            return False
        return True

    def add_option_to_grid_search(self, new_option: str):
        """Append one option to the comma-separated grid-search string."""
        self.grid_search_options = f"{self._grid_search_options},{new_option}"

    @staticmethod
    def decode_string(gs_code: str):
        """Decode a grid-search string into a sorted list of option strings.

        Supports comma-separated literals and "min-max;step" range items
        (ranges are inclusive of both bounds).

        Args:
            gs_code: e.g. "1-10;2,50,none"

        Returns:
            sorted list of unique option strings
        """
        res = []
        for opt_ in gs_code.replace(" ", "").split(","):
            try:
                if ("-" in opt_) and (";" in opt_):
                    bd, step = opt_.split(";")
                    left, right = bd.split("-")
                    left, right = min(int(left), int(right) + 1), max(
                        int(left), int(right) + 1
                    )
                    res.extend([i for i in range(left, right, int(step))])
                else:
                    res.append(opt_)
            except ValueError as e:
                logger.exception(f'String decoding failed: "{repr(e)}"')
        # Fix: res may mix ints (from ranges) and strings (from literals);
        # plain sorted() raised TypeError on such input.  Sort ints first
        # (numerically), then strings, which preserves the previous output
        # for homogeneous inputs.
        return [str(i) for i in sorted(set(res), key=lambda v: (isinstance(v, str), v))]

    def decode_grid_search_options(self):
        """Decode this parameter's grid-search string into option strings."""
        return self.decode_string(self._grid_search_options)

    def auto_fill_grid_search(self, step=None):
        """Build a default grid-search string covering all allowed values.

        Args:
            step: range step for sliders; defaults to a tenth of the span

        Returns:
            grid-search string, '' when the widget type has no obvious grid,
            or False when there is no usable input widget.
        """
        if not self.is_input:
            return False
        widget = self.input
        if widget is None:
            return False
        if isinstance(self.allowed_values, dict):
            return ",".join([k for k in self.allowed_values.keys()])
        elif isinstance(self.allowed_values, tuple):
            if self.allowed_values == (0, 1):
                return "0,1"
            elif len(self.allowed_values) == 2:
                min_ = min(self.allowed_values[0], self.allowed_values[1])
                max_ = max(self.allowed_values[0], self.allowed_values[1])
                if step is None:
                    step = (max_ - min_) // 10
                return f"{min_}-{max_};{step}"
            else:
                return ""
        else:
            return ""

    @property
    def value(self):
        """Current value; setting it fires the on_change hook when it differs."""
        return self._value

    @value.setter
    def value(self, value):
        if value != self._value:
            self._value = value
            if self.on_change is not None:
                self.on_change(self)

    @property
    def str_value(self):
        """Value as source-style text (strings come back quoted)."""
        if isinstance(self.value, str):
            return f"'{self.value}'"
        else:
            return str(self.value)

    @property
    def grid_search_options(self):
        """Comma-separated grid-search string; setting it syncs the widget."""
        return self._grid_search_options

    @grid_search_options.setter
    def grid_search_options(self, value):
        if self._grid_search_options != value:
            self._grid_search_options = value
            widget = self.gs_input
            if widget is not None:
                self.update_ui(callback="set_text", widget=widget, text=value)

    @property
    def input(self):
        # Input widget (or None if not bound).
        return self._widgets.get("input", None)

    @input.setter
    def input(self, value):
        self._widgets["input"] = value

    @property
    def output(self):
        # Output widget (or None if not bound).
        return self._widgets.get("output", None)

    @output.setter
    def output(self, value):
        self._widgets["output"] = value

    @property
    def label(self):
        # Label widget (or None if not bound).
        return self._widgets.get("label", None)

    @label.setter
    def label(self, value):
        self._widgets["label"] = value

    @property
    def gs_label(self):
        # Grid-search label widget; setter also pushes the description text.
        return self._widgets.get("gs_label", None)

    @gs_label.setter
    def gs_label(self, value):
        self._widgets["gs_label"] = value
        self.update_ui(callback="set_text", widget=value, text=self.desc)

    @property
    def gs_input(self):
        # Grid-search text-input widget.
        return self._widgets.get("gs_input", None)

    @gs_input.setter
    def gs_input(self, value):
        self._widgets["gs_input"] = value

    @property
    def gs_auto_fill(self):
        # Grid-search "auto fill" button widget.
        return self._widgets.get("gs_auto_fill", None)

    @gs_auto_fill.setter
    def gs_auto_fill(self, value):
        self._widgets["gs_auto_fill"] = value

    @property
    def gs_copy_from_param(self):
        # Grid-search "copy from parameter" button widget.
        return self._widgets.get("gs_copy_from_param", None)

    @gs_copy_from_param.setter
    def gs_copy_from_param(self, value):
        self._widgets["gs_copy_from_param"] = value

    @property
    def gs_reset(self):
        # Grid-search "reset" button widget.
        return self._widgets.get("gs_reset", None)

    @gs_reset.setter
    def gs_reset(self, value):
        self._widgets["gs_reset"] = value

    @property
    def is_input(self):
        # Everything is an input except string modes without "input" in them.
        return not isinstance(self.allowed_values, str) or (
            "input" in self.allowed_values
        )

    @property
    def is_output(self):
        return isinstance(self.allowed_values, str) and not (
            "input" in self.allowed_values
        )

    @property
    def is_neutral(self):
        # Pure display elements (labels) are neither real input nor output.
        return self.is_output and (self.allowed_values in ["label"])

    @property
    def is_default(self):
        return self.value == self.default_value
class IptParamHolder(object):
def __init__(self, **kwargs):
super(IptParamHolder, self).__init__()
self.block_feedback = False
self._kwargs = None
self._param_list = kwargs.get("_param_list", None)
if self._param_list is None:
self._param_list = []
self.build_params()
for key, value in kwargs.items():
self.set_or_add_value(key, value)
def __eq__(self, other) -> bool:
if (other is None) or (len(self.gizmos) != len(other.gizmos)):
return False
else:
for s, o in zip(self.gizmos, other.gizmos):
if (s.value != o.value) or (s.name != o.name):
return False
return True
def copy(self):
return self.__class__(**self.params_to_dict())
def build_params(self):
pass
def reset(self, is_update_widgets: bool = True):
self.block_feedback = True
try:
for p in self._param_list:
p.value = p.default_value
if is_update_widgets:
p.update_label()
p.update_input()
p.update_output()
finally:
self.block_feedback = False
def add(self, new_item) -> IptParam:
try:
self._param_list.append(new_item)
except Exception as e:
logger.exception(f'Failed to add param "{repr(e)}')
else:
return new_item
def add_combobox(
self,
name: str,
desc: str,
default_value: str = "",
values: dict = {},
hint: str = "",
) -> IptParam:
try:
param = IptParam(
name=name,
desc=desc,
default_value=default_value,
allowed_values=values,
hint=hint,
)
param.widget_type = "combo_box"
return self.add(param)
except Exception as e:
logger.exception(f'Failed to add param "{repr(e)}')
def add_slider(
self,
name: str,
desc: str,
default_value: int = 0,
minimum: int = 0,
maximum: int = 100,
hint: str = "",
) -> IptParam:
param = IptParam(
name=name,
desc=desc,
default_value=default_value,
allowed_values=(minimum, maximum),
hint=hint,
)
param.widget_type = "slider"
return self.add(param)
def add_checkbox(self, name, desc, default_value, hint="") -> IptParam:
"""Add a checkbox to the widgets
Arguments:
name {str} -- name used to access the widget
desc {str} -- name used for the label associated to the comobobox
default_value {str} -- default value, dictionary key
Keyword Arguments:
hint {str} -- hover hint (default: {''})
Returns:
IptParam -- built param
"""
param = IptParam(
name=name,
desc=desc,
default_value=default_value,
allowed_values=(0, 1),
hint=hint,
)
param.widget_type = "checkbox"
return self.add(param)
def add_text_input(
self,
name: str,
desc: str,
default_value: str = "-",
hint: str = "",
is_single_line: bool = True,
) -> IptParam:
if is_single_line:
mode_ = "single_line_text_input"
else:
mode_ = "multi_line_text_input"
param = IptParam(
name=name,
desc=desc,
default_value=default_value,
allowed_values=mode_,
hint=hint,
)
param.widget_type = mode_
return self.add(param)
def add_text_output(
self,
is_single_line: bool,
name: str,
desc: str,
default_value: str = "-",
hint: str = "",
) -> IptParam:
if is_single_line:
mode_ = "single_line_text_output"
else:
mode_ = "multi_line_text_output"
param = IptParam(
name=name,
desc=desc,
default_value=default_value,
allowed_values=mode_,
hint=hint,
)
param.widget_type = mode_
return self.add(param)
def add_table_output(
self, name: str, desc: tuple, default_value: dict = {}, hint: str = ""
) -> IptParam:
param = IptParam(
name=name,
desc=desc,
default_value=default_value,
allowed_values="table_output",
hint=hint,
)
param.widget_type = "table_output"
return self.add(param)
def add_text_overlay(self, default_value: int = 0) -> IptParam:
param = IptParam(
name="text_overlay",
desc="Overlay text on top of images",
default_value=default_value,
allowed_values=(0, 1),
hint="Draw description text on top of images",
)
param.widget_type = "checkbox"
param.kind = "text_overlay_cb"
return self.add(param)
def add_label(self, name: str, desc: str, hint: str = "") -> IptParam:
param = IptParam(
name=name, desc=desc, default_value=desc, allowed_values="label", hint=hint
)
param.widget_type = "label"
return self.add(param)
def add_separator(self, name: str) -> IptParam:
param = IptParam(
name=name, desc="", default_value="", allowed_values="label", hint=""
)
param.widget_type = "label"
return self.add(param)
def add_color_selector(
self,
name="color",
desc="Select color",
default_value="light_steel_blue",
hint="",
enable_none: bool = False,
) -> IptParam:
if enable_none:
values = {"none": "none"}
else:
values = {}
values = {**values, **{k: k for k in ipc.all_colors_dict}}
param = IptParam(
name=name,
desc=desc,
default_value=default_value,
allowed_values=values,
hint=hint,
)
param.widget_type = "combo_box"
param.kind = "color_selector"
return self.add(param)
def add_enabled_checkbox(self) -> IptParam:
return self.add_checkbox(
name="enabled",
desc="Activate tool",
default_value=1,
hint="Toggle whether or not tool is active",
)
def add_file_naming(
self,
output_format: str = "source",
output_name: str = "as_source",
prefix_suffix: str = "",
) -> IptParam:
self.add_combobox(
name="output_format",
desc="Image output format",
default_value=output_format,
values=dict(source="As source image", jpg="JPEG", png="PNG", tiff="TIFF"),
)
self.add_combobox(
name="output_name",
desc="Output naming convention",
default_value=output_name,
values=dict(
as_source="Same as source",
hash="Use hash for anonymous names",
suffix="Add suffix to name",
prefix="Add prefix to name",
),
)
self.add_text_input(
name="prefix_suffix",
desc="Prefix or suffix",
default_value=prefix_suffix,
)
def add_channel_selector(
    self,
    default_value,
    name="channel",
    desc="Channel",
    hint: str = "",
    enable_none: bool = False,
) -> IptParam:
    """Add a combo box listing all available image channels.

    :param default_value: channel key selected by default
    :param enable_none: when True a 'none' entry is prepended and recorded
        in the param's options dict
    """
    if enable_none:
        values = {"none": "none"}
    else:
        values = {}
    # Map channel key -> human readable name for every known channel,
    # including multispectral ones (include_msp=True).
    values = {
        **values,
        **{
            channel_info[1]: ipc.get_hr_channel_name(channel_info[1])
            for channel_info in ipc.create_channel_generator(include_msp=True)
        },
    }
    param = IptParam(
        name=name,
        desc=desc,
        default_value=default_value,
        allowed_values=values,
        hint=hint,
    )
    if enable_none:
        param.options["enable_none"] = True
    param.widget_type = "combo_box"
    param.kind = "channel_selector"
    return self.add(param)
def add_arithmetic_operator(
    self,
    default_value="plus",
    name="operator",
    desc="Arithmetic operator",
    hint="Operator to use with operands",
) -> IptParam:
    """Add a combo box selecting an arithmetic operator (+, -, *, /, ^)."""
    op = IptParam(
        name=name,
        desc=desc,
        default_value=default_value,
        allowed_values={"plus": "+", "minus": "-", "mult": "*", "div": "/", "power": "^"},
        hint=hint,
    )
    op.widget_type = "combo_box"
    op.kind = "arithmetic_operator"
    return self.add(op)
def add_source_selector(
    self,
    name: str = "source_file",
    desc: str = "Select source file type",
    default_value: str = "source",
) -> IptParam:
    """Add a combo box selecting which image the tool should work on."""
    src = IptParam(
        name=name,
        desc=desc,
        default_value=default_value,
        allowed_values={
            "source": "source",
            "mask": "mask",
            "source_roi": "Source with ROIs applied",
            "process_roi": "Use roi created for process",
            "masked_source": "masked source",
            "cropped_source": "source cropped to keep ROI (if available)",
            "source_median": "source with median filter (5 if not set)",
        },
    )
    src.widget_type = "combo_box"
    src.kind = "source_selector"
    return self.add(src)
def add_color_map_selector(
    self,
    name="color_map",
    default_value="c_2",
    desc="Select pseudo color map",
    hint="",
) -> IptParam:
    """Add a combo box selecting an OpenCV pseudo-color map (keys encode the cv2 index)."""
    cmap = IptParam(
        name=name,
        desc=desc,
        default_value=default_value,
        allowed_values={
            "a_0": "Autumn",
            "b_1": "Bone",
            "c_2": "Jet",
            "d_3": "Winter",
            "e_4": "Rainbow",
            "f_5": "Ocean",
            "g_6": "Summer",
            "h_7": "Spring",
            "i_8": "Cool",
            "j_9": "HSV",
            "k_10": "Pink",
            "l_11": "Hot",
        },
        hint=hint,
    )
    cmap.widget_type = "combo_box"
    cmap.kind = "color_map_selector"
    return self.add(cmap)
def add_color_space(self, default_value) -> IptParam:
    """Add a combo box selecting the working color space (HSV/LAB/RGB)."""
    cs = IptParam(
        name="color_space",
        desc="Color space",
        default_value=default_value,
        allowed_values={"HSV": "HSV", "LAB": "LAB", "RGB": "RGB"},
    )
    cs.widget_type = "combo_box"
    cs.kind = "color_space_selector"
    return self.add(cs)
def add_roi_type(self, default_value="other") -> IptParam:
    """Add a combo box selecting the behavior attached to a ROI.

    Fix: corrected typo in the 'crop' description ("don not" -> "do not").
    """
    param = IptParam(
        name="roi_type",
        desc="Select action linked to ROI",
        default_value=default_value,
        allowed_values=dict(
            keep="Keep region inside ROI",
            delete="Delete region inside ROI",
            crop="Crop image to ROI (most tools do not support this option)",
            safe="Region inside ROI is safe",
            enforce="Check mask position",
            erode="Erode region inside ROI - mask only",
            dilate="Dilate region inside ROI - mask only",
            open="Open region inside ROI - mask only",
            close="Close region inside ROI - mask only",
            other="No predefined behavior",
        ),
    )
    param.kind = "roi_type_selector"
    return self.add(param)
def add_roi_name(self, default_value: str = "unnamed_roi") -> IptParam:
    """Add a text input holding a ROI name.

    Fix: the received default_value was previously ignored and the
    hard-coded "unnamed_roi" was always used instead.
    """
    param = self.add_text_input(
        name="roi_name", desc="ROI name", default_value=default_value
    )
    param.kind = "roi_name_selector"
    return param
def add_tool_target(self) -> IptParam:
    """Add a combo box selecting the IPT a ROI targets (populated later via update_inputs)."""
    target = IptParam(
        name="tool_target",
        desc="Target IPT",
        default_value="none",
        allowed_values={"none": "None"},
    )
    target.kind = "tool_target_selector"
    return self.add(target)
def add_roi_shape(self, default_value="rectangle") -> IptParam:
    """Add a combo box selecting the ROI shape."""
    shape = IptParam(
        name="roi_shape",
        desc="Select ROI shape",
        default_value=default_value,
        allowed_values={
            "rectangle": "Rectangle",
            "circle": "Circle, will be treated as rectangle for morphology",
        },
    )
    shape.kind = "roi_shape_selector"
    return self.add(shape)
def add_roi_settings(
    self,
    default_name: str = "unnamed_roi",
    default_type: str = "other",
    default_shape: str = "",
) -> IptParam:
    """Add the standard ROI widget group: name, type, optional shape and target tool."""
    self.add_roi_name(default_value=default_name)
    self.add_roi_type(default_value=default_type)
    # Shape widget is optional: an empty default means the tool has no shape choice.
    if default_shape:
        self.add_roi_shape(default_value=default_shape)
    self.add_tool_target()
def add_hierarchy_threshold(self, default_value: int = 35) -> IptParam:
    """Add the slider controlling the label-merging threshold."""
    self.add_slider(
        name="hierarchy_threshold",
        desc="Label merger threshold",
        default_value=default_value,
        minimum=0,
        maximum=1000,
        hint="Regions connected by an edge with weight smaller than thresh are merged",
    )
def add_edge_detector(self, default_operator: str = "canny_opcv"):
    """Add the full widget group configuring an edge detection step.

    :param default_operator: operator pre-selected in the combo box
    """
    self.add_combobox(
        name="operator",
        desc="Select edge detection operator",
        default_value=default_operator,
        values=dict(
            canny_opcv="Canny OpenCV",
            canny_scik="Canny Scikit",
            laplacian="Laplacian",
            sobel="Sobel",
            sobel_v="Sobel vertical",
            sobel_h="Sobel horizontal",
            roberts="Roberts",
            prewitt="Prewitt",
        ),
    )
    # Canny-specific settings
    self.add_spin_box(
        name="canny_sigma",
        desc="Canny's sigma for scikit, aperture for OpenCV",
        default_value=2,
        minimum=0,
        maximum=20,
        hint="Sigma.",
    )
    self.add_spin_box(
        name="canny_first",
        desc="Canny's first Threshold",
        default_value=0,
        minimum=0,
        maximum=255,
        hint="First threshold for the hysteresis procedure.",
    )
    self.add_spin_box(
        name="canny_second",
        desc="Canny's second Threshold",
        default_value=255,
        minimum=0,
        maximum=255,
        hint="Second threshold for the hysteresis procedure.",
    )
    # Settings shared by the kernel-based operators
    self.add_spin_box(
        name="kernel_size", desc="Kernel size", default_value=5, minimum=0, maximum=27
    )
    self.add_spin_box(
        name="threshold",
        desc="Threshold",
        default_value=130,
        minimum=0,
        maximum=255,
        hint="Threshold for kernel based operators",
    )
    self.add_checkbox(name="apply_threshold", desc="Apply threshold", default_value=1)
def add_binary_threshold(self, add_morphology: bool = True):
    """Add min/max threshold spin boxes, a median-filter slider and
    (optionally) the morphology operator widget group."""
    self.add_spin_box(
        name="min_t",
        desc="Threshold min value",
        default_value=0,
        minimum=0,
        maximum=255,
    )
    self.add_spin_box(
        name="max_t",
        desc="Threshold max value",
        default_value=255,
        minimum=0,
        maximum=255,
    )
    self.add_slider(
        name="median_filter_size",
        desc="Median filter size (odd values only)",
        default_value=0,
        minimum=0,
        maximum=51,
    )
    if add_morphology:
        self.add_morphology_operator()
def add_roi_selector(self):
    """Add the widgets restricting a tool's effect to named/linked ROIs."""
    self.add_text_input(
        name="roi_names",
        desc="Name of ROI to be used",
        default_value="",
        hint="Operation will only be applied inside of ROI",
    )
    self.add_combobox(
        name="roi_selection_mode",
        desc="ROI selection mode",
        default_value="all_linked",
        values=dict(
            all_linked="Select all linked ROIs",
            linked_and_named="Select all ROIs named in the list that are linked",
            all_named="Select all named ROIs regardless if they're linked or not",
        ),
    )
def add_morphology_operator(self, default_operator: str = "none"):
    """Add the widget group configuring a morphology pass (operator,
    kernel size/shape, iteration count)."""
    self.add_combobox(
        name="morph_op",
        desc="Morphology operator",
        default_value=default_operator,
        values=dict(
            none="none", erode="erode", dilate="dilate", open="open", close="close"
        ),
    )
    self.add_spin_box(
        name="kernel_size",
        desc="Kernel size",
        default_value=3,
        minimum=3,
        maximum=101,
    )
    self.add_combobox(
        name="kernel_shape",
        desc="Kernel shape",
        default_value="ellipse",
        values=dict(ellipse="ellipse", rectangle="rectangle", cross="cross"),
    )
    self.add_spin_box(
        name="proc_times", desc="Iterations", default_value=1, minimum=1, maximum=100
    )
def add_exposure_viewer_switch(self):
    """Add the checkbox toggling the over/under exposure viewer.

    Fix: corrected typo in the description ("over an under" -> "over and under").
    """
    self.add_checkbox(
        name="show_over_under",
        desc="Show over and under exposed parts",
        default_value=0,
    )
def add_button(
    self, name: str, desc: str, index: int = 0, hint: str = ""
) -> IptParam:
    """Add a push-button widget.

    Fix: the method is annotated -> IptParam but previously returned None;
    it now returns the added param like the other add_* helpers.
    """
    param = IptParam(
        name=name,
        desc=desc,
        default_value=index,
        allowed_values="input_button",
        hint=hint,
    )
    param.kind = "button"
    return self.add(param)
def add_spin_box(
    self,
    name: str,
    desc: str,
    default_value: int = 0,
    minimum: int = 0,
    maximum: int = 100,
    hint: str = "",
):
    """Add an integer spin box bounded by [minimum, maximum].

    Fix: now returns the added param for consistency with the other
    add_* helpers (previously returned None).
    """
    param = IptParam(
        name=name,
        desc=desc,
        default_value=default_value,
        allowed_values=(minimum, maximum),
        hint=hint,
    )
    param.widget_type = "spin_box"
    return self.add(param)
def add_date_picker(
    self, name: str, desc: str, default_value: int = 0, hint: str = ""
):
    # Placeholder: date-picker widget is not implemented; intentionally a no-op.
    pass
def build_output_filename(self) -> str:
    """Creates a fully qualified filename from data generated by add_file_naming

    Returns:
        str: File name, or None when the naming convention is unknown.

    Fixes: removed dead assignment (var_name = "hash_val") and made the
    error-path return an explicit None.
    """
    wrapper = self.wrapper
    # Build output file name according to the selected convention
    output_name_mode = self.get_value_of("output_name")
    if output_name_mode == "as_source":
        dst_name = wrapper.file_handler.file_name_no_ext
    elif output_name_mode == "hash":
        # Anonymous name derived from the tool's inputs
        dst_name = self.get_short_hash(add_plant_name=False)
    elif output_name_mode == "suffix":
        dst_name = (
            wrapper.file_handler.file_name_no_ext
            + "_"
            + self.get_value_of("prefix_suffix")
        )
    elif output_name_mode == "prefix":
        dst_name = (
            self.get_value_of("prefix_suffix")
            + "_"
            + wrapper.file_handler.file_name_no_ext
        )
    else:
        logger.error(
            f"Copy or rename image FAILED, unknown naming convention: {output_name_mode}"
        )
        return None
    # Get new extension; "source" keeps the source image's extension
    file_ext = self.get_value_of("output_format")
    if file_ext == "source":
        file_ext = self.wrapper.file_handler.file_ext
    else:
        file_ext = f".{file_ext}"
    # Build destination full path
    return os.path.join(self.output_path, f"{dst_name}{file_ext}")
def reset_grid_search(self):
    """Reset every param's grid-search options to its default value and refresh widgets."""
    for param in self._param_list:
        param.grid_search_options = str(param.default_value)
        widget = param.gs_input
        if widget is None:
            continue
        self.update_ui(callback="set_text", widget=widget, text=param.grid_search_options)
def update_grid_search(self, ignore_composite: bool = True) -> None:
    """Copy each param's current value into its grid-search options.

    Params whose options already describe a composite range (";", "-" or ",")
    are left untouched when ignore_composite is True.
    """
    for param in self._param_list:
        options = param.grid_search_options
        is_composite = (";" in options) or ("-" in options) or ("," in options)
        if ignore_composite and is_composite:
            continue
        param.grid_search_options = str(param.value)
        widget = param.gs_input
        if widget is None:
            continue
        self.update_ui(callback="set_text", widget=widget, text=param.grid_search_options)
def reset_input(self) -> None:
    """Restore every input param to its default value."""
    for param in self._param_list:
        if param.is_input:
            param.value = param.default_value
def reset_output(self) -> None:
    """Restore every output param to its default value."""
    for param in self._param_list:
        if param.is_output:
            param.value = param.default_value
def find_by_name(self, name) -> IptParam:
    """Return the param called name, or None when absent."""
    return next((p for p in self._param_list if p.name == name), None)
def get_value_of(self, key, default_value=None, scale_factor=1) -> str:
    """Return the current value for key.

    Lookup order: the kwargs passed to the tool, then the declared params,
    then default_value.  Values that parse as int are returned as int,
    scaled by scale_factor; anything else is returned unchanged.

    NOTE(review): despite the -> str annotation the return type is str or
    int depending on the stored value.
    """
    # kwargs take precedence over declared params (None means "not set")
    if (self._kwargs is not None) and (key in self._kwargs):
        res = self._kwargs.get(key, None)
        if res is not None:
            return res
    p = self.find_by_name(key)
    if p is not None:
        res = p.value
    else:
        res = default_value
    try:
        tmp = int(res)
    except ValueError:
        # Not an int-like string: return as-is
        return res
    except TypeError:
        # None or non-convertible object: return as-is
        return res
    else:
        if scale_factor != 1:
            return round(tmp * scale_factor)
        else:
            return tmp
def has_param(self, key: str) -> bool:
    """True when key is a tool kwarg or a declared param name."""
    known = {} if self._kwargs is None else dict(self._kwargs)
    known.update(self.params_to_dict())
    return key in known
def has_key_matching(self, partial: str) -> bool:
    """True when any known key (kwargs or params) contains partial as a substring."""
    known = {} if self._kwargs is None else dict(self._kwargs)
    known.update(self.params_to_dict())
    return any(partial in key for key in known)
def has_keys(self, keys) -> int:
    """Count how many of the given keys are known params/kwargs."""
    return sum(1 for key in keys if self.has_param(key))
def set_value_of(self, key, value, update_widgets: bool = False):
    """Set a param's value (its default when value is None), optionally refreshing widgets."""
    param = self.find_by_name(key)
    if param is None:
        return
    param.value = param.default_value if value is None else value
    if update_widgets:
        param.update_label()
        param.update_input()
        param.update_output()
def set_or_add_value(self, key, value):
    """Set an existing param's value; create a bare param when key is unknown."""
    param = self.find_by_name(key)
    if param is None:
        self.add(
            IptParam(name=key, desc="", default_value=value, allowed_values=None)
        )
        return
    param.value = param.default_value if value is None else value
def set_or_add_param(self, src_param, allow_add):
    """Replace an existing param with a copy of src_param, optionally adding it.

    NOTE(review): returns False on the failure paths but implicitly returns
    None after a successful add — callers should not rely on the return value.
    """
    if src_param is None:
        return False
    p = self.find_by_name(src_param.name)
    if (p is None) and not allow_add:
        return False
    elif p is not None:
        # Drop the current instance before inserting the copy
        self._param_list.remove(p)
    self.add(src_param.copy())
def get(self, key, value, default=None):
    """Return attribute `value` of the param called `key`, or default when absent."""
    param = self.find_by_name(key)
    return default if param is None else getattr(param, value)
def update_output_from_dict(self, data: dict):
    """Reset outputs then push values found in data to the matching params (as strings)."""
    self.reset_output()
    for param in self._param_list:
        new_val = data.get(param.name, None)
        if new_val is not None:
            param.update_output(output_value=str(new_val))
def input_params(
    self,
    exclude_defaults: bool = False,
    excluded_params: tuple = (),
    forced_params: tuple = (),
):
    """Input params filtered by the exclusion rules; forced_params are always kept."""
    selected = []
    for p in self.gizmos:
        keep = (
            p.is_input
            and not (exclude_defaults and p.is_default)
            and (p.name not in excluded_params)
        )
        if keep or (p.name in forced_params):
            selected.append(p)
    return selected
def output_params(
    self,
    exclude_defaults: bool = False,
    excluded_params: tuple = (),
    forced_params: tuple = (),
):
    """Output params filtered by the exclusion rules; forced_params are always kept."""
    selected = []
    for p in self.gizmos:
        keep = (
            p.is_output
            and not (exclude_defaults and p.is_default)
            and (p.name not in excluded_params)
        )
        if keep or (p.name in forced_params):
            selected.append(p)
    return selected
def all_params(
    self,
    exclude_defaults: bool = False,
    excluded_params: tuple = (),
    forced_params: tuple = (),
):
    """Every param passing the exclusion rules; forced_params are always kept."""
    selected = []
    for p in self.gizmos:
        keep = not (exclude_defaults and p.is_default) and (
            p.name not in excluded_params
        )
        if keep or (p.name in forced_params):
            selected.append(p)
    return selected
def params_to_dict(
    self,
    include_input: bool = True,
    include_output: bool = False,
    include_neutral: bool = False,
):
    """Map param name -> value for the selected param categories."""
    return {
        p.name: p.value
        for p in self.gizmos
        if (include_input and p.is_input)
        or (include_output and p.is_output)
        or (include_neutral and p.is_neutral)
    }
def update_inputs(self, update_values: dict = None):
    """Refresh channel and tool-target combo boxes from update_values.

    :param update_values: optional dict with "channels" and/or "ipt_list" keys.

    Fix: replaced the mutable default argument ({}) with a None sentinel.
    """
    if update_values is None:
        update_values = {}
    channels = update_values.get("channels", None)
    ipt_list = update_values.get("ipt_list", None)
    for p in self._param_list:
        if (p.kind == "channel_selector") and (channels is not None):
            p.update_input(new_values=channels)
        elif (p.kind == "tool_target_selector") and (ipt_list is not None):
            # Always keep the "none" entry at the front of the target list
            p.update_input(new_values={**{"none": "None"}, **ipt_list})
@property
def gizmos(self):
    # All declared parameters, in insertion order.
    return self._param_list
@property
def has_input(self):
    """True when at least one param is an input."""
    return any(p.is_input for p in self._param_list)
@property
def has_output(self):
    """True when at least one param is an output."""
    return any(p.is_output for p in self._param_list)
class IptBase(IptParamHolder, ABC):
def __init__(self, wrapper=None, **kwargs):
    """Base image-processing tool; kwargs are forwarded as param values."""
    super(IptBase, self).__init__(**kwargs)
    self._wrapper = wrapper  # image processor this tool operates on
    self._result = None
    self.result = None  # goes through the result property setter
    self.demo_image = None
    self._old_lock_state = False  # wrapper lock state saved by __enter__
def __repr__(self):
    """Unambiguous representation listing every param as name=value."""
    args = ",".join(f"{p.name}={p.str_value}" for p in self.gizmos)
    return f"{type(self).__name__}({args})"
def __str__(self):
    # Compact identifier: class name + non-default inputs ("channel" always shown).
    return f"{type(self).__name__}_" + self.input_params_as_str(
        exclude_defaults=True,
        excluded_params=("progress_callback",),
        forced_params=("channel",),
    )
def __enter__(self):
    # Lock the wrapper for the duration of the with-block, remembering the
    # previous lock state so __exit__ can restore it.
    wrapper = self.wrapper
    if wrapper is not None:
        self._old_lock_state = wrapper.lock
        wrapper.lock = True
    # Yields (process result, tool) to the with statement.
    return self.process_wrapper(), self
def __exit__(self, exc_type, exc_val, exc_tb):
    # Restore the wrapper lock state saved on entry.
    self.wrapper.lock = self._old_lock_state
def short_desc(self):
    """Class name plus its most identifying param (channel, ROI name or color space)."""
    prefix = f"{type(self).__name__}_"
    # Remember the first param seen for each kind.
    first_by_kind = {}
    for p in self.gizmos:
        first_by_kind.setdefault(p.kind, str(p))
    for kind in ("channel_selector", "roi_name_selector", "color_space_selector"):
        if kind in first_by_kind:
            return prefix + str(first_by_kind[kind])
    return prefix
def copy(self, copy_wrapper: bool = True):
    """Clone this tool from its current param values, optionally sharing the wrapper."""
    if not copy_wrapper:
        return self.__class__(**self.params_to_dict())
    return self.__class__(wrapper=self.wrapper, **self.params_to_dict())
def to_json(self):
    # Serializable description of the tool: identity, param values and
    # grid-search options; consumed by from_json.
    return {
        "name": self.name,
        "package": self.package,
        CLASS_NAME_KEY: type(self).__name__,
        MODULE_NAME_KEY: type(self).__module__,
        PARAMS_NAME_KEY: self.params_to_dict(),
        GRID_SEARCH_PARAMS_NAME_KEY: {
            p.name: p.grid_search_options for p in self.gizmos
        },
    }
@classmethod
def from_json(cls, json_data: dict):
    """Rebuild a tool instance from its to_json dict.

    Returns the instance, None when the class cannot be found, or the
    raised exception instance when construction fails (callers must check).
    """
    class_name = json_data[CLASS_NAME_KEY]
    # Migrate module paths saved by older package layouts.
    module_name: str = json_data[MODULE_NAME_KEY].replace("ip_tools", "ipt")
    if "ipt" in module_name and "ipapi" not in module_name:
        module_name = module_name.replace("ipt", "ipso_phen.ipapi.ipt", 1)
    if "ipapi" in module_name and "ipso_phen" not in module_name:
        module_name = module_name.replace("ipapi", "ipso_phen.ipapi", 1)
    __import__(module_name)
    # Locate the class by name inside the freshly imported module.
    for _, obj in inspect.getmembers(sys.modules[module_name]):
        if inspect.isclass(obj) and (obj.__name__ == class_name):
            try:
                ipt = obj(**json_data[PARAMS_NAME_KEY])
                break
            except Exception as e:
                return e
    else:
        ipt = None
    if ipt is None:
        return None
    # Restore grid-search options when present in the payload.
    gs_params = json_data.get(GRID_SEARCH_PARAMS_NAME_KEY, None)
    if gs_params:
        for p in ipt.gizmos:
            gp = gs_params.get(p.name, None)
            if gp:
                p.grid_search_options = gp
    return ipt
def execute(self, param, **kwargs):
    # Pipeline execution hook; the base implementation does nothing.
    pass
def init_wrapper(self, **kwargs) -> BaseImageProcessor:
    """Initializes wrapper according to key arguments

    Keyword arguments are kept on the instance so _get_wrapper can read them.
    Pass reset_wrapper=False to keep the wrapper's current state.

    Returns:
        BaseImageProcessor -- Wrapper
    """
    self._kwargs = kwargs
    wrapper = self._get_wrapper()
    self.demo_image = None
    if kwargs.get("reset_wrapper", True) is True:
        wrapper.reset()
    return wrapper
def process_grid_search(self, **kwargs):
    """Run the tool once for every combination of its grid-search options.

    Progress and result images are reported through the progress_callback
    kwarg when present; a callback returning False aborts the search.
    """
    progress_callback = kwargs.get("progress_callback", None)
    random_grid_search = kwargs.get("random_grid_search", False)

    # NOTE(review): mutable default image_dict={} is only read here, so safe.
    def lcl_callback(step, total, msg, image_dict={}):
        if progress_callback is not None:
            return progress_callback(step, total, msg, image_dict)
        else:
            print(msg)
            return True

    # Resolve the wrapper: explicit kwarg first, else build one from kwargs.
    tmp_wrapper = kwargs.get("wrapper", None)
    if tmp_wrapper is not None:
        self.wrapper = tmp_wrapper
    elif self.wrapper is None:
        self._kwargs = kwargs
        self._wrapper = self._get_wrapper()
        self._wrapper.reset()
        self._kwargs = None
    if self.wrapper is None:
        return False
    # Cartesian product of every param's grid-search options.
    procs = list(
        itertools.product(*[p.decode_grid_search_options() for p in self.gizmos])
    )
    if random_grid_search:
        random.shuffle(procs)
    tot_ = len(procs)
    keys = [p.name for p in self.gizmos]
    lcl_callback(0, tot_, f"_____________________________________")
    lcl_callback(0, tot_, f"Instantiated tools")
    for i, p in enumerate(procs):
        # Re-instantiate the tool with this combination (ints decoded from str).
        kwargs_ = {k: (int(v) if str.isdigit(v) else v) for k, v in zip(keys, p)}
        kwargs_["progress_callback"] = progress_callback
        ip = self.__class__(**kwargs_)
        self.wrapper.image_list = []
        kwargs_["wrapper"] = self.wrapper
        kwargs_["reset_wrapper"] = False
        if ip.process_wrapper(**kwargs_):
            img_lst_ = ip.wrapper.image_list
            if len(img_lst_) > 0:
                if kwargs.get("send_all_images", False):
                    # Forward every stored image to the callback.
                    for dic in ip.wrapper.image_list:
                        go_on = lcl_callback(
                            i + 1,
                            tot_,
                            f"""{ip.name}:
{ip.input_params_as_str(exclude_defaults=True,
excluded_params=("progress_callback",))}""",
                            dic,
                        )
                        if go_on is False:
                            return
                else:
                    # Prefer the mosaic image, else fall back to the last stored one.
                    dic = ip.wrapper.retrieve_image_dict("mosaic_out")
                    if dic is None:
                        dic = ip.wrapper.retrieve_image_dict("mosaic")
                    if dic is None:
                        dic = img_lst_[len(img_lst_) - 1]
                    go_on = lcl_callback(
                        i + 1,
                        tot_,
                        f"""{ip.name}:
{ip.input_params_as_str(exclude_defaults=True,
excluded_params=("progress_callback",))}""",
                        dic,
                    )
                    if go_on is False:
                        return
        else:
            go_on = lcl_callback(i + 1, tot_, f"Failed {str(ip)}")
            if not go_on:
                return
def do_channel_failure(self, channel):
    """Log a missing-channel error and store an annotated copy of the current image."""
    msg = f"Missing {channel} channel"
    self.wrapper.store_image(self.wrapper.current_image, msg, text_overlay=True)
    logger.error(msg)
def _get_wrapper(self):
    # Resolve the wrapper from the stored kwargs, if one was passed.
    if "wrapper" in self.kwargs:
        value = self.kwargs.get("wrapper", None)
        if isinstance(value, str):
            # A path string: build a processor for it.  This goes through the
            # wrapper property setter, which assigns self._wrapper.
            self.wrapper = BaseImageProcessor(value)
        else:
            self._wrapper = value
    return self._wrapper
def get_mask(self):
    # Return the wrapper's mask.  When none exists and the current image has
    # no non-white pixel (sum over pixels != 255 is 0), fall back to the "bl"
    # channel of the image — presumably a background placeholder; confirm.
    mask = self.wrapper.mask
    if mask is None:
        img = self.wrapper.current_image
        if np.sum(img[img != 255]) == 0:
            mask = self.wrapper.get_channel(src_img=img, channel="bl")
    return mask
def to_uint8(self, img, normalize: bool = False):
    """Convert an image to uint8.

    :param img: bool, float16/32/64, int32 or uint8 array
    :param normalize: when True, uint8 images are stretched (grayscale) or
        channel-equalized (color)
    :return: uint8 image, or None on unknown dtype (error is logged)

    Fixes: error log used img.type which raises AttributeError on ndarrays
    (now img.dtype); float32 inputs are now accepted alongside float64/16.
    """
    dtype = str(img.dtype)
    if dtype == "bool":
        # Boolean mask -> 0/255 image
        img = img.astype(np.uint8)
        img[img != 0] = 255
        return img
    elif dtype in ("float64", "float32", "float16", "int32"):
        # Min/max stretch to the full 0-255 range
        return ((img - img.min()) / (img.max() - img.min()) * 255).astype(np.uint8)
    elif dtype == "uint8":
        if normalize:
            if len(img.shape) == 2:
                return ((img - img.min()) / (img.max() - img.min()) * 255).astype(
                    np.uint8
                )
            else:
                # Color image: equalize each channel independently
                c1, c2, c3 = cv2.split(img)
                c1, c2, c3 = (
                    cv2.equalizeHist(c1),
                    cv2.equalizeHist(c2),
                    cv2.equalizeHist(c3),
                )
                return np.dstack((c1, c2, c3))
        else:
            return img.copy()
    else:
        logger.error(f"Unknown source format {str(img.dtype)}")
def to_fuzzy(self, img):
    """
    Converts image to float numbers constrained between 0 & 1

    Fixes: np.float (removed in NumPy 1.24) replaced by the equivalent
    np.float64; error log used img.type (AttributeError on ndarrays),
    now img.dtype.
    :param img:
    :return: image
    """
    dtype = str(img.dtype)
    if dtype == "bool":
        return img.astype(np.uint8)
    elif dtype in ("float64", "int32", "uint8"):
        # Min/max stretch to the [0, 1] range
        return ((img - img.min()) / (img.max() - img.min()) * 1).astype(np.float64)
    else:
        logger.error(f"Unknown source format {str(img.dtype)}")
def to_bit(self, img, threshold=255):
    """
    Converts image data to either 0 or 1, be careful with what you wish for

    Fix: error log used img.type (AttributeError on ndarrays), now img.dtype.
    :param img:
    :param threshold: uint8 pixels >= threshold become 1, others 0
    :return: image
    """
    dtype = str(img.dtype)
    if dtype == "bool":
        return img.astype(np.uint8)
    elif dtype == "uint8":
        # In-place binarization against the threshold
        img[img < threshold] = 0
        img[img >= threshold] = 1
        return img
    elif dtype in ("float64", "int32"):
        # Min/max stretch then truncate to {0, 1}
        return ((img - img.min()) / (img.max() - img.min()) * 1).astype(np.uint8)
    else:
        logger.error(f"Unknown source format {str(img.dtype)}")
@staticmethod
def apply_mask(image, mask):
    # Keep only the pixels of image where mask is non-zero.
    return cv2.bitwise_and(image, image, mask=mask)
def match_image_size_to_source(
    self, img, source_mode: str = "source_file", ignore_list: tuple = ()
):
    """Crop img to match the selected source option; return it unchanged otherwise."""
    if source_mode in ignore_list:
        return img
    source_type = self.get_value_of(source_mode, "source")
    if source_type == "process_roi":
        self.wrapper.init_rois()
        return self.wrapper.crop_to_roi(img, type(self).__name__.lower())
    if source_type == "cropped_source":
        self.wrapper.init_rois()
        return self.wrapper.crop_to_keep_roi(img=img)
    return img
def get_ipt_roi(
    self, wrapper, roi_names: list = None, selection_mode: str = "all_linked"
) -> list:
    """Return the wrapper's ROIs filtered by selection_mode.

    :param wrapper: processor holding rois_list
    :param roi_names: names used by the *_named modes (None means empty)
    :param selection_mode: all_linked | linked_and_named | all_named
    :raises NotImplementedError: on unknown selection_mode

    Fix: replaced the mutable default argument ([]) with a None sentinel.
    """
    if roi_names is None:
        roi_names = []
    res = []
    for roi in wrapper.rois_list:
        if selection_mode == "all_linked":
            # Linked means the ROI targets this tool's class
            if roi.target == type(self).__name__:
                res.append(roi)
        elif selection_mode == "linked_and_named":
            if (roi.target == type(self).__name__) and (roi.name in roi_names):
                res.append(roi)
        elif selection_mode == "all_named":
            if roi.name in roi_names:
                res.append(roi)
        else:
            raise NotImplementedError
    return res
def get_short_hash(
    self, exclude_list: tuple = (), add_plant_name: bool = True
) -> Union[str, None]:
    """Build a short url-safe token hashing the tool's inputs and its wrapper."""
    wrapper = self.wrapper
    if wrapper is None:
        return None
    payload = self.input_params_as_str(
        exclude_defaults=False, excluded_params=exclude_list
    ).encode("utf-8") + str(wrapper).encode("utf-8")
    digest = hashlib.sha1(payload).digest()[0:20]
    token = make_safe_name(str(base64.urlsafe_b64encode(digest))).replace("_", "")
    if add_plant_name:
        return wrapper.plant + "_" + token
    return token
def apply_binary_threshold(self, wrapper, img, channel):
    """Threshold img on channel using the min_t/max_t params, then apply
    the configured morphology pass to the resulting mask."""
    min_ = self.get_value_of("min_t")
    max_ = self.get_value_of("max_t")
    median_filter_size = self.get_value_of("median_filter_size")
    # Median filter needs an odd size; 1 is treated as disabled.
    median_filter_size = (
        0 if median_filter_size == 1 else ipc.ensure_odd(median_filter_size)
    )
    # Tolerate swapped bounds
    min_, max_ = min(min_, max_), max(min_, max_)
    mask, _ = wrapper.get_mask(
        src_img=img,
        channel=channel,
        min_t=min_,
        max_t=max_,
        median_filter_size=median_filter_size,
    )
    return self.apply_morphology_from_params(mask)
def apply_morphology_from_params(self, mask, store_result: bool = False):
    """Apply the morphology configured by morph_op/kernel_size/kernel_shape/
    proc_times to mask; returns mask unchanged when kernel_size <= 1."""
    if mask is None:
        return None
    kernel_size = self.get_value_of("kernel_size", 0)
    iter_ = self.get_value_of("proc_times", 1)
    kernel_shape = self.get_value_of("kernel_shape", None)
    # Morphology is only defined for single-channel (mask) images.
    if not (len(mask.shape) == 2 or (len(mask.shape) == 3 and mask.shape[2] == 1)):
        logger.error("Morphology works only on mask images")
        return None
    if kernel_shape == "rectangle":
        k_shape = cv2.MORPH_RECT
    elif kernel_shape == "cross":
        k_shape = cv2.MORPH_CROSS
    else:
        k_shape = cv2.MORPH_ELLIPSE
    if kernel_size <= 1:
        return mask
    elif (kernel_size % 2 == 0) and (kernel_size > 0):
        # Kernels must have an odd size
        kernel_size += 1
    # morph_op names a method on the wrapper (erode/dilate/open/close)
    func = getattr(self._wrapper, self.get_value_of("morph_op"), None)
    if func:
        mask = func(
            mask, kernel_size=kernel_size, proc_times=iter_, kernel_shape=k_shape
        )
    if store_result:
        self.wrapper.store_image(image=mask, text="morphology_applied")
    return mask
def print_segmentation_labels(
    self, watershed_image, labels, dbg_suffix="", min_size=-1, source_image=None
):
    """Draw watershed labels on the watershed image and the source image.

    Each non-background label gets its largest contour, a min-area box and
    a "#label: area" caption; labels smaller than min_size are drawn in
    black instead of white.  Both annotated images are stored on the
    wrapper and returned.

    Fix: np.int0 (removed in NumPy 2.0) replaced by its alias np.intp.
    """
    if source_image is None:
        source_image = self._wrapper.current_image
    # loop over the unique labels returned by the Watershed algorithm
    for label in np.unique(labels):
        # label zero is the 'background': ignore it
        if label == 0:
            continue
        # allocate memory for the label region and draw it on the mask
        mask = np.zeros(watershed_image.shape[:2], dtype="uint8")
        mask[labels == label] = 255
        # detect contours in the mask and grab the largest one
        contours_ = ipc.get_contours(
            mask=mask, retrieve_mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE
        )
        c = max(contours_, key=cv2.contourArea)
        # Draw min area rect enclosing object
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.intp(box)
        x = int(rect[0][0])
        y = int(rect[0][1])
        area_ = round(rect[1][0] * rect[1][1])
        is_area_enough = area_ > min_size
        draw_color = (255, 255, 255) if is_area_enough else (0, 0, 0)
        cv2.drawContours(watershed_image, [box], 0, draw_color, 2)
        cv2.drawContours(watershed_image, [c], 0, draw_color, 4)
        cv2.putText(
            watershed_image,
            f"#{label}: {area_}",
            (x - 10, y),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.6,
            draw_color,
            2,
        )
        cv2.drawContours(source_image, [c], 0, draw_color, 4)
        cv2.putText(
            source_image,
            f"#{label}: {area_}",
            (x - 10, y),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.6,
            draw_color,
            2,
        )
    self._wrapper.store_image(
        watershed_image, f"{dbg_suffix}_vis_labels", text_overlay=True
    )
    self._wrapper.store_image(
        source_image, f"{dbg_suffix}_labels_on_source_image", text_overlay=True
    )
    return watershed_image, source_image
def help_stub(self):
    """Generate a docstring skeleton for process_wrapper: description plus params."""
    out = '"""\n' + f"{self.name}:\n" + self.description + "\n"
    out += f"Real time: {str(self.real_time)}\n" + "\n"
    out += "Keyword Arguments (in parentheses, argument name):\n"
    if self.has_input:
        for g in self.gizmos:
            if g.is_input:
                out += f" * {g.desc} ({g.name}): {g.hint}".rstrip() + "\n"
    # Separator only when both sections are present
    if self.has_input and self.has_output:
        out += "--------------\n"
    if self.has_output:
        for g in self.gizmos:
            if g.is_output and not g.is_neutral:
                out += f" * output ({g.name}): {g.desc}".rstrip() + "\n"
    out += '"""\n'
    return out
def input_params_as_str(
    self,
    exclude_defaults: bool = True,
    excluded_params: tuple = (),
    forced_params: tuple = (),
):
    """Render the selected input params as 'name=value' pairs joined by ', '."""
    pairs = (
        f"{p.name}={p.str_value}"
        for p in self.input_params(
            exclude_defaults=exclude_defaults,
            excluded_params=excluded_params,
            forced_params=forced_params,
        )
    )
    return ", ".join(pairs)
def input_params_as_html(
    self,
    exclude_defaults: bool = True,
    excluded_params: tuple = (),
    forced_params: tuple = (),
):
    """Render the selected input params as an HTML bullet list."""
    items = [
        f"<li>{p.name}={p.str_value}</li>"
        for p in self.input_params(
            exclude_defaults=exclude_defaults,
            excluded_params=excluded_params,
            forced_params=forced_params,
        )
    ]
    return "<ul>" + "".join(items) + "</ul>"
def code_imports(self, **kwargs):
    """Import lines required by the generated snippet."""
    lines = [f"from {self.__module__} import {type(self).__name__}"]
    # The wrapper import is only needed when the snippet builds a wrapper.
    if kwargs.get("build_wrapper", "yes") is not False:
        lines.append("from ipso_phen.ipapi.base.ip_abstract import BaseImageProcessor")
    return lines
def code_apply_roi(self, print_result=None, white_spaces=""):
    """Generate a snippet applying this tool's ROI to the wrapper's image.

    NOTE(review): "apply_roy" appears twice below — probably a typo for
    "apply_roi", but the generated code must match the real method name on
    the target tools; confirm before renaming.
    """
    # Continuation indent so generated params align with the call opening
    ws = "".join(
        [" " for _ in range(0, len(f"{white_spaces}ipt_res = ipt.process_wrapper("))]
    )
    params_ = f",\n{ws}".join(
        [f"{p.name}={p.str_value}" for p in self.input_params(exclude_defaults=True)]
    )
    code_ = f'{white_spaces}if wrapper is None:\n{white_spaces} raise RuntimeError("Missing wrapper")\n'
    code_ += f"{white_spaces}ipt = {type(self).__name__}({params_})\n"
    code_ += f'{white_spaces}if callable(getattr(ipt, "apply_roy")):\n'
    add_ws = " "
    code_ += f"{white_spaces}{add_ws}wrapper.current_image = ipt.apply_roy(wrapper=wrapper)\n"
    return code_
def code_body(self, **kwargs):
    """Generate the python snippet that rebuilds this tool and processes it."""
    use_with_clause = kwargs.get("use_with_clause", False)
    build_wrapper = kwargs.get("build_wrapper", "yes")
    file_name = kwargs.get("file_name", "")
    white_spaces = kwargs.get("white_spaces", "")
    target_data_base = kwargs.get("target_data_base", None)
    if file_name:
        wrapper_ = file_name
    else:
        wrapper_ = self.file_name
    # NOTE(review): this assignment unconditionally overwrites the if/else
    # above, making it dead code — confirm which behavior is intended.
    wrapper_ = "{file}"
    # Continuation indent so generated params align with the call opening
    if use_with_clause:
        ws = "".join(
            [" " for _ in range(0, len(f"{white_spaces}with {type(self).__name__}("))]
        )
    else:
        ws = "".join(
            [
                " "
                for _ in range(
                    0, len(f"{white_spaces}ipt_res = ipt.process_wrapper(")
                )
            ]
        )
    params_ = f",\n{ws}".join(
        [f"{p.name}={p.str_value}" for p in self.input_params(exclude_defaults=True)]
    )
    # Prepend the wrapper argument when the snippet must pass one explicitly
    if use_with_clause or (build_wrapper is False):
        if build_wrapper is False:
            wrapper_param = wrapper_
        else:
            wrapper_param = "wrapper"
        if params_:
            params_ = f",\n{ws}".join([f"wrapper={wrapper_param}", params_])
        else:
            params_ = f"wrapper={wrapper_param}"
    # Wrapper construction preamble
    if (build_wrapper is True) or (build_wrapper == "yes"):
        code_ = f"{white_spaces}wrapper = BaseImageProcessor({wrapper_})\n"
        if target_data_base:
            code_ += f"{white_spaces}wrapper.target_database = target_data_base\n"
        code_ += f"{white_spaces}wrapper.lock = True\n"
    elif build_wrapper == "expected":
        code_ = f'{white_spaces}if wrapper is None:\n{white_spaces} raise RuntimeError("Missing wrapper")\n'
    else:
        code_ = ""
    if use_with_clause:
        code_ += (
            f"{white_spaces}with {type(self).__name__}({params_}) as (res, ed):\n"
        )
        add_ws = " "
        code_ += f"{white_spaces}{add_ws}if res:\n"
        code_ += f"{white_spaces}{add_ws}{add_ws}return ed.result\n"
        code_ += f"{white_spaces}{add_ws}else:\n"
        code_ += (
            f"{white_spaces}{add_ws}{add_ws}"
            + 'print(f"Process error: {str(wrapper.error_holder)}")\n'
        )
    else:
        code_ += f"{white_spaces}ipt = {type(self).__name__}()\n"
        if build_wrapper is not False:
            code_ += f"{white_spaces}ipt.wrapper = wrapper\n"
        code_ += f"{white_spaces}ipt_res = ipt.process_wrapper({params_})\n"
        if self.result_name and (self.result_name != "none"):
            code_ += f"{white_spaces}{self.result_name} = ipt.result\n"
        code_ += f"{white_spaces}if not ipt_res:\n"
        code_ += (
            f"{white_spaces} "
            + 'print(f"Process error: {str(ipt.wrapper.error_holder)}")\n'
        )
    return code_
def code(self, **kwargs):
    """Full generated snippet: import lines, two blank lines, then the body."""
    imports_txt = "\n".join(self.code_imports(**kwargs))
    return imports_txt + "\n\n\n" + self.code_body(**kwargs)
def apply_test_values_overrides(self, use_cases: tuple = ()):
    # Test hook allowing tools to adjust param values per use case; no-op here.
    pass
@abstractproperty
def name(self):
    # Human readable tool name; concrete tools must override.
    # NOTE(review): abstractproperty is deprecated — @property over
    # @abstractmethod is the modern spelling.
    return "Base abstract image processing tool"
@property
def description(self):
    # Long description used in docs/help; default is empty.
    return "\n"
@property
def hint(self):
    # Prefer the process_wrapper docstring; fall back to a generated stub.
    if self.process_wrapper.__doc__ is not None:
        return inspect.getdoc(self.process_wrapper)
    else:
        return self.help_stub()
@property
def needs_doc_string(self):
    # True when process_wrapper has no docstring of its own.
    return self.process_wrapper.__doc__ is None
@property
def real_time(self):
    # Whether the tool is fast enough for live preview; overridden by tools.
    return False
@property
def wrapper(self) -> BaseImageProcessor:
    # Image processor this tool operates on.
    return self._wrapper
@wrapper.setter
def wrapper(self, value):
    # Plain assignment; no validation is performed here.
    self._wrapper = value
@property
def is_ready(self):
    # The tool can run once a wrapper has been attached.
    return self._wrapper is not None
@property
def order(self):
    # Sort key used when listing tools; high default pushes unranked tools last.
    return 9999
@property
def output_kind(self):
    # Free-form output kind tag; empty by default.
    return ""
@property
def use_case(self):
    # Tool-family tags driving input_type/output_type; overridden by tools.
    return ["none"]
@property
def result(self):
    # Last value produced by process_wrapper.
    return self._result
@result.setter
def result(self, value):
    self._result = value
@property
def result_name(self):
    # Variable name used for the result in generated code; "none" disables it.
    return "none"
@property
def kwargs(self):
    # Keyword arguments stored by init_wrapper/process calls.
    return self._kwargs
@property
def lock_once_added(self):
    # When True the UI locks this tool after it is added to a pipeline.
    return False
@property
def file_name(self):
    # Quoted source file path for generated code; "{file}" placeholder when
    # no wrapper is attached.
    if self.wrapper is not None:
        return f'"{self.wrapper.file_path}"'
    else:
        return "{file}"
@property
def package(self):
    # Package name displayed alongside the tool.
    return "IPSO Phen"
@property
def is_wip(self):
    # Work-in-progress flag; overridden by unfinished tools.
    return False
@property
def is_deprecated(self):
    # Deprecation flag; overridden by retired tools.
    return False
@property
def short_test_script(self):
    # WIP or deprecated tools only get the short test script.
    return self.is_wip or self.is_deprecated
@property
def needs_previous_mask(self):
    # True when the tool requires a mask produced earlier in the pipeline.
    return False
@property
def input_type(self):
    # Derive the expected input kind (image/mask/none) from the tool families
    # declared in use_case.
    if set(self.use_case).intersection(
        set(
            (
                ipc.ToolFamily.EXPOSURE_FIXING,
                ipc.ToolFamily.IMAGE_GENERATOR,
                ipc.ToolFamily.PRE_PROCESSING,
                ipc.ToolFamily.THRESHOLD,
                ipc.ToolFamily.WHITE_BALANCE,
                ipc.ToolFamily.ROI,
            )
        )
    ):
        return ipc.IO_IMAGE
    elif set(self.use_case).intersection(
        set((ipc.ToolFamily.FEATURE_EXTRACTION, ipc.ToolFamily.MASK_CLEANUP))
    ):
        return ipc.IO_MASK
    else:
        return ipc.IO_NONE
@property
def output_type(self):
    # Derive the produced output kind (image/mask/roi/data/none) from the
    # tool families declared in use_case.
    if set(self.use_case).intersection(
        set(
            (
                ipc.ToolFamily.EXPOSURE_FIXING,
                ipc.ToolFamily.PRE_PROCESSING,
                ipc.ToolFamily.WHITE_BALANCE,
            )
        )
    ):
        return ipc.IO_IMAGE
    elif set(self.use_case).intersection(
        set((ipc.ToolFamily.THRESHOLD, ipc.ToolFamily.MASK_CLEANUP))
    ):
        return ipc.IO_MASK
    elif set(self.use_case).intersection(set((ipc.ToolFamily.ROI,))):
        return ipc.IO_ROI
    elif set(self.use_case).intersection(
        set((ipc.ToolFamily.IMAGE_GENERATOR, ipc.ToolFamily.FEATURE_EXTRACTION))
    ):
        return ipc.IO_DATA
    elif set(self.use_case).intersection(set((ipc.ToolFamily.VISUALIZATION,))):
        return ipc.IO_IMAGE
    else:
        return ipc.IO_NONE
@property
def required_images(self):
    """Default list of additional images required: none."""
    return []
| {
"alphanum_fraction": 0.5157580859,
"author": null,
"avg_line_length": 33.7787951807,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "317d0f1d9df0954499b550845b9a946f2a3ca7b2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b0f6be8960a20dbf95ef9df96efdd22bd6e031c5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tpmp-inra/ipapi",
"max_forks_repo_path": "base/ipt_abstract.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b0f6be8960a20dbf95ef9df96efdd22bd6e031c5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tpmp-inra/ipapi",
"max_issues_repo_path": "base/ipt_abstract.py",
"max_line_length": 116,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "b0f6be8960a20dbf95ef9df96efdd22bd6e031c5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tpmp-inra/ipapi",
"max_stars_repo_path": "base/ipt_abstract.py",
"max_stars_repo_stars_event_max_datetime": "2020-06-30T06:53:36.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-06-30T06:53:36.000Z",
"num_tokens": 14724,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 70091
} |
#[0] is ours
##whole_level[1] calais
#[2] ritter
#[3] stanford
import datetime
from threading import Thread
import random
import math
from queue import Queue
import pandas as pd
import warnings
import numpy as np
import time
import pickle
import matplotlib.pyplot as plt
import copy
import matplotlib.ticker as ticker
import matplotlib
from matplotlib import rc
import matplotlib.font_manager as fm
# Global matplotlib text configuration for the figure below.
warnings.filterwarnings("ignore")
rc('font',**{'family':'dejavusans','serif':['Times']})
rc('text', usetex=False)
csfont = {'fontname':'DejaVu Sans Condensed'}
# sentence level
# whole_level=[
# [
# 1082.7956538044,
# 1050.6432401884,
# 1053.7217456599,
# 1012.5515266169,
# 1008.6493123934,
# 989.9630010678,
# 952.6808602644,
# 913.4404472567,
# 877.5589883683,
# 850.7370345206,
# 838.5608943759
# ],
# [83.5613470219,
# 86.1817237598,
# 91.4628130584,
# 89.9526238481,
# 86.8751099374,
# 92.2544695707,
# 95.2050934217,
# 97.7485665828,
# 99.7710068994,
# 101.1100325883,
# 101.5450283182
# ],
# [
# 282.5355908803,
# 286.9383684093,
# 304.265311919,
# 304.7142461953,
# 307.9814362368,
# 307.7235153447,
# 303.0300608151,
# 298.1720040698,
# 293.5781982817,
# 290.6551030755,
# 290.0621321868
# ],
# [0.7847898245,
# 0.7890123589,
# 0.7931519113,
# 0.7704950433,
# 0.7660793922,
# 0.771851223,
# 0.7642034553,
# 0.7541908589,
# 0.7508384564,
# 0.7468438025,
# 0.7450788343
# ]
# ]
# Measured values per system at each checkpoint:
# [0] TwiCS, [1] OpenCalais, [2] TwitterNLP, [3] Stanford
# (presumably throughput -- confirm against the y-axis caption below).
whole_level=[[624.4496273381776, 593.30218438052, 594.1481509218495, 652.9262676551306, 605.6753390579645, 552.3884725429233, 597.3519978583432, 606.3851824297484, 543.6730550626653, 540.0956312505397, 529.8341210809203],
[48.1899348453864, 48.66714314099523, 51.57192729540457, 58.00438734328955, 52.16690381930308, 51.4769799239461, 59.69570202760152, 64.89014424264954, 61.811010822796185, 64.19032516588791, 64.15994496041087],
[162.9386337256632, 162.03517449871248, 171.56205915928956, 196.48968989695575, 184.9371813615319, 171.70633894746533, 190.00655916273732, 197.940747671422, 181.88014489644576, 184.52417727437214, 183.27209854023903],
[0.45258928748558247, 0.4455582429242619, 0.4472240830561037, 0.49684034801617233, 0.46001656860802365, 0.4306844979493904, 0.47917249084547664, 0.5006677369437788, 0.46516603767974085, 0.47413837483176313, 0.470768660874824]]
# NOTE(review): this first list is immediately overwritten by the
# round-number checkpoints below and is therefore dead data.
tweets_been_processed_list=[173400,
350484,
527834,
682913,
849446,
1028661,
1188145,
1338782,
1500195,
1657711,
1713105
]
# X-axis checkpoints actually used for plotting.
tweets_been_processed_list=[100000,
200000,
300000,
400000,
500000,
600000,
700000,
800000,
900000,
1000000,
1035000]
# All labels share the same TTF face at different sizes; the path was
# previously repeated verbatim before each FontProperties call.
fontPath = "/usr/share/fonts/truetype/abyssinica/AbyssinicaSIL-R.ttf"
font_axis = fm.FontProperties(fname=fontPath, size=19)    # shared y-axis caption
font_axis2 = fm.FontProperties(fname=fontPath, size=24)   # x-axis label
font_legend = fm.FontProperties(fname=fontPath, size=18)  # in-plot series labels
# Three vertically stacked panels sharing the x axis; the y axis is
# "broken" across them so very different value ranges remain readable.
f, (ax, ax2,ax3) = plt.subplots(3, 1, sharex=True)
#fig, ax = plt.subplots()
params = {
    'text.usetex': False,
    'legend.fontsize': 20,
    'figure.figsize': [40, 400]
}
matplotlib.rcParams.update(params)
# Debug marker left in by the author.
print("BITTI BITTIBITTIBITTIBITTIBITTIBITTIBITTIBITTIBITTIBITTI")
# Each series is drawn on every panel whose y range it crosses.
ax.plot( tweets_been_processed_list, whole_level[0],marker='s' ,markersize=8,linewidth=1, label="TwiCS")
ax3.plot( tweets_been_processed_list, whole_level[0],marker='s' ,markersize=8,linewidth=1, label="TwiCS")
ax2.plot( tweets_been_processed_list, whole_level[0],marker='s' ,markersize=8,linewidth=1, label="TwiCS")
ax2.plot( tweets_been_processed_list, whole_level[1],marker='>' ,markersize=8,linewidth=1, label="OpenCalais")
ax3.plot( tweets_been_processed_list, whole_level[1],marker='>' ,markersize=8,linewidth=1, label="OpenCalais")
ax2.plot( tweets_been_processed_list, whole_level[2],marker='x' ,markersize=8,linewidth=1, label="TwitterNLP")
ax3.plot( tweets_been_processed_list, whole_level[2],marker='x' ,markersize=8,linewidth=1, label="TwitterNLP")
ax3.plot( tweets_been_processed_list, whole_level[3],marker='o' , markersize=8, linewidth=1,label="Stanford")
# Per-panel y ranges that define the broken scale.
ax.set_ylim(500,700) # outliers only
ax2.set_ylim(40, 220)
ax3.set_ylim(0,1)
# Hide the spines where panels meet so they read as one broken axis.
ax.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax.xaxis.tick_top()
ax2.xaxis.tick_top()
ax.tick_params(labeltop='off') # don't put tick labels at the top
ax.tick_params(labelbottom='off',axis='both', which='major', labelsize=12)
ax2.tick_params(labeltop='off',axis='both', which='major', labelsize=12)
ax3.tick_params(labeltop='off',axis='both', which='major', labelsize=12) # don't put tick labels at the top
# don't put tick labels at the top
# ax2.xaxis.tick_bottom()
ax3.xaxis.tick_bottom()
# Draw the small diagonal "cut" marks at each axis break.
d = 0.01 # how big to make the diagonal lines in axes coordinates
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax2.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax3.transAxes) # switch to the bottom axes
# NOTE(review): the next two calls use ax3's transform but draw on ax2 --
# probably ax3.plot was intended; confirm against the rendered figure.
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
tick_spacing = 50
ax.yaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
tick_spacing_ax2 = 50
ax2.yaxis.set_major_locator(ticker.MultipleLocator(tick_spacing_ax2))
tick_spacing_x_axis = 400000
ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing_x_axis))
plt.tick_params(axis='both', which='major', labelsize=12)
# Shared y-axis caption plus in-plot series labels.
abc=f.text(0.03, 0.5, 'Tweet Processing Throughput',fontproperties=font_axis, ha='center', va='center', rotation='vertical')
ax.text(0.1, 0.3,'TwiCS', ha='center', va='center', transform=ax.transAxes,FontProperties=font_legend)
ax2.text(0.5, 0.64, 'TwitterNLP',ha='center', va='center', transform=ax2.transAxes,FontProperties=font_legend)
ax2.text(0.15, -0.1, 'OpenCalais',ha='center', va='center', transform=ax2.transAxes,FontProperties=font_legend)
ax3.text(0.8, 0.55, 'Stanford',ha='center', va='center', transform=ax3.transAxes,FontProperties=font_legend)
plt.xlabel('Tweet in Input Stream',fontproperties=font_axis2)
# plt.ylabel('Tweet Throughput',fontproperties=font_axis)#prop=20)
ax2.grid(True)
ax3.grid(True)
ax.grid(True)
# plt.ylim((0.1,1.0))
# plt.legend(loc="lower right",ncol=4,frameon=False,prop=font_legend)
# plt.legend(loc="upper left", bbox_to_anchor=[0, 1],
# ncol=2,frameon=False,prop=font)
f.savefig("f1_score_us_vs_others7.pdf",dpi=1200,bbox_inches='tight',bbox_extra_artists=[abc])
plt.show() | {
"alphanum_fraction": 0.7436848865,
"author": null,
"avg_line_length": 31.85,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f9161648cc6f58be894e69757cb6f75669dcc16d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-08-27T01:55:34.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-06-20T14:50:03.000Z",
"max_forks_repo_head_hexsha": "40672a99a201f6e2aab9dd085e1f4a29e8253f3b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dalakada/TwiCSv2",
"max_forks_repo_path": "efficiency_vs_state_of_the_art/plotter_broken_scale.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "40672a99a201f6e2aab9dd085e1f4a29e8253f3b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dalakada/TwiCSv2",
"max_issues_repo_path": "efficiency_vs_state_of_the_art/plotter_broken_scale.py",
"max_line_length": 226,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "40672a99a201f6e2aab9dd085e1f4a29e8253f3b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dalakada/TwiCSv2",
"max_stars_repo_path": "efficiency_vs_state_of_the_art/plotter_broken_scale.py",
"max_stars_repo_stars_event_max_datetime": "2021-06-22T18:02:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-01T00:54:39.000Z",
"num_tokens": 2476,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7007
} |
[STATEMENT]
lemma lr_of_tran_fbs_acceptD:
assumes s1: "valid_prefixes rt" "has_default_route rt"
assumes s2: "no_oif_match fw"
shows "generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, simple_action.Accept) \<Longrightarrow>
simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
proof(goal_cases)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept)
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
note 1[unfolded lr_of_tran_fbs_def Let_def, THEN generalized_fw_joinD]
[PROOF STATE]
proof (state)
this:
\<exists>r1 r2. generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>r1 r2. generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
[PROOF STEP]
guess r1
[PROOF STATE]
proof (prove)
using this:
\<exists>r1 r2. generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
goal (1 subgoal):
1. thesis
goal instantiation:
PROP ?guess \<leadsto> thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
\<exists>r2. generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>r2. generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
[PROOF STEP]
guess r2
[PROOF STATE]
proof (prove)
using this:
\<exists>r2. generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
goal (1 subgoal):
1. thesis
goal instantiation:
PROP ?guess \<leadsto> thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
note r12 = this
[PROOF STATE]
proof (state)
this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
note s1_correct[OF s1, of p]
[PROOF STATE]
proof (state)
this:
\<exists>rm ra. generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>rm ra. generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
[PROOF STEP]
guess rm
[PROOF STATE]
proof (prove)
using this:
\<exists>rm ra. generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
goal (1 subgoal):
1. thesis
goal instantiation:
PROP ?guess \<leadsto> thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
\<exists>ra. generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>ra. generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
[PROOF STEP]
guess ra
[PROOF STATE]
proof (prove)
using this:
\<exists>ra. generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
goal (1 subgoal):
1. thesis
goal instantiation:
PROP ?guess \<leadsto> thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
note rmra = this
[PROOF STATE]
proof (state)
this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
from r12 rmra
[PROOF STATE]
proof (chain)
picking this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
[PROOF STEP]
have oifra: "oif = ra"
[PROOF STATE]
proof (prove)
using this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
generalized_sfw (lr_of_tran_s1 rt) p = Some (rm, ra) \<and> ra = output_iface (routing_table_semantics rt (p_dst p))
goal (1 subgoal):
1. oif = ra
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
oif = ra
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
from r12
[PROOF STATE]
proof (chain)
picking this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
[PROOF STEP]
have sfw: "simple_fw fw p = Decision FinalAllow"
[PROOF STATE]
proof (prove)
using this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
goal (1 subgoal):
1. simple_fw fw p = Decision FinalAllow
[PROOF STEP]
using simple_fw_iff_generalized_fw_accept
[PROOF STATE]
proof (prove)
using this:
generalized_sfw (lr_of_tran_s1 rt) p = Some (r1, oif) \<and> generalized_sfw (map simple_rule_dtor fw) p = Some (r2, Accept) \<and> Some r = simple_match_and r1 r2
(simple_fw ?fw ?p = Decision FinalAllow) = (\<exists>r. generalized_sfw (map simple_rule_dtor ?fw) ?p = Some (r, Accept))
goal (1 subgoal):
1. simple_fw fw p = Decision FinalAllow
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
simple_fw fw p = Decision FinalAllow
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
note ifupdateirrel = no_oif_matchD[OF s2, where any = " output_iface (routing_table_semantics rt (p_dst p))" and p = p, symmetric]
[PROOF STATE]
proof (state)
this:
simple_fw fw (p\<lparr>p_oiface := output_iface (routing_table_semantics rt (p_dst p))\<rparr>) = simple_fw fw p
goal (1 subgoal):
1. generalized_sfw (lr_of_tran_fbs rt fw ifs) p = Some (r, oif, Accept) \<Longrightarrow> simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
unfolding simple_linux_router_nol12_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (let rd = routing_table_semantics rt (p_dst p); p = p\<lparr>p_oiface := output_iface rd\<rparr>; fd = simple_fw fw p in (case fd of Decision FinalAllow \<Rightarrow> Some () | Decision FinalDeny \<Rightarrow> None) \<bind> (\<lambda>_. Some p)) = Some (p\<lparr>p_oiface := oif\<rparr>)
[PROOF STEP]
by(simp add: Let_def ifupdateirrel sfw oifra rmra split: Option.bind_splits option.splits)
[PROOF STATE]
proof (state)
this:
simple_linux_router_nol12 rt fw p = Some (p\<lparr>p_oiface := oif\<rparr>)
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "LOFT_LinuxRouter_OpenFlow_Translation",
"hexsha": null,
"include": null,
"lang": null,
"length": 30,
"llama_tokens": 4359,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.best_first import BestFirst
import entities as ent
import numpy as np
# Module-wide best-first pathfinder; diagonal steps are disallowed.
finder = BestFirst(diagonal_movement=DiagonalMovement.never)
def sub(a, b):
    """Component-wise difference of two 2-D points, returned as a tuple."""
    return (a[0] - b[0], a[1] - b[1])
def set_path(self, dest, maxi=False):
    """Best-first path from self.cord to `dest` on the entity's path grid,
    then walk along it step by step.

    dest: (x, y) target in world coordinates.
    maxi: when True, queue hunt commands early and stop after a few steps.
    Returns True when a path was found, False when not, None on early exit.
    """
    # Skip destinations that already failed once.
    if dest in self.lasterror:
        return
    try:
        slicex, slicey = get_slice(self)
        dx, dy = dest
        sx, sy = tuple(self.cord)
        # Translate world coordinates into the local slice frame.
        sx -= slicex[0]
        sy -= slicey[0]
        dx -= slicex[0]
        dy -= slicey[0]
        # Renamed from `map` to avoid shadowing the builtin.
        path_matrix = self.data['path'].transpose()
        grid = Grid(matrix=path_matrix[slicey[0]:slicey[1], slicex[0]:slicex[1]])
        start = grid.node(sx, sy)
        end = grid.node(dx, dy)
    except Exception:
        # Was a bare `except:`; keep the best-effort behaviour but stop
        # swallowing SystemExit/KeyboardInterrupt.  Remember the failing
        # destination so it is not retried.
        # print("error seeking for path")
        # print("Cord: {} Dest: {}".format(self.cord, dest))
        if dest not in self.lasterror:
            self.lasterror.append(dest)
        return
    path, runs = finder.find_path(start, end, grid)
    # a= np.array(grid.grid_str(path=path, start=start, end=end))
    for num, p in enumerate(path):
        if num < len(path) - 1:
            # Step toward the next node on the path.
            self.walk(sub(path[num + 1], path[num]))
        if self.specie == 'bunny' and num == 5:
            self.queue.append(['cmd', 'self.lookrun()', 'look'])
        if maxi:
            if num == 2:
                self.queue.append(['cmd', 'self.hunt()', 'hunt'])
            if num >= 4:
                self.queue.append(['cmd', 'self.hunt()', 'hunt'])
                break
        if num == 15:
            break
    self.read_queue()
    if path:
        return True
    return False
def get_slice(self):
    """Return ([x0, x1], [y0, y1]) bounds of a 20-cell window centred on
    self.cord, clamped to the 128x128 map."""
    half = 10
    cx, cy = tuple(self.cord)
    xs = [max(cx - half, 0), min(cx + half, 127)]
    ys = [max(cy - half, 0), min(cy + half, 127)]
    return xs, ys
def random_path(self, size=3, border=False, typ=False):
    """Try up to four random destinations near self.cord and path to one.

    size: half-width of the random offset window.
    border: restrict candidates to offsets away from the centre and require
            self.search_for(typ, 0, dest) to succeed before pathing.
    typ: entity type forwarded to self.search_for when border is set.
    """
    #print("Searching for Path\n\n")
    for x in range(4):
        if border:
            ar = np.random.randint(size*2, size=(2))-size
            # Keep only offsets whose magnitude exceeds size/2.
            if np.abs(ar[0])>size/2 or np.abs(ar[1])>size/2:
                rnd=ar
            else:
                continue
        else:
            rnd = np.random.randint(size*2, size=(2))-size
        dest = (self.cord[0]+rnd[0],self.cord[1]+rnd[1])
        # Stay inside the 128x128 map.
        if dest[0]>=0 and dest[1]>=0 and dest[0]<=127 and dest[1]<=127:
            if value_tile(self, dest):
                if border:
                    if not self.search_for(typ, 0, dest):
                        continue
                # NOTE(review): self.path / self.rand_path below are called
                # with `self` passed explicitly; if these attributes are bound
                # methods this doubles `self` -- confirm they are plain
                # function attributes assigned on the instance.
                self.path(self, dest)
                return
    if border:
        self.rand_path(self, size)
def value_tile(self, xy, status="tile"):
    """True when `xy` lies inside the active slice and passes the check.

    status: "tile" requires only walkable terrain (map value 0, 3 or 4);
    "both" additionally requires the object layer to be empty (-1) at xy.
    """
    slicex, slicey = get_slice(self)
    x, y = xy
    #print(xy, slicex, slicey)
    inside = slicex[0] <= x < slicex[1] and slicey[0] <= y < slicey[1]
    if inside and status in ("tile", "both"):
        if self.data['map'][x][y] in [0, 3, 4]:
            if status == "tile":
                #print("Tile Found: ", self.data['map'][x][y])
                return True
            if self.data['obj'][x][y] == -1:
                return True
    return False
| {
"alphanum_fraction": 0.6061210912,
"author": null,
"avg_line_length": 27.0810810811,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ad4cc29754b86e7ec87e709d1fbe7115efdec4d8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "46b88c014efb635ef7ef2388fe7be4610c538c94",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Leonardo8133/Simulacao-de-Ecossistema",
"max_forks_repo_path": "path.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "46b88c014efb635ef7ef2388fe7be4610c538c94",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Leonardo8133/Simulacao-de-Ecossistema",
"max_issues_repo_path": "path.py",
"max_line_length": 75,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "46b88c014efb635ef7ef2388fe7be4610c538c94",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Leonardo8133/Simulacao-de-Ecossistema",
"max_stars_repo_path": "path.py",
"max_stars_repo_stars_event_max_datetime": "2020-06-21T03:17:22.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-06-19T17:55:11.000Z",
"num_tokens": 967,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3006
} |
!     Distribute the latr latitude rows over the (I/O or compute) nodes
!     so that every node receives a roughly equal number of grid points,
!     then flatten the per-node latitude lists into global_lats_r.
      subroutine setlats_r(lats_nodes_r,global_lats_r,iprint,lonsperlar)
!
      use mod_param, only: nodes,latr,lonr,icolor,liope
      implicit none
!
!     lats_nodes_r(node) : latitude count assigned to each node (output)
!     global_lats_r      : latitude indices listed node by node (output)
!     iprint             : when 1, print the resulting distribution
!     lonsperlar(lat)    : longitudes per latitude row (forced to lonr below)
      integer lats_nodes_r(nodes)
      integer global_lats_r(latr)
      integer iprint,opt,ifin,nodesio
      integer lonsperlar(latr)
      integer jcount,jpt,lat,lats_sum,node,i
      integer ilatpe,ngrptg,ngrptl,ipe,irest,idp
!
!     lats_hold(j,node) = j-th latitude handed to node.
      integer,allocatable :: lats_hold(:,:)
      allocate ( lats_hold(latr,nodes) )
!
!AA opt = 1
      opt = 2
      if (opt == 2) lonsperlar = lonr ! full grid
      lats_nodes_r = 0
!     With a separate I/O process group (liope and icolor == 2) lay out
!     for a single node; otherwise distribute across all nodes.
      if (liope .and. icolor == 2) then
        nodesio = 1
      else
        nodesio = nodes
      endif
!
!     ngrptg = total number of grid points over all latitude rows.
      ngrptg = 0
      do lat=1,latr
        do i=1,lonsperlar(lat)
          ngrptg = ngrptg + 1
        enddo
      enddo
!
! distribution of the grid
!
!     NOTE(review): ilatpe is computed here but never used afterwards.
      ilatpe = ngrptg/nodesio
      ngrptl = 0
      ipe = 0
      irest = 0
      idp = 1
!     Walk the latitudes keeping a running point count (ngrptl); advance
!     to the next node (ipe) once the current node holds its proportional
!     share, carrying the rounding remainder forward in irest.
      do lat=1,latr
        ifin = lonsperlar(lat)
        ngrptl = ngrptl + ifin
        if (ngrptl*nodesio <= ngrptg+irest) then
          lats_nodes_r(ipe+1) = lats_nodes_r(ipe+1) + 1
          lats_hold(idp,ipe+1) = lat
          idp = idp + 1
        else
          ipe = ipe + 1
          if (ipe <= nodesio) lats_hold(1,ipe+1) = lat
          idp = 2
          irest = irest + ngrptg - (ngrptl-ifin)*nodesio
          ngrptl = ifin
          lats_nodes_r(ipe+1) = lats_nodes_r(ipe+1) + 1
        endif
      enddo
!!
!     Flatten lats_hold into global_lats_r, node by node.
      jpt = 0
      do node=1,nodesio
        if ( lats_nodes_r(node) > 0 ) then
          do jcount=1,lats_nodes_r(node)
            global_lats_r(jpt+jcount) = lats_hold(jcount,node)
          enddo
        endif
        jpt = jpt+lats_nodes_r(node)
      enddo
!
      deallocate (lats_hold)
      if ( iprint .ne. 1 ) return
!
!     Optional report: one line per assigned latitude with running totals.
      jpt=0
      do node=1,nodesio
        if ( lats_nodes_r(node) .gt. 0 ) then
          print 600
          lats_sum=0
          do jcount=1,lats_nodes_r(node)
            lats_sum=lats_sum + lonsperlar(global_lats_r(jpt+jcount))
            print 700, node-1,
     x                 node, lats_nodes_r(node),
     x                 jpt+jcount, global_lats_r(jpt+jcount),
     x                 lonsperlar(global_lats_r(jpt+jcount)),
     x                 lats_sum
          enddo
        endif
        jpt=jpt+lats_nodes_r(node)
      enddo
!
      print 600
!
  600 format ( ' ' )
!
  700 format ( 'setlats_r me=', i4,
     x         ' lats_nodes_r(', i4, ' )=', i4,
     x         ' global_lats_r(', i4, ' )=', i4,
     x         ' lonsperlar=', i5,
     x         ' lats_sum=', i6 )
!
      return
      end
| {
"alphanum_fraction": 0.4994551398,
"author": null,
"avg_line_length": 27.8080808081,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "ff188de953b99fc4333bdcebf1ae4a31f74d5358",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 10,
"max_forks_repo_forks_event_max_datetime": "2022-03-11T16:26:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-07-05T18:00:44.000Z",
"max_forks_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23",
"max_forks_repo_licenses": [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
],
"max_forks_repo_name": "GEOS-ESM/GMAO_Shared",
"max_forks_repo_path": "GMAO_stoch/setlats.f",
"max_issues_count": 105,
"max_issues_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23",
"max_issues_repo_issues_event_max_datetime": "2022-03-22T02:12:16.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-07-08T19:27:23.000Z",
"max_issues_repo_licenses": [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
],
"max_issues_repo_name": "GEOS-ESM/GMAO_Shared",
"max_issues_repo_path": "GMAO_stoch/setlats.f",
"max_line_length": 72,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23",
"max_stars_repo_licenses": [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
],
"max_stars_repo_name": "GEOS-ESM/GMAO_Shared",
"max_stars_repo_path": "GMAO_stoch/setlats.f",
"max_stars_repo_stars_event_max_datetime": "2020-02-01T17:36:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-01T17:36:53.000Z",
"num_tokens": 916,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2753
} |
#-*-coding:utf-8-*-
import csv
import re
import pandas as pd
import gensim
import nltk
from nltk.corpus import stopwords
import numpy as np
import math
import ssl
import random
import collections
def cos_sim(a, b):
    """Cosine similarity between vectors a and b."""
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / denom
# (garbled non-ASCII comment in the original)
# Ground-truth paper-ID pairs used for the similarity comparison below.
pairlist = [4075, 4989, 12307, 42697, 66267, 66959, 96959, 129480, 129483, 130032, 165935, 178771, 180055, 191505, 200612, 206402,239286,255361,283651,283653,288181,297935,314296,317229]
ID_pairs = []
random_ID = 0
#for i in range(len(pairlist)):
# for j in range(i+1,len(pairlist)):
# random_ID = math.floor(random.uniform(0,320000))
# while random_ID==pairlist[i] or random_ID == pairlist[j]:
# random_ID = math.floor(random.uniform(0, 320000))
# ID_pairs.append([pairlist[i],pairlist[j],random_ID])
# Build (paper_a, paper_b, random_other) triples from consecutive pairs.
k=0
for i in range(math.floor(len(pairlist)/2)):
    k = 2*i
    j = 2*i+1
    random_ID = math.floor(random.uniform(0, 320000))
    # NOTE(review): the collision check tests pairlist[i], but the triple
    # stores pairlist[k] (= pairlist[2*i]); pairlist[k] was probably
    # intended in the condition -- confirm.
    while random_ID == pairlist[i] or random_ID == pairlist[j]:
        random_ID = math.floor(random.uniform(0, 320000))
    ID_pairs.append([pairlist[k],pairlist[j],random_ID])
nltk.download('stopwords')
x_train = []
# Load paper records; columns 1 and 2 of each row are duplicated onto the
# end of the row before tokenising, and the header row is dropped.
reader = csv.reader(open("some_papers.csv", 'rt', encoding='utf-8'))
result = list(reader)
for i in range(len(result)):
    result[i].append(result[i][1])
    result[i].append(result[i][2])
result.pop(0)
stoplist = set(stopwords.words('english'))
# NOTE(review): re.sub receives one long literal string as the pattern, so
# it only removes that exact character sequence; a character class such as
# r"[-_0-9`~!@#%&}:>\]]" was probably intended -- confirm.
result = [[word for word in (re.sub(' - _ 1 2 3 4 5 6 7 6 8 9 0 ` ~ ! @ # % & } : > ]','',"".join(result[i]).lower()).split()) if word not in stoplist] for i in range(len(result))]
for i,text in enumerate(result):
    l = len(text)
    text[l-1] = text[l-1].strip()
    document = gensim.models.doc2vec.TaggedDocument(text,tags=[i])
    x_train.append(document)
# Query a single document against the pre-trained Doc2Vec model and dump
# its similarity ranking to CSV.
doc_id = 21000
model = gensim.models.doc2vec.Doc2Vec.load("model1.txt")
test = model.infer_vector(x_train[doc_id].words)
sims = model.docvecs.most_similar([test], topn=len(model.docvecs))
sims_csv = pd.DataFrame(data = sims)
sims_csv.to_csv('ranks.csv', encoding='utf-8')
print('Document ({}): ?{}?\n'.format(doc_id, ' '.join(x_train[doc_id].words)))
#print('Document ({}): ?{"machine", "learning","artificial","intelligence"}?\n')
#print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('SECOND', 1), ('THIRD', 2), ('Fourth', 3), ('LEAST', len(sims) - 1)]:
    print(u'%s %s: ?%s?\n' % (label, sims[index], ' '.join(x_train[sims[index][0]].words)))
#for label, index in [('MOST', 0), ('SECOND-MOST', 1), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
# print(u'%s %s: ?%s?\n' % (label, sims[index], ' '.join(x_train[sims[index][0]].words)))
# Self-rank sanity check for a single document.
ranks = []
second_ranks = []
for doc_id in range(100, 101):
    print(doc_id)
    inferred_vector = model.infer_vector(x_train[doc_id].words)
    sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
    rank = [docid for docid, sim in sims].index(doc_id)
    ranks.append(rank)
# For each (a, b, random) triple, count how often the true pair (a, b) is
# more similar than the (b, random) pairing, then print the fraction.
count0 = 0
count1 = 0
count = 0
for x,y,z in ID_pairs:
    count0 = cos_sim(model.docvecs[x-1],model.docvecs[y-1])
    count1 = cos_sim(model.docvecs[y-1],model.docvecs[z-1])
    if abs(count0)>abs(count1):
        count+=1;
    # print('pair %s %s : %s\n' % (x-1,y-1,count0))
    # print('pair %s %s : %s\n' % (y-1,z-1,count1))
print(count/len(ID_pairs))
#counter = collections.Counter(ranks)
#print(counter)
# second_ranks.append(sims[1])
#rank_csv = pd.DataFrame(data = ranks)
#sims_csv = pd.DataFrame(data = second_ranks)
#rank_csv.to_csv('C:/Users/alienware/Desktop/Project_Crimson/ranks.csv',index= False, encoding='utf-8')
#sims_csv.to_csv('C:/Users/alienware/Desktop/Project_Crimson/sims.csv',index= False, encoding='utf-8')
#print('Document ({}): ?{}?\n'.format(doc_id, ' '.join(x_train[doc_id].words)))
#print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
#for label, index in [('MOST', 0), ('SECOND-MOST', 1), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
# print(u'%s %s: ?%s?\n' % (label, sims[index], ' '.join(x_train[sims[index][0]].words)))
#list_of_vec = []
#for i in range(1000):
# list_of_vec.append(model.docvecs[i])
#test = pd.DataFrame(data = list_of_vec)
#test.to_csv('C:/Users/alienware/Desktop/model_vec1.csv',index= False, encoding='utf-8')
#print(vector) | {
"alphanum_fraction": 0.6574458276,
"author": null,
"avg_line_length": 39.0810810811,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b024903aee4a5eca9391f0262320c61a71af505f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2bb0a221bea05af0ddc4ebd87e5ec86a8d14d12f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "XURIGHT/Advisor-Advisee_SAE",
"max_forks_repo_path": "preprocessing.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "2bb0a221bea05af0ddc4ebd87e5ec86a8d14d12f",
"max_issues_repo_issues_event_max_datetime": "2021-09-15T12:15:32.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-09-15T12:15:32.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "XURIGHT/Advisor-Advisee_SAE",
"max_issues_repo_path": "preprocessing.py",
"max_line_length": 186,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2bb0a221bea05af0ddc4ebd87e5ec86a8d14d12f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "XURIGHT/Advisor-Advisee_SAE",
"max_stars_repo_path": "preprocessing.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1413,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4338
} |
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from sagemaker.model_monitor import CronExpressionGenerator
def test_cron_expression_generator_hourly_returns_expected_value():
    """hourly() always yields the fixed top-of-every-hour cron expression."""
    expected = "cron(0 * ? * * *)"
    actual = CronExpressionGenerator.hourly()
    assert actual == expected
def test_cron_expression_generator_daily_returns_expected_value_when_called_with_no_parameters():
    """Without arguments, daily() defaults to midnight."""
    actual = CronExpressionGenerator.daily()
    assert actual == "cron(0 0 ? * * *)"
def test_cron_expression_generator_daily_returns_expected_value_when_called_with_parameters():
    """The hour keyword is reflected in the hours field of the expression."""
    actual = CronExpressionGenerator.daily(hour=5)
    assert actual == "cron(0 5 ? * * *)"
def test_cron_expression_generator_daily_every_x_hours_returns_expected_value_when_called_without_customizations():
    """With only an interval, the schedule starts at hour 0."""
    expected = "cron(0 0/6 ? * * *)"
    assert CronExpressionGenerator.daily_every_x_hours(hour_interval=6) == expected
def test_cron_expression_generator_daily_every_x_hours_returns_expected_value_when_called_with_customizations():
    """Custom interval and starting hour are both reflected in the expression."""
    actual = CronExpressionGenerator.daily_every_x_hours(hour_interval=7, starting_hour=8)
    assert actual == "cron(0 8/7 ? * * *)"
| {
"alphanum_fraction": 0.7832298137,
"author": null,
"avg_line_length": 41.2820512821,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e08db3edcb063730ce434f7d7e1251375bc4fbcc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-12-04T01:16:12.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-12-04T01:16:12.000Z",
"max_forks_repo_head_hexsha": "aa54102b5113b1d39bbbd4d9d341775f84641681",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "eitansela/sagemaker-python-sdk",
"max_forks_repo_path": "tests/unit/sagemaker/monitor/test_cron_expression_generator.py",
"max_issues_count": 24,
"max_issues_repo_head_hexsha": "aa54102b5113b1d39bbbd4d9d341775f84641681",
"max_issues_repo_issues_event_max_datetime": "2021-05-28T13:36:51.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-05-18T07:10:27.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "eitansela/sagemaker-python-sdk",
"max_issues_repo_path": "tests/unit/sagemaker/monitor/test_cron_expression_generator.py",
"max_line_length": 115,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "aa54102b5113b1d39bbbd4d9d341775f84641681",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "eitansela/sagemaker-python-sdk",
"max_stars_repo_path": "tests/unit/sagemaker/monitor/test_cron_expression_generator.py",
"max_stars_repo_stars_event_max_datetime": "2021-07-22T00:23:51.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-22T00:23:51.000Z",
"num_tokens": 363,
"path": null,
"reason": "from sage",
"repo": null,
"save_path": null,
"sha": null,
"size": 1610
} |
#!/usr/bin/env python
import argparse
import numpy as np
import scipy.io as sio
import os
import sys
sys.path.insert(1, '.')
import h5py
from vdetlib.vdet.dataset import imagenet_vdet_classes
from vdetlib.utils.common import quick_args
from vdetlib.utils.protocol import proto_load, proto_dump, bbox_hash
import gzip
import json
if __name__ == '__main__':
    # Build a detection protocol for one video from per-frame .mat score files.
    # CLI: vid_file (video protocol), score_root (dir of per-frame scores),
    # save_det (output detection protocol path).
    args = quick_args(['vid_file', 'score_root', 'save_det'])
    vid_proto = proto_load(args.vid_file)
    vid_name = vid_proto['video']
    # score_root's last path component must match the video name
    assert vid_name == os.path.basename(os.path.normpath(args.score_root))
    print "Processing {}.".format(vid_name)
    # Idempotence: skip videos that were already processed.
    if os.path.isfile(args.save_det):
        print "{} already exists.".format(args.save_det)
        sys.exit(0)
    det_proto = {}
    det_proto['video'] = vid_name
    det_proto['detections'] = []
    for frame in vid_proto['frames']:
        frame_id = frame['frame']
        basename = os.path.splitext(frame['path'])[0]
        score_file = os.path.join(args.score_root, basename + '.mat')
        # Fall back to the full frame path if <basename>.mat is missing.
        if not os.path.isfile(score_file):
            score_file = os.path.join(args.score_root, frame['path'] + '.mat')
        if os.path.isfile(score_file):
            try:
                # MATLAB v5 files load via scipy.
                d = sio.loadmat(score_file)
                boxes = d['boxes']
                zs = d['zs']
            except NotImplementedError:
                # MATLAB v7.3 files are HDF5; matrices come back transposed.
                with h5py.File(score_file) as d:
                    # skip empty boxes
                    boxes = d['boxes'].value.T.astype('float32')
                    zs = d['zs'].value.T.astype('float32')
            if boxes.ndim == 1:
                continue
            assert boxes.shape[0] == zs.shape[0]
            # One detection entry per proposal box, carrying per-class scores.
            for box, scores in zip(boxes, zs):
                det = {}
                bbox = box.tolist()
                det['frame'] = frame_id
                det['bbox'] = bbox
                det['hash'] = bbox_hash(vid_name, frame_id, bbox)
                scores_proto = []
                # Class 0 is background; class ids start at 1.
                for class_id, (cls_name, score) in \
                        enumerate(zip(imagenet_vdet_classes[1:], scores), start=1):
                    scores_proto.append({
                        "class": cls_name,
                        "class_index": class_id,
                        "score": float(score)
                    })
                det['scores'] = scores_proto
                det_proto['detections'].append(det)
    # Ensure the output directory exists before dumping the protocol.
    save_dir = os.path.dirname(args.save_det)
    if not os.path.isdir(save_dir):
        try:
            os.makedirs(save_dir)
        except:
            raise
    proto_dump(det_proto, args.save_det)
| {
"alphanum_fraction": 0.5527131783,
"author": null,
"avg_line_length": 35.3424657534,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7005161c0d4369825025e95103e96858f64358eb",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 176,
"max_forks_repo_forks_event_max_datetime": "2022-02-18T00:29:48.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-04-13T05:45:54.000Z",
"max_forks_repo_head_hexsha": "84789e411114fd4669e5a44d9ceeb0dde903efab",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bhaktipriya/T-CNN",
"max_forks_repo_path": "tools/detection/gen_det_proto.py",
"max_issues_count": 17,
"max_issues_repo_head_hexsha": "84789e411114fd4669e5a44d9ceeb0dde903efab",
"max_issues_repo_issues_event_max_datetime": "2019-10-22T20:01:05.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-04-20T02:27:44.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bhaktipriya/T-CNN",
"max_issues_repo_path": "tools/detection/gen_det_proto.py",
"max_line_length": 79,
"max_stars_count": 419,
"max_stars_repo_head_hexsha": "84789e411114fd4669e5a44d9ceeb0dde903efab",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "myfavouritekk/T-CNN",
"max_stars_repo_path": "tools/detection/gen_det_proto.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-10T02:52:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-04-12T12:44:25.000Z",
"num_tokens": 576,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2580
} |
import os
import re
from conllu import *
import pandas as pd
import numpy as np
from collections import Counter
def get_lexique_maju_mini(lexique):
    """Standardize capitalization of lexicon entries.

    Entries tagged PROPN get their surface form capitalized; every other
    entry is lowercased. Each entry ([form, POS, ...] list) is mutated in
    place; the same entry objects are returned in a new list.
    """
    normalised = []
    for entry in lexique:
        entry[0] = entry[0].capitalize() if entry[1] == 'PROPN' else entry[0].lower()
        normalised.append(entry)
    return normalised
def get_lexique_ambigu(lexique_form, lexique_form_pos):
    """Return the surface forms that appear more than once in the lexicon.

    A form listed several times is (in this lexicon) a form with multiple
    POS tags, i.e. an ambiguous word.

    Args:
        lexique_form: list of lowercased surface forms, one per lexicon entry.
        lexique_form_pos: list of [form, POS] pairs. Unused -- kept for
            backward compatibility: the original built a [form, POS] list
            from it in an O(n*m) loop and then discarded it, returning only
            the form list. That dead work has been removed.

    Returns:
        List of forms whose count in `lexique_form` is > 1, in first-seen order.
    """
    counts = Counter(lexique_form)
    return [form for form, n in counts.items() if n > 1]
###### Read lexique file and get word list with standardized capitalization ######
with open('Final_Complete_Gold_Lexicon_BC_for_Yuchen_26.02.2020.txt', 'r') as f: # absolute path of the morphosyntactic lexicon
    lexique = f.readlines()
# One entry per line: form<TAB>POS<TAB>morphological traits ('|'-separated).
lexique = [x.strip('\n').split('\t') for x in lexique]
lexique = get_lexique_maju_mini(lexique)
###### Read lexique file and get word list with standardized capitalization ######
###### Get 3 different morph info combination lists ######
lexique_form = [x[0].lower() for x in lexique]  # surface forms only
lexique_form_pos = [[x[0].lower(), x[1]] for x in lexique]  # [form, POS]
lexique_form_pos_trait = [[x[0].lower(), x[1], sorted(x[2].split('|'))] for x in lexique]  # [form, POS, sorted traits]
###### Get 3 different morph info combination lists ######
###### List of words with multiple POS ######
lexique_ambigu = get_lexique_ambigu(lexique_form, lexique_form_pos)
###### List of words with multiple POS ######
###### Read conll file and get list of sentences with conll format ######
with open('1408.conllu', 'r', encoding='UTF-8') as f: # absolute path of the CONLL-U file to check
    conll = f.read()
conll = parse(conll)
###### Read conll file and get list of sentences with conll format ######
###### Files to write ######
f = open('Correction_80_conll_ref_add.conllu', 'w', encoding='UTF-8') # corrected CONLL-U output
f_erreur_form = open('erreur_forme.txt', 'w') # log of surface-form errors
f_erreur_pos_unique = open('erreur_pos_unique.txt', 'w') # log of POS errors corrected automatically
f_erreur_pos_ambigu = open('erreur_pos_ambigu.txt', 'w') # log of POS errors to be corrected manually
f_erreur_trait = open('erreur_trait.txt', 'w') # log of morphological-trait errors
###### Files to write ######
###### Start error checking and error logging ######
# For every token line of every sentence, check form / POS / traits against
# the lexicon; write a corrected line to `f` and log each kind of mismatch.
# NOTE(review): indentation reconstructed from a whitespace-mangled source --
# verify the placement of the `'\t'.join(line)` re-assemblies against the
# original file.
for sentence in conll:
    sentence = sentence.serialize().strip('\n').split('\n')
    for one_line in sentence:
        # Metadata lines ('# ...') are copied through unchanged.
        if one_line[0]=='#':
            f.write(one_line+'\n')
        else:
            line = one_line.split('\t') # ['23', '>+', '_', 'PUNCT', '_', '_', '26', 'punct', '_', 'startali=283829|endali=283859']
            conll_form = line[1].lower()
            conll_form_pos = [line[1].lower(), line[3]]
            conll_form_pos_trait = [line[1].lower(), line[3], sorted(line[5].split('|'))]
            if conll_form in lexique_form:
                if conll_form_pos in lexique_form_pos:
                    if conll_form_pos_trait in lexique_form_pos_trait:
                        # Fully consistent with the lexicon: copy through.
                        f.write(one_line+'\n')
                    else:
                        # Form+POS known but traits differ: log and overwrite
                        # the traits with the lexicon's value for this form+POS.
                        f_erreur_trait.write(conll_form_pos_trait[0]+'\t'+conll_form_pos_trait[1]+'\t'+'|'.join(conll_form_pos_trait[2])+'\n')
                        for x in lexique_form_pos_trait:
                            if conll_form_pos==[x[0],x[1]]:
                                line[5] = '|'.join(x[2])
                        one_line = '\t'.join(line)
                        f.write(one_line+'\n')
                else:
                    if conll_form not in lexique_ambigu:
                        # Form known with exactly one POS: auto-correct POS and
                        # traits from the lexicon, logging the full sentence.
                        f_erreur_pos_unique.write('# erreur_pos_unique = '+one_line+'\n'+'\n'.join(sentence)+'\n\n')
                        for x in lexique_form_pos_trait:
                            if conll_form==x[0]:
                                line[3] = x[1]
                                line[5] = '|'.join(x[2])
                        one_line = '\t'.join(line)
                        f.write(one_line+'\n')
                    else:
                        # Ambiguous form (several possible POS): keep the line
                        # as-is and log it for manual correction.
                        f.write(one_line+'\n')
                        f_erreur_pos_ambigu.write('# erreur_pos_ambigu = '+one_line+'\n'+'\n'.join(sentence)+'\n\n')
            else:
                # Form absent from the lexicon: log it and copy through.
                f_erreur_form.write(conll_form+'\n')
                f.write(one_line+'\n')
    # Blank line terminates each sentence in CONLL-U.
    f.write('\n')
###### Start error checking and error logging ######
# Close all output files so buffered writes are flushed to disk.
f.close()
f_erreur_form.close()  # BUG FIX: was `f_erreur_form.close` (attribute access, never called)
f_erreur_pos_unique.close()
f_erreur_pos_ambigu.close()
f_erreur_trait.close()
| {
"alphanum_fraction": 0.5957406351,
"author": null,
"avg_line_length": 49.1495327103,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "02e7c471637c18cbfaaa8a83c7a985fbeca9f412",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4b80297fb19e303f9996469237a0af770dd59798",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "songyuchencyan/Lexiques_NSC",
"max_forks_repo_path": "Scripts/Fouille_des_erreurs_forme_pos_traits.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4b80297fb19e303f9996469237a0af770dd59798",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "songyuchencyan/Lexiques_NSC",
"max_issues_repo_path": "Scripts/Fouille_des_erreurs_forme_pos_traits.py",
"max_line_length": 175,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "4b80297fb19e303f9996469237a0af770dd59798",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "songyuchencyan/Lexiques_NSC",
"max_stars_repo_path": "Scripts/Fouille_des_erreurs_forme_pos_traits.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1321,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5259
} |
import random
import torch
import logging
import copy
import os
import numpy as np
from functools import partial
from transformers import (
MODEL_MAPPING,
AutoConfig,
AutoTokenizer,
AutoModel,
)
from densephrases import Encoder
logger = logging.getLogger(__name__)
def set_seed(args):
    """Seed the python, numpy and torch RNGs (incl. all CUDA devices) from args.seed."""
    seed = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def to_list(tensor):
    """Detach *tensor*, move it to host memory, and return nested Python lists."""
    detached = tensor.detach()
    return detached.cpu().tolist()
def to_numpy(tensor):
    """Detach *tensor*, move it to host memory, and return it as a numpy array."""
    host = tensor.detach().cpu()
    return host.numpy()
def backward_compat(model_dict):
    """Convert a checkpoint state dict saved under old layer names to the current scheme.

    Teacher / distillation parameters (cross_encoder, bert_qd, qa_outputs) are
    dropped entirely; old encoder prefixes are renamed to the current ones;
    everything else is kept unchanged.
    """
    # Remove teacher / distillation parameters that are no longer in the model.
    dropped_prefixes = ('cross_encoder', 'bert_qd', 'qa_outputs')
    # Old-name -> current-name prefix map (prefixes are mutually exclusive).
    renames = {
        'bert_start': 'phrase_encoder',
        'bert_q_start': 'query_start_encoder',
        'bert_q_end': 'query_end_encoder',
    }
    converted = {}
    for key, value in model_dict.items():
        if key.startswith(dropped_prefixes):
            continue
        for old_prefix, new_prefix in renames.items():
            if key.startswith(old_prefix):
                converted[key.replace(old_prefix, new_prefix)] = value
                break
        else:
            converted[key] = value
    return converted
def load_encoder(device, args, phrase_only=False):
    """Build or load a DensePhrases Encoder together with its tokenizer and config.

    Args:
        device: torch device the final model is moved to.
        args: namespace with model_type, config_name, tokenizer_name,
            pretrained_name_or_path, do_lower_case, cache_dir, load_dir and
            optionally lambda_kl / lambda_neg / lambda_flt.
        phrase_only: if True, the query encoders are deleted from the model
            so only the phrase encoder remains (for phrase embedding).

    Returns:
        (model, tokenizer, config) tuple.
    """
    # Configure paths for DensePhrases
    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.pretrained_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.pretrained_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    # Prepare PLM if not load_dir: initialize a fresh Encoder around a
    # pretrained backbone; otherwise load Encoder weights from load_dir.
    pretrained = None
    if not args.load_dir:
        pretrained = AutoModel.from_pretrained(
            args.pretrained_name_or_path,
            config=config,
            cache_dir=args.cache_dir if args.cache_dir else None,
        )
        load_class = Encoder
        logger.info(f'DensePhrases encoder initialized with {args.pretrained_name_or_path} ({pretrained.__class__})')
    else:
        # TODO: need to update transformers so that from_pretrained maps to model hub directly
        if args.load_dir.startswith('princeton-nlp'):
            # Model-hub repo id: resolve to the raw checkpoint URL.
            hf_model_path = f"https://huggingface.co/{args.load_dir}/resolve/main/pytorch_model.bin"
        else:
            hf_model_path = args.load_dir
        # Bind the checkpoint path so load_class below has the same call shape
        # as the fresh-initialization branch.
        load_class = partial(
            Encoder.from_pretrained,
            pretrained_model_name_or_path=hf_model_path,
            cache_dir=args.cache_dir if args.cache_dir else None,
        )
        logger.info(f'DensePhrases encoder loaded from {args.load_dir}')
    # DensePhrases encoder object. MODEL_MAPPING is keyed by config class;
    # lambda_* loss weights default to 0.0 when absent from args.
    model = load_class(
        config=config,
        tokenizer=tokenizer,
        transformer_cls=MODEL_MAPPING[config.__class__],
        pretrained=copy.deepcopy(pretrained) if pretrained is not None else None,
        lambda_kl=getattr(args, 'lambda_kl', 0.0),
        lambda_neg=getattr(args, 'lambda_neg', 0.0),
        lambda_flt=getattr(args, 'lambda_flt', 0.0),
    )
    # Phrase only (for phrase embedding): drop the query-side encoders.
    # The "module" check handles models wrapped by DataParallel-style wrappers.
    if phrase_only:
        if hasattr(model, "module"):
            del model.module.query_start_encoder
            del model.module.query_end_encoder
        else:
            del model.query_start_encoder
            del model.query_end_encoder
        logger.info("Load only phrase encoders for embedding phrases")
    model.to(device)
    logger.info('Number of model parameters: {:,}'.format(sum(p.numel() for p in model.parameters())))
    return model, tokenizer, config
| {
"alphanum_fraction": 0.6771555996,
"author": null,
"avg_line_length": 33.9159663866,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bb6a396d6cc113afaef3cf331c9be7b15e9c9b18",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 40,
"max_forks_repo_forks_event_max_datetime": "2022-03-23T10:56:54.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-04-12T09:53:00.000Z",
"max_forks_repo_head_hexsha": "5a34f4f0b32f27a85771b6d7c39ed2e71ece6784",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nowkim/GeNER",
"max_forks_repo_path": "densephrases/utils/single_utils.py",
"max_issues_count": 17,
"max_issues_repo_head_hexsha": "5a34f4f0b32f27a85771b6d7c39ed2e71ece6784",
"max_issues_repo_issues_event_max_datetime": "2021-12-22T06:10:15.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-04T19:14:53.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nowkim/GeNER",
"max_issues_repo_path": "densephrases/utils/single_utils.py",
"max_line_length": 117,
"max_stars_count": 331,
"max_stars_repo_head_hexsha": "5a34f4f0b32f27a85771b6d7c39ed2e71ece6784",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nowkim/GeNER",
"max_stars_repo_path": "densephrases/utils/single_utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-28T04:18:52.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-31T11:14:55.000Z",
"num_tokens": 891,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4036
} |
import os, sys
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__))))
import argparse
import numpy as np
import json
import datetime
from collections import defaultdict
from data_utils import *
import torch
import torch.nn as nn
import torch.nn.functional as F
def compute_euclidean(query_vecs, mus, clip_vec=False):
    """Squared Euclidean distance between queries and centers along the last dim.

    NOTE(review): `clip_vec` is accepted for signature parity with
    compute_mahalanobis but is ignored here.
    """
    diff = query_vecs - mus
    return torch.sum(diff * diff, dim=-1)
def compute_mahalanobis(query_vecs, mus, sigmas, activation_fn=None, clip_vec=False):
    """Diagonal Mahalanobis-style squared distance: sum(((q - mu) * sigma)^2, last dim).

    If `activation_fn` is given, sigmas are passed through it plus a tiny
    epsilon for numerical safety. If `clip_vec`, queries are clamped to
    [-100, 100] first.
    """
    if activation_fn is not None:
        sigmas = activation_fn(sigmas) + 1.0e-6
    if clip_vec:
        query_vecs = query_vecs.clamp(-100.0, 100.0)
    scaled_diff = (query_vecs - mus) * sigmas
    return (scaled_diff ** 2).sum(dim=-1)
def order_embedding_distance(query_vec, source_vecs, device, activation_fn=None, projected=False):
    """Order-embedding penalty: sum of squared positive components of (query - source).

    The penalty is zero exactly when query <= source elementwise (the query
    "contains" the source). When `activation_fn` is given it is applied to
    the query, and to the sources as well unless they are already projected.
    """
    if activation_fn is not None:
        query_vec = activation_fn(query_vec)
        if not projected:
            source_vecs = activation_fn(source_vecs)
    overshoot = torch.max(query_vec - source_vecs, torch.zeros(query_vec.shape, device=device))
    return (overshoot ** 2).sum(dim=-1)
def order_embedding_projection(query_vec, source_vecs, device, activation_fn=None):
    """Project the query into the order-embedding cone below the sources.

    Returns query - max(query - source, 0), i.e. the elementwise minimum of
    the (optionally activated) query and sources.
    """
    if activation_fn is not None:
        query_vec = activation_fn(query_vec)
        source_vecs = activation_fn(source_vecs)
    overshoot = torch.max(query_vec - source_vecs, torch.zeros(query_vec.shape, device=device))
    return query_vec - overshoot
def margin_selection(fitting_sorted_idx, embedding_distance, K, num_negs=5):
    """Select positive/negative embedding distances for a margin loss.

    Assumed shapes (reconstructed from usage -- TODO confirm):
        fitting_sorted_idx: (num_candidates, batch) candidate indices sorted
            by fitting loss, best first.
        embedding_distance: (num_candidates, batch) distances in embedding space.
        K: unused here -- presumably kept for interface parity with callers.
        num_negs: number of negatives sampled per query.
    """
    # select the positive to be the closest by fitting loss
    positive_idx = fitting_sorted_idx[0, :]
    # random selection of negatives that are "far away" (any rank except the best)
    perm = torch.randperm(fitting_sorted_idx.size(0) - 1) + 1
    negative_idx = fitting_sorted_idx[perm[:num_negs], :]
    # gather corresponding distances
    positive_distances = torch.gather(embedding_distance, 0, positive_idx.unsqueeze(0))
    # NOTE(review): after gather, positive_distances is (1, batch); the
    # unsqueeze+repeat below yields (num_negs, 1, batch) while
    # negative_distances is (num_negs, batch) -- confirm downstream
    # broadcasting in margin_loss_multi is intended.
    positive_distances = positive_distances.unsqueeze(0).repeat(num_negs, 1, 1)
    negative_distances = torch.gather(embedding_distance, 0, negative_idx)
    return positive_distances, negative_distances
def margin_loss_multi(positive_distances, negative_distances, margin, device):
    """Hinge loss against several negatives, using the closest positive.

    Takes the elementwise minimum over the positive set, tiles it to the
    number of negatives, and returns max(pos - neg + margin, 0) per negative.
    """
    n_neg = negative_distances.shape[0]
    closest_pos, _ = positive_distances.min(dim=0)
    tiled_pos = closest_pos.unsqueeze(0).repeat(n_neg, 1)
    hinge = tiled_pos - negative_distances + margin
    zeros = torch.zeros(hinge.shape, device=device)
    return torch.max(hinge, zeros)
def margin_loss(positive_distances, negative_distances, margin, device):
    """Standard hinge: max(positive - negative + margin, 0), elementwise."""
    raw = positive_distances - negative_distances + margin
    zeros = torch.zeros(raw.shape, device=device)
    return torch.max(raw, zeros)
def regression_loss(embedding_distance, actual_distance, obj_sigmas):
    """L1 gap between softmax distributions of embedding vs. actual distances.

    qij = softmax(-embedding_distance / 100) and
    pij = softmax(-actual_distance * 100) (the scalings bring the two inputs
    to comparable ranges); returns sum(|pij - qij|) over the last dimension.

    Note: `obj_sigmas` is kept for interface compatibility but is unused.
    In the original, sigmoid(obj_sigmas) only fed a temperature-scaled pij
    that was immediately overwritten by the softmax below -- that dead
    computation has been removed without changing the returned value.
    """
    qij = F.softmax(-(embedding_distance / 100.0), dim=-1)
    pij = F.softmax(-(actual_distance * 100.0), dim=-1)
    return torch.sum(torch.abs(pij - qij), dim=-1)
def get_symmetric(pc):
    """Reflect a (B, N, 3) point cloud across the x=0 plane (negate x, keep y and z)."""
    xs = pc[:, :, 0]
    ys = pc[:, :, 1]
    zs = pc[:, :, 2]
    return torch.stack([-xs, ys, zs], dim=2)
### Property 2
def inclusion_margin_loss():
# Property 1: embedding_distance ~ fitting_loss
return | {
"alphanum_fraction": 0.7649516845,
"author": null,
"avg_line_length": 30.632,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "793d24f1cb7ea125005661860fce34885126e396",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2022-01-12T08:41:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-06-28T06:55:32.000Z",
"max_forks_repo_head_hexsha": "87fd70ff95a6ce24c06ee4bd4ed87378ee8e785c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "yujie-tao/joint_learning_retrieval_deformation",
"max_forks_repo_path": "losses.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "87fd70ff95a6ce24c06ee4bd4ed87378ee8e785c",
"max_issues_repo_issues_event_max_datetime": "2021-11-16T13:39:38.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-22T09:08:08.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "yujie-tao/joint_learning_retrieval_deformation",
"max_issues_repo_path": "losses.py",
"max_line_length": 111,
"max_stars_count": 29,
"max_stars_repo_head_hexsha": "87fd70ff95a6ce24c06ee4bd4ed87378ee8e785c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "yujie-tao/joint_learning_retrieval_deformation",
"max_stars_repo_path": "losses.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-17T02:33:44.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-15T10:00:04.000Z",
"num_tokens": 947,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3829
} |
# Iterable implementing the Adam optimization algorithm.
struct AdamIterable{T, F, S, R}
    w0::T        # initial iterate
    f::F         # objective; `gradient(f, w)` must return (grad, value)
    stepsize::S  # iterator of per-step learning rates
    beta1::R     # first-moment decay rate
    beta2::R     # second-moment decay rate
    epsilon::R   # denominator fudge factor for numerical stability
end

# Keyword convenience constructor; scalar stepsizes are wrapped via `to_iterator`.
AdamIterable(w0, f; stepsize=1e-3, beta1=0.9, beta2=0.999, epsilon=1e-8) = AdamIterable(w0, f, to_iterator(stepsize), beta1, beta2, epsilon)

# Adam iterates forever; termination is the caller's responsibility.
Base.IteratorSize(::Type{<:AdamIterable}) = Base.IsInfinite()
# Mutable per-iteration state threaded through Base.iterate for Adam.
mutable struct AdamState{T, G, S, R}
    w::T                  # current iterate
    f_w                   # objective value at w (untyped, as in the original)
    grad_f_w::G           # gradient at w
    stepsize_iterator::S  # stateful stream of stepsizes
    m::T                  # first-moment (mean) estimate
    v::T                  # second-moment (uncentered variance) estimate
    beta1t::R             # running beta1^t used for bias correction
    beta2t::R             # running beta2^t used for bias correction
end
# First iteration: copy the starting point, evaluate the objective and its
# gradient there, and initialize both moment estimates to zero.
function Base.iterate(iter::AdamIterable)
    w = copy(iter.w0)
    grad_f_w, f_w = gradient(iter.f, w)
    state = AdamState(
        w, f_w, grad_f_w, Iterators.Stateful(iter.stepsize),
        zero(w), zero(w), iter.beta1, iter.beta2
    )
    return IterationOutput(state.w, state.f_w, state.grad_f_w), state
end
# Subsequent iterations: perform one Adam update step in place.
function Base.iterate(iter::AdamIterable, state::AdamState)
    # Exponential moving averages of the gradient and its elementwise square.
    state.m .= iter.beta1 .* state.m .+ (1 - iter.beta1) .* state.grad_f_w
    state.v .= iter.beta2 .* state.v .+ (1 - iter.beta2) .* state.grad_f_w .^ 2
    stepsize = popfirst!(state.stepsize_iterator)
    # Bias-corrected stepsize; beta1t/beta2t currently hold beta^t.
    alpha_t = stepsize * sqrt(1 - state.beta2t)/(1 - state.beta1t)
    state.w .-= alpha_t .* state.m ./ (sqrt.(state.v) .+ iter.epsilon)
    state.beta1t *= iter.beta1
    state.beta2t *= iter.beta2
    # Re-evaluate objective and gradient at the new iterate for the next step.
    state.grad_f_w, state.f_w = gradient(iter.f, state.w)
    return IterationOutput(state.w, state.f_w, state.grad_f_w), state
end
# Thin front-end: `Adam(; stepsize=..., beta1=...)(w0, f)` builds an AdamIterable
# with the stored keyword arguments.
struct Adam
    kwargs
    Adam(; kwargs...) = new(kwargs)
end

(alg::Adam)(args...) = AdamIterable(args...; alg.kwargs...)
| {
"alphanum_fraction": 0.642287234,
"author": null,
"avg_line_length": 28.3773584906,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "13c4dacdc56c10fbd436ab3834f1d01eb9dddd25",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-05-04T22:38:55.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-04T22:38:55.000Z",
"max_forks_repo_head_hexsha": "5d18e81d767af321106f5c4639b9275e34cf1bc0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lostella/ProtoGrad.jl",
"max_forks_repo_path": "src/optimizers/adam.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5d18e81d767af321106f5c4639b9275e34cf1bc0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lostella/ProtoGrad.jl",
"max_issues_repo_path": "src/optimizers/adam.jl",
"max_line_length": 140,
"max_stars_count": 25,
"max_stars_repo_head_hexsha": "5d18e81d767af321106f5c4639b9275e34cf1bc0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lostella/ProtoGrad.jl",
"max_stars_repo_path": "src/optimizers/adam.jl",
"max_stars_repo_stars_event_max_datetime": "2022-01-25T03:22:07.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-22T08:29:03.000Z",
"num_tokens": 491,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1504
} |
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2018, PickNik LLC
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of PickNik LLC nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/* Author: Henning Kayser
* Desc: Generation of occupancy data from pcl or planning scenes
*/
#include <rtr_moveit/occupancy_handler.h>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl_ros/transforms.h>
#include <chrono>
// Eigen
#include <Eigen/Geometry>
#include <eigen_conversions/eigen_msg.h>
// collision checks
#include <moveit/collision_detection/world.h>
#include <moveit/collision_detection_fcl/collision_world_fcl.h>
#include <geometric_shapes/shapes.h>
// RapidPlan
#include <rtr-occupancy/Voxel.hpp>
namespace rtr_moveit
{
const std::string LOGNAME = "occupancy_handler";
// Construct with the node handle later used for point cloud subscriptions.
OccupancyHandler::OccupancyHandler(const ros::NodeHandle& nh) : nh_(nh)
{
}
// Set the roadmap volume region occupancy data is generated for.
void OccupancyHandler::setVolumeRegion(const RoadmapVolume& roadmap_volume)
{
  volume_region_ = roadmap_volume;
}
// Set the default topic point clouds are received on.
void OccupancyHandler::setPointCloudTopic(const std::string& pcl_topic)
{
  pcl_topic_ = pcl_topic;
}
// Fill occupancy_data with a point cloud, refreshing the cached cloud from
// pcl_topic when it is missing or stale.  Returns false on subscription
// timeout or missing transform; timeout is in milliseconds.
bool OccupancyHandler::fromPointCloud(const std::string& pcl_topic, OccupancyData& occupancy_data, int timeout)
{
  // Refresh if there is no cached cloud or it is older than 100ms.
  // pcl::PCLHeader::stamp is in microseconds, so convert the ROS time from
  // nanoseconds by dividing by 1000.  (The previous code multiplied by 1000,
  // comparing mismatched units, which made the staleness test meaningless.)
  // TODO(RTR-59): Use planning time to determine timeouts
  if (!shared_pcl_ptr_ || (ros::Time::now().toNSec() / 1000 - shared_pcl_ptr_->header.stamp > 100000))
  {
    std::unique_lock<std::mutex> lock(pcl_mtx_);
    // Subscribe just long enough for pclCallback() to deliver one message
    ros::Subscriber pcl_sub = nh_.subscribe(pcl_topic, 1, &OccupancyHandler::pclCallback, this);
    pcl_ready_ = false;
    bool pcl_success = pcl_condition_.wait_for(lock, std::chrono::milliseconds(timeout), [&]() { return pcl_ready_; });
    pcl_sub.shutdown();
    if (!pcl_success)
    {
      ROS_ERROR_NAMED(LOGNAME, "Waiting for point cloud data timed out");
      return false;
    }
    if (shared_pcl_ptr_)
    {
      // Transform the received cloud into the frame of the roadmap volume
      tf::TransformListener tf_listener;
      const std::string& cloud_frame = shared_pcl_ptr_->header.frame_id;
      const std::string& volume_frame = volume_region_.pose.header.frame_id;
      if (!tf_listener.canTransform(volume_frame, cloud_frame, ros::Time::now()) &&
          !tf_listener.waitForTransform(volume_frame, cloud_frame, ros::Time::now(), ros::Duration(1.0)))
      {
        ROS_ERROR_NAMED(LOGNAME, "Unable to transform point cloud into volume region frame");
        return false;
      }
      tf::StampedTransform cloud_to_volume;
      tf_listener.lookupTransform(volume_frame, cloud_frame, ros::Time::now(), cloud_to_volume);
      pcl_ros::transformPointCloud(*shared_pcl_ptr_, *shared_pcl_ptr_, cloud_to_volume);
    }
  }
  // Hand out the cloud in both the refreshed and the still-fresh cached case.
  // (Previously this only happened inside the refresh branch, so a
  // sufficiently recent cached cloud was never copied into occupancy_data.)
  occupancy_data.type = OccupancyData::Type::POINT_CLOUD;
  occupancy_data.point_cloud = shared_pcl_ptr_;
  return occupancy_data.point_cloud != NULL;
}
// Single-shot subscriber callback: converts the received PCLPointCloud2 into
// the shared XYZ cloud and wakes the thread waiting in fromPointCloud().
void OccupancyHandler::pclCallback(const pcl::PCLPointCloud2ConstPtr& cloud_pcl2)
{
  // Lock guards shared_pcl_ptr_ and pcl_ready_ against the waiting thread
  std::unique_lock<std::mutex> lock(pcl_mtx_);
  // prevent overwriting shared_pcl_ptr_ in case subscriber wasn't shut down fast enough
  if (!pcl_ready_)
  {
    if (!shared_pcl_ptr_)
      shared_pcl_ptr_.reset(new pcl::PointCloud<pcl::PointXYZ>());
    pcl::fromPCLPointCloud2(*cloud_pcl2, *shared_pcl_ptr_);
    pcl_ready_ = true;
    // unlock before notifying so the woken waiter doesn't immediately block on the mutex
    lock.unlock();
    pcl_condition_.notify_one();
  }
}
// Sample the roadmap volume with a voxel-sized collision box, recording in
// occupancy_data every voxel that collides with the planning scene world.
bool OccupancyHandler::fromPlanningScene(const planning_scene::PlanningSceneConstPtr& planning_scene,
                                         OccupancyData& occupancy_data)
{
  // region volume dimensions (meters)
  float x_length = volume_region_.dimension[0];
  float y_length = volume_region_.dimension[1];
  float z_length = volume_region_.dimension[2];
  // voxel resolution (number of voxels per axis)
  float x_voxels = volume_region_.voxel_resolution[0];
  float y_voxels = volume_region_.voxel_resolution[1];
  float z_voxels = volume_region_.voxel_resolution[2];
  // voxel dimensions (edge length of a single voxel per axis)
  float x_voxel_dimension = x_length / x_voxels;
  float y_voxel_dimension = y_length / y_voxels;
  float z_voxel_dimension = z_length / z_voxels;
  // Compute transform: world->volume
  // world_to_volume points at the corner of the volume origin (x=0,y=0,z=0)
  // we use auto to support Affine3d and Isometry3d (kinetic + melodic)
  auto world_to_base(planning_scene->getFrameTransform(volume_region_.pose.header.frame_id));
  // base_to_volume is initialized from world_to_base only to get the right
  // (auto-deduced) type; poseMsgToEigen overwrites its value entirely
  auto base_to_volume = world_to_base;
  tf::poseMsgToEigen(volume_region_.pose.pose, base_to_volume);
  auto world_to_volume = world_to_base * base_to_volume;
  // create collision world and add voxel box shape one step outside the volume grid
  // (the first x/y/z step inside the loops moves it onto the first voxel)
  collision_detection::CollisionWorldFCL world;
  shapes::Box box(x_voxel_dimension, y_voxel_dimension, z_voxel_dimension);
  Eigen::Translation3d box_start_position(-0.5 * x_voxel_dimension, -0.5 * y_voxel_dimension, -0.5 * z_voxel_dimension);
  // occupancy box id and dimensions
  // TODO(henningkayser): Check that box id is not present in planning scene - should be unique
  std::string box_id = "rapidplan_collision_box";
  world.getWorld()->addToObject(box_id, std::make_shared<const shapes::Box>(box), world_to_volume * box_start_position);
  // clear scene boxes vector
  occupancy_data.type = OccupancyData::Type::VOXELS;
  occupancy_data.voxels.resize(0);
  // x/y/z translation steps, since relative movements are more efficient than repositioning the object
  // NOTE(review): assumes World::moveObject() applies the given transform
  // relative to the object's current pose — confirm against the
  // collision_detection::World API.
  auto volume_orientation = world_to_volume.rotation();
  auto x_step(volume_orientation * decltype(world_to_volume)(Eigen::Translation3d(x_voxel_dimension, 0, 0)));
  auto y_step(volume_orientation * decltype(world_to_volume)(Eigen::Translation3d(0, y_voxel_dimension, 0)));
  auto z_step(volume_orientation * decltype(world_to_volume)(Eigen::Translation3d(0, 0, z_voxel_dimension)));
  // y/z reset transforms: move the box back to the start of its row/column
  // after an inner loop completes
  auto y_reset(volume_orientation *
               decltype(world_to_volume)(Eigen::Translation3d(0, -y_voxels * y_voxel_dimension, 0)));
  auto z_reset(volume_orientation *
               decltype(world_to_volume)(Eigen::Translation3d(0, 0, -z_voxels * z_voxel_dimension)));
  // Loop over X/Y/Z voxel positions and check for box collisions in the collision scene
  // NOTE: This implementation is a prototype and will be replaced by more efficient methods as described below
  // TODO(RTR-57): More efficient implementations:
  //    * Iterate over collision objects and only sample local bounding boxes
  //    * Use octree search, since boxes can have variable sizes
  // TODO(RTR-57): adjust grid to odd volume dimensions
  // TODO(RTR-57): Do we need extra Box padding here?
  collision_detection::CollisionRequest request;
  collision_detection::CollisionResult result;
  for (uint16_t x = 0; x < x_voxels; ++x)
  {
    world.getWorld()->moveObject(box_id, x_step);
    for (uint16_t y = 0; y < y_voxels; ++y)
    {
      world.getWorld()->moveObject(box_id, y_step);
      for (uint16_t z = 0; z < z_voxels; ++z)
      {
        world.getWorld()->moveObject(box_id, z_step);
        planning_scene->getCollisionWorld()->checkWorldCollision(request, result, world);
        if (result.collision)
        {
          // voxel collides with the scene -> mark it occupied
          occupancy_data.voxels.push_back(rtr::Voxel(x, y, z));
          result.clear();  // TODO(RTR-57): Is this really necessary?
        }
      }
      // move object back to z start
      world.getWorld()->moveObject(box_id, z_reset);
    }
    // move object back to y start
    world.getWorld()->moveObject(box_id, y_reset);
  }
  return true;
}
} // namespace rtr_moveit
| {
"alphanum_fraction": 0.7175951462,
"author": null,
"avg_line_length": 42.558685446,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "b2929657bae3569203346a42b168074f35b84a6e",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9cd37bb5476f5edb42d13ec331a8f0abc7eb113b",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "henningkayser/rtr_moveit",
"max_forks_repo_path": "rtr_moveit/src/occupancy_handler.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9cd37bb5476f5edb42d13ec331a8f0abc7eb113b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "henningkayser/rtr_moveit",
"max_issues_repo_path": "rtr_moveit/src/occupancy_handler.cpp",
"max_line_length": 120,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9cd37bb5476f5edb42d13ec331a8f0abc7eb113b",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "henningkayser/rtr_moveit",
"max_stars_repo_path": "rtr_moveit/src/occupancy_handler.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2174,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 9065
} |
import itertools
import numpy
import pandas
import sklearn.naive_bayes as naive_bayes
# Set some options for printing all the columns.
# Use the fully-qualified option name: the bare 'precision' alias was removed
# in pandas 2.0, while 'display.precision' works on all pandas versions.
pandas.set_option('display.precision', 13)
# Define a function to visualize the percent of a particular target category by a nominal predictor
def RowWithColumn(rowVar, columnVar, show='ROW'):
    """Print a frequency crosstab of rowVar by columnVar, optionally with
    fraction tables.

    show -- 'ROW' for row fractions, 'COLUMN' for column fractions,
            'BOTH' for both tables.
    """
    countTable = pandas.crosstab(index=rowVar, columns=columnVar,
                                 margins=False, dropna=True)
    print("Frequency Table: \n", countTable)
    print()
    if show in ('ROW', 'BOTH'):
        print("Row Fraction Table: \n",
              countTable.div(countTable.sum(1), axis='index'))
        print()
    if show in ('COLUMN', 'BOTH'):
        print("Column Fraction Table: \n",
              countTable.div(countTable.sum(0), axis='columns'))
        print()
    return
# Specify the roles: four binary predictors and the target column
feature = ['tv', 'magazine', 'friends', 'doctor']
target = 'supps'
# Read the Excel file (hard-coded local path; only the needed columns are loaded)
nutrition = pandas.read_excel('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Data\\Nutrition_Information.xls',
                              sheet_name = 'Sheet1',
                              usecols = feature + [target])
# Drop observations with any missing value
nutrition = nutrition.dropna()
# Look at the row distribution of the target
print(nutrition.groupby(target).size())
# Crosstab each predictor against the target, printing row fractions
for pred in feature:
    RowWithColumn(rowVar = nutrition[target], columnVar = nutrition[pred], show = 'ROW')
# Make the binary features take values 0 and 1 (was 2=No and 1=Yes)
nutrition[feature] = 2 - nutrition[feature]
xTrain = nutrition[feature].astype('category')
yTrain = nutrition[target].astype('category')
# alpha close to zero means essentially no Laplace smoothing, so the fitted
# probabilities are the empirical relative frequencies
_objNB = naive_bayes.BernoulliNB(alpha = 1.e-10)
thisFit = _objNB.fit(xTrain, yTrain)
print('Probability of each class')
print(numpy.exp(thisFit.class_log_prior_))
print('Empirical probability of features given a class, P(x_i|y)')
print(numpy.exp(thisFit.feature_log_prob_))
print('Number of samples encountered for each class during fitting')
print(thisFit.class_count_)
print('Number of samples encountered for each (class, feature) during fitting')
print(thisFit.feature_count_)
yTrain_predProb = _objNB.predict_proba(xTrain)
# Create the all possible combinations of the features' values (2^4 = 16 rows)
xTest = pandas.DataFrame(list(itertools.product([0,1], repeat = len(feature))), columns = feature)
# Score the xTest and append the predicted probabilities to the xTest
# NOTE(review): predict_proba orders its columns by _objNB.classes_ (the sorted
# target values); confirm that order matches the hard-coded
# ['P_suppsYes', 'P_suppsNo'] labels, otherwise the two columns are swapped.
yTest_predProb = pandas.DataFrame(_objNB.predict_proba(xTest), columns = ['P_suppsYes', 'P_suppsNo'])
yTest_score = pandas.concat([xTest, yTest_predProb], axis = 1)
| {
"alphanum_fraction": 0.6923636364,
"author": null,
"avg_line_length": 36.1842105263,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "141e2d43f385d386f25aa6e4916faca7092f343a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "725ef6d90a9d22694147245b9a0928477e053585",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dvtate/cs484",
"max_forks_repo_path": "in-class/Week 10 Nutrition Naive Bayes.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "725ef6d90a9d22694147245b9a0928477e053585",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dvtate/cs484",
"max_issues_repo_path": "in-class/Week 10 Nutrition Naive Bayes.py",
"max_line_length": 118,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "725ef6d90a9d22694147245b9a0928477e053585",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dvtate/cs484",
"max_stars_repo_path": "in-class/Week 10 Nutrition Naive Bayes.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 663,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2750
} |
%!TEX root = forallx-ubc.tex
\chapter{SL Trees}
\label{ch.sl.trees}
So far we have learned one way to evaluate SL argument forms for validity: an argument is valid just in case no interpretation satisfies the premises but falsifies the conclusion. We can check to see whether an argument is valid by constructing truth tables, representing all of the possible interpretations, and checking to see whether any of them do so. This method has two main advantages: one is that it can be done in a formal and formulaic way --- it requires no particular rational insight, just a straightforward application of the rules of SL. Another is that, assuming you follow the rules correctly, it will always succeed in identifying whether the argument is valid or not.
The truth-table method also has a significant disadvantage, however: it rapidly becomes extremely cumbersome for evaluating arguments using more than two or three atomic sentence-letters. To evaluate an argument like this one, you'd need to consider four rows of a truth-table:
\begin{earg}
\item[] $P\eiff Q$
\item[] $\enot Q$
\item[] $\enot Q \eor (P \eif \enot Q)$
\item[\therefore] $\enot P \eif Q$
\end{earg}
But change just two of the atoms to new letters, and the required table will grow exponentially. For this argument, you'd need sixteen rows.
\begin{earg}
\item[] $P\eiff Q$
\item[] $\enot A$
\item[] $\enot Q \eor (A \eif \enot B)$
\item[\therefore] $\enot P \eif B$
\end{earg}
For this one, you'd need two hundred fifty-six!
\label{8letterargument}
\begin{earg}
\item[] $A\eiff B$
\item[] $\enot B \eif (C \eor D)$
\item[] $E \eif \enot C$
\item[] $(\enot D \eand F) \eor G$
\item[] $\enot A \eand E$
\item[\therefore] $H \eor G$
\end{earg}
So it is useful to have alternate ways of proving entailments. In this chapter we will introduce a \emph{proof system} for SL. The proofs we will construct in this chapter are called \define{analytic tableaux}. Another name for them is \define{trees}, because of the way they `branch' out to represent possibilities. In Ch.\ \ref{ch.ND.proofs} we will examine a different proof system.
\section{Satisfiability and entailment}
The method of trees is most directly a way to test for satisfiability of a set of SL sentences. Since validity is definable in terms of satisfiability, it also gives a way to test for validity.
Recall from \S\ref{sec.semanticsSL} the definition of entailment in SL: $\metaSetX{}\models\metaA{}$ means that there is no interpretation that satisfies every sentence in \metaSetX{} but falsifies \metaA{}. (Remember, `\metaSetX{}' can stand for any set of SL sentences.) This in turn is equivalent to the claim that there is no interpretation that satisfies $\{\metaSetX{},\enot\metaA{}\}$. Unsatisfiability can be represented with a double-turnstile with $\bot$ on the right-hand side.
\factoidbox{
In general, $\metaSetX{}\models\metaB{}$ is equivalent to $\metaSetX{},\enot\metaB{}\models\bot$.
}
So if you want to tell whether an argument in SL is valid, check to see whether the premises, along with the negation of the conclusion, are jointly satisfiable. If they are, the argument is invalid. If they're not, the argument is valid. This is the basic idea of the tree method. We'll write down the sentences we're attempting to satisfy, and then we'll work through their consequences, in order to see whether it's possible to complete the task.
Let's begin by working through an example that illustrates the general idea, then come back to give precise rules for the tree method.
\section{An example: proving validity}
Let's attempt to prove whether this is valid in SL:
\begin{earg}
\item[] $A\eand B$
\item[] $\enot(C\eor D)$
\item[] $(\enot B \eor C) \eor E$
\item[\therefore] $E$
\end{earg}
To evaluate this argument, we consider whether it's possible to satisfy the three premises along with the negation of the conclusion. We begin by writing down the sentences we're going to attempt to satisfy.
\begin{prooftree}
{ % begin tree preamble
single branches,
close with=\ensuremath{\times},
%to prove={\{A\eand B, \enot(C\eor D), (\enot B\eor C)\eor E\} \vdash{} E}
} % end tree preamble
[A\eand B
[\enot(C\eor D), grouped
[(\enot B \eor C) \eor E, grouped
[\enot E, grouped, name=negconc
% [A
% [B, grouped
% [\enot C
% [\enot D, grouped
% [\enot B \eor C
% [\enot B, close]
% [C, close]
% ]
% [E, close]
% ]
% ]
% ]
% ]
]
]
]
]
\end{prooftree}
Notice that we're putting the \emph{negation} of the conclusion here in line (4), since we're looking to see whether it's possible to satisfy the premises and \emph{falsify} the conclusion. The sentences we write down at the beginning of the tree, we call the \define{root}. Trees are designed to show whether the root is satisfiable, and if so, how.
With proof trees, the idea is to continue writing down that which our previous lines \emph{already commit us to}. Consider the first line, $A \eand B$. For this conjunction to be true, both conjuncts must be true. So any interpretation that satisfies the top four lines must also satisfy both $A$ and $B$; the truth of those sentences is a commitment of what we already have. Each follows from what is already written down. We represent this by continuing the tree with those sentences:
\begin{prooftree}
{ % begin tree preamble
single branches,
close with=\ensuremath{\times},
%to prove={\{A\eand B, \enot(C\eor D), (\enot B\eor C)\eor E\} \vdash{} E}
} % end tree preamble
[A\eand B, checked
[\enot(C\eor D), grouped
[(\enot B \eor C) \eor E, grouped
[\enot E, grouped
[A
[B, grouped
% [\enot C
% [\enot D, grouped
% [\enot B \eor C
% [\enot B, close]
% [C, close]
% ]
% [E, close]
% ]
% ]
]
]
]
]
]
]
\end{prooftree}
In addition to introducing lines (5) and (6), we also add a check mark on (1), to indicate that we've now considered it and represented its consequences. Think of the check mark as saying that we've extended the tree in a way that encodes the information from this line.
Now consider line (2). This is a negated disjunction. Disjunctions are true any time either disjunct is true, so this \emph{negated} disjunction can only be true if \emph{both} disjuncts are \emph{false}. So we include new lines for \enot $C$ and \enot $D$, adding a check mark on line (2):
\begin{prooftree}
{ % begin tree preamble
single branches,
close with=\ensuremath{\times},
%to prove={\{A\eand B, \enot(C\eor D), (\enot B\eor C)\eor E\} \vdash{} E}
} % end tree preamble
[A\eand B, checked
[\enot(C\eor D), grouped, checked
[(\enot B \eor C) \eor E, grouped
[\enot E, grouped
[A
[B, grouped
[\enot C
[\enot D, grouped
% [\enot B \eor C
% [\enot B, close]
% [C, close]
% ]
% [E, close]
]
]
]
]
]
]
]
]
\end{prooftree}
Line (3) is a disjunction. Unlike the previous two cases, it doesn't tell us what categorically \emph{must} be the case; it says that one of two possibilities must be met. We represent this in our tree by \emph{branching} into two different columns:
\begin{prooftree}
{ % begin tree preamble
single branches,
close with=\ensuremath{\times},
%to prove={\{A\eand B, \enot(C\eor D), (\enot B\eor C)\eor E\} \vdash{} E}
} % end tree preamble
[A\eand B, checked
[\enot(C\eor D), grouped, checked
[(\enot B \eor C) \eor E, grouped, checked
[\enot E, grouped
[A
[B, grouped
[\enot C
[\enot D, grouped
[\enot B \eor C
% [\enot B, close]
% [C, close]
]
[E]
]
]
]
]
]
]
]
]
\end{prooftree}
Think of these two branches as representing the idea that the root implies that at least one of these ways of continuing the tree must be possible.
So now we have two branches to consider. Start by examining the right branch. It gives an atomic sentence, $E$. Notice, however, that line (4) was \enot $E$. So if we're looking for a way to satisfy (1)-(4), this right branch isn't a possible interpretation. It requires $E$ to be true, and it also requires \enot $E$ to be true. If a branch contains any sentence and also contains its negation, we know that branch doesn't correspond to a possible interpretation. We'll consider this branch \emph{closed}, and mark this with an `\ensuremath\times'.
The left branch is another disjunction. It too branches out into its two disjuncts:
\begin{prooftree}
{ % begin tree preamble
single branches,
close with=\ensuremath{\times},
%to prove={\{A\eand B, \enot(C\eor D), (\enot B\eor C)\eor E\} \vdash{} E}
} % end tree preamble
[A\eand B, checked
[\enot(C\eor D), grouped, checked
[(\enot B \eor C) \eor E, grouped, checked
[\enot E, grouped
[A
[B, grouped
[\enot C
[\enot D, grouped
[\enot B \eor C, checked
[\enot B, close]
[C, close]
]
[E, close]
]
]
]
]
]
]
]
]
\end{prooftree}
Both of these disjuncts also result in closed branches. The left branch at (10) is the negation of (6), and the right branch is the negation of (7). Now every branch in this tree is closed. This corresponds to the idea that every possible way there could be to satisfy (1)-(4) ended up requiring a contradiction. There is no interpretation that satisfies (1)-(4). In other words, the argument we began with was valid.
In the previous chapter we used the double turnstile, `$\models$', to represent entailment. In this chapter we will use a different but related symbol, the \define{single turnstile}, which looks like this: `$\vdash$'. Think of this symbol as representing \emph{provability}. So in proving that $E$ follows from the premises above, we're showing that {\{$A\eand B$, \enot$(C\eor D)$, $(\enot B\eor C)\eor E\} \vdash{} E$}.
Unsurprisingly, provability and entailment will turn out to be closely connected; we'll consider the connection in detail in Chapter \ref{ch.SLsoundcomplete}.
\section{An example: proving invalidity}
\label{sec.SLinvalidtree}
Let's work through another example to further illustrate the idea. In \S\ref{sec.SLtreerules} we'll learn the formal rules for trees in SL. Consider this argument form:
\begin{earg}
\item[] $(D\eor A) \eand \enot N$
\item[] $N \eor \enot A$
\item[\therefore] $\enot N \eand A$
\end{earg}
As before, we'll construct a tree that includes the premises and the negation of the conclusion at the top, and we'll attempt to find an interpretation satisfying those three lines. By convention, we write the claim we're attempting to prove at the top of the tree. We begin by processing line (1), which is a conjunction; its conjuncts are given in (4) and (5). Line (6) processes the disjunction in line (2), and the left branch closes.
\begin{prooftree}
{
to prove={\{(D\eor A) \eand \enot N, N \eor \enot A\} \vdash{} \enot N \eand A},
single branches,
close with=\ensuremath{\times},
}
[(D\eor A) \eand \enot N, checked
[N \eor \enot A, grouped, checked
[\enot (\enot N \eand A), grouped%, checked
[D \eor A%, checked
[\enot N, grouped
[N, close
]
[\enot A
% [\enot\enot N, close
% ]
% [\enot A
% [D]
% [A, close]
% ]
]
]
]
]
]
]
\end{prooftree}
Line (3) is a negated conjunction. A conjunction is true if and only if both conjuncts are true, so it is false if at least one conjunct is false. So to resolve (3), line (7) branches into the negation of each conjunct, \enot\enot$N$ and \enot$A$. The former closes because it's the negation of line (5). The final sentence requiring resolution is the disjunction on line (4); it branches, and one branch closes.
\begin{prooftree}
{
to prove={\{(D\eor A) \eand \enot N, N \eor \enot A\} \vdash{} \enot N \eand A},
single branches,
close with=\ensuremath{\times},
}
[(D\eor A) \eand \enot N, checked
[N \eor \enot A, grouped, checked
[\enot (\enot N \eand A), grouped, checked
[D \eor A, checked
[\enot N, grouped
[N, close
]
[\enot A
[\enot \enot N, close
]
[\enot A
[D, open]
[A, close]
]
]
]
]
]
]
]
\end{prooftree}
The $\uparrow$ indicates that the open branch ending in $D$ is a \emph{completed} branch. (We'll precisely define `completion' in \S\ref{sec.SL.tree.completion}.) What this means is that this branch represents a way to satisfy all three sentences at the root of the tree. In other words, this argument form is \emph{not} valid in SL. Furthermore, examining the open branch demonstrates an interpretation that satisfies the root. The branch includes three wffs that are either atoms or negated atoms: \enot $A$, $D$, and \enot $N$. So it suggests this interpretation:
\begin{displaymath}
\script{I} =
\left\{
\begin{array}{ll}
A = 0\\
D = 1\\
N = 0
\end{array}
\right.
\end{displaymath}
You can verify this result by evaluating the premises and the conclusion of the argument we began with on this interpretation. Since there is an interpretation that satisfies the premises and falsifies the conclusion, our tree system proves the argument invalid.
\section{Resolution rules for SL trees}
\label{sec.SLtreerules}
Hopefully the examples we've worked through have given you a decent intuitive sense of the tree method for SL. Now let's get more precise about it, by giving formal rules for trees. You should be able to recognize the following rules as a generalization of the steps of the proofs given above.
We begin with \define{resolution} rules. These rules identify ways that tree branches can be extended, given sentences that are already in that branch. The rules depend on the main connective of the sentence in question. (If the main connective is negation, then they also depend on the main connective of the negand.)
\label{SL.treerules.start}
\subsection{Conjunction}
The rule for conjunction is that if you have a conjunction in a branch, you may make a linear extension of the branch that includes each conjunct, adding a check mark next to the conjunction. Using our Greek symbols once again as variables to stand in for any sentence of SL, any time you have this,
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\metaA{}\eand\metaB{}%, checked
% [\metaA{}
% [\metaB{}, grouped
% ]
% ]
]
\end{prooftree}
\end{center}
you may extend each open branch of the tree to this:
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\metaA{}\eand\metaB{}, checked
[\metaA{}
[\metaB{}, grouped
]
]
]
\end{prooftree}
\end{center}
It is important to remember once again that \metaA{} and \metaB{} here can stand for \emph{any} sentences of SL, including complex ones.
If you have extended a tree branch using the conjunction rule, we say that you have \emph{resolved} the conjunction. When sentences are resolved, they are marked with a check. In stating the resolution rules, we'll typically omit the check mark; here is the conjunction rule:
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering}
[\metaA{}\eand\metaB{}
[\metaA{}
[\metaB{}, grouped
]
]
]
\end{prooftree}
\end{center}
}
\subsection{Negated conjunction}
If you have a negated conjunction, you may resolve it by branching into the negation of each conjunct. This makes sense because there are two ways for a negated conjunction to be true --- either conjunct can be false.
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\enot(\metaA{}\eand\metaB{})
[\enot\metaA{}]
[\enot\metaB{}]
]
\end{prooftree}
\end{center}
}
\subsection{Disjunction}
\label{subsec.DisjunctionTreeRule}
\begin{groupitems}
Disjunctions branch into each disjunct.
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\metaA{}\eor\metaB{}
[\metaA{}]
[\metaB{}]
]
\end{prooftree}
\end{center}
}
\end{groupitems}
\subsection{Negated disjunction}
Since disjunctions are true any time either disjunct is true, they are only false if both disjuncts are false. So negated disjunctions are resolved with a linear development containing the negation of each disjunct.
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\enot(\metaA{}\eor\metaB{})
[\enot\metaA{}
[\enot\metaB{}, grouped
]
]
]
\end{prooftree}
\end{center}
}
\subsection{Conditional}
Recall the characteristic truth table for the conditional in SL:
\begin{center}
\begin{tabular}{c|c|c}
\metaA{} & \metaB{} & \metaA{}\eif\metaB{}\\
\hline
1 & 1 & 1\\
1 & 0 & 0\\
0 & 1 & 1\\
0 & 0 & 1
\end{tabular}
\end{center}
Conditionals are true any time the antecedent is false, and also any time the consequent is true. We can represent this with a branching tree development, similar to a disjunction.
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\metaA{}\eif\metaB{}
[\enot\metaA{}]
[\metaB{}]
]
\end{prooftree}
\end{center}
}
\subsection{Negated conditional}
As in the case of disjunction, the negation of a conditional is a relatively strong claim --- the only way for a conditional to be false is for its antecedent to be true \emph{and} for its consequent to be false. We represent this with a linear development for negated conditionals:
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\enot(\metaA{}\eif\metaB{})
[\metaA{}
[\enot\metaB{}, grouped
]
]
]
\end{prooftree}
\end{center}
}
\subsection{Biconditional}
Biconditionals require a slightly different structure. A biconditional says that two sentences have the same truth value: either both are true, or both are false. Since these represent two different ways a biconditional can be true, our tree rules will develop biconditionals into two new branches. But unlike in the case of our other rules, each new branch will contain two sentences. One branch will have both sides of the biconditional; the other will have both sides' \emph{negations}:
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\metaA{}\eiff\metaB{}
[\metaA{}
[\metaB{}, grouped]
]
[\enot\metaA{}
[\enot\metaB{}, grouped]
]
]
\end{prooftree}
\end{center}
}
\subsection{Negated biconditional}
\begin{groupitems}
Negated biconditionals yield the same structure, but instead of new branches where each subsentence has the same truth value, they yield branches in which the subsentences have opposite values.
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering,
single branches}
[\enot(\metaA{}\eiff\metaB{})
[\metaA{}
[\enot\metaB{}, grouped]
]
[\enot\metaA{}
[\metaB{}, grouped]
]
]
\end{prooftree}
\end{center}
}
\end{groupitems}
\subsection{Double negation}
There is one more resolution rule, for double-negated sentences. One resolves a double negation by developing a linear branch with the negand of the negand --- i.e., the wff obtained from removing both negations from the left of the double-negation.
\factoidbox{
\begin{center}
\begin{prooftree}
{not line numbering, single branches}
[\enot\enot\metaA{}
[\metaA{}]
]
\end{prooftree}
\end{center}
}
\label{SL.treerules.end}
\subsection{Resolution rules summary}
These nine resolution rules describe, in purely formal terms, how to resolve most sentences of SL. The only exceptions are atomic sentences, and atomic sentences with a single negation sign in front of them. In our formal proof system, atoms and negated atoms have a special role to play; they are not resolved. In developing a tree proof, you may apply any of these rules to any unresolved sentence (other than an atom or negated atom) in the branch. Mark resolved sentences with a check.
Note that it is \emph{not} a rule that sentences must be resolved in the order in which they appear in the tree; one can save sentences and come back later to resolve them. However, if you are resolving a sentence after the tree has already done some branching, you must perform the resolution rule under \emph{each} open branch that includes that sentence. Here is a tree illustrating the issue.
\begin{prooftree}
{
}
[(D \eand \enot R) \eor Q, name={p1}, checked
[\enot Q \eor R, grouped, checked, name={p2}
[D \eand \enot R, just={\eor}:p1, name={and}, checked
[\enot Q, just={\eor}:p2
[D, just={\eand}:and
[\enot R, grouped, open]
]
]
[R, name={R}
[D, just={\eand}:and
[\enot R, grouped, close={:R,!c}]
]
]
]
[Q
[\enot Q, close={:!u,!c}
]
[R, open
]
]
]
]
\end{prooftree}
This tree has two disjunctions in the root, so it will involve branching multiple times. We could start with either one, but this tree resolved line (1) first, branching into the two disjuncts at line (3). The `1 \eor' to the right at that line is the explanation for what is happening there: it indicates that the disjunction rule was applied to line (1). When the second disjunction is processed at line (4), it needs to be done on both branches, since both branches include line (2). That's why our two branches split into four at line (4). One branch closes at that point. At line (5), the conjunction at line (3) is processed. This needs to be done on both branches that include that conjunction, but not the right-most branch, which does not include it. Resolving a sentence affects everything below it in the tree in its own descendent branches, but it doesn't affect branches that are off to the side.
Besides the tree resolution rules, there are two more rules to our proof system.
\section{Branch closure rules}
In the tree examples above, we \emph{closed} branches when they contained sentences along with their negations. Here is our general rule for tree closure:
\factoidbox{
If any branch contains some formula \metaA{}, and also contains \enot\metaA{}, that branch is \define{closed}. Any branch that is not closed is \define{open}. We mark closed branches with the `\ensuremath{\times}' symbol.
A tree is \define{closed} if and only if every branch in that tree is closed. A tree is \define{open} if and only if it is not closed.
}
If every branch is closed, the method proves that the root is unsatisfiable.
Note that `\metaA{}' stands in for \emph{any} sentence of SL; it is not part of the closure rule that one must have an atomic sentence and its negation. Consider for example this tree:
\begin{prooftree}
{
}
[(\enot\enot S \eand T) \eif (\enot P \eiff (Q \eor R)), checked
[\enot\enot S \eand T, grouped, name={p2}
[\enot (\enot P \eiff (Q\eor R)), grouped, name={p3}
[\enot(\enot\enot S \eand T), close={:p2,!c}
]
[(\enot P \eiff (Q \eor R)), close={:p3,!c}
]
]
]
]
\end{prooftree}
In this tree, we only resolved the first sentence, using the conditional rule, and the tree immediately closed. The line numbers under the closure symbols specify exactly why the branches close: the left branch developed into the negation of (2), and the right branch developed into the negation of (3). Notice that we \emph{could} have developed lines (2) and (3), using the conjunction or negated biconditional rules, respectively, but this would have resulted in a more complicated tree that ultimately yielded the same result. Similarly, if we hadn't noticed that both branches close at line (4), we could have continued processing the tree, developing the left branch using the negated conjunction rule on (4), and developing the right branch using the biconditionals rule. But this too would have involved needless complication for no benefit. (Eventually, the tree would have closed with simpler sentences too.) You can save yourself a lot of work by noticing when branches are ready to mark as closed, and by thinking a bit strategically about which sentences to resolve first.
\section{Branch completion rules}
\label{sec.SL.tree.completion}
Here is the final piece of our formal proof system. In our examples above we have used `$\uparrow$' to indicate that an open branch is complete. We need a formal characterization of branch completion.
In SL, a \define{resolvable} wff is a wff for which we have a resolution rule. That is, anything other than an atom or a negated atom.
\factoidbox{
A branch is \define{complete} if and only if either it is closed, or every resolvable sentence in that branch has been resolved.
A tree is complete if and only if every branch in that tree is complete.}
If there is at least one open branch in a completed tree, the tree method indicates that that branch corresponds to an interpretation that satisfies the root.
We use the `$\vdash{}$' symbol to indicate tree closure. `$\metaSetX{}\vdash{} \bot$' means that a tree with root \metaSetX{} closes. We use `$\nvdash{}\bot$' to indicate that a completed tree remains open.
We will also define our single turnstile symbol so that $\metaSetX{}\vdash{} \metaA{}$ is equivalent to $\metaSetX{}, \enot\metaA{}\vdash{}\bot$. So the latter is a way of saying that the tree method shows that the argument from $\metaSetX{}$ to $\metaA{}$ is valid.
This completes the introduction of the formal rules for the SL tree proof system. In the remainder of this chapter, we'll work through some more examples and provide some advice for working through trees more efficiently. In Chapter \ref{ch.SLsoundcomplete} we'll explain why the tree method works, and prove why it's a good one.
\section{Resolution order}
The resolution rules do not specify the order in which you should resolve sentences in a tree; you are free to resolve in any order you like. But some ways of processing trees are more efficient than others. Here is an example to illustrate. Suppose we want to consider whether $\{\enot (C \eand A), D \eiff C, A \eor B, \enot B\}\vdash{}\bot$. So we put those sentences in the root of a tree. Here's what happens if we resolve the sentences in the order in which they're given:
\begin{prooftree}
{
}
[\enot (C \eand A), checked, name=p1
[D \eiff C, checked, grouped, name=p2
[A \eor B, grouped, checked, name=p3
[\enot B, grouped, name=p4
[\enot C, just={\enot\eand}:p1
[D, just={\eiff}:p2
[C, grouped, close]
]
[\enot D
[\enot C, grouped
[A, just={\eor}:p3, open]
[B, close]
]
]
]
[\enot A
[D
[C, grouped
[A, close]
[B, close]
]
]
[\enot D
[\enot C, grouped
[A, close]
[B, close]
]
]
]
]
]
]
]
\end{prooftree}
Again, the justifications for particular steps are added to the right of the tree for clarity. For example, `1\enot\eand{}' indicates that line (5) was produced by performing the negated conjunction resolution rule on line (1). This tree remains open, offering this interpretation that satisfies the root:
\begin{displaymath}
\script{I} =
\left\{
\begin{array}{ll}
A = 1\\
B = 0\\
C = 0\\
D = 0
\end{array}
\right.
\end{displaymath}
This tree reminds us again that if a sentence in a tree has multiple open branches below it, then resolving that sentence requires that the resolution be performed in \emph{each} open branch below. That's why, for example, resolving line (3) at line (8) requires three different new branchings. (We do not resolve under the left-most column, because it is already closed.) So when there are more open branches, resolving sentences requires more work on the tree. Consequently, it is sometimes a good idea to think a bit strategically, and close off parts of the tree earlier, to save yourself some work.
The example above is a perfectly fine instance of a completed tree, and it does yield the correct answer. However, it's possible to get there with less work, by choosing to resolve sentences that will close off branches right away. Here is another way of developing a tree with the same root as in the previous example. This version begins by resolving line (3), because doing so will close off one of its new branches immediately. It then continues to resolve in the most efficient order:
\begin{prooftree}
{
}
[\enot (C \eand A), checked, name=p1
[D \eiff C, checked, grouped, name=p2
[A \eor B, grouped, checked, name=p3
[\enot B, grouped, name=p4
[A, just={\eor:p3}
[\enot C, just={\enot\eand:p1}
[D, just={\eiff:p2}
[C, grouped, close]]
[\enot D
[\enot C, grouped, open]]
]
[\enot A, close]
]
[B, close]
]
]
]
]
\end{prooftree}
This tree gets us to the same result much more quickly, pointing to the same interpretation that satisfies the root. As a general rule of thumb, it's good advice to look a step ahead, and resolve sentences that won't lead to new open branches, before resolving ones that will. For just the same reason, it is usually more efficient to resolve those sentences that have linear rules before those that have branching rules.
\section{Choosing the right root}
\label{sec.sl.treeroots}
Trees are used to indicate whether the root is satisfiable. We test arguments for validity with a tree by putting their premises along with the negation of their conclusions in the root; if the tree remains open, indicating that the set is satisfiable, that means it's possible to satisfy the premises while falsifying the conclusion, which means the argument is invalid. If the tree closes, that suggests the argument is valid.
But trees can be used to evaluate many other kinds of questions, aside from validity. Any question that can be converted into a question about satisfiability can be answered with a tree. For example, if you want to find out whether a set of sentences is consistent, put that set of sentences into the tree. A set of sentences is consistent if and only if there is an interpretation satisfying it, so you can use a tree to find out.
What if you want to find out whether a sentence is a tautology? A tautology is a sentence that is satisfied by \emph{every} interpretation. To test this claim with a tree, we'll attempt to find an interpretation that falsifies it, by putting its \emph{negation} in the root. If this tree remains open, it shows us how to satisfy the negation, which means that the sentence is \emph{not} a tautology. If it closes, then the negation is unsatisfiable, which means the sentence \emph{is} a tautology.
To find out whether a sentence is a contradiction, check and see whether it is possible to satisfy it, by putting it in the root of the tree. If the tree remains open, the sentence is satisfiable, which means it's not a contradiction. If the tree closes, then it is a contradiction.
To use a tree to answer a question, the first step is always to convert the question into a question about whether some sentence, or set of sentences, is satisfiable.
\practiceproblems
If you want additional practice, you can construct trees for any of the SL arguments and entailment claims in the exercises for the previous two chapters.
\solutions
\problempart
\label{pr.sl.treeroot}
To evaluate each of the following claims with a tree, (a) what would you put in the root of the tree?, and (b) if the tree closes, does that show that the claim is true or false?
\begin{earg}
\item $\{P, P \eif Q, Q \eif \enot P\} \vdash{}\bot$
%(a) $\{P, P \eif Q, Q \eif \enot P\}$, (b) true
\item $(P \eif Q) \eiff (Q \eif P)$ is a tautology.
%(a) $\enot((P \eif Q) \eiff (Q \eif P))$, (b) true
\item The following argument is valid:
\begin{ekey}
\item[] $P \eand Q$
\item[] $\enot R \eif \enot Q$
\item[\therefore] $P \eand R$
\end{ekey}
%(a) $\{P \eand Q, \enot R \eif \enot Q, \enot (P \eand R)\}$, (b) true
\item There is no interpretation that satisfies $A \eor B$, $B \eif C$, and $A \eiff C$ without also satisfying $C$.
%(a) $\{A \eor B, B \eif C, A \eiff C, \enot C\}$, (b) true
\item $A \eiff \enot A$ is a contradiction.
%(a) $A \eiff \enot A$, (b) false
\item Every interpretation satisfying $P$, $P \eif Q$, and $\enot Q$ also satisfies $A$.
%(a) $\{P, P \eif Q, \enot Q, \enot A\}$, (b) true
\item There is at least one interpretation that satisfies $P \eif Q$, $\enot P \eor \enot Q$, and $Q \eif P$.
%(a) $\{P \eif Q, \enot P \eor \enot Q, Q \eif P\}$, (b) false.
\end{earg}
\solutions
\problempart
\label{pr.sl.agtree}
Evaluate the argument given on p.\ \pageref{8letterargument} by constructing a tree. If it is invalid, give a model demonstrating it so.
\solutions
\problempart Evaluate each claim from Part \ref{pr.sl.treeroot} by constructing a tree. If applicable, give the interpretation that demonstrates the claim true or false.
\label{tree.examples}
\problempart Recall the discussion of the Sheffer stroke in Chapter \ref{ch.TruthTables}, page \pageref{pr.altConnectives}. That connective, if added to SL, would have this characteristic truth table:
\begin{center}
\begin{tabular}{c|c|c}
\metaA{} & \metaB{} & \metaA{}$|$\metaB{}\\
\hline
1 & 1 & 0\\
1 & 0 & 1\\
0 & 1 & 1\\
0 & 0 & 1
\end{tabular}
\end{center}
What would be appropriate tree resolution rules for the Sheffer stroke, or the negated Sheffer stroke?
| {
"alphanum_fraction": 0.7260548816,
"author": null,
"avg_line_length": 41.7487113402,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "77f220683e08aee0d1616ed2b07676189bf69661",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2022-03-07T16:39:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-06-19T20:30:30.000Z",
"max_forks_repo_head_hexsha": "925bfb510101aa77174d977d2b956fc8088950e6",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "lauragreenstreet/for-all-x",
"max_forks_repo_path": "Latex-Files/forallx-ubc-5-SLtrees.tex",
"max_issues_count": 19,
"max_issues_repo_head_hexsha": "925bfb510101aa77174d977d2b956fc8088950e6",
"max_issues_repo_issues_event_max_datetime": "2021-12-13T23:39:59.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-02-18T21:45:53.000Z",
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "mavaddat/for-all-x",
"max_issues_repo_path": "Latex-Files/forallx-ubc-5-SLtrees.tex",
"max_line_length": 1085,
"max_stars_count": 10,
"max_stars_repo_head_hexsha": "b7cc18e497065e45e54af30c615999941941b23d",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "jonathanichikawa/for-all-x",
"max_stars_repo_path": "Latex-Files/forallx-ubc-5-SLtrees.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-05T00:58:11.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-03-29T14:57:23.000Z",
"num_tokens": 9221,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 32397
} |
# 1. design model - we need input size and output size, forward pass with
#    all the different layers
# 2. construct the loss and optimizer
# 3. training loop
#    a. compute prediction
#    b. do backward pass to get gradients
#    c. update our weights
# slight adjustments to model and cost function.
import torch
import torch.nn as nn
import numpy as np
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# 0. prepare data: binary classification on the breast-cancer dataset
# (30 numeric features, labels 0/1).
bc = datasets.load_breast_cancer()
X, y = bc.data, bc.target

n_samples, n_features = X.shape
# print(n_samples, n_features)

# Hold out 20% for evaluation; fixed seed makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)

# scale features to zero mean / unit variance; the scaler is fitted on the
# training split only, so no test-set statistics leak into training
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# convert the numpy arrays to float32 torch tensors
X_train = torch.from_numpy(X_train.astype(np.float32))
X_test = torch.from_numpy(X_test.astype(np.float32))
y_train = torch.from_numpy(y_train.astype(np.float32))
y_test = torch.from_numpy(y_test.astype(np.float32))

# reshape labels to column vectors (n_samples, 1) to match the model output
y_train = y_train.view(y_train.shape[0], 1)
y_test = y_test.view(y_test.shape[0], 1)
class LogisticRegression(nn.Module):
    """Binary logistic regression: one linear layer followed by a sigmoid.

    The forward pass maps an input batch of shape (N, n_input_features) to
    class-1 probabilities of shape (N, 1).
    """

    def __init__(self, n_input_features):
        super().__init__()
        # Single output unit: the raw score (logit) for the positive class.
        self.linear = nn.Linear(n_input_features, 1)

    def forward(self, x):
        # Squash the linear score into a probability in (0, 1).
        return torch.sigmoid(self.linear(x))
model = LogisticRegression(n_features)

# 2. loss and optimizer
learning_rate = 0.01
criterion = nn.BCELoss()  # binary cross-entropy; expects probabilities in (0, 1)
optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)

# 3. training
num_epochs = 100

# training loop (full-batch gradient descent: the whole training set per step)
for epoch in range(num_epochs):
    # forward pass and loss
    y_predicted = model(X_train)
    loss = criterion(y_predicted, y_train)

    # backward pass
    loss.backward()

    # update our weights
    optimizer.step()

    # empty our gradients, because the backward function here will always add up all gradients to the
    # .grad attribute
    optimizer.zero_grad()

    if (epoch+1) % 10 == 0:
        print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')

# evaluation: disable gradient tracking, since we only run inference here
with torch.no_grad():
    y_predicted = model(X_test)
    y_predicted_cls = y_predicted.round()  # threshold probabilities at 0.5
    acc = y_predicted_cls.eq(y_test).sum() / float(y_test.shape[0])
    # for every prediction that is correct it will add +1
print(f'accuracy = {acc:.4f}') | {
"alphanum_fraction": 0.7164004675,
"author": null,
"avg_line_length": 32.0875,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7e3192a7997d6efb07f15fc0177af2de94a2da1d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6e6c67f9fdd49e21579085ededcb8c266b9508dc",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "LilySu/neural-networks-pytorch",
"max_forks_repo_path": "08_logistic_regression.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6e6c67f9fdd49e21579085ededcb8c266b9508dc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "LilySu/neural-networks-pytorch",
"max_issues_repo_path": "08_logistic_regression.py",
"max_line_length": 101,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6e6c67f9fdd49e21579085ededcb8c266b9508dc",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "LilySu/neural-networks-pytorch",
"max_stars_repo_path": "08_logistic_regression.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 651,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2567
} |
// ------------------------------------ //
#include "GlobalCEFHandler.h"
#include "GUI/GuiCEFApplication.h"
#include "include/cef_app.h"
#include <boost/filesystem.hpp>
#include <iostream>
using namespace Leviathan;
// ------------------------------------ //
//! \brief Performs CEF startup and detects whether this process is a CEF
//! child process.
//! \returns True when this was a child process (returnvalue holds its exit
//! code) and the caller should exit immediately; false when this is the main
//! process (or --nogui was given) and execution should continue.
DLLEXPORT bool Leviathan::GlobalCEFHandler::CEFFirstCheckChildProcess(
    int argcount, char* args[], int& returnvalue,
    std::shared_ptr<CEFApplicationKeeper>& keeper, const std::string& logname
#ifdef _WIN32
#ifdef CEF_ENABLE_SANDBOX
    ,
    CefScopedSandboxInfo& windowssandbox
#endif
    ,
    HINSTANCE hInstance
#endif // _WIN32
)
{
    // Check for no graphics mode //
    for(int i = 0; i < argcount; ++i) {
        if(std::string_view(args[i]).find("--nogui") != std::string_view::npos) {
            // No gui commandline specified //
            std::cout << "Not using CEF because --nogui is specified" << std::endl;
            return false;
        }
    }

    // Run CEF startup //
    keeper = std::make_shared<CEFApplicationKeeper>();

    void* windows_sandbox_info = nullptr;

    // BUGFIX: this previously read "windows_sandbox_info = &sandbox;" where no
    // identifier named "sandbox" exists (the parameter is "windowssandbox" and
    // only exists on Windows). Guard on both macros and pass the pointer CEF
    // expects from a scoped sandbox object.
#if defined(CEF_ENABLE_SANDBOX) && defined(_WIN32)
    windows_sandbox_info = windowssandbox.sandbox_info();
#endif

    // Provide CEF with command-line arguments //
#ifdef _WIN32
    CefMainArgs main_args(hInstance);
#else
    // Make a copy of the args to not have CEF mess with them
    std::vector<char*> cefArgs;
    std::vector<std::string> cefArgData;
    cefArgs.resize(argcount);
    cefArgData.reserve(argcount);

    for(int i = 0; i < argcount; ++i) {
        cefArgData.push_back(args[i]);
        cefArgs[i] = cefArgData.back().data();
    }

    CefMainArgs main_args(cefArgs.size(), cefArgs.data());
    // CefMainArgs main_args(argcount, args);
#endif // _WIN32

    // Callback object //
    keeper->CEFApp = CefRefPtr<GUI::CefApplication>(new GUI::CefApplication());

    // Check are we a sub process //
    int exit_code = CefExecuteProcess(main_args, keeper->CEFApp.get(), windows_sandbox_info);
    if(exit_code >= 0) {
        // This was a sub process //
        returnvalue = exit_code;
        return true;
    }

    // Specify CEF global settings here.
    CefSettings settings;

    // Apparently this "just works" on non-windows platforms
#if !defined(CEF_ENABLE_SANDBOX) && defined(_WIN32)
    settings.no_sandbox = true;
#endif

    // Resolve the data, log and cache paths; failure here means the working
    // directory is wrong or unwritable, which is unrecoverable.
    try {
        CefString(&settings.locales_dir_path) =
            boost::filesystem::canonical("Data/locales").wstring();

        const auto currentCanonical = boost::filesystem::canonical("./").wstring();

        // CefString(&settings.resources_dir_path) = currentCanonical;

        CefString(&settings.log_file) =
            currentCanonical + Convert::Utf8ToUtf16("/" + logname + "LogCEF.txt");

        const auto cachePath = currentCanonical + L"/Data/Cache/CEF";
        boost::filesystem::create_directories(cachePath);

        CefString(&settings.cache_path) = cachePath;

    } catch(const boost::filesystem::filesystem_error& e) {
        // Write a crash report file before aborting, since logging may not be
        // set up this early in startup.
        std::stringstream msg;
        msg << "Error missing file or accessing cache location: " << e.what() << "\n";

        std::ofstream write(std::string("Leviathan_start_failure_") +
#ifdef __linux
                            std::to_string(::getpid()) +
#endif //__linux
                            ".txt");
        write << msg.str();
        write << "Args are (" << argcount << ")" << std::endl;

        for(int i = 0; i < argcount; ++i)
            write << args[i] << std::endl;

        write << std::endl;
        write.close();

        std::cout << msg.str();
        abort();
    }

    // TODO: log_severity
    // settings.log_severity = cef_log_severity_t::LOGSEVERITY_DEBUG;

    // TODO: user agent

    settings.windowless_rendering_enabled = true;
    settings.external_message_pump = true;

    // Apparently this is missing from the windows version but not the linux version. For some
    // reason?
    // settings.single_process = false;

    // // Enable remote debugging
    // settings.remote_debugging_port = 9090;

    // Only works on windows
    // And the OnPaint assumes it is on the main thread so this doesn't work at all
    settings.multi_threaded_message_loop = false;

    // Initialize CEF.
    CefInitialize(main_args, settings, keeper->CEFApp.get(), windows_sandbox_info);

    CEFInitialized = true;

    AccessToThese = keeper.get();

    // Wasn't a sub process //
    return false;
}
// ------------------------------------ //
//! \brief Final CEF teardown; must be the last CEF-related call in the program.
DLLEXPORT void Leviathan::GlobalCEFHandler::CEFLastThingInProgram()
{
    // Nothing to shut down when CEF was never started (e.g. --nogui mode) //
    if(CEFInitialized) {
        CefShutdown();
    }

    // The automatic templates remove the need for this message, which won't be logged anyway
    // std::cout << "CEF shutdown called" << std::endl;
}
// ------------------------------------ //
//! \brief Runs a single iteration of the CEF message loop.
DLLEXPORT void Leviathan::GlobalCEFHandler::DoCEFMessageLoopWork()
{
    if(CEFInitialized) {
        CefDoMessageLoopWork();
    } else {
        LOG_ERROR("DoCEFMessageLoopWork called before CEF is initialized");
    }
}
// ------------------------------------ //
//! \brief Returns the keeper holding the global CEF application objects.
//! \note Null until CEFFirstCheckChildProcess has successfully initialized CEF.
DLLEXPORT CEFApplicationKeeper* Leviathan::GlobalCEFHandler::GetCEFObjects()
{
    return AccessToThese;
}
//! \brief Registers a custom JavaScript query handler and notifies every
//! existing LeviathanJavaScriptAsync bridge about it.
DLLEXPORT void Leviathan::GlobalCEFHandler::RegisterCustomJavaScriptQueryHandler(
    std::shared_ptr<GUI::JSAsyncCustom> ptr)
{
    // Serialize access to the handler lists; recursive mutex because the
    // notification below may re-enter handler registration on this thread
    std::unique_lock<std::recursive_mutex> guard(JSCustomMutex);

    // Add it //
    CustomJSHandlers.push_back(ptr);

    // Notify all //
    for(size_t i = 0; i < JSAsynToNotify.size(); i++) {
        // GUARD_LOCK_OTHER_NAME locks the listener object into "guard2"
        GUARD_LOCK_OTHER_NAME(JSAsynToNotify[i], guard2);
        JSAsynToNotify[i]->RegisterNewCustom(guard2, ptr);
    }

    // Things created after this will automatically retrieve the ones that are registered
    // before it is created
}
//! \brief Removes a previously registered custom JavaScript query handler.
DLLEXPORT void Leviathan::GlobalCEFHandler::UnRegisterCustomJavaScriptQueryHandler(
    GUI::JSAsyncCustom* toremove)
{
    std::unique_lock<std::recursive_mutex> guard(JSCustomMutex);

    // First let every async bridge drop its reference //
    for(auto* listener : JSAsynToNotify) {
        listener->UnregisterCustom(toremove);
    }

    // Then drop our owning pointer; matched by raw address //
    for(auto iter = CustomJSHandlers.begin(); iter != CustomJSHandlers.end(); ++iter) {
        if(iter->get() == toremove) {
            CustomJSHandlers.erase(iter);
            return;
        }
    }
}
//! \brief Returns the list of registered custom JS query handlers.
//! \note Returns a reference to data guarded elsewhere by JSCustomMutex;
//! presumably callers read it only from a safe context — verify.
DLLEXPORT const std::vector<std::shared_ptr<GUI::JSAsyncCustom>>&
    Leviathan::GlobalCEFHandler::GetRegisteredCustomHandlers()
{
    return CustomJSHandlers;
}
//! \brief Registers a LeviathanJavaScriptAsync bridge to be notified when new
//! custom handlers are added. Pair with UnRegisterJSAsync before destruction.
DLLEXPORT void Leviathan::GlobalCEFHandler::RegisterJSAsync(GUI::LeviathanJavaScriptAsync* ptr)
{
    std::unique_lock<std::recursive_mutex> guard(JSCustomMutex);

    JSAsynToNotify.push_back(ptr);
}
//! \brief Removes a LeviathanJavaScriptAsync bridge from the notification list.
DLLEXPORT void Leviathan::GlobalCEFHandler::UnRegisterJSAsync(
    GUI::LeviathanJavaScriptAsync* ptr)
{
    std::unique_lock<std::recursive_mutex> guard(JSCustomMutex);

    // Find the matching pointer and remove only the first occurrence //
    for(auto iter = JSAsynToNotify.begin(); iter != JSAsynToNotify.end(); ++iter) {
        if(*iter == ptr) {
            JSAsynToNotify.erase(iter);
            return;
        }
    }
}
// Static storage for GlobalCEFHandler //
// Guards JSAsynToNotify and CustomJSHandlers
std::recursive_mutex Leviathan::GlobalCEFHandler::JSCustomMutex;
// Non-owning pointers to live async bridges to notify about new handlers
std::vector<GUI::LeviathanJavaScriptAsync*> Leviathan::GlobalCEFHandler::JSAsynToNotify;
// Owning list of registered custom JS query handlers
std::vector<std::shared_ptr<GUI::JSAsyncCustom>> Leviathan::GlobalCEFHandler::CustomJSHandlers;
// Set by CEFFirstCheckChildProcess after a successful CefInitialize
CEFApplicationKeeper* Leviathan::GlobalCEFHandler::AccessToThese = NULL;
bool Leviathan::GlobalCEFHandler::CEFInitialized = false;
// ------------------------------------ //
//! \brief Registers a custom extension with the CEF application object.
//! NOTE(review): AccessToThese is dereferenced unchecked here — calling this
//! before CEF initialization (or in --nogui mode, where it stays null) would
//! crash; confirm callers guarantee the initialization order.
DLLEXPORT void GlobalCEFHandler::RegisterCustomExtension(
    std::shared_ptr<GUI::CustomExtension> extension)
{
    CustomExtensions.push_back(extension);

    AccessToThese->CEFApp->RegisterCustomExtension(extension);
}
//! \brief Offers an inter-process message to each custom extension's handler.
//! \returns True once some handler reports the message as consumed.
DLLEXPORT bool GlobalCEFHandler::HandleCustomExtensionProcessMessage(
    CefRefPtr<CefBrowser> browser, CefProcessId source_process,
    CefRefPtr<CefProcessMessage> message)
{
    // Pass to the extensions until it is handled //
    for(const auto& extension : CustomExtensions) {
        if(!extension->MessageHandler)
            continue;

        if(extension->MessageHandler->OnProcessMessageReceived(
               browser, source_process, message))
            return true;
    }

    return false;
}
std::vector<std::shared_ptr<GUI::CustomExtension>> GlobalCEFHandler::CustomExtensions;
// ------------------ CEFApplicationKeeper ------------------ //
// Nothing to set up or tear down: CEFApp (a CefRefPtr) manages its own lifetime.
DLLEXPORT Leviathan::CEFApplicationKeeper::CEFApplicationKeeper() {}

DLLEXPORT Leviathan::CEFApplicationKeeper::~CEFApplicationKeeper() {}
// ------------------------------------ //
//! \brief Returns the CEF application callback object held by this keeper.
DLLEXPORT CefRefPtr<GUI::CefApplication> Leviathan::CEFApplicationKeeper::GetCEFApp() const
{
    return CEFApp;
}
| {
"alphanum_fraction": 0.6494893222,
"author": null,
"avg_line_length": 29.206779661,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "7d4e64669eb88e497938eb4f4c0ec6acb5685edd",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 14,
"max_forks_repo_forks_event_max_datetime": "2021-09-11T03:12:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-04-09T02:26:15.000Z",
"max_forks_repo_head_hexsha": "0a0d2ea004a153f9b17c6230da029e8160716f71",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "hhyyrylainen/Leviathan",
"max_forks_repo_path": "Engine/GlobalCEFHandler.cpp",
"max_issues_count": 46,
"max_issues_repo_head_hexsha": "0a0d2ea004a153f9b17c6230da029e8160716f71",
"max_issues_repo_issues_event_max_datetime": "2019-12-14T11:16:04.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-04-02T11:06:01.000Z",
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "hhyyrylainen/Leviathan",
"max_issues_repo_path": "Engine/GlobalCEFHandler.cpp",
"max_line_length": 95,
"max_stars_count": 16,
"max_stars_repo_head_hexsha": "0a0d2ea004a153f9b17c6230da029e8160716f71",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "hhyyrylainen/Leviathan",
"max_stars_repo_path": "Engine/GlobalCEFHandler.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-03-09T20:38:59.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-12-22T02:09:05.000Z",
"num_tokens": 2082,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 8616
} |
/*
learn.c
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_multifit.h>
#include "learn.h"
#include "feature.h"
#include "imatrix.h"
#include "dmatrix.h"
#include "util.h"
#include "likelihood.h"
#include "hyper.h"
/*
 * Collapsed Gibbs sampling for multi-variate supervised LDA.
 *
 * data        terminated by a sentinel document with len == -1
 * resp        [ndocs][nresp] observed response values
 * alpha/beta  Dirichlet hyperparameters (updated in place / returned via files)
 * nclass      number of latent topics, nlex vocabulary size
 * phi/theta   output topic-word and document-topic distributions
 * eta         [nclass][nresp] regression coefficients, re-fit each iteration
 * n_mz/n_zw   caller-allocated count matrices, filled here
 *
 * NOTE(review): nu2 is forwarded to loglikelihood() only, and dlenmax is
 * unused here — confirm against the header's contract.
 * NOTE(review): the early returns on allocation failure leak the buffers
 * allocated before the failure; acceptable only if callers treat any
 * failure as fatal.
 */
void mvslda_learn(document *data, double **resp, double *alpha, double beta, double nu2, double sigma2,
int nclass, int nlex, int dlenmax, int nresp, int maxiter, double **phi, double **theta, double **eta,
int **n_mz, int **n_zw, FILE *likp, FILE *hyperp, unsigned long int random_seed){
    document *dp;
    int ndocs;
    int *n_m;
    int *n_z;
    int ***topics;
    int word_index;
    int word_num;
    double sum_alpha;
    double *left;
    double *center;
    double *log_right;
    double *p_z;
    double *log_p_z;
    double *cum_sum_p_z;
    double log_Z, sum_empirical_z;
    double temp_prediction;
    double lik;
    double **temp_phi;
    double **temp_theta;
    double **empirical_z;
    int z;
    int it;
    int m, w, t, i, k;
    const gsl_rng_type *T;
    gsl_rng *r;

    gsl_rng_env_setup();
    T = gsl_rng_default;
    r = gsl_rng_alloc(T);

    // count data length
    for(dp = data, ndocs = 0;(dp->len) != -1;dp++, ndocs++)
        ;

    // initialize buffers
    if((n_m = calloc(ndocs,sizeof(int))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate n_m.\n");
        return;
    }
    if((n_z = calloc(nclass,sizeof(int))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate n_z.\n");
        return;
    }
    if((left = calloc(nclass,sizeof(double))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate left.\n");
        return;
    }
    if((center = calloc(nclass,sizeof(double))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate center.\n");
        return;
    }
    if((log_right = calloc(nclass,sizeof(double))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate log_cright.\n");
        return;
    }
    if((p_z = calloc(nclass,sizeof(double))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate p_z.\n");
        return;
    }
    if((log_p_z = calloc(nclass,sizeof(double))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate log_p_z.\n");
        return;
    }
    if((cum_sum_p_z = calloc((nclass+1),sizeof(double))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate cum_sum_p_z.\n");
        return;
    }
    if((topics = calloc(ndocs,sizeof(int **))) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate topics.\n");
        return;
    }
    if((empirical_z = dmatrix(ndocs, nclass)) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate empirical_z.\n");
        return;
    }
    if((temp_phi = dmatrix(nlex, nclass)) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate temp_phi.\n");
        exit(1);
    }
    if((temp_theta = dmatrix(ndocs, nclass)) == NULL){
        fprintf(stderr,"mvslda_learn:: cannot allocate temp_theta.\n");
        exit(1);
    }

    printf("Number of documents          = %d\n",ndocs);
    printf("Number of unique words       = %d\n",nlex);
    printf("Number of latent classes     = %d\n",nclass);
    printf("Number of responses          = %d\n",nresp);
    printf("Number of iteration          = %d\n",maxiter);

    // choose an arbitrary topic as first topic for word
    gsl_rng_set(r, random_seed);
    for(dp = data, m = 0;(dp->len) != -1;dp++, m++){
        if((topics[m] = calloc((dp->len), sizeof(int *))) == NULL){
            fprintf(stderr,"mvslda_learn:: cannot allocate topics[m].\n");
            return;
        }
        for(w = 0;w < (dp->len);w++){
            if((topics[m][w] = calloc((dp->cnt[w]), sizeof(int))) == NULL){
                fprintf(stderr,"mvslda_learn:: cannot allocate topics[m][w].\n");
                return;
            }
            word_index = dp->id[w];
            word_num = dp->cnt[w];
            for(i = 0;i < word_num;i++){
                z = (int)gsl_rng_uniform_int(r, nclass);
                n_mz[m][z] += 1;
                n_m[m] += 1;
                n_zw[z][word_index] += 1;
                n_z[z] += 1;
                topics[m][w][i] = z;
            }
        }
    }

    // initialize eta ([nclass, nresp])
    for(k = 0;k < nclass;k++)
        for(t = 0;t < nresp;t++)
            eta[k][t] = 0.0;

    // learning main
    for(it = 0;it < maxiter;it++){
        printf("iteration %2d/%3d..\n", it + 1, maxiter);
        fflush(stdout);
        sum_alpha = 0.0;
        for(k = 0;k < nclass;k++)
            sum_alpha += alpha[k];
        for (dp = data, m = 0; (dp->len) != -1; dp++, m++){
            // for words: resample each token's topic from its full conditional
            for(w = 0;w < (dp->len);w++){
                word_index = dp->id[w];
                word_num = dp->cnt[w];
                for(i = 0;i < word_num;i++){
                    // remove the current token's assignment from all counts
                    z = topics[m][w][i];
                    n_mz[m][z] -= 1;
                    n_m[m] -= 1;
                    n_zw[z][word_index] -= 1;
                    n_z[z] -= 1;
                    // compute conditional distribution log_p_z
                    // log_p_z left ... theta term
                    for(k = 0;k < nclass;k++){
                        left[k] = (double)n_mz[m][k] + alpha[k];
                        left[k] /= ((double)n_m[m] + sum_alpha);
                    }
                    // log_p_z center ... phi term
                    for(k = 0;k < nclass;k++){
                        center[k] = (double)n_zw[k][word_index] + beta;
                        center[k] /= ((double)n_z[k] + (double)nlex * beta);
                    }
                    // temporal log_p_z (left and center)
                    for(k = 0; k < nclass;k++){
                        log_p_z[k] = log(left[k]) + log(center[k]);
                    }
                    // p_z right ... eta term (response likelihood contribution)
                    sum_empirical_z = 0.0;
                    for(k = 0; k < nclass;k++){
                        empirical_z[m][k] = (double)n_mz[m][k];
                        sum_empirical_z += (double)n_mz[m][k];
                    }
                    for(k = 0; k < nclass;k++){
                        empirical_z[m][k] = empirical_z[m][k] / sum_empirical_z;
                    }
                    for(t = 0; t < nresp; t++){
                        temp_prediction = 0.0;
                        for(k = 0;k < nclass;k++){
                            temp_prediction += eta[k][t] * empirical_z[m][k]; // dot(eta, z_d)
                        }
                        for(k = 0;k < nclass;k++){
                            log_right[k] = 1.0;
                            log_right[k] *= 1.0 / (2 * sigma2);
                            log_right[k] *= (eta[k][t] / (double)n_m[m]);
                            log_right[k] *= (2 * (resp[m][t] - temp_prediction) - (eta[k][t] / (double)n_m[m]));
                            log_p_z[k] += log_right[k];
                        }
                    }
                    // conditional distribution log_p_z
                    // log_Z = logsumexp(logP_k1 + logP_k2 + ... logP_kK)
                    log_Z = logsumexp(log_p_z, nclass);
                    for(k = 0;k < nclass;k++){
                        p_z[k] = exp(log_p_z[k] - log_Z); // normalize to obtain probabilities
                    }
                    // random sampling from p_z
                    z = sampling_multinomial(r, p_z, cum_sum_p_z, nclass);
                    // update buffers with the newly sampled topic
                    n_mz[m][z] += 1;
                    n_m[m] += 1;
                    n_zw[z][word_index] += 1;
                    n_z[z] += 1;
                    topics[m][w][i] = z;
                }
            }
        }
        // for eta update
        //least squares for dot(Z, eta) = resp
        for(m = 0; m < ndocs; m++){
            sum_empirical_z = 0.0;
            for(k = 0; k < nclass;k++){
                empirical_z[m][k] = (double)n_mz[m][k];
                sum_empirical_z += (double)n_mz[m][k];
            }
            for(k = 0; k < nclass;k++){
                empirical_z[m][k] = empirical_z[m][k] / sum_empirical_z;
            }
        }
        for(t = 0; t < nresp; t++){
            // one independent ordinary-least-squares fit per response dimension
            double chisq;
            gsl_matrix *Z, *cov;
            gsl_vector *y, *c;
            gsl_multifit_linear_workspace * work = gsl_multifit_linear_alloc (ndocs, nclass);
            Z = gsl_matrix_alloc(ndocs, nclass);
            cov = gsl_matrix_alloc(nclass, nclass);
            y = gsl_vector_alloc(ndocs);
            c = gsl_vector_alloc(nclass);
            for(m = 0;m < ndocs;m++){
                gsl_vector_set(y, m, resp[m][t]);
                for(k = 0;k < nclass;k++){
                    gsl_matrix_set(Z, m, k, empirical_z[m][k]);
                }
            }
            gsl_multifit_linear(Z, y, c, cov, &chisq, work);
            for(k = 0;k < nclass;k++){
                eta[k][t] = gsl_vector_get(c, k);
            }
            gsl_multifit_linear_free(work);
            gsl_matrix_free(Z);
            gsl_matrix_free(cov);
            gsl_vector_free(y);
            gsl_vector_free(c);
        }
        // update hyperparameters.
        update_alpha(alpha, n_m, n_mz, ndocs, nclass);
        beta = update_beta(beta, n_z, n_zw, nclass, nlex);
        // compute likelihood.
        lik = loglikelihood(n_mz, n_zw, n_m, nclass, nlex, ndocs, nresp, resp, alpha, beta, eta, empirical_z, nu2, sigma2);
        printf("\tlikelihood ... %.8f\n",lik);
        printf("\talpha = \n\t");
        for(k = 0;k < nclass;k++)
            printf("%.8f ",alpha[k]);
        printf("\n\tbeta ... %.2f\n",beta);
        fprintf(likp,"%.8f\n",lik);
        for(k = 0;k < nclass;k++)
            fprintf(hyperp,"%.8f,",alpha[k]);
        fprintf(hyperp,"%.8f\n",beta);
    }

    // compute matrix phi ([nlex, nclass] matrix)
    for(w = 0;w < nlex;w++)
        for(k = 0;k < nclass;k++)
            temp_phi[w][k] = (double)n_zw[k][w] + beta;
    normalize_matrix_col(phi, temp_phi, nlex, nclass);

    // compute matrix theta ([ndocs, nclass])
    for(m = 0;m < ndocs;m++)
        for(k = 0;k < nclass;k++)
            temp_theta[m][k] = (double)n_mz[m][k] + alpha[k];
    normalize_matrix_row(theta, temp_theta, ndocs, nclass);

    free(n_m);
    free(n_z);
    free(left);
    free(center);
    free(log_right);
    free(p_z);
    free(log_p_z);
    free(cum_sum_p_z);
    for(dp = data, m = 0;(dp->len) != -1;dp++, m++){
        for(w = 0;w < (dp->len);w++){
            free(topics[m][w]);
        }
        free(topics[m]);
    }
    free(topics);
    free_dmatrix(temp_phi, nlex);
    free_dmatrix(temp_theta, ndocs);
    free_dmatrix(empirical_z, ndocs);
    // BUGFIX: the RNG allocated by gsl_rng_alloc() was never released (leak)
    gsl_rng_free(r);

    return;
}
/*
 * Draws one sample from the discrete distribution p[0..len_p-1].
 *
 * cum_sum_p is caller-provided scratch of length len_p+1; it is overwritten
 * with the cumulative sums of p. Assumes p sums to (approximately) 1.
 * Returns the sampled index in [0, len_p).
 */
int sampling_multinomial(gsl_rng *r, double *p, double *cum_sum_p, int len_p){
    int k;
    /* BUGFIX: z was uninitialized; if floating-point rounding makes the
     * cumulative total slightly less than 1 and the uniform draw lands in
     * the gap, no interval matches and garbage was returned. Default to the
     * last category, which is where the missing mass belongs. */
    int z = len_p - 1;
    double sampling;

    cum_sum_p[0] = 0.0;
    for(k = 0;k < len_p;k++){
        cum_sum_p[k+1] = cum_sum_p[k] + p[k];
    }
    sampling = gsl_rng_uniform(r);
    for(k = 0;k < len_p;k++){
        if((sampling >= cum_sum_p[k]) && (sampling < cum_sum_p[k+1])){
            z = k;
            break;
        }
    }
    return z;
}
| {
"alphanum_fraction": 0.4718234981,
"author": null,
"avg_line_length": 35.1588785047,
"converted": null,
"ext": "c",
"file": null,
"hexsha": "2871dce7add65922b146db4426992c6b6cc23990",
"include": null,
"lang": "C",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b3db46a01a5561b92c64c7662571f26a6aa9eb6d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "khigashi1987/mvsLDA",
"max_forks_repo_path": "src/learn.c",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b3db46a01a5561b92c64c7662571f26a6aa9eb6d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "khigashi1987/mvsLDA",
"max_issues_repo_path": "src/learn.c",
"max_line_length": 123,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b3db46a01a5561b92c64c7662571f26a6aa9eb6d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "khigashi1987/mvsLDA",
"max_stars_repo_path": "src/learn.c",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3051,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 11286
} |
import numpy as np
from flexx import flx
from bokeh.plotting import figure
# import trainer.lib as lib
# Sample data: sine and cosine curves over [0, 6], intended for the
# (currently commented-out) Bokeh widgets inside DebuggerGui.
x = np.linspace(0, 6, 50)
p1 = figure()
p1.line(x, np.sin(x))
p2 = figure()
p2.line(x, np.cos(x))
class DebuggerGui(flx.PyWidget):
    """Two-column debugger GUI.

    Left column: three buttons, a progress bar and a placeholder label.
    Right column: a scrolling label (presumably the log output area —
    TODO confirm against callers).
    """
    # def __init__(self):
    #     # self.bs: List[Optional[flx.Button]] = [None, None, None]
    #     super().__init__()
    def init(self):
        # Layout: one HBox holding two VBox columns; flex values control
        # how extra space is distributed among the widgets.
        with flx.HBox():
            with flx.VBox():
                self.bs0 = flx.Button(text='Button1', flex=0)
                self.bs1 = flx.Button(text='Button2', flex=1)
                self.bs2 = flx.Button(text='Button3', flex=2)
                self.prog = flx.ProgressBar(flex=1, value=0.1, text='{percent} done')
                self.lbl_placeholder = flx.Label(flex=1, style='overflow-y: scroll;')
            with flx.VBox():
                self.lbl = flx.Label(flex=1, style='overflow-y: scroll;')
                # flx.BokehWidget.from_plot(p1)
                # flx.BokehWidget.from_plot(p2)
if __name__ == '__main__':
    # The original had two consecutive identical __main__ guards; they are
    # merged here. Behavior is unchanged: export a static HTML page, then
    # launch the app in a browser and enter the flexx event loop.
    logger = flx.App(DebuggerGui)
    logger.export('logger.html', link=0)
    logger.launch('browser')
    flx.run()
| {
"alphanum_fraction": 0.5714285714,
"author": null,
"avg_line_length": 28.1666666667,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "253d3442431af12ef9042328aea6361aaebe7f45",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "54bca3252e194c054bdd3af2b94d6dde940a2a86",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Telcrome/ai-trainer",
"max_forks_repo_path": "trainer/flxgui/main.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "54bca3252e194c054bdd3af2b94d6dde940a2a86",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Telcrome/ai-trainer",
"max_issues_repo_path": "trainer/flxgui/main.py",
"max_line_length": 85,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "54bca3252e194c054bdd3af2b94d6dde940a2a86",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Telcrome/ai-trainer",
"max_stars_repo_path": "trainer/flxgui/main.py",
"max_stars_repo_stars_event_max_datetime": "2021-05-05T12:57:42.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-05-05T12:57:42.000Z",
"num_tokens": 333,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1183
} |
# Morphological Transformations
# using HSV (hue, saturation, value)
import cv2
import numpy as np
cap = cv2.VideoCapture(0)  # select the first camera in the system
while True:
    _ , frame = cap.read()
    hsv = cv2.cvtColor(frame , cv2.COLOR_BGR2HSV)
    # Keep only the pixels whose HSV values fall inside the target range,
    # i.e. detect one specific color in the video stream.
    lower_red = np.array([0,153,0])
    upper_red = np.array([153,200,255])
    mask = cv2.inRange(hsv , lower_red , upper_red)  # binary mask of the selected color (noisy)
    result = cv2.bitwise_and(frame , frame , mask =mask )
    # 5x5 structuring element for the morphological operations.
    # Fix: the original built it as np.ones((5,5), np.int8)/255 (a float
    # array of ~0.004, copied from an averaging-kernel recipe) and the
    # comment claimed 15x15. Morphology kernels are conventionally uint8
    # ones; any nonzero entry counts as part of the element, so the
    # operations behave the same but the intent is now explicit.
    kernel = np.ones((5,5) , np.uint8)
    erosion = cv2.erode(mask , kernel,iterations =1)
    dilation = cv2.dilate(mask , kernel , iterations = 1)
    opening = cv2.morphologyEx(mask , cv2.MORPH_OPEN ,kernel)   # erosion then dilation: removes small specks
    closing = cv2.morphologyEx(mask , cv2.MORPH_CLOSE ,kernel)  # dilation then erosion: fills small holes
    cv2.imshow ( 'frame ' , frame )
    cv2.imshow('mask', mask)
    cv2.imshow('result',result)
    cv2.imshow('erosion',erosion)
    cv2.imshow('dilation',dilation)
    cv2.imshow('opening',opening)
    cv2.imshow('closing',closing)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # ESC quits
        break
cv2.destroyAllWindows()
cap.release()
| {
"alphanum_fraction": 0.6218978102,
"author": null,
"avg_line_length": 24.9090909091,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "20a1ae411c6f1737458faf90c2204030ed3bde31",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d1848838ff1acd6dfcf551b99380a8bbf9c879fa",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "priyanshgupta1998/Image_Processing",
"max_forks_repo_path": "Opencv1/prayog21.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d1848838ff1acd6dfcf551b99380a8bbf9c879fa",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "priyanshgupta1998/Image_Processing",
"max_issues_repo_path": "Opencv1/prayog21.py",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d1848838ff1acd6dfcf551b99380a8bbf9c879fa",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "priyanshgupta1998/Image_Processing",
"max_stars_repo_path": "Opencv1/prayog21.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 387,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1370
} |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
# CIFAR-10 class names, indexed by integer label id.
classes = ("plane", "car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck")
def _get_transform():
    """Return the CIFAR-10 preprocessing pipeline: to-tensor + per-channel normalize."""
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    return torchvision.transforms.Compose([transforms.ToTensor(), normalize])
def train_data_loader():
    """Return a shuffled DataLoader over the CIFAR-10 training split (data expected under ./data)."""
    dataset = torchvision.datasets.CIFAR10(
        root="./data", train=True, download=False, transform=_get_transform()
    )
    return torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
def test_data_loader():
    """Return an unshuffled DataLoader over the CIFAR-10 test split (data expected under ./data)."""
    dataset = torchvision.datasets.CIFAR10(
        root="./data", train=False, download=False, transform=_get_transform()
    )
    return torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False, num_workers=2)
def show_img(img):
    """Display a normalized CHW image tensor with matplotlib."""
    # Invert Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), then move the
    # channel axis last (CHW -> HWC) as imshow expects.
    unnormalized = img / 2 + 0.5
    plt.imshow(np.transpose(unnormalized.numpy(), (1, 2, 0)))
| {
"alphanum_fraction": 0.7050272562,
"author": null,
"avg_line_length": 33.693877551,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "fc641f8099abea1399f4bf96c532ca0d2f7dd76d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2052,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T23:02:51.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-09-30T22:11:46.000Z",
"max_forks_repo_head_hexsha": "c5ddecce1f739a345465b9a38b064983a129141d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "jerrypeng7773/amazon-sagemaker-examples",
"max_forks_repo_path": "aws_sagemaker_studio/frameworks/pytorch_cnn_cifar10/cifar_utils.py",
"max_issues_count": 1959,
"max_issues_repo_head_hexsha": "c5ddecce1f739a345465b9a38b064983a129141d",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T23:58:37.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-09-30T20:22:42.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "jerrypeng7773/amazon-sagemaker-examples",
"max_issues_repo_path": "aws_sagemaker_studio/frameworks/pytorch_cnn_cifar10/cifar_utils.py",
"max_line_length": 91,
"max_stars_count": 2610,
"max_stars_repo_head_hexsha": "c5ddecce1f739a345465b9a38b064983a129141d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "jerrypeng7773/amazon-sagemaker-examples",
"max_stars_repo_path": "aws_sagemaker_studio/frameworks/pytorch_cnn_cifar10/cifar_utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T18:02:31.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-01T14:14:53.000Z",
"num_tokens": 419,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1651
} |
from collections import OrderedDict
import numpy as np
from gym.spaces import Box, Dict
from multiworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_path
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
from multiworld.envs.mujoco.cameras import sawyer_pick_and_place_camera
class SawyerPickAndPlaceEnv(MultitaskEnv, SawyerXYZEnv):
    """Sawyer pick-and-place environment supporting multiple objects.

    Observations, achieved goals and desired goals all share one flat
    layout: [hand_xyz, obj0_xyz, obj1_xyz, ...] (3 + 3 * num_objects
    entries).
    """
    def __init__(
        self,
        obj_low=None,
        obj_high=None,
        reward_type='hand_and_obj_distance',
        indicator_threshold=0.06,
        obj_init_pos=(0, 0.65, 0.03),
        fix_goal=False,
        fixed_goal=(0.15, 0.6, 0.055, -0.15, 0.6),
        goal_low=None,
        goal_high=None,
        hide_goal_markers=False,
        hide_arm=False,
        num_objects=1,
        **kwargs
    ):
        self.quick_init(locals())
        MultitaskEnv.__init__(self)
        # hide_arm must be set before SawyerXYZEnv.__init__ runs, because
        # the model_name property used just below reads it.
        self.hide_arm = hide_arm
        SawyerXYZEnv.__init__(
            self,
            model_name=self.model_name,
            **kwargs
        )
        self.num_objects = num_objects
        # Object and goal bounds default to the hand's workspace.
        if obj_low is None:
            obj_low = self.hand_low
        if obj_high is None:
            obj_high = self.hand_high
        if goal_low is None:
            goal_low = np.hstack((self.hand_low, np.tile(obj_low, num_objects)))
        if goal_high is None:
            goal_high = np.hstack((self.hand_high, np.tile(obj_high, num_objects)))
        self.reward_type = reward_type
        self.indicator_threshold = indicator_threshold
        self.obj_init_pos = np.array(obj_init_pos)
        self.fix_goal = fix_goal
        self.fixed_goal = np.array(fixed_goal)
        self._state_goal = None
        self.hide_goal_markers = hide_goal_markers
        # Action: xyz hand motion + gripper command.
        self.action_space = Box(
            np.array([-1, -1, -1, -1]),
            np.array([1, 1, 1, 1]),
        )
        self.hand_and_obj_space = Box(
            np.hstack((self.hand_low, np.tile(obj_low, num_objects))),
            np.hstack((self.hand_high, np.tile(obj_high, num_objects))),
        )
        self.observation_space = Dict([
            ('observation', self.hand_and_obj_space),
            ('desired_goal', self.hand_and_obj_space),
            ('achieved_goal', self.hand_and_obj_space),
            ('state_observation', self.hand_and_obj_space),
            ('state_desired_goal', self.hand_and_obj_space),
            ('state_achieved_goal', self.hand_and_obj_space),
        ])

    @property
    def model_name(self):
        """Path to the MuJoCo XML, depending on whether the arm is hidden."""
        if self.hide_arm:
            print('hiding')
            return get_asset_full_path('sawyer_xyz/sawyer_pick_and_place_hidden_arm.xml')
        print('not hiding')
        return get_asset_full_path('sawyer_xyz/sawyer_pick_and_place.xml')

    def viewer_setup(self):
        """Default camera placement for rendering."""
        self.viewer.cam.trackbodyid = 0
        self.viewer.cam.lookat[0] = 0
        self.viewer.cam.lookat[1] = 1.0
        self.viewer.cam.lookat[2] = 0.5
        self.viewer.cam.distance = 0.3
        self.viewer.cam.elevation = -45
        self.viewer.cam.azimuth = 270
        self.viewer.cam.trackbodyid = -1

    def step(self, action):
        """Apply action = [dx, dy, dz, gripper] and return (ob, reward, done, info)."""
        self.set_xyz_action(action[:3])
        self.do_simulation(action[3:])
        # The marker seems to get reset every time you do a simulation.
        self._set_goal_marker(self._state_goal)
        ob = self._get_obs()
        reward = self.compute_reward(action, ob)
        info = self._get_info()
        done = False
        return ob, reward, done, info

    def _get_obs(self):
        """Flat observation dict: [hand_xyz, obj0_xyz, ...] in every slot."""
        e = self.get_endeff_pos()
        b = np.concatenate(self.get_object_positions())
        flat_obs = np.concatenate((e, b))
        return dict(
            observation=flat_obs,
            desired_goal=self._state_goal,
            achieved_goal=flat_obs,
            state_observation=flat_obs,
            state_desired_goal=self._state_goal,
            state_achieved_goal=flat_obs,
        )

    def _get_info(self):
        """Distances to the goal and success indicators for diagnostics."""
        hand_goal = self._state_goal[:3]
        obj_goal = self._state_goal[3:]
        hand_distance = np.linalg.norm(hand_goal - self.get_endeff_pos())
        # Distance over the concatenated positions of all objects.
        obj_distance = np.linalg.norm(obj_goal - np.concatenate(self.get_object_positions()))
        return dict(
            hand_distance=hand_distance,
            obj_distance=obj_distance,
            hand_and_obj_distance=hand_distance + obj_distance,
            hand_success=float(hand_distance < self.indicator_threshold),
            obj_success=float(obj_distance < self.indicator_threshold),
            hand_and_obj_success=float(
                hand_distance + obj_distance < self.indicator_threshold
            ),
        )

    def get_obj_pos(self):
        """Position of the body named 'obj' (legacy single-object accessor)."""
        return self.data.get_body_xpos('obj').copy()

    def get_object_positions(self):
        """Positions of bodies 'obj0'..'objN-1' as a list of xyz arrays."""
        return [self.data.get_body_xpos('obj' + str(i)).copy() for i in range(self.num_objects)]

    def _set_goal_marker(self, goal):
        """
        This should be used ONLY for visualization. Use self._state_goal for
        logging, learning, etc.
        """
        self.data.site_xpos[self.model.site_name2id('hand-goal-site')] = (
            goal[:3]
        )
        self.data.site_xpos[self.model.site_name2id('obj-goal-site')] = (
            goal[3:6]
        )
        if self.hide_goal_markers:
            # Push markers far below the scene so they are not rendered.
            self.data.site_xpos[self.model.site_name2id('hand-goal-site'), 2] = (
                -1000
            )
            self.data.site_xpos[self.model.site_name2id('obj-goal-site'), 2] = (
                -1000
            )

    def _set_obj_xyz(self, pos):
        """Teleport the first object (legacy single-object setter).

        NOTE(review): assumes qpos[8:11] / qvel[8:15] belong to the first
        object's free joint — confirm against the model XML.
        """
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[8:11] = pos.copy()
        qvel[8:15] = 0
        self.set_state(qpos, qvel)

    def _set_object_xyz(self, pos, object_num):
        """Teleport object `object_num`, zeroing its velocity (7 qpos/qvel per object)."""
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        off = object_num
        qpos[8 + 7 * off:11 + 7 * off] = pos.copy()
        qvel[8 + 7 * off:15 + 7 * off] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset hand, sample a new goal and place every object at its init position."""
        self._reset_hand()
        goal = self.sample_goal()
        self._state_goal = goal['state_desired_goal']
        self._set_goal_marker(self._state_goal)
        # NOTE(review): indexing obj_init_pos in chunks of 3 per object
        # assumes it has 3 * num_objects entries; the default only has 3.
        for obj_num in range(self.num_objects):
            self._set_object_xyz(self.obj_init_pos[3*obj_num:3*(obj_num + 1)], obj_num)
        return self._get_obs()

    def _reset_hand(self):
        # Drive the mocap body to the start pose over several sim steps.
        for _ in range(10):
            self.data.set_mocap_pos('mocap', np.array([0, 0.5, 0.02]))
            self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
            self.do_simulation(None, self.frame_skip)

    # Fix: the original defined a dead zero-argument put_obj_in_hand here
    # that was immediately shadowed by the object_num overload below; the
    # dead definition has been removed.
    def put_obj_in_hand(self, object_num):
        """Teleport object `object_num` into the gripper and close it."""
        new_obj_pos = self.data.get_site_xpos('endeffector').copy()
        self.do_simulation(-1)
        new_obj_pos[1] -= 0.02
        new_obj_pos[2] += 0.02
        self._set_object_xyz(new_obj_pos, object_num)
        self.do_simulation(1)

    def set_to_goal(self, goal):
        """Move the simulator state directly to the given goal."""
        state_goal = goal['state_desired_goal']
        hand_goal = state_goal[:3]
        for _ in range(30):
            self.data.set_mocap_pos('mocap', hand_goal)
            self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
            # keep gripper closed
            self.do_simulation(np.array([-1]))
        # Compensate for any residual hand-position error when placing objects.
        error = self.data.get_site_xpos('endeffector') - hand_goal
        self._set_obj_xyz(state_goal[3:] + error)
        self.do_simulation(np.array([1]))
        self.sim.forward()

    """
    Multitask functions
    """
    def get_goal(self):
        return {
            'desired_goal': self._state_goal,
            'state_desired_goal': self._state_goal,
        }

    def sample_goals(self, batch_size, p_obj_in_hand=0.75):
        """Sample goal states; with probability p_obj_in_hand one object is in the gripper."""
        if self.fix_goal:
            goals = np.repeat(
                self.fixed_goal.copy()[None],
                batch_size,
                0
            )
        else:
            goals = np.random.uniform(
                self.hand_and_obj_space.low,
                self.hand_and_obj_space.high,
                size=(batch_size, self.hand_and_obj_space.low.size),
            )
            num_objs_in_hand = int(batch_size * p_obj_in_hand)
            if batch_size == 1:
                num_objs_in_hand = int(np.random.random() < p_obj_in_hand)
            # Put one randomly chosen object in the hand for the first
            # num_objs_in_hand goals; the rest sit at table height.
            for idx in range(batch_size):
                ball_num = np.random.randint(self.num_objects)
                ball_goals_idx = 3 + 3 * ball_num
                if idx < num_objs_in_hand:
                    goals[idx, ball_goals_idx:ball_goals_idx + 3] = \
                        goals[idx, :3].copy()
                    goals[idx, ball_goals_idx + 1] -= 0.02
                    goals[idx, ball_goals_idx + 2] += 0.02
                else:
                    goals[idx, ball_goals_idx + 2] = self.obj_init_pos[2]
                # All other objects stay at their initial positions.
                # NOTE(review): the `idx:` slice writes rows idx.. but the
                # assigned value is idx-independent, so the result matches
                # a plain `idx` write; kept as-is for byte-level parity.
                for ball in range(self.num_objects):
                    if ball == ball_num:
                        continue
                    goals[idx:, 3*(1+ball):3*(2+ball)] = self.obj_init_pos[3*ball:3*(ball + 1)]
        return {
            'desired_goal': goals,
            'state_desired_goal': goals,
        }

    def compute_rewards(self, actions, obs):
        """Batched rewards; which distance is used depends on self.reward_type."""
        achieved_goals = obs['state_achieved_goal']
        desired_goals = obs['state_desired_goal']
        hand_pos = achieved_goals[:, :3]
        obj_pos = achieved_goals[:, 3:]
        hand_goals = desired_goals[:, :3]
        obj_goals = desired_goals[:, 3:]
        hand_distances = np.linalg.norm(hand_goals - hand_pos, axis=1)
        obj_distances = np.linalg.norm(obj_goals - obj_pos, axis=1)
        hand_and_obj_distances = hand_distances + obj_distances
        if self.reward_type == 'hand_distance':
            r = -hand_distances
        elif self.reward_type == 'hand_success':
            r = -(hand_distances < self.indicator_threshold).astype(float)
        elif self.reward_type == 'obj_distance':
            r = -obj_distances
        elif self.reward_type == 'obj_success':
            r = -(obj_distances < self.indicator_threshold).astype(float)
        elif self.reward_type == 'hand_and_obj_distance':
            r = -hand_and_obj_distances
        elif self.reward_type == 'hand_and_obj_success':
            r = -(
                hand_and_obj_distances < self.indicator_threshold
            ).astype(float)
        elif self.reward_type == 'touch_distance':
            # Fix: touch_distances was commented out in the original, so
            # these two branches raised NameError. Computed here, inside
            # the branch, to leave all other reward types untouched.
            # Only meaningful for num_objects == 1 (obj_pos must be (batch, 3)).
            touch_distances = np.linalg.norm(hand_pos - obj_pos, axis=1)
            r = -touch_distances
        elif self.reward_type == 'touch_success':
            touch_distances = np.linalg.norm(hand_pos - obj_pos, axis=1)
            r = -(touch_distances < self.indicator_threshold).astype(float)
        else:
            raise NotImplementedError("Invalid/no reward type.")
        return r

    def get_diagnostics(self, paths, prefix=''):
        """Aggregate per-step and final-step statistics for each env_info key."""
        statistics = OrderedDict()
        for stat_name in [
            'hand_distance',
            'obj_distance',
            'hand_and_obj_distance',
            'touch_distance',
            'hand_success',
            'obj_success',
            'hand_and_obj_success',
            'touch_success',
        ]:
            stat = get_stat_in_paths(paths, 'env_infos', stat_name)
            statistics.update(create_stats_ordered_dict(
                '%s%s' % (prefix, stat_name),
                stat,
                always_show_all_stats=True,
            ))
            statistics.update(create_stats_ordered_dict(
                'Final %s%s' % (prefix, stat_name),
                [s[-1] for s in stat],
                always_show_all_stats=True,
            ))
        return statistics

    def get_env_state(self):
        """Simulator state plus a copy of the current goal."""
        base_state = super().get_env_state()
        goal = self._state_goal.copy()
        return base_state, goal

    def set_env_state(self, state):
        base_state, goal = state
        super().set_env_state(base_state)
        self._state_goal = goal
        self._set_goal_marker(goal)
class SawyerPickAndPlaceEnvYZ(SawyerPickAndPlaceEnv):
    """Pick-and-place variant constrained to a vertical YZ plane at x = x_axis."""
    def __init__(
        self,
        x_axis=0.0,
        oracle_resets=False,
        *args,
        **kwargs
    ):
        self.quick_init(locals())
        super().__init__(*args, **kwargs)
        self.oracle_resets = oracle_resets
        self.x_axis = x_axis
        # Pin the x component of the hand and of every object to x_axis.
        for idx in range(self.num_objects + 1):
            self.hand_and_obj_space.low[3*idx] = x_axis
            self.hand_and_obj_space.high[3*idx] = x_axis
        self.observation_space = Dict([
            ('observation', self.hand_and_obj_space),
            ('desired_goal', self.hand_and_obj_space),
            ('achieved_goal', self.hand_and_obj_space),
            ('state_observation', self.hand_and_obj_space),
            ('state_desired_goal', self.hand_and_obj_space),
            ('state_achieved_goal', self.hand_and_obj_space),
        ])
        # 2D action space [dy, dz, gripper]; the x drift is corrected in step().
        self.action_space = Box(
            np.array([-1, -1, -1]),
            np.array([1, 1, 1]),
        )
    def reset_model(self):
        # Optionally jump straight to a sampled goal state after the reset.
        super().reset_model()
        if self.oracle_resets:
            self.set_to_goal(self.sample_goal())
        return self._get_obs()
    def viewer_setup(self):
        sawyer_pick_and_place_camera(self.viewer.cam)
    def convert_2d_action(self, action):
        """Prepend an x correction that steers the hand back onto the x_axis plane."""
        cur_x_pos = self.get_endeff_pos()[0]
        adjust_x = self.x_axis - cur_x_pos
        return np.r_[adjust_x, action]
    def step(self, action):
        # new_obj_pos = self.data.get_site_xpos('obj')
        # new_obj_pos[0] = self.x_axis
        # self._set_obj_xyz(new_obj_pos)
        action = self.convert_2d_action(action)
        return super().step(action)
    def _reset_hand(self):
        # Drive the mocap body to the start pose over several sim steps.
        for _ in range(10):
            self.data.set_mocap_pos('mocap', np.array([0, 0.6, 0.2]))
            self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
            self.do_simulation(None, self.frame_skip)
    def put_obj_in_hand(self, object_num):
        """Teleport object `object_num` into the gripper (x pinned to x_axis)."""
        self.do_simulation(1)
        self.do_simulation(-.05)
        new_obj_pos = self.data.get_site_xpos('endeffector').copy()
        new_obj_pos[0] = self.x_axis
        new_obj_pos[1] -= 0.01
        new_obj_pos[2] -= 0.01
        self._set_object_xyz(new_obj_pos, object_num)
        self.do_simulation(1)
        self.sim.forward()
    def set_to_goal(self, goal):
        """Move the hand to the goal, then place objects (in-hand ones first)."""
        state_goal = goal['state_desired_goal']
        hand_goal = state_goal[:3]
        for _ in range(30):
            self.data.set_mocap_pos('mocap', hand_goal)
            self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
            self.do_simulation(np.array([-1]))
        in_hand = []
        not_in_hand = []
        for obj_num in range(self.num_objects):
            obj_goal_pos = state_goal[3*(obj_num+1):3*(obj_num+2)]
            assert obj_goal_pos[0] == self.x_axis
            # Objects whose goal z equals the init height sit on the table;
            # any other height means "in the hand".
            if obj_goal_pos[2] == self.obj_init_pos[2]:
                # self._set_object_xyz(obj_goal_pos, obj_num)
                not_in_hand.append((obj_goal_pos, obj_num))
            else:
                in_hand.append(obj_num)
        for obj_num in in_hand:
            self.put_obj_in_hand(obj_num)
        for obj_goal_pos, obj_num in not_in_hand:
            self._set_object_xyz(obj_goal_pos, obj_num)
        # Randomize the final gripper command when nothing is held.
        if not in_hand:
            action = 1 - 2 * int(np.random.random() > .5)
            self.do_simulation(np.array([action]))
| {
"alphanum_fraction": 0.590381632,
"author": null,
"avg_line_length": 35.4175824176,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "115dc111b9fb3a89fe2991a5ae279eeb53b30612",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-08-13T23:47:47.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-08-13T23:47:47.000Z",
"max_forks_repo_head_hexsha": "7576a00b884f629ad5de86f6c8a3618770273029",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "stevenlin1111/multiworld",
"max_forks_repo_path": "multiworld/envs/mujoco/sawyer_xyz/sawyer_pick_and_place.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7576a00b884f629ad5de86f6c8a3618770273029",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "stevenlin1111/multiworld",
"max_issues_repo_path": "multiworld/envs/mujoco/sawyer_xyz/sawyer_pick_and_place.py",
"max_line_length": 96,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7576a00b884f629ad5de86f6c8a3618770273029",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "stevenlin1111/multiworld",
"max_stars_repo_path": "multiworld/envs/mujoco/sawyer_xyz/sawyer_pick_and_place.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3874,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 16115
} |
# -*- coding: utf-8 -*-
"""
Creates and saves CNN model for keyword detection.
"""
import json
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow.keras as keras
# Training configuration.
DATA_PATH = "data.json"  # prepared dataset with "MFCCs" and "labels" keys
SAVED_MODEL_PATH = "model.h5"
LEARNING_RATE = 0.001
EPOCHS = 40
BATCH_SIZE = 32
NUMBER_OF_KEYWORDS = 10  # size of the softmax output layer
def load_dataset(data_path):
    """Load MFCC features and labels from a JSON dataset file.

    Returns a tuple (X, y) of numpy arrays built from the file's
    "MFCCs" and "labels" entries.
    """
    with open(data_path, "r") as fp:
        payload = json.load(fp)
    return np.array(payload["MFCCs"]), np.array(payload["labels"])
def get_data_splits(data_path, test_size=0.1, test_validation=0.1):
    """Split the dataset into train/validation/test and add a channel axis.

    Returns (X_train, X_validation, X_test, y_train, y_validation, y_test).
    """
    X, y = load_dataset(data_path)

    # Carve out the test set first, then a validation set from the remainder.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    X_train, X_validation, y_train, y_validation = train_test_split(
        X_train, y_train, test_size=test_validation
    )

    # The CNN expects 3D samples, so append a trailing channel dimension.
    X_train = X_train[..., np.newaxis]
    X_validation = X_validation[..., np.newaxis]
    X_test = X_test[..., np.newaxis]
    return X_train, X_validation, X_test, y_train, y_validation, y_test
def build_model(input_shape, learning_rate, loss="sparse_categorical_crossentropy"):
    """Build and compile the keyword-spotting CNN.

    Args:
        input_shape: shape of one input sample, e.g. (segments, coefficients, 1).
        learning_rate: Adam learning rate.
        loss: Keras loss identifier.

    Returns:
        A compiled keras.Sequential model (summary is printed as a side effect).
    """
    model = keras.Sequential()

    # conv layer 1 — only the first layer needs input_shape; the original
    # also passed it to layers 2 and 3, where Keras silently ignores it,
    # so those redundant arguments were removed.
    model.add(keras.layers.Conv2D(64, (3, 3), activation="relu",
                                  input_shape=input_shape,
                                  kernel_regularizer=keras.regularizers.l2(0.001)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPool2D((3, 3), strides=(2, 2), padding="same"))

    # conv layer 2
    model.add(keras.layers.Conv2D(32, (3, 3), activation="relu",
                                  kernel_regularizer=keras.regularizers.l2(0.001)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPool2D((3, 3), strides=(2, 2), padding="same"))

    # conv layer 3
    model.add(keras.layers.Conv2D(32, (2, 2), activation="relu",
                                  kernel_regularizer=keras.regularizers.l2(0.001)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPool2D((2, 2), strides=(2, 2), padding="same"))

    # flatten and feed into dense layer
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(64, activation="relu"))
    model.add(keras.layers.Dropout(0.3))

    # classifier
    model.add(keras.layers.Dense(NUMBER_OF_KEYWORDS, activation="softmax"))

    # compile model
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
    model.summary()
    return model
def main():
    """Train, evaluate and save the keyword-spotting CNN."""
    # load train/validation/test splits
    X_train, X_validation, X_test, y_train, y_validation, y_test = get_data_splits(DATA_PATH)

    # build the CNN from the shape of one training sample
    model = build_model(X_train.shape[1:4], learning_rate=LEARNING_RATE)

    # train
    model.fit(X_train, y_train, epochs=EPOCHS, batch_size=BATCH_SIZE,
              validation_data=(X_validation, y_validation))

    # evaluate on the held-out test set
    test_error, test_accuracy = model.evaluate(X_test, y_test)
    print(f"Test error: {test_error}, test accuracy: {test_accuracy}")

    # persist the trained model
    model.save(SAVED_MODEL_PATH)


if __name__ == "__main__":
    main()
| {
"alphanum_fraction": 0.6627641348,
"author": null,
"avg_line_length": 30.452173913,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9e8a0a17813528925a38fa9fc95c4b0e6c01b1dc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3075938c45efb2b6a275d9474b599f106164bb9d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sszokoly/VoiceInterface",
"max_forks_repo_path": "train.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3075938c45efb2b6a275d9474b599f106164bb9d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sszokoly/VoiceInterface",
"max_issues_repo_path": "train.py",
"max_line_length": 93,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3075938c45efb2b6a275d9474b599f106164bb9d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sszokoly/VoiceInterface",
"max_stars_repo_path": "train.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 875,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3502
} |
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../"))
from skdecide.builders.discrete_optimization.generic_tools.do_problem import Solution, Problem, EncodingRegister, TypeAttribute, \
ObjectiveRegister, TypeObjective, ObjectiveHandling, ModeOptim
from typing import List, Union, NamedTuple, Tuple, Dict
import numpy as np
from numba import njit
from functools import partial
import math
from abc import abstractmethod
from copy import deepcopy
class SolutionTSP(Solution):
    """A TSP tour: fixed start/end nodes plus a permutation of the remaining nodes.

    Two encodings are kept in sync: ``permutation`` uses original node ids,
    ``permutation_from0`` uses contiguous 0-based indices over the
    non-endpoint nodes. Either may be supplied; the other is derived.
    """
    permutation_from0: Union[List[int], np.array]
    start_index: int
    end_index: int
    permutation: Union[List[int], np.array]
    lengths: List[float] # to store the details of length of the tsp if you want.
    length: float # to store the length of the tsp, in case your mutation computes it :)
    def __init__(self,
                 problem=None,
                 start_index=None,
                 end_index=None,
                 permutation=None,
                 lengths=None,
                 length=None,
                 permutation_from0=None):
        # At least one of the two permutation encodings must be provided.
        assert(permutation is not None or permutation_from0 is not None)
        # if permutation is not None and permutation_from0 is None:
        #     assert(start_index is not None and end_index is not None and lengths is not None and length is not None)
        self.start_index = start_index
        self.end_index = end_index
        self.permutation = permutation
        self.lengths = lengths
        self.length = length
        self.permutation_from0 = permutation_from0
        self.problem = problem
        # Missing endpoints default to the problem's endpoints.
        if self.start_index is None:
            self.start_index = problem.start_index
        if self.end_index is None:
            self.end_index = problem.end_index
        # Derive whichever permutation encoding was not supplied.
        if self.permutation is None:
            self.permutation = self.problem.convert_perm_from0_to_original_perm(self.permutation_from0)
        if self.permutation_from0 is None:
            self.permutation_from0 = self.problem.convert_original_perm_to_perm_from0(self.permutation)
        # print('problem__:', problem)
        # print('permutation_from0__:', self.permutation_from0)
        # TODO: Think about moving this into another function (to prevent unecessary calls to evaluate()
        if self.length is None:
            self.problem.evaluate(self)
    def copy(self):
        """Copy with duplicated permutation/length lists (no aliasing of mutables)."""
        return SolutionTSP(problem=self.problem,
                           start_index=self.start_index,
                           end_index=self.end_index,
                           permutation=list(self.permutation),
                           lengths=list(self.lengths),
                           length=self.length,
                           permutation_from0=deepcopy(self.permutation_from0))
    def lazy_copy(self):
        """Shallow copy sharing the underlying lists with this solution."""
        return SolutionTSP(problem=self.problem,
                           start_index=self.start_index,
                           end_index=self.end_index,
                           permutation=self.permutation,
                           lengths=self.lengths,
                           length=self.length,
                           permutation_from0=self.permutation_from0)
    def __str__(self):
        return "perm :"+str(self.permutation)+"\nobj="+str(self.length)
    def change_problem(self, new_problem):
        """Re-initialize this solution in place against a different problem instance."""
        self.__init__(problem=new_problem,
                      start_index=self.start_index,
                      end_index=self.end_index,
                      permutation=list(self.permutation),
                      lengths=list(self.lengths),
                      length=self.length,
                      permutation_from0=deepcopy(self.permutation_from0))
class Point:
    """Placeholder point type; no attributes are defined at this level."""
class TSPModel(Problem):
list_points: List[Point]
np_points: np.array
node_count: int
    def __init__(self,
                 list_points: List[Point],
                 node_count: int,
                 start_index: int=0,
                 end_index: int=0):
        """Base TSP problem over ``node_count`` points.

        ``start_index``/``end_index`` are the fixed first and last nodes of a
        tour; every other node belongs to the permutation being optimized.
        """
        self.list_points = list_points
        self.node_count = node_count
        self.start_index = start_index
        self.end_index = end_index
        # None endpoints are normalized to node 0.
        if self.start_index is None:
            self.start_index = 0
        if self.end_index is None:
            self.end_index = 0
        # Nodes that actually take part in the permutation (endpoints excluded).
        self.ind_in_permutation = [i for i in range(self.node_count) if i != self.start_index and i != self.end_index]
        self.length_permutation = len(self.ind_in_permutation)
        # print('start_index: ', start_index)
        # print('end_index: ', end_index)
        # Forward map: "from0" permutation index -> original node id.
        self.original_indices_to_permutation_indices = [i for i in range(self.node_count)
                                                        if i != self.start_index and i != self.end_index]
        # Inverse map: original node id -> "from0" permutation index.
        self.original_indices_to_permutation_indices_dict = {}
        counter = 0
        for i in range(self.node_count):
            if i != self.start_index and i != self.end_index:
                self.original_indices_to_permutation_indices_dict[i] = counter
                counter += 1
        # print('original_indices_to_permutation_indices: ', self.original_indices_to_permutation_indices)
# for a given tsp kind of problem, you should provide a custom evaluate function, for now still abstract.
@abstractmethod
def evaluate_function(self, var_tsp: SolutionTSP):
...
@abstractmethod
def evaluate_function_indexes(self, index_1, index_2):
...
def evaluate_from_encoding(self, int_vector, encoding_name):
if encoding_name == 'permutation_from0':
tsp_sol = SolutionTSP(problem=self,
start_index=self.start_index,
end_index=self.end_index,
permutation=self.convert_perm_from0_to_original_perm(int_vector))
elif encoding_name == "permutation":
tsp_sol = SolutionTSP(problem=self,
start_index=self.start_index,
end_index=self.end_index,
permutation=int_vector)
elif encoding_name == 'custom':
kwargs = {encoding_name: int_vector, 'problem': self, 'start_index': self.start_index, 'end_index':self.end_index}
tsp_sol = SolutionTSP(**kwargs)
objectives = self.evaluate(tsp_sol)
return objectives
def evaluate(self, var_tsp: SolutionTSP)-> Dict[str, float]:
lengths, obj = self.evaluate_function(var_tsp)
var_tsp.length = obj
var_tsp.lengths = lengths
return {'length': obj}
# return obj
def satisfy(self, var_tsp: SolutionTSP)->bool:
b = var_tsp.permutation[0] == self.start_index and var_tsp.permutation[-1] == self.end_index
if not b:
return False
def get_dummy_solution(self):
var = SolutionTSP(problem=self, start_index=self.start_index,
end_index=self.end_index,
permutation=list(self.ind_in_permutation),
permutation_from0=None,
lengths=None, length=None)
self.evaluate(var)
return var
def __str__(self):
return "TSP problem with number of nodes : : "+str(self.node_count)
def convert_perm_from0_to_original_perm(self, perm_from0):
perm = [self.original_indices_to_permutation_indices[x] for x in perm_from0]
return perm
def convert_original_perm_to_perm_from0(self, perm):
#print('mapping: ', self.original_indices_to_permutation_indices_dict)
#print('original: ', perm)
perm_from0 = [self.original_indices_to_permutation_indices_dict[i] for i in perm]
return perm_from0
def get_solution_type(self):
return SolutionTSP
def get_attribute_register(self) -> EncodingRegister:
dict_register = {}
dict_register["permutation_from0"] = {"name": "permutation_from0",
"type": [TypeAttribute.PERMUTATION],
"range": range(len(self.original_indices_to_permutation_indices)),
"n": len(self.original_indices_to_permutation_indices)}
dict_register["permutation"] = {"name": "permutation",
"type": [TypeAttribute.PERMUTATION, TypeAttribute.PERMUTATION_TSP],
"range": self.ind_in_permutation,
"n": self.length_permutation}
return EncodingRegister(dict_register)
def get_objective_register(self) -> ObjectiveRegister:
dict_objective = {"length": {"type": TypeObjective.OBJECTIVE, "default_weight": 1}}
return ObjectiveRegister(objective_sense=ModeOptim.MINIMIZATION,
objective_handling=ObjectiveHandling.SINGLE,
dict_objective_to_doc=dict_objective)
# One
class Point2D(Point, NamedTuple):
    """Immutable 2D point (cartesian coordinates) used as a TSP node."""
    x: float
    y: float
class TSPModel2D(TSPModel):
    """TSP in the Euclidean plane: edges are straight-line 2D distances."""
    def __init__(self, list_points: List[Point2D],
                 node_count: int,
                 start_index: int=0,
                 end_index: int=0,
                 use_numba=True):
        TSPModel.__init__(self, list_points, node_count,
                          start_index=start_index,
                          end_index=end_index)
        # Dense (node_count, 2) coordinate matrix for the array evaluator.
        self.np_points = np.zeros((node_count, 2))
        for node in range(self.node_count):
            point = self.list_points[node]
            self.np_points[node, 0] = point.x
            self.np_points[node, 1] = point.y
        # Numba-compiled evaluator by default, pure python otherwise.
        builder = build_evaluate_function_np if use_numba else build_evaluate_function
        self.evaluate_function_2d = builder(self)
    def evaluate_function(self, var_tsp: SolutionTSP):
        """Evaluate the permutation stored in ``var_tsp``."""
        return self.evaluate_function_2d(solution=var_tsp.permutation)
    def evaluate_function_indexes(self, index_1, index_2) -> float:
        """Euclidean distance between two nodes given by original indices."""
        return length(self.list_points[index_1], self.list_points[index_2])
def length(point1, point2):
    """Euclidean distance between two 2D points (objects with .x and .y)."""
    dx = point1.x - point2.x
    dy = point1.y - point2.y
    return math.sqrt(dx ** 2 + dy ** 2)
def compute_length(start_index,
                   end_index,
                   solution: List[int],
                   list_points: List[Point2D],
                   node_count: int,
                   length_permutation: int):
    """Pure-python tour length.

    The tour is start_index -> solution[0] -> ... -> solution[-1] -> end_index.
    Returns the list of consecutive edge lengths and their total.
    """
    # First edge: start node to the first permutation node.
    lengths = [length(list_points[start_index], list_points[solution[0]])]
    # Edges between consecutive permutation nodes.
    for pos in range(length_permutation - 1):
        lengths.append(length(list_points[solution[pos]], list_points[solution[pos + 1]]))
    # Closing edge: last permutation node to the end node.
    lengths.append(length(list_points[end_index], list_points[solution[-1]]))
    return lengths, sum(lengths)
# More efficient implementation
@njit
def compute_length_np(start_index,
                      end_index,
                      solution: Union[List[int], np.array],
                      np_points,
                      node_count,
                      length_permutation)->Tuple[Union[List[float], np.array], float]:
    """Numba-compiled tour length on a (node_count, 2) coordinate array.

    Returns ``(lengths, obj)`` where ``lengths[0]`` is the start->solution[0]
    edge, ``lengths[1..length_permutation-1]`` are the consecutive edges and
    ``lengths[node_count-1]`` is the solution[-1]->end edge; ``obj`` is their
    sum.
    """
    obj = np.sqrt((np_points[start_index, 0]-np_points[solution[0], 0])**2+\
                  (np_points[start_index, 1]-np_points[solution[0], 1])**2)
    lengths = np.zeros((node_count))
    lengths[0] = obj
    for index in range(0, length_permutation-1):
        ll = math.sqrt((np_points[solution[index], 0]-np_points[solution[index+1], 0])**2+\
                       (np_points[solution[index], 1]-np_points[solution[index+1], 1])**2)
        obj += ll
        # Bug fix: was `lengths[index] = ll`, which overwrote lengths[0]
        # (the start edge) and shifted every consecutive edge one slot
        # early, diverging from the list-based compute_length above.
        lengths[index + 1] = ll
    lengths[node_count-1] = np.sqrt((np_points[end_index, 0]-np_points[solution[-1], 0])**2+\
                                    (np_points[end_index, 1]-np_points[solution[-1], 1])**2)
    obj += lengths[node_count-1]
    return lengths, obj
def build_evaluate_function(tsp_model: TSPModel):
    """Bind the pure-python edge-length computation to ``tsp_model``.

    The returned callable only needs the ``solution`` keyword argument.
    """
    return partial(compute_length,
                   list_points=tsp_model.list_points,
                   node_count=tsp_model.node_count,
                   length_permutation=tsp_model.length_permutation,
                   start_index=tsp_model.start_index,
                   end_index=tsp_model.end_index)
def build_evaluate_function_np(tsp_model: TSPModel):
    """Bind the numba-compiled edge-length computation to ``tsp_model``.

    The returned callable only needs the ``solution`` keyword argument.
    """
    return partial(compute_length_np,
                   np_points=tsp_model.np_points,
                   node_count=tsp_model.node_count,
                   length_permutation=tsp_model.length_permutation,
                   start_index=tsp_model.start_index,
                   end_index=tsp_model.end_index)
| {
"alphanum_fraction": 0.6040942144,
"author": null,
"avg_line_length": 42.8881355932,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c0a71e645ce485043ef9025160745200dc09ab32",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "37bcea112da39d1390ff2b30951b36ee5dbc0e6d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "galleon/bug-free-invention",
"max_forks_repo_path": "skdecide/builders/discrete_optimization/tsp/tsp_model.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "37bcea112da39d1390ff2b30951b36ee5dbc0e6d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "galleon/bug-free-invention",
"max_issues_repo_path": "skdecide/builders/discrete_optimization/tsp/tsp_model.py",
"max_line_length": 130,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "37bcea112da39d1390ff2b30951b36ee5dbc0e6d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "galleon/bug-free-invention",
"max_stars_repo_path": "skdecide/builders/discrete_optimization/tsp/tsp_model.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2618,
"path": null,
"reason": "import numpy,from numba",
"repo": null,
"save_path": null,
"sha": null,
"size": 12652
} |
# -*- coding:utf-8 -*-
# &Author AnFany
import pandas as pd
import numpy as np
# Path of the training data file
train_path = 'C:/Users/GWT9\Desktop/Adult_Train.csv'
# Path of the test data file
test_path = 'C:/Users/GWT9\Desktop/Adult_Test.csv'
# The test data's native-country column lacks the value Holand-Netherlands,
# which would make the one-hot encodings of train and test inconsistent.
# Therefore a sample with native-country == Holand-Netherlands is added to the
# test file by hand (to keep the program simple) and dropped again afterwards.
def handle_data(filepath, miss='fill'):
    """Load a CSV, handle missing values ('?') and one-hot encode text columns.

    :param filepath: path of the csv file to read
    :param miss: 'del' drops rows containing missing values,
                 anything else forward-fills them
    :return: DataFrame where numeric columns are kept as-is and every object
             column is replaced by its one-hot dummies (original column order)
    """
    raw = pd.read_csv(r'%s' % filepath).replace('?', np.nan)
    # Handle missing values
    cleaned = raw.dropna(how='any') if miss == 'del' else raw.fillna(method='ffill')
    # Assemble the output frame column by column
    encoded = pd.DataFrame()
    for column in cleaned:
        if cleaned[column].dtype == 'object':
            # One-hot encode the categorical column
            encoded = pd.concat([encoded, pd.get_dummies(cleaned[column])], axis=1)
        else:
            encoded[column] = cleaned[column]
    return encoded
train_data = handle_data(train_path)
test_data = handle_data(test_path)
test_data = test_data.drop([len(test_data) - 1], inplace=False)  # drop the manually added last sample
# Data normalisation
# Standardise every feature column; the 0/1 target columns are passed through.
def norm(trdata, tedata):
    """Z-score standardise features using the *training* statistics.

    Target columns '<=50K'/'>50K' are copied unchanged; in the test frame
    the corresponding labels carry a trailing '.' ('<=50K.' / '>50K.').
    """
    train_out = pd.DataFrame()
    test_out = pd.DataFrame()
    for column in trdata.columns:
        if column in ('<=50K', '>50K'):
            train_out[column] = trdata[column].values
            # The test labels are spelled with an extra trailing dot.
            test_out[column] = tedata['%s.' % column].values
        else:
            mu = np.mean(trdata[column])
            sigma = np.std(trdata[column])
            train_out[column] = (trdata[column] - mu) / sigma  # standardise
            test_out[column] = (tedata[column] - mu) / sigma   # train stats on purpose
            # 0-1 scaling alternative:
            # train_out[column] = (trdata[column] - np.min(trdata[column])) / (np.max(trdata[column]) - np.min(trdata[column]))
            # test_out[column] = (tedata[column] - np.min(trdata[column])) / (np.max(trdata[column]) - np.min(trdata[column]))
    return train_out, test_out
Train_data, Test_data = norm(train_data, test_data)
# Split the training data into n equal parts and use K-fold cross validation
# to estimate the model's final accuracy.
# Each fold provides a (training subset, validation subset) pair.
def kfold(trdata, k=10):
    """Build ``k`` random (train, validation) splits of ``trdata``.

    :param trdata: DataFrame to split
    :param k: number of folds
    :return: dict mapping fold number -> [train_rows, validation_rows]
             (numpy arrays); each validation set holds ``len(trdata) // k``
             rows sampled without replacement.

    NOTE(review): the rows are reshuffled before every fold, so validation
    sets of different folds may overlap — this is repeated random
    sub-sampling rather than strict K-fold partitioning; confirm intent.
    """
    values = trdata.values
    total = len(values)
    fold_size = int(total / k)
    indices = np.arange(total)
    datadict = {}
    for fold in range(k):
        np.random.shuffle(values)
        val_idx = np.random.choice(indices, fold_size, replace=False)
        # Set membership is O(1) per row; the original tested
        # `row not in ndarray`, an O(n) scan per row (O(n^2) per fold).
        val_set = set(val_idx)
        train_rows = values[[row for row in indices if row not in val_set]]
        datadict[fold] = [train_rows, values[val_idx]]
    return datadict
# Dictionary holding the K-fold cross-validation splits of the training data
kfold_train_datadict = kfold(Train_data)
| {
"alphanum_fraction": 0.6097178683,
"author": null,
"avg_line_length": 31.5061728395,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b0c1c81605703f3d9673ec0e12514d5963a0a1f0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 235,
"max_forks_repo_forks_event_max_datetime": "2022-03-11T03:20:07.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-06-28T05:31:40.000Z",
"max_forks_repo_head_hexsha": "d9effcbb1b390dc608a0f4c0a28f0ad03892047a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "976634681/Machine-Learning-for-Beginner-by-Python3",
"max_forks_repo_path": "BPNN/BPNN_Classify/BPNN_Classify_Data.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "d9effcbb1b390dc608a0f4c0a28f0ad03892047a",
"max_issues_repo_issues_event_max_datetime": "2021-03-11T13:23:06.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-01-14T16:41:02.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "976634681/Machine-Learning-for-Beginner-by-Python3",
"max_issues_repo_path": "BPNN/BPNN_Classify/BPNN_Classify_Data.py",
"max_line_length": 113,
"max_stars_count": 397,
"max_stars_repo_head_hexsha": "1022ee7ce4d387da5e5fd8c3b66ac9b1bfc1974c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Anfany/Machine-Learning-for-Beginner",
"max_stars_repo_path": "BPNN/BPNN_Classify/BPNN_Classify_Data.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T09:53:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-05-28T02:07:32.000Z",
"num_tokens": 959,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2552
} |
[STATEMENT]
lemma map_color_of: "color_of (map f t) = color_of t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. color_of (RBT_Impl.map f t) = color_of t
[PROOF STEP]
by (induct t) simp+ | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 87,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
from __future__ import (absolute_import, division, print_function)
"""
This example shows how to plot data on rectangular 2D grids
(grids that are not rectlinear in geographic or native map projection
coordinates).
An example of such a grid is the 'POP' grid which is used in
the ocean component NCAR Community Climate System Model (CCSM).
"POP" stands for "Parallel Ocean Program", which was developed
at Los Alamos.
These grids may be thought of as rectangular arrays wrapped around the
globe in the usual way, with one subscript, call it I, associated with
longitude and the other subscript, call it J, associated with latitude,
and then deformed in such a way as to move the top edge of the array to
a circle centered somewhere other than over the North Pole (typically,
over Greenland or Canada) and the bottom edge of the array to a circle
that is centered on the South Pole, but lies entirely within Antarctica.
The lines defined by the rows and columns of the rectangular arrays are
locally orthogonal to each other.
POP grids are used extensively locally in oceanographic and ice models.
"""
from matplotlib import rcParams
import numpy.ma as ma
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset as NetCDFFile
# read in data from netCDF file.
infile = 'ccsm_popgrid.nc'
fpin = NetCDFFile(infile)
# 2D curvilinear coordinate arrays: lat/lon of every POP grid cell centre.
tlat = fpin.variables['TLAT'][:]
tlon = fpin.variables['TLONG'][:]
# masked array returned, masked where data == _FillValue
temp = fpin.variables['TEMP'][:]
fpin.close()
# make longitudes monotonically increasing.
tlon = np.where(np.greater_equal(tlon,min(tlon[:,0])),tlon-360,tlon)
# stack grids side-by-side (in longitiudinal direction), so
# any range of longitudes may be plotted on a world map.
tlon = np.concatenate((tlon,tlon+360),1)
tlat = np.concatenate((tlat,tlat),1)
temp = ma.concatenate((temp,temp),1)
tlon = tlon-360.
plt.figure(figsize=(6,8))
plt.subplot(2,1,1)
# subplot 1 just shows POP grid cells.
m = Basemap(projection='merc', lat_ts=20, llcrnrlon=-180, \
urcrnrlon=180, llcrnrlat=-84, urcrnrlat=84, resolution='c')
m.drawcoastlines()
m.fillcontinents(color='white')
# Project the curvilinear lat/lon arrays into map (x, y) coordinates.
x, y = m(tlon,tlat)
# All-zero dummy field carrying the ocean mask so only cell edges show.
# NOTE(review): shading='faceted' was removed in newer matplotlib; confirm
# the matplotlib/basemap versions pinned for this example still accept it.
im = m.pcolormesh(x,y,ma.masked_array(np.zeros(temp.shape,'f'), temp.mask),
                  shading='faceted', antialiased=True, cmap=plt.cm.cool,
                  vmin=0, vmax=0)
# disclaimer: these are not really the grid cells because of the
# way pcolor interprets the x and y args.
plt.title('(A) CCSM POP Grid Cells')
# subplot 2 is a contour plot of surface temperature from the
# CCSM ocean model.
plt.subplot(2,1,2)
m.drawcoastlines()
m.fillcontinents(color='white')
# Filled contours plus thin black contour lines of the temperature field.
CS1 = m.contourf(x,y,temp,15)
CS2 = m.contour(x,y,temp,15,colors='black',linewidths=0.5)
plt.title('(B) Surface Temp contours on POP Grid')
plt.show()
#plt.savefig('ccsm_popgrid.ps')
| {
"alphanum_fraction": 0.7430651872,
"author": null,
"avg_line_length": 35.6049382716,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1a0d719f2db6a86859e32b66d62f32a3ff639b40",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8e9a37e09a65b16429b699f7c12fcab754e1a85a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DWesl/basemap",
"max_forks_repo_path": "examples/ccsm_popgrid.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8e9a37e09a65b16429b699f7c12fcab754e1a85a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DWesl/basemap",
"max_issues_repo_path": "examples/ccsm_popgrid.py",
"max_line_length": 75,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "8e9a37e09a65b16429b699f7c12fcab754e1a85a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DWesl/basemap",
"max_stars_repo_path": "examples/ccsm_popgrid.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-26T14:13:11.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-26T14:13:11.000Z",
"num_tokens": 765,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2884
} |
import random
import numpy as np
import matplotlib
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
def draw_image(idx, centroids, width, height):
    """Render the image whose pixels are given by their centroid colours.

    :param idx: per-pixel centroid index, length width*height
    :param centroids: array of centroid colour vectors (RGB)
    :param width: image width in pixels
    :param height: image height in pixels
    :return: None (shows the image with matplotlib)
    """
    # Map every pixel to its centroid colour, then restore the 2D layout.
    # (The original allocated a throwaway np.zeros array only to read its
    # shape; the target shape is spelled out directly instead.)
    pixels = centroids[idx]
    image = np.reshape(pixels, (height, width, 3))
    pyplot.imshow(image.astype(np.uint8))
    pyplot.show()
def init_centroids(data, k):
    """Create an initial set of ``k`` pairwise-distinct centroids.

    :param data: (m, n) array of examples to select centroids from
    :param k: number of centroids
    :return: (k, n) array of distinct rows drawn from ``data``
    :raises ValueError: if ``data`` holds fewer than ``k`` distinct rows

    Resolves the former TODO: sampling from the *unique* rows guarantees
    distinct centroids without the original retry loop (which could also
    terminate with duplicates left in place).
    """
    unique_rows = np.unique(data, axis=0)
    if unique_rows.shape[0] < k:
        raise ValueError('need at least %d distinct rows, got %d'
                         % (k, unique_rows.shape[0]))
    chosen = np.random.choice(unique_rows.shape[0], k, replace=False)
    return unique_rows[chosen]
def find_closest_centroids(data, centroids):
    """Assign every example to its nearest centroid (Euclidean distance).

    :param data: (m, n) array of examples
    :param centroids: (k, n) array of centroids
    :return: (m,) int array; entry i is the index of the centroid closest
             to ``data[i]`` (ties resolved to the lowest index, as before)
    """
    # Vectorised pairwise distances, shape (m, k), instead of the former
    # O(m*k) python double loop; argmin keeps the first minimum, matching
    # the old tie-breaking behaviour.
    diff = data[:, np.newaxis, :] - centroids[np.newaxis, :, :]
    distances = np.sqrt(np.sum(diff ** 2, axis=2))
    return np.argmin(distances, axis=1).astype(int)
def draw_points_animation(data, idx_history, centroid_history):
    """Animate the recorded K-means iterations.

    :param data: (m, 2) array of examples being clustered
    :param idx_history: per-iteration centroid assignments
    :param centroid_history: per-iteration centroid positions
    :return: None (opens a matplotlib window)
    """
    print('running animation')
    figure = pyplot.figure()
    # Keep a reference to the FuncAnimation object: it is driven by timer
    # callbacks and would stop if garbage collected.
    anim = FuncAnimation(figure, plot_progress_means,
                         fargs=(data, centroid_history, idx_history),
                         frames=len(idx_history),
                         interval=500,
                         repeat_delay=2)
    pyplot.show()
def generate_new_centroids(data, idx, k):
    """Recompute each centroid as the mean of the points assigned to it.

    :param data: (m, n) array of examples
    :param idx: (m,) assignment of each example to a centroid index
    :param k: number of centroids
    :return: (k, n) array of updated centroids
    """
    _, n_features = data.shape
    updated = np.zeros((k, n_features))
    for cluster in range(k):
        members = data[idx == cluster]
        # Mean of the member rows: column-wise sum over the member count.
        updated[cluster, :] = np.sum(members, 0) / np.size(members, 0)
    return updated
def plot_progress_means(i, data, centroid_history, idx_history):
    """Draw frame ``i`` of the K-means animation: each centroid's trajectory
    up to iteration i, plus the data points coloured by their assignment at
    iteration i. Used as the FuncAnimation frame callback."""
    K = centroid_history[0].shape[0]
    # Start from a clean figure for every animation frame.
    pyplot.gcf().clf()
    cmap = pyplot.cm.rainbow
    norm = matplotlib.colors.Normalize(vmin=0, vmax=2)
    for k in range(K):
        # Positions of centroid k over iterations 0..i, stacked as rows.
        current = np.stack([c[k, :] for c in centroid_history[:i + 1]], axis=0)
        pyplot.plot(current[:, 0], current[:, 1],
                    '-Xk',
                    mec='k',
                    lw=2,
                    ms=10,
                    mfc=cmap(norm(k)),
                    mew=2)
    # Scatter the raw data, coloured by the current cluster assignment.
    pyplot.scatter(data[:, 0], data[:, 1],
                   c=idx_history[i],
                   cmap=cmap,
                   marker='o',
                   s=8 ** 2,
                   linewidths=1, )
    pyplot.grid(False)
    pyplot.title('Iteration number %d' % (i + 1))
| {
"alphanum_fraction": 0.5479172735,
"author": null,
"avg_line_length": 27.685483871,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d9e80e13adf95ad1f81987b93949e9aea11d16a1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a2a659ddde225f691d7ca76478a4dbd62e4f1e33",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "humanengineerin/k-means",
"max_forks_repo_path": "src/k_means/helpers.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a2a659ddde225f691d7ca76478a4dbd62e4f1e33",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "humanengineerin/k-means",
"max_issues_repo_path": "src/k_means/helpers.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a2a659ddde225f691d7ca76478a4dbd62e4f1e33",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "humanengineerin/k-means",
"max_stars_repo_path": "src/k_means/helpers.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 790,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3433
} |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import chi2_contingency, chi2
#%%
# Raw A/B-test data: one row per impression with the ad id and the action.
df = pd.read_csv('data/advertisement_clicks.csv')
df.head()
#%% Create the contingency table
# Rows: advertisement_id; columns: action; cells hold the observed counts.
df_crosstab = pd.crosstab(df['advertisement_id'], df['action'], margins = False)
#%% chi2 api approach
# NOTE(review): for 2x2 tables chi2_contingency applies Yates' continuity
# correction by default, so `stat` can differ from the manual computation
# further below — confirm which statistic is intended.
stat, p, dof, expected = chi2_contingency(df_crosstab)
print(stat, p, dof)
if p<0.05:
    print("The result is significant. Reject Null hypothesis.")
else:
    print("Failed to reject Null hypothesis.")
#%% Chi2 code approach
# Expected counts under independence: outer(row_sums, col_sums) / total.
column_sums = np.array(df_crosstab.sum(axis=0))
row_sums = np.array(df_crosstab.sum(axis=1))
expected = np.dot(row_sums.reshape(-1,1), column_sums.reshape(1,-1))/np.sum(row_sums)
diff = np.array(df_crosstab) - expected
numerator = diff ** 2
# Pearson chi-square statistic (no continuity correction applied here).
chi2_statistic = np.sum(numerator/expected)
print(chi2_statistic)
#%%
# df=1 assumes a 2x2 table: (rows-1) * (cols-1) = 1.
critical_value = chi2.ppf(0.95, df= 1)
# Survival function = 1 - CDF: upper-tail p-value of the statistic.
p_value = chi2.sf(chi2_statistic, df=1)
# p_value = 1 - chi2.cdf(chi2_statistic, df=1)
if p_value<0.05:
    print("The result is significant. Reject Null hypothesis.")
else:
    print("Failed to reject Null hypothesis.")
| {
"alphanum_fraction": 0.7310704961,
"author": null,
"avg_line_length": 24.9782608696,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2a1129b5bea0cfdad5f635c08cbc0bd7634a71a2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "14a96b5acd711a29010d4bb7a7cdd41a0ae492f5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "chinmaykurade/ab-testing-course",
"max_forks_repo_path": "probability/adv_clicks_chi2test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "14a96b5acd711a29010d4bb7a7cdd41a0ae492f5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "chinmaykurade/ab-testing-course",
"max_issues_repo_path": "probability/adv_clicks_chi2test.py",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "14a96b5acd711a29010d4bb7a7cdd41a0ae492f5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "chinmaykurade/ab-testing-course",
"max_stars_repo_path": "probability/adv_clicks_chi2test.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 332,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1149
} |
import cv2
import numpy as np
import os
# Capturing the user's web cam (device 0)
camera = cv2.VideoCapture(0)
# Creating a Haar-cascade face detector
# NOTE(review): backslashes in this non-raw string ("\D", "\h") happen to be
# preserved, but the mixed path separators are fragile — confirm the cascade
# file actually loads (cv2 returns an empty classifier silently on failure).
classifier = cv2.CascadeClassifier("..\Datasets\haarcascade_frontalface_default.xml")
# The name of the file where data is stored
file_name = "saved_data.npy"
# Attributes of the boxes drawn around the faces (BGR colour, line width)
color = (0, 255, 255)
thickness = 2
# To store the data: one [name] entry and one flattened face per capture
names = []
pixels = []
# Finding out the name of the user to store the data
name = input("Enter the name of the user: ")
while True:
    # Reading from the webcam
    status, frame = camera.read()
    if status:
        cv2.imshow("Screen", frame)
        # Detecting faces in the frame
        faces = classifier.detectMultiScale(frame)
        # Drawing a box around each face in the frame
        for face in faces:
            # Storing the coordinates where the box needs to be drawn
            x, y, width, height = face
            # Cutting the face in the image
            face_cut = frame[y:y + height, x:x + width]
            # Storing the gray form of the image
            face_cut_gray = cv2.cvtColor(face_cut, cv2.COLOR_BGR2GRAY)
            # Resizing the face cut region to a fixed 200x200 sample size
            face_cut_gray = cv2.resize(face_cut_gray, (200, 200))
            # Flattening the gray image
            face_cut_gray_flat = face_cut_gray.flatten()
            rectangle = cv2.rectangle(frame, (x, y), (x + width, y + height), color, thickness)
            cv2.imshow("Screen", rectangle)
            cv2.imshow("Cut screen", face_cut_gray)
    # Waiting for the user to enter something
    key = cv2.waitKey(1)
    # Quit the loop when the user enters 'x'
    if key == ord("x"):
        break
    # Capture the frame if the user enters 'c'
    if key == ord("c"):
        # NOTE(review): only the face processed last in the loop above is
        # captured, and if no face has been detected yet this raises a
        # NameError on face_cut_gray_flat — confirm intended behaviour.
        names.append([name])
        pixels.append(face_cut_gray_flat)
# Combine the captured samples: one row per capture, the user's name in the
# first column followed by the flattened grayscale face pixels.
if (names != []) and (pixels != []):
    data = np.hstack([names, pixels])
    # If the data file already exists, append the previously stored samples.
    if os.path.exists(file_name):
        # Retrieving the stored data
        stored_data = np.load(file_name)
        # Bug fix: np.vstack takes ONE sequence of arrays; the original
        # call np.vstack(data, stored_data) passed two positional
        # arguments and raised a TypeError at runtime.
        data = np.vstack((data, stored_data))
    # Saving the merged data
    np.save(file_name, data)
camera.release()
cv2.destroyAllWindows()
"alphanum_fraction": 0.6433597186,
"author": null,
"avg_line_length": 26.7529411765,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bce0cff41bd232c627e8b786a3ddb86001c05f34",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "34bb460bb6f2789c0010d43b8738fe01e6c3b62c",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "BossLogic314/Machine-Learning",
"max_forks_repo_path": "FaceRecognition/face_detection.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "34bb460bb6f2789c0010d43b8738fe01e6c3b62c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "BossLogic314/Machine-Learning",
"max_issues_repo_path": "FaceRecognition/face_detection.py",
"max_line_length": 98,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "34bb460bb6f2789c0010d43b8738fe01e6c3b62c",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "BossLogic314/Machine-Learning",
"max_stars_repo_path": "FaceRecognition/face_detection.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 551,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2274
} |
import keras
import pandas as pd
import numpy as np
from numpy.random import randint
from keras.models import Sequential
from keras.layers import *
import pdb
def evaluate_metrics(Yt, Yp):
    """Compute precision/recall and the confusion-matrix counts.

    :param Yt: ground-truth 0/1 labels (numpy array)
    :param Yp: predicted 0/1 labels (numpy array, same shape)
    :return: dict with precision, recall, tp, tn, fp, fn

    Bug fix: the original set tp = Yt.sum() (total actual positives) and
    tn = Yt.size - tp (total actual negatives), so precision and recall
    were computed from the wrong counts.
    """
    tp = Yp[Yt == 1].sum()        # predicted 1, truly 1
    fn = (1 - Yp[Yt == 1]).sum()  # predicted 0, truly 1
    fp = Yp[Yt == 0].sum()        # predicted 1, truly 0
    tn = (1 - Yp[Yt == 0]).sum()  # predicted 0, truly 0
    prec = tp / (tp + fp)
    recall = tp / (tp + fn)
    return {'precision': prec, 'recall': recall, 'tp': tp, 'tn': tn, 'fp': fp, 'fn': fn}
# Pre-processed flight departures joined with weather observations.
dataset = pd.read_pickle('data/processed/training_v3.pkl')
# Restrict to the columns used below; DepDel15 is the binary delay target.
dataset = dataset[
    ['DayOfWeek',
     'UniqueCarrier',
     'Origin',
     'Dest',
     'CRSDepTime',
     'DepDel15',
     'Distance',
     'lon',
     'lat',
     'tmpf',
     'drct',
     'sknt',
     'vsby',
     'skyc1',
     'skyl1']
]
dataset.dropna(axis=0, inplace=True)
# Numeric feature matrix (target column first). The commented-out names are
# categorical and are appended later as one-hot columns via make_onehot.
interesting = dataset[
    ['DepDel15',
     'DayOfWeek',
     # 'UniqueCarrier',
     # 'Origin',
     # 'Dest',
     # 'CRSDepTime',
     'Distance',
     'lon',
     'lat',
     'tmpf',
     'drct',
     'sknt',
     'vsby',
     # 'skyc1',
     'skyl1']
].astype(np.float32)
# interesting = interesting.dropna(axis = 0)
# interesting = interesting.dropna(axis = 0)
def make_onehot(col):
    """One-hot encode ``dataset[col]`` and append the dummy columns to the
    global ``interesting`` frame (mutates both globals in place).

    NOTE(review): pd.concat(..., ignore_index=True) on axis=1 renumbers ALL
    columns of ``interesting`` to integer positions; later code relies on
    column 0 being the DepDel15 target — confirm this is intended.
    """
    global interesting
    global dataset
    dataset[col] = dataset[col].astype('category')
    unique = dataset[col].unique()
    # One int8 indicator column per distinct category value.
    df = pd.DataFrame({col + str(u): (dataset[col] == u).astype(np.int8) for u in unique})
    interesting = pd.concat([interesting, df], axis=1, ignore_index=True)
    print('appended', col)
def make_onehot_multiple(l):
    """One-hot encode every column name in ``l`` (see make_onehot)."""
    for column_name in l:
        make_onehot(column_name)
make_onehot_multiple(['UniqueCarrier', 'Origin', 'Dest', 'skyc1'])
# Shuffle the rows so the positional train/test split below is random.
interesting = interesting.sample(frac=1).reset_index(drop=True)
n = interesting.shape[0]
m = interesting.shape[1] - 1  # feature count (column 0 is the target)
interesting[0] = interesting[0].astype(np.int8)
train_percent = 0.7
def batch_generator(X, Y, batch_size):
    """Yield balanced (x, y) batches forever: 1/8 positives, 7/8 negatives.

    :param X: (n, m) feature matrix
    :param Y: (n,) 0/1 labels
    :param batch_size: number of rows per yielded batch
    :yields: (batch_X, batch_Y); the positive rows come first in each batch

    Bug fix: the original yielded the labels inverted — np.zeros for the
    positive rows and np.ones for the negative rows — so the model was
    trained against flipped targets while being evaluated on the true ones.
    """
    positive = X[Y == 1]
    negative = X[Y == 0]
    n_pos = positive.shape[0]
    n_neg = negative.shape[0]
    batch_n_pos = batch_size // 8
    batch_n_neg = batch_size - batch_n_pos
    while True:
        # Sample with replacement across batches (randint may repeat rows).
        batch_positive = positive[randint(n_pos, size=batch_n_pos), :]
        batch_negative = negative[randint(n_neg, size=batch_n_neg), :]
        yield (np.vstack((batch_positive, batch_negative)),
               np.hstack((np.ones(batch_n_pos), np.zeros(batch_n_neg))))
# 70/30 split; `interesting` was shuffled above, so slicing is random.
train, test = interesting.iloc[:int(n * train_percent)], interesting.iloc[int(n * train_percent):]
# Column 0 is the DepDel15 target (columns were renumbered by ignore_index).
X_train, X_test = train.drop(0, axis=1).values, test.drop(0, axis=1).values
Y_train, Y_test = train[0].values.astype(np.int8), test[0].values.astype(np.int8)
# Two ReLU hidden layers with dropout; sigmoid output for binary prediction.
# NOTE(review): `init=` and the fit_generator arguments below are Keras 1
# API — confirm the pinned keras version.
model = Sequential()
model.add(Dense(100, input_dim=m, init='uniform', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
# Persist the weights with the best validation loss seen so far.
checkpointer = keras.callbacks.ModelCheckpoint(filepath="weights.hdf5", monitor='val_loss', verbose=1,
                                               save_best_only=True)
class LossHistory(keras.callbacks.Callback):
    """Callback that records per-batch losses and prints test-set metrics.

    NOTE(review): on_batch_end runs a full predict over X_test after EVERY
    batch, which is extremely slow — presumably for debugging only (it is
    not passed to fit_generator below); confirm before enabling.
    """
    def on_train_begin(self, logs={}):
        # Fresh loss list at the start of each training run.
        self.losses = []
    def on_batch_end(self, batch, logs={}):
        Y_pred = model.predict_classes(X_test, batch_size=64, verbose=1)
        print(evaluate_metrics(Y_test, Y_pred))
        self.losses.append(logs.get('loss'))
# history = LossHistory()
# model.fit(X_train, Y_train, nb_epoch=20, batch_size=32, callbacks=[checkpointer], validation_data=(X_test, Y_test))
# Train from the balanced batch generator (Keras 1 style arguments).
model.fit_generator(
    batch_generator(X_train, Y_train, 64),
    samples_per_epoch=2 ** 17,
    nb_epoch=100,
    callbacks=[checkpointer],
    validation_data=batch_generator(X_test, Y_test, 64),
    nb_val_samples=2 ** 15
)
# Final evaluation on the held-out test split.
Y_pred = model.predict_classes(X_test, batch_size=32)
print(evaluate_metrics(Y_test, Y_pred))
| {
"alphanum_fraction": 0.6420750782,
"author": null,
"avg_line_length": 27.014084507,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b3be81be40f2feb5f1f7925b18215485c180a466",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nasi-famnit/HOur-flight",
"max_forks_repo_path": "classify.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nasi-famnit/HOur-flight",
"max_issues_repo_path": "classify.py",
"max_line_length": 117,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nasi-famnit/HOur-flight",
"max_stars_repo_path": "classify.py",
"max_stars_repo_stars_event_max_datetime": "2016-04-24T10:49:52.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-04-24T10:49:52.000Z",
"num_tokens": 1046,
"path": null,
"reason": "import numpy,from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3836
} |
using Statistics
using StatsBase
using SpecialFunctions
using Roots
include("load_data.jl")
###############################################################################
###############################################################################
### functions that work on interaction activity
"""
coefficient_variation(cas::contact_activities{T}; jackknife_error=false) where T
calculates the coefficient of variation as the ratio of std deviation to the
mean (sigma/mu) accross all individuals from the contact_activities object.
This means that the mean and the variance are really calculates over all
individuals.
With the option `jackknife_error=true` a jackknife error is computed and
returned (which is factor 100+ faster than our cas convenience solution because
we can eaily precompute the partial contributions for the jackknife procedure)
"""
function coefficient_variation(cas::contact_activities{T}; jackknife_error=false) where T
# data = vector of tuples (mean(a), mean(a^2)) for local activities a per id
# avg = 1/N sum(mean(a))
# var = 1/N (sum(mean(a^2)) - avg^2
function estimate_cv(data)
N = length(data)
avg = mean(getindex.(data,1))
std = sqrt((sum(getindex.(data,2)) - N*avg^2)/(N-1))
return std/avg
end
#print("... coefficient of variation:\n") -> move to analysis all functions
data = Vector{Tuple{Float64, Float64}}(undef, length(activities(cas)))
@showprogress 1 for (i,a) in enumerate(activities(cas))
data[i] = (mean(a), mean(x->x^2, a))
end
cv_est = estimate_cv(data)
if jackknife_error
return jackknife(estimate_cv, data, naive=cv_est)
else
return cv_est
end
end
"""
autocorrelation_function(cas::contact_activities, lags;)
calculate the autocorrelation function of the interaction activities accross
participants for the provided lags.
"""
function autocorrelation_function(cas::contact_activities{I,T}, lags::Union{AbstractRange{Int64}, AbstractVector{Int64}}) where {I,T}
@assert length(lags) > 0
avg = 0.0
for a in activities(cas)
avg += sum(a)/length(a)
end
avg /= length(activities(cas))
#avg = sum(sum.(activities(cas)))/length(activities(cas))/length(times(cas))
cov = zeros(Float64, length(lags))
p = Progress(length(cas), 1, "Autocorrelation: ", offset=1)
for a in activities(cas)
for (index, k) in enumerate(lags)
for i in 1:length(a)-k
cov[index] += a[i]*a[i+k]
end
end
next!(p)
end
cov ./= length(activities(cas)).*(length(times(cas)).-lags)
C = cov .- avg^2
C ./= C[1]
return C
end
"""
rate(cas::contact_activities, time_range)
calculate the rate of interactions for the times in `time_range`
(start:step:end) averaged over individuals and repetitions of `time_range` in
the data (e.g. if time_range is 0:timestep:1week) and the data has 4
wees, then the average ist also across weeks.
"""
function rate(cas::contact_activities{I,T}, time_range::AbstractRange) where {I,T}
@assert step(time_range) >= step(times(cas))
rate = Histogram(time_range)
norm = Histogram(time_range)
time_max = last(time_range)
for a in activities(cas)
for (i, t) in enumerate(times(cas))
# modulo for periodic boundaries
rate[ t%time_max ] += a[i]
norm[ t%time_max ] += 1
end
end
# rate is avg. number of interactions per timestep
rate = float(rate)
rate.weights ./= norm.weights
return rate
end
###############################################################################
###############################################################################
### functions that operate on list of durations
"""
returns the distribution of contact durations from `list_durations`, which is as usual list of lists of durations
# Example for data preparation:
```
list_durations = [getindex.(encounter, 4) for encounter in list_encounter];
```
# Error
If interested in the statistics of contacts (most natural) one has to estimate the error of the distribution weights.
Important is that the jackknife routine does not comply with leaving out m=0 elements
```
full_dist = distribution_contact_durations(list_durations, timestep=timestep(experiment));
P, Pj, Perr = jackknife_mj(x->distribution_contact_durations(x, edges=full_dist.edges[1]).weights, list_durations[length.(list_durations) .> 0], naive=full_dist.weights);
```
In case one is interested in logartihmic binning, this should be the way to achieve it
```
xbin, Pbin = logbin(full_dist)
xP, xP_J, xP_err = jackknife_mj(x->hcat(logbin(distribution_contact_durations(x, edges=full_dist.edges[1]))...)[:,1:2], list_durations[length.(list_durations) .> 0], naive=hcat(xbin,Pbin));
```
which equals
```
xbin_n, xbin_J, xbin_err = jackknife_mj(x->logbin(distribution_contact_durations(x, edges=full_dist.edges[1]))[1], list_durations[length.(list_durations) .> 0], naive=xbin);
Pbin_n, Pbin_J, Pbin_err = jackknife_mj(x->logbin(distribution_contact_durations(x, edges=full_dist.edges[1]))[2], list_durations[length.(list_durations) .> 0], naive=Pbin);
```
"""
function distribution_durations(list_x::Vector{T}; edges=missing, timestep=missing) where T
# flatten the list of lists (not very expansive as it turns out)
flat_list = vcat(list_x...)
# construct edges of distribution if not provided
if ismissing(edges)
if ismissing(timestep)
throw(error("either distribution `edges` or `timestep` of experiment have to be specified via keyword arguments"))
else
edges = 0:timestep:maximum(flat_list)+timestep
end
end
# sort elements of list into Histogram
dist = fit(Histogram{Float64}, flat_list, edges)
normalize!(dist)
return dist
end
###############################################################################
###############################################################################
### functions that operate on encounter trains (or derivatives such as list_dts)
"""
rate(ets::encounter_trains, time_range)
calculate the rate of contacts for the times in `time_range`
(start:step:end) averaged over individuals and repetitions of `time_range` in
the data (e.g. if time_range is 0:timestep:1week) and the data has 4
weeks, then the average ist also across weeks.
# Error estimation
should be based on ordinary jackknife method, because the quantity of rate has
same number of entries for each train
```
r,rj,err = jackknife(x->rate(x, 0:300:seconds_from_days(7)), ets)
```
"""
function rate(
ets::encounter_trains{I,T},
time_range::AbstractRange;
# optional arguments
) where {I,T}
@assert step(time_range) >= timestep(ets)
# for now because I am unsure how do deal with periodic ranges that do not
# start at 0 -> easier to align data acordingly
@assert first(time_range) == 0
num_contacts = Histogram(time_range)
time_max = last(time_range)
for train in trains(ets)
for t in train
# modulo for periodic boundaries
num_contacts[ t%time_max ] += 1
end
end
# normalization considers how good time_range fits in duration(ets)
norm = Histogram(time_range)
time_max = last(time_range)
norm.weights .+= floor(Int, duration(ets) / time_max) * length(trains(ets))
time_left = duration(ets) % time_max
if time_left > 0
norm.weights[1:StatsBase.binindex(norm, time_left)] .+= length(trains(ets))
end
# rate is avg. number of contacts per second (hence need to devide
# additionally by time step)
rate = float(num_contacts)
rate.weights ./= norm.weights
rate.weights ./= step(time_range)
return rate
end
"""
returns the rate of encounters in `time_range` (with according timesteps, unit
of dataset) conditioned on having a contact at time 0.
"""
function conditional_encounter_rate(
ets::encounter_trains{I,T},
time_range::AbstractRange;
# optional arguments
) where {I,T}
num_contacts = Histogram(time_range)
norm = Histogram(time_range)
for train in trains(ets)
#collect statistic over all contacts within the train
for (i, contact_time) in enumerate(train)
# add next contacts to histogram
for j in i+1:length(train)
time = @inbounds train[j]
push!(num_contacts, time - contact_time)
#if time - contact_time > last(num_contacts.edges[1])
# break
#end
end
# add a 1 to all those bins that are covered by the dataset
time_max = duration(ets) - contact_time
#binindex returns max bin if time_max crosses boundaries
index_max = StatsBase.binindex(norm, time_max)
if index_max > length(norm.weights)
@inbounds norm.weights .+= 1
else
for j in 1:index_max
@inbounds norm.weights[j] += 1
end
end
end
end
rate = float(num_contacts)
rate.weights ./= step(time_range)
rate.weights ./= norm.weights
return rate
end
"""
Integrate a histogram-valued `rate` over the closed `interval` given as a
tuple `(t_start, t_end)`, assuming a uniform bin width.
"""
function integrate(rate::AbstractHistogram, interval::Tuple)
    lo = first(interval)
    hi = last(interval)
    @assert(lo <= hi)
    @assert(lo >= first(rate.edges[1]))
    @assert(lo < last(rate.edges[1]))
    # sum the bin weights between the bins containing the interval boundaries
    idx_lo = StatsBase.binindex(rate, lo)
    idx_hi = StatsBase.binindex(rate, hi)
    # ... times the (uniform) bin width
    return sum(rate.weights[idx_lo:idx_hi]) * step(rate.edges[1])
end
"""
distribution_number_encoutner(ets, window, [pattern,])
calculates the distribution of the number of contacts within a certain time
window. With the keyword `pattern` one may specify a pattern of reoccuring
time-windows that should be distinguished. Using pattern one should be aware of
the reference time of the experiment (which can be to some extent be controlled
with the keyword offset). (Copenhagen starts on Sunday assumed at 0:00)
Examples:
calculate distribution of daily number of contacts irrespective of which day
```
distribution_number_encoutner(ets, seconds_from_days(1))
``
calculate distribution of daily number of contacts for each day of the week separately
```
distribution_number_encoutner(ets, seconds_from_days(1), pattern=1:7)
``
calculate distribution of daily number of contacts distinguishing weekdays and weekends
```
distribution_number_encoutner(ets, seconds_from_days(1), pattern=[1,2,2,2,2,2,1])
``
"""
function distribution_number_encounter(
ets::encounter_trains{I,T},
window::Real,
pattern::Union{AbstractRange, AbstractVector};
### optional
offset::Real = 0,
edges=missing,
) where {I,T}
# find unique elements of pattern
unique_labels = sort!(unique(pattern))
# create Histogram to sort count contacts
num_contacts = Histogram(0:window:duration(ets))
# create dictionary to sort the number of contacts
list_num = Dict(unique_labels .=> [Int64[] for i in 1:length(unique_labels)])
# iterate over contact trains and sort into lists (each part of pattern is
# `window` long)
p = Progress(length(ets), 1, "Distribution: ", offset=1)
for train in trains(ets)
next!(p)
num_contacts.weights .= 0
for time in train
push!(num_contacts, time-offset)
end
# add to list
for (i, num) in enumerate(num_contacts.weights)
push!(list_num[pattern[1+(i-1)%length(pattern)]], num)
end
end
if ismissing(edges)
list_dist = [fit(Histogram{Float64}, list_num[label], 0:1:maximum(list_num[label])+1) for label in unique_labels]
else
list_dist = [fit(Histogram{Float64}, list_num[label], edges) for label in unique_labels]
end
normalize!.(list_dist)
return Dict(unique_labels .=> list_dist)
end
# Convenience wrapper: uses the trivial pattern `[1,]`, so the returned Dict
# has the single key `1`.
# NOTE(review): the `return_type=Dict` keyword is accepted but never used in
# the body — confirm whether it was meant to switch the output container.
distribution_number_encounter(ets::encounter_trains{I,T}, window::Real; offset::Real = 0, edges=missing, return_type=Dict) where {I,T} = distribution_number_encounter(ets, window, [1,], offset=offset, edges=edges)
"""
inter_encounter_intervals(ets)
calculate the list of inter-encoutner times for each id in `ets` (of type encoutner
trains) and return as vector of vector of inter-encoutner times
(Vector{Vector{T}}).
"""
function inter_encounter_intervals(ets::encounter_trains{I,T})::Vector{Vector{T}} where {I,T}
list_dts = Vector{T}[]
for train in trains(ets)
dts = diff(train)
push!(list_dts, dts)
end
return list_dts
end
"""
    inter_encounter_intervals(trains)

Calculate the list of inter-encounter times for each train in `trains`
(a vector of event-time vectors) and return them as a `Vector{Vector{T}}`
in the same order.
"""
function inter_encounter_intervals(trains::Vector{Vector{T}})::Vector{Vector{T}} where {T}
    # fix: the original `where {I,T}` declared a type variable `I` that never
    # occurs in the signature
    list_dts = Vector{T}[]
    for train in trains
        push!(list_dts, diff(train))
    end
    return list_dts
end
"""
autocorrelation_function(list_dts, lags)
calculate autocorrelation function of inter-contact intervals. Due to the
finite-sample bias, we precalculate the stationary mean interval for each lag
once for the x-data set and once for the y-data set, cf. [Spitzner et al., PLOS
One (2021)].
# Error estimation:
The statistics of interest here is the inter-contact intervals (dts), which
differe from list to list. Hence, jackknife_mj should be used:
```
c, cj, cerr = jackknife_mj(x->autocorrelation_function(x, lags), list_dts)
```
"""
function autocorrelation_function(list_dts::Vector{Vector{T}}, lags::Union{AbstractRange{Int64}, AbstractVector{Int64}}) where T
@assert length(lags) > 0
# calculate global mean assuming stationary inter-contact intervals across
# individuals
sum_dts = zero(T)
num_dts = zero(Int64)
for dts in list_dts
sum_dts += sum(dts)
num_dts += length(dts)
end
mean_dt = float(sum_dts)/float(num_dts)
#mean_dt = sum(sum.(list_dts))/sum(length.(list_dts))
# calculate actual autocorrelation function E[ (X_1 - E[X_1])(X_2 - E[X_2]) ]
C = zeros(Float64, length(lags))
N = zeros(Int64, length(lags))
p = Progress(length(list_dts), 1, "Autocorrelation: ", offset=1)
for dts in list_dts
for (index,k) in enumerate(lags)
for i in 1:length(dts)-k
term = (dts[i] - mean_dt) * (dts[i+k] - mean_dt)
C[index] += term
end
# N is iteratively calculated because lengths(dts) is not the same
# across elements
N[index] += max(length(dts)-k, 0)
end
next!(p)
end
C ./= N
C ./= C[1]
return C
end
# wrapper functions
# Single-train convenience: wrap `dts` in a one-element list and delegate to
# the list-of-trains method.
function autocorrelation_function(dts::Vector{T}, lags) where T
    return autocorrelation_function([dts], lags)
end
# Encounter-train convenience: compute the inter-encounter intervals first,
# then their autocorrelation.
# fix: call `inter_encounter_intervals` (the helper defined in this file);
# the previous callee `inter_contact_times` is not defined here.
autocorrelation_function(ets::encounter_trains, lags) = autocorrelation_function(inter_encounter_intervals(ets), lags)
"""
    autocorrelation_function_hack(list_dts, lags)

Autocorrelation function of inter-contact intervals with a per-lag
finite-sample correction: for every lag `k` the mean interval is precomputed
separately for the x-side (`dts[1:end-k]`) and the y-side (`dts[1+k:end]`)
across all trains.
"""
function autocorrelation_function_hack(list_dts::Vector{Vector{T}}, lags) where T
    # per-lag means of the left- and right-shifted interval samples
    mean_dt_x = zeros(length(lags))
    mean_dt_y = zeros(length(lags))
    N = zeros(length(lags))
    for dts in list_dts
        for (j, k) in enumerate(lags)
            mean_dt_x[j] += sum(dts[1:end-k])
            mean_dt_y[j] += sum(dts[1+k:end])
            N[j] += max(length(dts) - k, 0)
        end
    end
    mean_dt_x ./= N
    mean_dt_y ./= N
    # accumulate E[ (X_1 - E[X_1])(X_2 - E[X_2]) ] per lag
    # (fix: dropped the unused accumulator `C2` of the original)
    C = zeros(length(lags))
    N = zeros(length(lags))
    for dts in list_dts
        for (j, k) in enumerate(lags)
            for i in 1:length(dts)-k
                C[j] += (dts[i] - mean_dt_x[j]) * (dts[i+k] - mean_dt_y[j])
            end
            N[j] += max(length(dts) - k, 0)
        end
    end
    C ./= N
    # normalize such that the value at the first lag equals 1
    Var = C[1]
    C ./= Var
    return C
end
###############################################################################
###############################################################################
### cluster
"""
sample encounter cluster from the list of inter-encounter intervals
and a minimum separation that specifies that larger inter-encoutner intervals terminate the cluster
minimum_separation should be on the order of mean(duration)~8*300 for CNS
"""
function sample_cluster_sizes(list_dts::Vector{Vector{T}}, minimum_separation::Number) where T
samples = [Int[] for i in 1:length(list_dts)]
for (i,dts) in enumerate(list_dts)
# start from cluster size 1 because we work on level of dts
size = 1
for dt in dts
if dt < minimum_separation
size += 1
else
push!(samples[i], size)
size = 1
end
end
end
return samples
end
"""
evaluate distribution of contact cluster from contact activities as the sum of
successive simultaneious contacts.
"""
function sample_cluster_sizes(cas::contact_activities{I,T}) where {I,T}
samples = [Int[] for i in 1:length(cas)]
for (i,activity) in enumerate(activities(cas))
size = 0
for a in activity
if size > 0 && a==0
push!(samples[i], size)
size = 0
end
size += a
end
end
return samples
end
"""
This could also be used for distribution_duration
"""
function distribution(samples::Vector{T}; edges=missing, resolution=1) where T
# flatten the list of lists (not very expansive as it turns out)
flat_samples = vcat(samples...)
if ismissing(edges)
dist = fit(Histogram{Float64}, flat_samples, 0:resolution:maximum(flat_samples)+resolution)
else
dist = fit(Histogram{Float64}, flat_samples, edges)
end
normalize!(dist)
return dist
end
| {
"alphanum_fraction": 0.6421475611,
"author": null,
"avg_line_length": 33.6804511278,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "60579f73a66a6496c694a93626a8504f64d2e066",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b2961205da1464852ff914c669e5f7829067a2c6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "zierenberg/resonance_contact_disease",
"max_forks_repo_path": "analysis/temporal_features.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b2961205da1464852ff914c669e5f7829067a2c6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "zierenberg/resonance_contact_disease",
"max_issues_repo_path": "analysis/temporal_features.jl",
"max_line_length": 213,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b2961205da1464852ff914c669e5f7829067a2c6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "zierenberg/resonance_contact_disease",
"max_stars_repo_path": "analysis/temporal_features.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4482,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 17918
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.