"""
Remove images where cobj doesn't exist for easier coaddition
"""
import os
import numpy as np
ls_image=sorted(os.listdir('./image'))
ls_prod=sorted(os.listdir('./prod'))
# list all dates of images
dates=[]
for f in range(len(ls_image)):
dates.append(ls_image[f][0:6]) # indices 0:6 -> date string
dates=sorted(list(set(dates)))
# sort image and prod directories by night
ifiles=[]
pfiles=[]
for date in dates:
inights=[]
for i in range(len(ls_image)):
if ls_image[i][0:6]==date:
inights.append(ls_image[i])
ifiles.append(inights)
pnights=[]
for p in range(len(ls_prod)):
if ls_prod[p][0:6]==date:
pnights.append(ls_prod[p])
pfiles.append(pnights)
# remove exposures where corresponding cobj files don't exist
for n in range(len(ifiles)):
if len(ifiles[n])!=len(pfiles[n]):
iexp=[]
for j in range(len(ifiles[n])):
iexp.append(ifiles[n][j][22:25]) # indices 22:25 -> exposure id string
pexp=[]
for k in range(len(pfiles[n])):
pexp.append(pfiles[n][k][22:25])
for l in range(len(iexp)):
if iexp[l] not in pexp:
for m in range(len(ifiles[n])):
if ifiles[n][m][22:25]==iexp[l]:
print('No cobj file for image {}... Removing this file.'.format(ifiles[n][m]))
os.remove('./image/{}'.format(ifiles[n][m]))
|
{"hexsha": "ef9cb6785c33463b24b4e74cbc64f5f1c8a3e7fc", "size": 1439, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/rotseutil/rm_nocobj_ims.py", "max_stars_repo_name": "rotsehub/rosteutil", "max_stars_repo_head_hexsha": "33c62071c724ad3d1d08fa0289d7244cf716d66d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-03T02:02:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T23:05:14.000Z", "max_issues_repo_path": "py/rotseutil/rm_nocobj_ims.py", "max_issues_repo_name": "rotsehub/rotseutil", "max_issues_repo_head_hexsha": "33c62071c724ad3d1d08fa0289d7244cf716d66d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-04-05T04:19:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-12T19:49:50.000Z", "max_forks_repo_path": "py/rotseutil/rm_nocobj_ims.py", "max_forks_repo_name": "rotsehub/rosteutil", "max_forks_repo_head_hexsha": "33c62071c724ad3d1d08fa0289d7244cf716d66d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-06-16T01:55:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-25T21:23:31.000Z", "avg_line_length": 29.9791666667, "max_line_length": 102, "alphanum_fraction": 0.5823488534, "include": true, "reason": "import numpy", "num_tokens": 372}
|
function derivative(A::LinearInterpolation{<:AbstractVector}, t::Number)
idx = searchsortedfirst(A.t, t)
if A.t[idx] >= t
idx -= 1
end
idx == 0 ? idx += 1 : nothing
(A.u[idx+1] - A.u[idx]) / (A.t[idx+1] - A.t[idx])
end
function derivative(A::LinearInterpolation{<:AbstractMatrix}, t::Number)
idx = searchsortedfirst(A.t, t)
if A.t[idx] >= t
idx -= 1
end
idx == 0 ? idx += 1 : nothing
@views @. (A.u[:, idx+1] - A.u[:, idx]) / (A.t[idx+1] - A.t[idx])
end
function derivative(A::QuadraticInterpolation{<:AbstractVector}, t::Number)
idx = searchsortedfirst(A.t, t)
if A.t[idx] >= t
idx -= 1
end
idx == 0 ? idx += 1 : nothing
if idx == length(A.t) - 1
i₀ = idx - 1; i₁ = idx; i₂ = i₁ + 1;
else
i₀ = idx; i₁ = i₀ + 1; i₂ = i₁ + 1;
end
dl₀ = (2t - A.t[i₁] - A.t[i₂]) / ((A.t[i₀] - A.t[i₁]) * (A.t[i₀] - A.t[i₂]))
dl₁ = (2t - A.t[i₀] - A.t[i₂]) / ((A.t[i₁] - A.t[i₀]) * (A.t[i₁] - A.t[i₂]))
dl₂ = (2t - A.t[i₀] - A.t[i₁]) / ((A.t[i₂] - A.t[i₀]) * (A.t[i₂] - A.t[i₁]))
A.u[i₀] * dl₀ + A.u[i₁] * dl₁ + A.u[i₂] * dl₂
end
function derivative(A::QuadraticInterpolation{<:AbstractMatrix}, t::Number)
idx = searchsortedfirst(A.t, t)
if A.t[idx] >= t
idx -= 1
end
idx == 0 ? idx += 1 : nothing
if idx == length(A.t) - 1
i₀ = idx - 1; i₁ = idx; i₂ = i₁ + 1;
else
i₀ = idx; i₁ = i₀ + 1; i₂ = i₁ + 1;
end
dl₀ = (2t - A.t[i₁] - A.t[i₂]) / ((A.t[i₀] - A.t[i₁]) * (A.t[i₀] - A.t[i₂]))
dl₁ = (2t - A.t[i₀] - A.t[i₂]) / ((A.t[i₁] - A.t[i₀]) * (A.t[i₁] - A.t[i₂]))
dl₂ = (2t - A.t[i₀] - A.t[i₁]) / ((A.t[i₂] - A.t[i₀]) * (A.t[i₂] - A.t[i₁]))
@views @. A.u[:, i₀] * dl₀ + A.u[:, i₁] * dl₁ + A.u[:, i₂] * dl₂
end
function derivative(A::LagrangeInterpolation{<:AbstractVector}, t::Number)
idxs = findRequiredIdxs(A, t)
if A.t[idxs[1]] == t
return zero(A.u[idxs[1]])
end
G = zero(A.u[1]); F = zero(A.t[1])
DG = zero(A.u[1]); DF = zero(A.t[1])
for i = 1:length(idxs)
if isnan(A.bcache[idxs[i]])
mult = one(A.t[1])
for j = 1:(i - 1)
mult *= (A.t[idxs[i]] - A.t[idxs[j]])
end
for j = (i+1):length(idxs)
mult *= (A.t[idxs[i]] - A.t[idxs[j]])
end
A.bcache[idxs[i]] = mult
else
mult = A.bcache[idxs[i]]
end
wi = inv(mult)
tti = t - A.t[idxs[i]]
tmp = wi / tti
g = tmp * A.u[idxs[i]]
G += g
DG -= g / tti
F += tmp
DF -= tmp / tti
end
(DG * F - G * DF) / (F ^ 2)
end
function derivative(A::LagrangeInterpolation{<:AbstractMatrix}, t::Number)
idxs = findRequiredIdxs(A, t)
if A.t[idxs[1]] == t
return zero(A.u[:, idxs[1]])
end
G = zero(A.u[:, 1]); F = zero(A.t[1])
DG = zero(A.u[:, 1]); DF = zero(A.t[1])
for i = 1:length(idxs)
if isnan(A.bcache[idxs[i]])
mult = one(A.t[1])
for j = 1:(i - 1)
mult *= (A.t[idxs[i]] - A.t[idxs[j]])
end
for j = (i+1):length(idxs)
mult *= (A.t[idxs[i]] - A.t[idxs[j]])
end
A.bcache[idxs[i]] = mult
else
mult = A.bcache[idxs[i]]
end
wi = inv(mult)
tti = t - A.t[idxs[i]]
tmp = wi / tti
g = tmp * A.u[:, idxs[i]]
@. G += g
@. DG -= g / tti
F += tmp
DF -= tmp / tti
end
@. (DG * F - G * DF) / (F ^ 2)
end
function derivative(A::AkimaInterpolation{<:AbstractVector}, t::Number)
i = searchsortedlast(A.t, t)
i == 0 && return zero(A.u[1])
i == length(A.t) && return zero(A.u[end])
wj = t - A.t[i]
@evalpoly wj A.b[i] 2A.c[i] 3A.d[i]
end
function derivative(A::ConstantInterpolation{<:AbstractVector}, t::Number)
return isempty(searchsorted(A.t, t)) ? zero(A.u[1]) : eltype(A.u)(NaN)
end
function derivative(A::ConstantInterpolation{<:AbstractMatrix}, t::Number)
return isempty(searchsorted(A.t, t)) ? zero(A.u[:, 1]) : eltype(A.u)(NaN) .* A.u[:, 1]
end
# QuadraticSpline Interpolation
function derivative(A::QuadraticSpline{<:AbstractVector{<:Number}}, t::Number)
i = searchsortedfirst(A.t, t)
i == 1 ? i += 1 : nothing
σ = 1//2 * (A.z[i] - A.z[i - 1]) / (A.t[i] - A.t[i - 1])
A.z[i-1] + 2σ * (t - A.t[i-1])
end
# CubicSpline Interpolation
function derivative(A::CubicSpline{<:AbstractVector{<:Number}}, t::Number)
i = searchsortedfirst(A.t, t)
# searchsortedfirst never returns nothing; clamp the interval index to a valid range instead
i = min(max(i - 1, 1), length(A.t) - 1)
dI = -3A.z[i] * (A.t[i + 1] - t)^2 / (6A.h[i + 1]) + 3A.z[i + 1] * (t - A.t[i])^2 / (6A.h[i + 1])
dC = A.u[i + 1] / A.h[i + 1] - A.z[i + 1] * A.h[i + 1] / 6
dD = -(A.u[i] / A.h[i + 1] - A.z[i] * A.h[i + 1] / 6)
dI + dC + dD
end
function derivative(A::BSplineInterpolation{<:AbstractVector{<:Number}}, t::Number)
# change t into param [0 1]
idx = searchsortedlast(A.t, t)
idx == 0 ? idx += 1 : nothing
idx == length(A.t) ? idx -= 1 : nothing
n = length(A.t)
scale = (A.p[idx+1] - A.p[idx]) / (A.t[idx+1] - A.t[idx])
t_ = A.p[idx] + (t - A.t[idx]) * scale
N = spline_coefficients(n, A.d-1, A.k, t_)
ducum = zero(eltype(A.u))
for i = 1:(n - 1)
ducum += N[i + 1] * (A.c[i + 1] - A.c[i]) / (A.k[i + A.d + 1] - A.k[i + 1])
end
ducum * A.d * scale
end
# BSpline Curve Approx
function derivative(A::BSplineApprox{<:AbstractVector{<:Number}}, t::Number)
# change t into param [0 1]
idx = searchsortedlast(A.t, t)
idx == 0 ? idx += 1 : nothing
idx == length(A.t) ? idx -= 1 : nothing
scale = (A.p[idx+1] - A.p[idx]) / (A.t[idx+1] - A.t[idx])
t_ = A.p[idx] + (t - A.t[idx]) * scale
N = spline_coefficients(A.h, A.d-1, A.k, t_)
ducum = zero(eltype(A.u))
for i = 1:(A.h - 1)
ducum += N[i + 1] * (A.c[i + 1] - A.c[i]) / (A.k[i + A.d + 1] - A.k[i + 1])
end
ducum * A.d * scale
end
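# A minimal usage sketch (assumes the surrounding package context;
# `LinearInterpolation(u, t)` is the standard DataInterpolations constructor):
#
#   u = [1.0, 4.0, 9.0, 16.0]
#   t = [1.0, 2.0, 3.0, 4.0]
#   A = LinearInterpolation(u, t)
#   derivative(A, 2.5)  # slope of the segment containing t = 2.5, here 5.0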
|
{"hexsha": "6335488cc6d161242e6298393d052f86fa264d96", "size": 5734, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/derivatives.jl", "max_stars_repo_name": "oxinabox/DataInterpolations.jl", "max_stars_repo_head_hexsha": "48dc13a270b29b49fde77e300a9e14db091ccbeb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 63, "max_stars_repo_stars_event_min_datetime": "2019-10-01T17:22:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T16:34:42.000Z", "max_issues_repo_path": "src/derivatives.jl", "max_issues_repo_name": "oxinabox/DataInterpolations.jl", "max_issues_repo_head_hexsha": "48dc13a270b29b49fde77e300a9e14db091ccbeb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 47, "max_issues_repo_issues_event_min_datetime": "2019-08-10T02:48:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T14:30:37.000Z", "max_forks_repo_path": "src/derivatives.jl", "max_forks_repo_name": "oxinabox/DataInterpolations.jl", "max_forks_repo_head_hexsha": "48dc13a270b29b49fde77e300a9e14db091ccbeb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-08-10T15:27:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T19:40:03.000Z", "avg_line_length": 30.9945945946, "max_line_length": 99, "alphanum_fraction": 0.5132542728, "num_tokens": 2407}
|
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.callbacks import Callback
from LeagueData.Database import Item
from LeagueData.DatabaseHandler import session
import operator
from Data.StaticChampionData import index_to_item_id, champion_id_to_name, index_to_item_shoes
from keras import backend as K
import random
import logging


def decode(predictions, length, shoes=False):
    """
    :param predictions: list of lists with the predictions
    :param length: how many items to return from each prediction list
    :param shoes: whether shoes (rather than regular items) are being predicted
    """
    labeled_prediction = []
    for prediction in predictions:
        # Map each output index to the corresponding item ID.
        mapped_items = {}
        for index, value in enumerate(prediction):
            if shoes:
                mapped_items[index_to_item_shoes.get(index)] = value
            else:
                mapped_items[index_to_item_id.get(index)] = value
        sorted_items = sorted(mapped_items.items(), key=operator.itemgetter(1), reverse=True)
        mapped_items = {}
        if length:
            sorted_items = sorted_items[:length]
        # Resolve item IDs to names where the database knows them.
        for _id, value in sorted_items:
            item = session.query(Item).filter_by(id=_id).first()
            name = item.name if item else _id
            mapped_items[name] = float(f'{value:0.2f}')
        labeled_prediction.append(mapped_items)
    return labeled_prediction
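# Illustrative example (hypothetical values; assumes index_to_item_id maps
# output indices 0..2 to item IDs and the database resolves their names):
#
#   decode([[0.1, 0.7, 0.2]], length=2)
#   # -> [{'Item B': 0.7, 'Item C': 0.2}]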
def configure_tf_session():
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    config.log_device_placement = False  # log device placement (on which device each op ran)
    sess = tf.Session(config=config)
    set_session(sess)


def item_to_categorical(items, size):
    """Fast one-hot encoding.

    :param items: list of lists of indexes
    :param size: size of the one-hot encoded vector; must be big enough to hold all the indexes
    :return: np.array with the list of one-hot encodings
    """
    one_hot = []
    for item in tqdm(items):
        one = np.zeros(size,)
        for x in item:
            one[x] = 1
        one_hot.append(one)
    return np.array(one_hot)
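# Example (hypothetical indexes): encoding two item sets into vectors of size 5.
#
#   item_to_categorical([[0, 2], [1, 4]], 5)
#   # -> array([[1., 0., 1., 0., 0.],
#   #           [0., 1., 0., 0., 1.]])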
class CustomCallback(Callback):
    def __init__(self, length=7, debug=False):
        """
        A custom callback that decodes one random validation prediction and prints or logs it.
        :param length: length for decoding
        :param debug: also log the printed statement to tools.log
        """
        super(CustomCallback, self).__init__()
        self.debug = debug
        self.length = length
        if debug:
            logging.basicConfig(filename='tools.log', level=logging.DEBUG)
        self.var_y_true = tf.Variable(0., validate_shape=False)
        self.var_y_pred = tf.Variable(0., validate_shape=False)
        self.var_y_shoes_true = tf.Variable(0., validate_shape=False)
        self.var_y_shoes_pred = tf.Variable(0., validate_shape=False)
        self.var_x_champion = tf.Variable(0., validate_shape=False)
        self.var_x_enemies = tf.Variable(0., validate_shape=False)

    def on_epoch_end(self, epoch, logs=None):
        index = random.randint(0, len(K.eval(self.var_y_true)) - 1)
        champion = champion_id_to_name.get(K.eval(self.var_x_champion)[index][0])
        enemies = str([champion_id_to_name.get(enemy) for enemy in K.eval(self.var_x_enemies)[index]]).replace('[', '').replace(']', '')
        item_pred = decode([K.eval(self.var_y_pred)[index]], self.length)
        item_true = decode([K.eval(self.var_y_true)[index]], self.length)
        shoes_pred = decode([K.eval(self.var_y_shoes_pred)[index]], length=self.length, shoes=True)
        shoes_true = decode([K.eval(self.var_y_shoes_true)[index]], length=self.length, shoes=True)
        print(f'Champion: {champion}, Enemy Team: {enemies}\n'
              f'Predicted Build: {item_pred[0]}\nTrue Build: {item_true[0]}\n'
              f'Predicted Shoes: {shoes_pred[0]}\nTrue Shoes: {shoes_true[0]}')
        if self.debug:
            logging.debug(f'\nChampion: {champion}, Enemy Team: {enemies}\n'
                          f'Predicted Build: {item_pred[0]}\nTrue Build: {item_true[0]}\n'
                          f'Predicted Shoes: {shoes_pred[0]}\nTrue Shoes: {shoes_true[0]}')
|
{"hexsha": "9193bf34fef2835e4107423ee1fea9e425e4f57d", "size": 4354, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools.py", "max_stars_repo_name": "Plutokekz/LeagueItem", "max_stars_repo_head_hexsha": "23ed033b21857df777234ea8f43ade8daa8bf547", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools.py", "max_issues_repo_name": "Plutokekz/LeagueItem", "max_issues_repo_head_hexsha": "23ed033b21857df777234ea8f43ade8daa8bf547", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-09-25T23:59:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T15:42:59.000Z", "max_forks_repo_path": "tools.py", "max_forks_repo_name": "Plutokekz/LeagueItem", "max_forks_repo_head_hexsha": "23ed033b21857df777234ea8f43ade8daa8bf547", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6862745098, "max_line_length": 136, "alphanum_fraction": 0.6626090951, "include": true, "reason": "import numpy", "num_tokens": 998}
|
import gym
import highway_env
from agent import Agent
import pandas as pd
import numpy as np

env = gym.make("highway-v0")
env.reset()  # added: gym environments generally need a reset before stepping

# Notes
# Action space is between 0 and 4 inclusive:
# 0 is merge left
# 1 is do nothing
# 2 is merge right
# 3 is speed up
# 4 is slow down
#
# Obs space is a 5x5 matrix with values between -1 and 1.
# This represents a matrix with the rows:
# presence, x, y, vx, vy: Ego Vehicle
# presence, x, y, vx, vy: VEHICLE 1
# presence, x, y, vx, vy: VEHICLE 2
# presence, x, y, vx, vy: VEHICLE 3
#
# X increases over time
# Y = 0 in top lane
# Y = 4 in next lane
# Y = 8 in next lane
# Y = 12 in bottom lane
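# Example: a vehicle at y = 8 sits in lane index 8 / 4 = 2, which is why the
# code below divides y (and vy) by 4 to work in lane units.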
next_step = 1
while not env.vehicle.crashed:
    obs, _, _, _ = env.step(next_step)
    # print(pd.DataFrame.from_records([env.vehicle.to_dict()])["x", "y", "vx", "vy"])
    ego_dict = env.vehicle.to_dict()
    ego_agent = Agent(
        np.array([ego_dict["x"], ego_dict["y"] / 4]),
        np.array([ego_dict["x"] + 100, ego_dict["y"] / 4]),
        50,
        50,
        5,
        np.array([ego_dict["vx"], ego_dict["vy"] / 4]),
    )
    print(f"Ego (x, y, vx, vy): {ego_agent.pos[0], ego_agent.pos[1], ego_agent.vel[0], ego_agent.vel[1]}")
    # print(f"Ego (lane, lane_index): {env.vehicle.lane, env.vehicle.lane_index}")
    neighbors = []
    for vehicle in env.road.close_vehicles_to(
        env.vehicle, env.PERCEPTION_DISTANCE, see_behind=True
    ):
        adj_dict = vehicle.to_dict()
        neighbors.append(
            Agent(
                np.array([adj_dict["x"], adj_dict["y"] / 4]),
                np.array([adj_dict["x"] + 100, adj_dict["y"] / 4]),
                50,
                50,
                5,
                np.array([adj_dict["vx"], adj_dict["vy"] / 4]),
            )
        )
        print(f"Neighbor (x, y, vx, vy, ttc): {neighbors[-1].pos[0], neighbors[-1].pos[1], neighbors[-1].vel[0], neighbors[-1].vel[1], ego_agent.time_to_collision(neighbors[-1])}")
    # Add agents so the ego doesn't merge off of the edge of the road.
    # Their goals originally referenced adj_dict, which is undefined when there
    # are no neighbors; the ego's own dict is used here instead.
    neighbors.append(
        Agent(
            np.array([-1, ego_dict["y"] / 4]),
            np.array([ego_dict["x"] + 100, ego_dict["y"] / 4]),
            50,
            50,
            5,
            np.array([ego_dict["vx"], 0.5]),
        )
    )
    neighbors.append(
        Agent(
            np.array([5, ego_dict["y"] / 4]),
            np.array([ego_dict["x"] + 100, ego_dict["y"] / 4]),
            50,
            50,
            5,
            np.array([ego_dict["vx"], -0.5]),
        )
    )
    delta_v = ego_agent.computeForces(neighbors)
    print(delta_v)
    # If the X instruction is positive, speed up; otherwise slow down:
    # if abs(delta_v[0]) == delta_v[0]:
    #     print("Speed up")
    # else:
    #     print("Slow down")
    lane_epsilon = 0.0125
    move_epsilon = 0.01

    def how_close(x):
        """Distance to the nearest integer, and that integer."""
        return abs(round(x) - x), round(x)

    laneness = how_close(ego_agent.pos[1])
    can_change = laneness[1] in [0, 1, 2, 3] and lane_epsilon > laneness[0]
    if can_change and abs(delta_v[1]) > move_epsilon:
        if abs(delta_v[1]) == delta_v[1]:
            print("Merge down")
            next_step = 2
        else:
            print("Merge up")
            next_step = 0
    else:
        if abs(delta_v[0]) == delta_v[0]:
            print("Speed up")
            next_step = 3
        else:
            print("Slow down")
            next_step = 4
    env.render()
|
{"hexsha": "6d409d33559b4a6519981780018a2f54e1281d04", "size": 3487, "ext": "py", "lang": "Python", "max_stars_repo_path": "powerlaw.py", "max_stars_repo_name": "AlexanderDavid/Powerlaw-Highway-Env", "max_stars_repo_head_hexsha": "e3e3b6277e0a75e4dcbc7988a9cb144137328d22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "powerlaw.py", "max_issues_repo_name": "AlexanderDavid/Powerlaw-Highway-Env", "max_issues_repo_head_hexsha": "e3e3b6277e0a75e4dcbc7988a9cb144137328d22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "powerlaw.py", "max_forks_repo_name": "AlexanderDavid/Powerlaw-Highway-Env", "max_forks_repo_head_hexsha": "e3e3b6277e0a75e4dcbc7988a9cb144137328d22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1209677419, "max_line_length": 167, "alphanum_fraction": 0.5334098079, "include": true, "reason": "import numpy", "num_tokens": 1031}
|
const naivebayesstanmodel = "
// supervised naive Bayes
data {
  // training data
  int<lower=1> K;               // num topics
  int<lower=1> V;               // num words
  int<lower=0> M;               // num docs
  int<lower=0> N;               // total word instances
  int<lower=1,upper=K> z[M];    // topic for doc m
  int<lower=1,upper=V> w[N];    // word n
  int<lower=1,upper=M> doc[N];  // doc ID for word n
  // hyperparameters
  vector<lower=0>[K] alpha;     // topic prior
  vector<lower=0>[V] beta;      // word prior
}
parameters {
  simplex[K] theta;             // topic prevalence
  simplex[V] phi[K];            // word dist for topic k
}
model {
  // priors
  theta ~ dirichlet(alpha);
  for (k in 1:K)
    phi[k] ~ dirichlet(beta);
  // likelihood, including latent category
  for (m in 1:M)
    z[m] ~ categorical(theta);
  for (n in 1:N)
    w[n] ~ categorical(phi[z[doc[n]]]);
}
"
|
{"hexsha": "fcab77e0d28e8553fe4eb5fa962db78faa0a6d73", "size": 888, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "models/stan-models/MoC-stan.model.jl", "max_stars_repo_name": "JuliaTagBot/ContinuousBenchmarks.jl", "max_stars_repo_head_hexsha": "000432d25acef05a11ea51dedfd841c761735e0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/stan-models/MoC-stan.model.jl", "max_issues_repo_name": "JuliaTagBot/ContinuousBenchmarks.jl", "max_issues_repo_head_hexsha": "000432d25acef05a11ea51dedfd841c761735e0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/stan-models/MoC-stan.model.jl", "max_forks_repo_name": "JuliaTagBot/ContinuousBenchmarks.jl", "max_forks_repo_head_hexsha": "000432d25acef05a11ea51dedfd841c761735e0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1176470588, "max_line_length": 55, "alphanum_fraction": 0.5675675676, "num_tokens": 291}
|
\documentclass[utf8x,xcolor=pdftex,dvipsnames,table]{beamer}
\usetheme{Malmoe} % Now it's a beamer presentation with the lisa theme!
\setbeamertemplate{footline}[page number]
\usecolortheme{beaver}
\usepackage[T1]{fontenc}
\usepackage{amsmath}
\usepackage[utf8x]{inputenc}
%\logo{\includegraphics[width=.8in]{UdeM_NoirBleu_logo_Marie_crop}}
\usepackage{listings}
\newcommand{\superscript}[1]{\ensuremath{^{\textrm{#1}}}}
\mode<presentation>
\title{Theano and LSTM for Sentiment Analysis}
\author{%
\footnotesize
Frédéric Bastien \newline
Département d'Informatique et de Recherche Opérationnelle \newline
Université de Montréal \newline
Montréal, Canada \newline
\texttt{bastienf@iro.umontreal.ca} \newline \newline
Presentation prepared with Pierre Luc Carrier, KyungHyun Cho and \newline
Çağlar Gülçehre
}
\date{Next.ML 2015}
\setbeamertemplate{navigation symbols}{}
\begin{document}
\begin{frame}[plain]
\titlepage
\vspace{-5em}
\includegraphics[width=1in]{../hpcs2011_tutorial/pics/lisabook_logo_text_3.png}
\hfill
\includegraphics[width=.8in]{../hpcs2011_tutorial/pics/UdeM_NoirBleu_logo_Marie_crop}
\end{frame}
\section{Introduction}
\begin{frame}
\frametitle{Task}
This is a classification task: given a movie review, we
need to establish whether the review is positive or negative.
We use the IMDB dataset.
\end{frame}
\begin{frame}
\tableofcontents[currentsection]
\end{frame}
\begin{frame}{High level}\setcounter{page}{1}
Python $\leftarrow$ \{NumPy/SciPy/libgpuarray\} $\leftarrow$ Theano $\leftarrow$ Pylearn2
\begin{itemize}
\item Python: OO coding language
\item Numpy: $n$-dimensional array object and scientific computing toolbox
\item SciPy: sparse matrix objects and more scientific computing functionality
\item libgpuarray: GPU $n$-dimensional array object in C for CUDA and OpenCL
\item Theano: compiler/symbolic graph manipulation
\item Pylearn2: machine learning framework for researchers
\end{itemize}
\end{frame}
%% \begin{frame}{Others}
%% \begin{itemize}
%% \item IPython: Advanced python shell
%% \item IPython notebook: web-based interactive computational environment where you can combine code execution, text, mathematics, plots and rich media into a single document
%% \item matplotlib: one of the many plotting library
%% \item PyTables: hdf5 container with extra functionality
%% \item pandas: other data structure
%% \item ...
%% \end{itemize}
%% \end{frame}
\begin{frame}{Python}
\begin{itemize}
\item General-purpose high-level OO interpreted language
\item Emphasizes code readability
\item Comprehensive standard library
\item Dynamic type and memory management
\item Slow execution
\item Easily extensible with C
\item Popular in {\em web development}\ and {\em scientific communities}
\end{itemize}
\end{frame}
\begin{frame}{NumPy/SciPy}
\begin{itemize}
\item Python floats are full-fledged objects on the heap
\begin{itemize}
\item Not suitable for high-performance computing!
\end{itemize}
\item NumPy provides an $n$-dimensional numeric array in Python
\begin{itemize}
\item Perfect for high-performance computing
\item Slices of arrays are views (no copying); see the sketch on the next slide
\end{itemize}
\item NumPy provides
\begin{itemize}
\item Elementwise computations
\item Linear algebra, Fourier transforms
\item Pseudorandom number generators (many distributions)
\end{itemize}
\item SciPy provides lots more, including
\begin{itemize}
\item Sparse matrices
\item More linear algebra
\item Solvers and optimization algorithms
\item Matlab-compatible I/O
\item I/O and signal processing for images and audio
\end{itemize}
\end{itemize}
\end{frame}
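\begin{frame}[fragile]
\frametitle{NumPy views: sketch}
A quick illustration of the ``slices are views'' point (Python 2 print
syntax, as elsewhere in this deck):
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
import numpy as np
a = np.zeros((3, 3))
v = a[1, :]    # a view, not a copy
v[0] = 7.
print a[1, 0]  # prints 7.0: writing to the view wrote to a
\end{lstlisting}
\end{frame}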
\begin{frame}{What's missing?}
\begin{itemize}
\item Non-lazy evaluation (required by Python) hurts performance
\item Bound to the CPU
\item Lacks symbolic or automatic differentiation
\item No automatic speed and stability optimization
\end{itemize}
\end{frame}
\begin{frame}{Goal of the stack}
\begin{center}
\begin{bf}Fast to develop\end{bf}\newline \bigskip
\begin{bf}Fast to run\end{bf}\newline \bigskip
\hspace{-2.5cm}
\includegraphics[width=0.35\textwidth]{../omlw2014/road-runner-1.jpg}
\end{center}
\end{frame}
\section{Theano}
\begin{frame}
\tableofcontents[currentsection]
\end{frame}
\begin{frame}{Description}
High-level domain-specific language for numeric computation.
\begin{itemize}
\item Syntax as close to NumPy as possible
\item Compiles most common expressions to C for CPU and/or GPU
\item Limited expressivity means more opportunities for optimizations
\begin{itemize}
\item No subroutines $\rightarrow$ global optimization
\item Strongly typed $\rightarrow$ compiles to C
\item Array oriented $\rightarrow$ easy parallelism
\item Support for looping and branching in expressions
\end{itemize}
\item Automatic speed and stability optimizations
\item Can reuse other technologies for best performance.
\begin{itemize}
\item BLAS, SciPy, Cython, Numba, PyCUDA, CUDA, ...
\end{itemize}
\item Automatic differentiation and R op
\item Sparse matrices (CPU only)
\item Extensive unit-testing and self-verification
\item Works on Linux, OS X and Windows
\end{itemize}
\end{frame}
%% \begin{frame}{Why scripting for GPUs?}
%% \begin{bf}They complement each other\end{bf}
%% GPUs are everything that high level languages are not
%% \begin{itemize}
%% \item Highly parallel
%% \item Very architecture-sensitive
%% \item Built for maximum FP/memory throughput
%% \item So hard to program that meta-programming is easier
%% \end{itemize}
%% \begin{bf}Best of both worlds:\end{bf} easily scripted code which invokes high-performance GPU kernels.
%% \begin{bf}Theano C code generation removes overhead\end{bf} of
%% function calls between Python and C by launching many C functions at once.
%% \end{frame}
\begin{frame}{Project status?}
\begin{itemize}
\item Mature: Theano has been developed and used since January 2008 (7 yrs old)
\item Has driven hundreds of research papers
\item Good user documentation
\item Active mailing list with participants from outside our lab
\item Core technology for a few Silicon-Valley start-ups
\item Many contributors (some from outside our lab)
\item Used to teach many university classes
\item Has been used for research at big companies
\end{itemize}
Theano: \url{deeplearning.net/software/theano/}
Deep Learning Tutorials: \url{deeplearning.net/tutorial/}
\end{frame}
\begin{frame}[fragile]
\frametitle{Simple example}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
import theano
# declare symbolic variable
a = theano.tensor.vector("a")
# build symbolic expression
b = a + a ** 10
# compile function
f = theano.function([a], b)
# Execute with numerical value
print f([0, 1, 2])
# prints `array([0, 2, 1026])`
\end{lstlisting}
\end{frame}
\begin{frame}{Simple example}
\center
\includegraphics[width=0.35\textwidth]{../hpcs2011_tutorial/pics/f_unoptimized.png}
\hspace{0.1\textwidth}
\includegraphics[width=0.35\textwidth]{../hpcs2011_tutorial/pics/f_optimized.png}
\end{frame}
%% \begin{frame}{Overview of Library}
%% Theano is many things
%% \begin{itemize}
%% \item Language
%% \item Compiler
%% \item Python library
%% \end{itemize}
%% \end{frame}
\begin{frame}{Overview Language}
\begin{itemize}
\item Operations on scalar, vector, matrix, tensor, and sparse variables
\item Linear algebra
\item Element-wise nonlinearities
\item Convolution
\item Indexing, slicing and advanced indexing.
\item Reduction
\item Dimshuffle (n-dim transpose)
\item Extensible
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Scalar math}
Some example of scalar operations:
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
import theano
from theano import tensor as tt
x = tt.scalar()
y = tt.scalar()
z = x+y
w = z*x
a = tt.sqrt(w)
b = tt.exp(a)
c = a ** b
d = tt.log(c)
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Vector math}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
from theano import tensor as tt
x = tt.vector()
y = tt.vector()
# Scalar math applied elementwise
a = x * y
# Vector dot product
b = tt.dot(x, y)
# Broadcasting (as NumPy, very powerful)
c = a + b
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Matrix math}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
from theano import tensor as tt
x = tt.matrix()
y = tt.matrix()
a = tt.vector()
# Matrix-matrix product
b = tt.dot(x, y)
# Matrix-vector product
c = tt.dot(x, a)
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Tensors}
Using Theano:
\begin{itemize}
\item Dimensionality is defined by the length of the ``broadcastable'' argument
\item Can add (or apply other elementwise ops to) two
tensors with the same dimensionality
\item Tensors are duplicated along broadcastable axes to make sizes match
\end{itemize}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
from theano import tensor as tt
tensor3 = tt.TensorType(
broadcastable=(False, False, False),
dtype='float32')
x = tensor3()
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Reductions}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
from theano import tensor as tt
tensor3 = tt.TensorType(
broadcastable=(False, False, False),
dtype='float32')
x = tensor3()
total = x.sum()
marginals = x.sum(axis=(0, 2))
mx = x.max(axis=1)
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Dimshuffle}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
from theano import tensor as tt
tensor3 = tt.TensorType(
broadcastable=(False, False, False),
dtype='float32')
x = tensor3()
y = x.dimshuffle((2, 1, 0))
a = tt.matrix()
b = a.T
# Same as b
c = a.dimshuffle((1, 0))
# Adding to larger tensor
d = a.dimshuffle((0, 1, 'x'))
e = a + d
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Indexing}
As in NumPy!
This means all slices and index selections return views
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
# return views, supported on GPU
a_tensor[int]
a_tensor[int, int]
a_tensor[start:stop:step, start:stop:step]
a_tensor[::-1] # reverse the first dimension
# Advanced indexing, return copy
a_tensor[an_index_vector] # Supported on GPU
a_tensor[an_index_vector, an_index_vector]
a_tensor[int, an_index_vector]
a_tensor[an_index_tensor, ...]
\end{lstlisting}
\end{frame}
\subsection{Compiling/Running}
\begin{frame}{Compiling and running expressions}
\begin{itemize}
\item theano.function
\item shared variables and updates
\item compilation modes
\item compilation for GPU
\item optimizations
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{theano.function}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
>>> from theano import tensor as tt
>>> x = tt.scalar()
>>> y = tt.scalar()
>>> from theano import function
>>> # first arg is list of SYMBOLIC inputs
>>> # second arg is SYMBOLIC output
>>> f = function([x, y], x + y)
>>> # Call it with NUMERICAL values
>>> # Get a NUMERICAL output
>>> f(1., 2.)
array(3.0)
\end{lstlisting}
\end{frame}
\begin{frame}{Shared variables}
\begin{itemize}
\item It’s hard to do much with purely functional programming
\item ``shared variables'' add just a little bit of imperative programming
\item A ``shared variable'' is a buffer that stores a numerical value for a Theano variable
\item Can write to as many shared variables as you want, once each, at the end of the function
\item Modify outside Theano function with get\_value() and set\_value() methods.
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Shared variable example}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
>>> from theano import shared
>>> x = shared(0.)
>>> from theano.compat.python2x import OrderedDict
>>> updates = OrderedDict()
>>> updates[x] = x + 1
>>> f = function([], updates=updates)
>>> f()
>>> x.get_value()
1.0
>>> x.set_value(100.)
>>> f()
>>> x.get_value()
101.0
\end{lstlisting}
\end{frame}
\begin{frame}{Which dict?}
\begin{itemize}
\item Use theano.compat.python2x.OrderedDict
\item Not collections.OrderedDict
\begin{itemize}
\item This isn’t available in older versions of python
\end{itemize}
\item Not \{\} aka dict
\begin{itemize}
\item The iteration order of this built-in class is not
deterministic (thanks, Python!) so if Theano
accepted this, the same script could compile
different C programs each time you run it
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}{Compilation modes}
\begin{itemize}
\item Can compile in different modes to get different kinds of programs
\item Can specify these modes very precisely with arguments to theano.function
\item Can use a few quick presets with environment variable flags
\end{itemize}
\end{frame}
\begin{frame}{Example preset compilation modes}
\begin{itemize}
\item FAST\_RUN: default. Fastest execution, slowest compilation
\item FAST\_COMPILE: Fastest compilation, slowest execution. No C code.
\item DEBUG\_MODE: Adds lots of checks.
Raises error messages in situations other
modes regard as fine.
\item optimizer=fast\_compile: as mode=FAST\_COMPILE, but with C code.
\item theano.function(..., mode=``FAST\_COMPILE'')
\item THEANO\_FLAGS=mode=FAST\_COMPILE python script.py
\end{itemize}
\end{frame}
\begin{frame}{Compilation for GPU}
\begin{itemize}
\item Theano's current back-end only supports 32 bit floats on the GPU
\item libgpuarray (the new back-end) supports all dtypes
\item CUDA supports 64 bit, but is slow on gamer GPUs
\item tt.fscalar, tt.fvector, tt.fmatrix are all 32 bit
\item tt.scalar, tt.vector, tt.matrix resolve to 32 bit or 64 bit depending on theano’s floatX flag
\item floatX is float64 by default, set it to float32
\item Set the device flag to gpu (or a specific gpu, like gpu0); an example invocation follows
\item Flag: warn\_float64={'ignore', 'warn', 'raise', 'pdb'}
\end{itemize}
\end{frame}
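\begin{frame}[fragile]
\frametitle{Compilation for GPU: example}
A typical invocation combining the flags above (a sketch; adjust the device
name to your setup):
\lstset{language=bash}
\begin{lstlisting}
THEANO_FLAGS=floatX=float32,device=gpu python script.py
\end{lstlisting}
\end{frame}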
\subsection{Modifying expressions}
\begin{frame}{Modifying expressions}
\begin{itemize}
\item The grad method
\item Others
% \item Variable nodes
% \item Types
% \item Ops
% \item Apply nodes
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{The grad method}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
>>> x = tt.scalar('x')
>>> y = 2. * x
>>> g = tt.grad(y, x)
# Print the unoptimized graph
>>> theano.printing.pydotprint(g)
\end{lstlisting}
\includegraphics[width=0.75\textwidth]{theano_grad.png}
\end{frame}
%% \begin{frame}{Theano Variables}
%% \begin{itemize}
%% \item A Variable is a theano expression
%% \item Can come from tt.scalar, tt.matrix, etc.
%% \item Can come from doing operations on other Variables
%% \item Every Variable has a type field, identifying its Type \newline
%% e.g. TensorType((True, False), ‘float32’)
%% \item Variables can be thought of as nodes in a graph
%% \end{itemize}
%% \end{frame}
%% \begin{frame}{Ops}
%% \begin{itemize}
%% \item An Op is any class that describes a
%% mathematical function of some variables
%% \item Can call the op on some variables to get a
%% new variable or variables
%% \item An Op class can supply other forms of
%% information about the function, such as its
%% derivatives
%% \end{itemize}
%% \end{frame}
%% \begin{frame}{Apply nodes}
%% \begin{itemize}
%% \item The Apply class is a specific instance of an application of an Op
%% \item Notable fields:
%% \begin{itemize}
%% \item op: The Op to be applied
%% \item inputs: The Variables to be used as input
%% \item outputs: The Variables produced
%% \end{itemize}
%% \item Variable.owner identifies the Apply that created the variable
%% \item Variable and Apply instances are nodes and owner/
%% inputs/outputs identify edges in a Theano graph
%% \end{itemize}
%% \end{frame}
\begin{frame}{Others}
\begin{itemize}
\item R\_op, L\_op for hessian free
\item hessian
\item jacobian
\item you can navigate the graph if you need
(go from the result of computation to its input, recursively)
\end{itemize}
\end{frame}
\subsection{Debugging}
\begin{frame}{Debugging}
\begin{itemize}
\item DEBUG\_MODE
\item Error message
\item theano.printing.debugprint
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Error message: code}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
import numpy as np
import theano
import theano.tensor as tt
x = tt.vector()
y = tt.vector()
z = x + x
z = z + y
f = theano.function([x, y], z)
f(np.ones((2,)), np.ones((3,)))
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Error message: 1st part}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
basicstyle=\scriptsize
}
\begin{lstlisting}
Traceback (most recent call last):
[...]
ValueError: Input dimension mis-match.
(input[0].shape[0] = 3, input[1].shape[0] = 2)
Apply node that caused the error:
Elemwise{add,no_inplace}(<TensorType(float64, vector)>,
<TensorType(float64, vector)>,
<TensorType(float64, vector)>)
Inputs types: [TensorType(float64, vector),
TensorType(float64, vector),
TensorType(float64, vector)]
Inputs shapes: [(3,), (2,), (2,)]
Inputs strides: [(8,), (8,), (8,)]
Inputs scalar values: ['not scalar', 'not scalar', 'not scalar']
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Error message: 2nd part}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
basicstyle=\footnotesize
}
\begin{lstlisting}
HINT: Re-running with most Theano optimization
disabled could give you a back-traces when this
node was created. This can be done with by setting
the Theano flags optimizer=fast_compile
HINT: Use the Theano flag 'exception_verbosity=high'
for a debugprint of this apply node.
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Error message: exception\_verbosity=high}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
basicstyle=\scriptsize,
xleftmargin=-1em
}
\begin{lstlisting}
Debugprint of the apply node:
Elemwise{add,no_inplace} [@A] <TensorType(float64, vector)> ''
|<TensorType(float64, vector)> [@B] <TensorType(float64, vector)>
|<TensorType(float64, vector)> [@C] <TensorType(float64, vector)>
|<TensorType(float64, vector)> [@C] <TensorType(float64, vector)>
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Error message: optimizer=fast\_compile}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
Backtrace when the node is created:
File "test.py", line 7, in <module>
z = z + y
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Error message: Traceback}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
basicstyle=\footnotesize,
xleftmargin=-1em
}
\begin{lstlisting}
Traceback (most recent call last):
File "test.py", line 9, in <module>
f(np.ones((2,)), np.ones((3,)))
File "/u/bastienf/repos/theano/compile/function/types.py",
line 589, in __call__
self.fn.thunks[self.fn.position_of_error])
File "/u/bastienf/repos/theano/compile/function/types.py",
line 579, in __call__
outputs = self.fn()
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{debugprint}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
>>> from theano.printing import debugprint
>>> debugprint(a)
Elemwise{mul,no_inplace} [@A] ''
|TensorConstant{2.0} [@B]
|Elemwise{add,no_inplace} [@C] 'z'
|<TensorType(float64, scalar)> [@D]
|<TensorType(float64, scalar)> [@E]
\end{lstlisting}
\end{frame}
%% \begin{frame}{Pylearn2}
%% Machine Learning library aimed at researchers
%% \begin{itemize}
%% \item Built on top of Theano, for fast execution and use of GPU
%% \item Easy to try variants of implemented algorithms, and to extend them (using Theano)
%% \item Very modular, each component of the library can be used in isolation
%% \item Experiments can be specified through a YAML config file, or by a Python script
%% \item Scripts for visualizing weights, plot monitored values
%% \end{itemize}
%% \end{frame}
%% \begin{frame}{libgpuarray}
%% Goal: A common GPU $n$-dimensional array that can be reused by all projects, support for both CUDA and OpenCL.
%% \newline \newline
%% Motivation:
%% \begin{itemize}
%% \item Currently there are at least 6 different GPU arrays in Python
%% \begin{itemize}
%% \item CudaNdarray (Theano), GPUArray (pycuda), CUDAMatrix (cudamat), GPUArray (pyopencl), Clyther, Copperhead, ...
%% \item There are even more if we include other languages.
%% \end{itemize}
%% \item They are incompatible
%% \begin{itemize}
%% \item None have the same properties and interface
%% \end{itemize}
%% \item All of them implement a subset of numpy.ndarray properties
%% \item This is the new GPU backend on Theano
%% \end{itemize}
%% \end{frame}
%% \begin{frame}{Project status?}
%% \begin{itemize}
%% \item Usable directly, but not all implementation available.
%% \item Multiple GPUs works.
%% \item Is the next GPU array container for Theano and is working.
%% \begin{itemize}
%% \item Not all Theano implementations available now.
%% \item OpenCL misses more implementations.
%% \item Multiple GPUs: supported in libgpuarray
%% \item Multiple GPUs: close to get integrated in Theano.
%% \end{itemize}
%% \item Web site: \url{http://deeplearning.net/software/libgpuarray/}
%% \end{itemize}
%% \end{frame}
%% \section{libgpuarray}
%% \begin{frame}
%% \tableofcontents[currentsection]
%% \end{frame}
%% %TODO, make much shorter
%% \begin{frame}{libgpuarray: Design Goals}
%% \begin{itemize}
%% \item Have the base object in C to allow collaboration with more projects.
%% \begin{itemize}
%% \item We want people from C, C++, ruby, R, \ldots all use the same base GPU ndarray.
%% \end{itemize}
%% \item Be compatible with CUDA and OpenCL.
%% \item Not too simple, (don’t support just matrix).
%% \item Support all dtype.
%% \item Allow strided views.
%% \item But still easy to develop new code that support only a few memory layout.
%% \begin{itemize}
%% \item This ease the development of new code.
%% \end{itemize}
%% \end{itemize}
%% \end{frame}
\subsection{Scan}
\begin{frame}
\frametitle{Scan}
\begin{itemize}
\item Allows looping (for, map, while)
\item Allows recursion (reduce)
\item Allows recursion with dependency on many of the previous time steps
\item Optimizes some cases, such as moving computation outside of the scan
\item The scan gradient is computed via Backpropagation Through Time (BPTT)
\end{itemize}
\end{frame}
\begin{frame}{When not to use scan}
\begin{itemize}
\item If you only need it for ``vectorization'' or
``broadcasting'': tensors and numpy.ndarray support these
natively, which will be much better for those use cases.
\item If you do a fixed and very small number of iterations (2 or 3), you
are probably better off just unrolling the graph.
\end{itemize}
\end{frame}
\begin{frame}[fragile,allowframebreaks]
\frametitle{Scan Example1: Computing tanh(v.dot(W) + b) * d where d is binomial}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
basicstyle=\footnotesize
}
\begin{lstlisting}
import theano
import theano.tensor as tt
import numpy as np
# define tensor variables
W = tt.matrix("W")
X = tt.matrix("X")
b_sym = tt.vector("b_sym")
# define shared random stream
trng = tt.shared_randomstreams.RandomStreams(1234)
d = trng.binomial(size=W[1].shape)
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Scan Example1: Computing tanh(v.dot(W) + b) * d where d is binomial (2)}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
results, updates = theano.scan(
lambda v: tt.tanh(tt.dot(v, W) + b_sym) * d,
sequences=X)
f = theano.function(inputs=[X, W, b_sym],
outputs=[results],
updates=updates)
x = np.eye(10, 2, dtype=theano.config.floatX)
w = np.ones((2, 2), dtype=theano.config.floatX)
b = np.ones((2), dtype=theano.config.floatX)
print f(x, w, b)
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Scan Example2: Computing pow(A, k)}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
import theano
import theano.tensor as tt
theano.config.warn.subtensor_merge_bug = False
k = tt.iscalar("k")
A = tt.vector("A")
def inner_fct(prior_result, B):
    return prior_result * B
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Scan Example2: Computing pow(A, k) (2)}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
result, updates = theano.scan(
fn=inner_fct,
outputs_info=tt.ones_like(A),
non_sequences=A, n_steps=k)
# Scan provides us with A ** 1 through A ** k.
# Keep only the last value. Scan optimizes memory.
final = result[-1]
power = theano.function(inputs=[A, k], outputs=final,
updates=updates)
print power(range(10), 2)
#[ 0. 1. 4. 9. 16. 25. 36. 49. 64. 81.]
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]
\frametitle{Scan signature}
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
result, updates = theano.scan(
fn=inner_fct,
sequences=[],
outputs_info=[tt.ones_like(A)],
non_sequences=A,
n_steps=k)
\end{lstlisting}
\begin{itemize}
\item Updates are needed if there are random numbers generated in the inner function
\begin{itemize}
\item Pass them to the call theano.function(..., updates=updates)
\end{itemize}
\item The inner function of scan receives its arguments in this order:
sequences, outputs\_info, non-sequences
\end{itemize}
\end{frame}
\section{RNN}
\begin{frame}
\tableofcontents[currentsection]
\end{frame}
\begin{frame}
\frametitle{Recurrent Neural Network Overview}
\begin{itemize}
\item An RNN is a class of neural network that can work with sequences of variable length.
\item Some layers have recurrent connections to themselves with a time delay (a minimal scan sketch follows on the next slide).
\begin{itemize}
\item This creates an internal state that allows the network to exhibit dynamic temporal behavior.
\end{itemize}
\end{itemize}
Image from Wikipedia, by Fyedernoggersnodden:
\includegraphics[width=0.35\textwidth]{../images/Elman_srnn.png}
\end{frame}
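\begin{frame}[fragile]
\frametitle{RNN: minimal scan sketch}
A minimal Elman-style recurrence written with scan (variable names are
illustrative; X, h0, W, U and b are assumed to be Theano variables):
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
# one step: h_t = tanh(x_t W + h_{t-1} U + b)
def step(x_t, h_tm1, W, U, b):
    return tt.tanh(tt.dot(x_t, W)
                   + tt.dot(h_tm1, U) + b)

h, updates = theano.scan(step,
    sequences=X, outputs_info=h0,
    non_sequences=[W, U, b])
\end{lstlisting}
\end{frame}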
\section{LSTM}
\begin{frame}
\tableofcontents[currentsection]
\end{frame}
\begin{frame}
\frametitle{Motivation}
The RNN gradient signal ends up being multiplied a large number of
times (up to as many as the number of timesteps) by the transition
matrix (the matrix containing the weights of the recurrent
connections). This means that the magnitude of the weights in the
transition matrix can have a strong impact on the learning process.
\begin{itemize}
\item \begin{bf}vanishing gradients\end{bf}:
if the weights in this matrix are small (or, more formally, if the leading eigenvalue of the weight matrix is smaller than 1.0), the gradient signal shrinks exponentially and can vanish.
\item \begin{bf}exploding gradients\end{bf}: if the weights in this matrix are large (or, again, more formally, if the leading eigenvalue of the weight matrix is larger than 1.0), the gradient signal grows exponentially and can explode.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{History}
\begin{itemize}
\item Original version introduced in 1997 by Hochreiter, S., \& Schmidhuber, J.
\item Forget gate introduced in 2000 by Gers, F. A., Schmidhuber, J., \& Cummins, F.
\item Practically everyone now uses the forget gate.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{LSTM overview}
\includegraphics[width=0.75\textwidth]{../images/lstm.png}
\end{frame}
\begin{frame}
\frametitle{LSTM cell}
\includegraphics[width=0.75\textwidth]{../images/lstm_memorycell.png}
\end{frame}
\begin{frame}[allowframebreaks]
\frametitle{LSTM math}
The equations on the next slide describe how a layer of memory cells is updated at every timestep $t$.
In these equations:
% 'm' here is just a size reference for the label width (and the spacing before the descriptions)
\begin{description}[m]
\item[$x_t$] \hfill \\
is the input to the memory cell layer at time t
\item[$W_i$, $W_f$, $W_c$, $W_o$, $U_i$, $U_f$, $U_c$, $U_o$ and $V_o$] \hfill \\
are weight matrices
\item[$b_i$, $b_f$, $b_c$ and $b_o$] \hfill \\
are bias vectors
\end{description}
\framebreak
First, we compute the values for $i_t$, the input gate, and $\widetilde{C_t}$, the candidate value for the states of the memory cells at time $t$:
\begin{equation}
i_t = \sigma(W_i x_t + U_i h_{t-1} + b_i)
\end{equation}
\begin{equation}
\widetilde{C_t} = \tanh(W_c x_t + U_c h_{t-1} + b_c)
\end{equation}
Second, we compute the value for $f_t$, the activation of the memory cells’ forget gates at time $t$:
\begin{equation}
f_t = \sigma(W_f x_t + U_f h_{t-1} + b_f)
\end{equation}
\framebreak
Given the value of the input gate activation $i_t$, the forget gate activation $f_t$ and the candidate state value $\widetilde{C_t}$, we can compute $C_t$, the memory cells’ new state at time $t$:
\begin{equation}
C_t = i_t * \widetilde{C_t} + f_t * C_{t-1}
\end{equation}
With the new state of the memory cells, we can compute the value of their output gates and, subsequently, their outputs:
\begin{equation}
o_t = \sigma(W_o x_t + U_o h_{t-1} + V_o C_t + b_o)
\end{equation}
\begin{equation}
h_t = o_t * \tanh(C_t)
\end{equation}
\end{frame}
\begin{frame}
\frametitle{Tutorial LSTM}
The model we use in this tutorial is a variation of the standard LSTM model. In this variant, the activation of a cell’s output gate does not depend on the memory cell’s state $C_t$. This allows us to perform part of the computation more efficiently (see the next slide for details). This means that, in the variant we have implemented, there is no matrix $V_o$, and equation (5) is replaced by equation (7):
\begin{equation}
o_t = \sigma(W_o x_t + U_o h_{t-1} + b_o)
\end{equation}
\end{frame}
\begin{frame}
\frametitle{Implementation Note}
In the code included in this tutorial, equations (1), (2), (3) and (7) are computed in parallel to make the computation more efficient. This is possible because none of these equations relies on a result produced by the others. It is achieved by concatenating the four matrices $W_*$ into a single weight matrix $W$, performing the same concatenation on the weight matrices $U_*$ to produce the matrix $U$, and concatenating the bias vectors $b_*$ to produce the vector $b$. Then, the pre-nonlinearity activations can be computed with:
\vspace{-1em}
\begin{equation*}
z = W x_t + U h_{t-1} + b
\end{equation*}
\vspace{-2em} % don't remove the blank line
The result is then sliced to obtain the pre-nonlinearity activations for $i$, $f$, $\widetilde{C_t}$, and $o$, and the nonlinearities are then applied independently to each (a code sketch follows on the next slide).
\end{frame}
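\begin{frame}[fragile]
\frametitle{Implementation Note: sketch}
A sketch of the concatenate-and-slice trick (illustrative names, not the
tutorial's exact code; W, U and b are the concatenated parameters, x\_t and
h\_tm1 the current input and previous hidden state, dim the hidden size):
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
def _slice(x, n, dim):
    # n-th block of width dim along the last axis
    return x[:, n * dim:(n + 1) * dim]

preact = tt.dot(x_t, W) + tt.dot(h_tm1, U) + b
i = tt.nnet.sigmoid(_slice(preact, 0, dim))
f = tt.nnet.sigmoid(_slice(preact, 1, dim))
c = tt.tanh(_slice(preact, 2, dim))
o = tt.nnet.sigmoid(_slice(preact, 3, dim))
\end{lstlisting}
\end{frame}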
\begin{frame}{LSTM Tips For Training}
\begin{itemize}
\item Do not use plain SGD; use something like adagrad or rmsprop instead.
\item Initialize any recurrent weights as orthogonal matrices (orth\_weights). This helps optimization.
\item Take out any operation that does not have to be inside ``scan''.
Theano moves many such operations out automatically, but not all.
\item Rescale (clip) the L2 norm of the gradient, if necessary (a sketch follows on the next slide).
\item You can use weight noise (try first with $\mathrm{dot}(U_c + noise,\ h_{t-1})$).
\item You can use dropout at the output of the recurrent layer.
\end{itemize}
\end{frame}
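\begin{frame}[fragile]
\frametitle{Tip: rescaling the gradient norm (sketch)}
One way to clip the global L2 norm of the gradients (a sketch; cost, params
and threshold are assumed to be defined elsewhere):
\lstset{language=Python,
commentstyle=\itshape\color{blue},
stringstyle=\color{violet},
}
\begin{lstlisting}
grads = tt.grad(cost, params)
norm = tt.sqrt(sum((g ** 2).sum() for g in grads))
# leave gradients unchanged when norm <= threshold
scale = tt.minimum(1., threshold / norm)
grads = [g * scale for g in grads]
\end{lstlisting}
\end{frame}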
\section{Exercises}
\begin{frame}{Exercises}
\begin{itemize}
\item Theano exercises: work through the ``0[1-4]*'' exercise directories,
available via ``git~clone~https://github.com/abergeron/ccw\_tutorial\_theano.git''.
\item Scan exercises: \url{http://deeplearning.net/software/theano/tutorial/loop.html\#exercise}
\item Modify the LSTM: add the $V_o$ parameter and use it.
\item Modify the LSTM: reverse the input sequence and try it like that, as in
Sutskever et al., NIPS 2014 (no solution provided).
\item Modify the LSTM: use two LSTM layers, with the new one taking the
input in reverse order; then concatenate the means of the
outputs of both LSTMs as input to the logistic regression (no solution provided).
\end{itemize}
Deep Learning Tutorial on LSTM: \url{http://deeplearning.net/tutorial/lstm.html}
(it includes references to the relevant papers).
\end{frame}
\begin{frame}{Acknowledgments}
\begin{itemize}
\item All people working or having worked at the LISA lab.
\item All Theano users/contributors
\item Compute Canada, RQCHP, NSERC, and Canada Research Chairs for providing funds or access to compute resources.
\end{itemize}
\end{frame}
\begin{frame}
\begin{center}
\Huge
Questions?
\end{center}
\end{frame}
\end{document}
|
{"hexsha": "258bb582a7449d90db19d6123c84357cefbd89d5", "size": 34480, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/nextml2015/presentation.tex", "max_stars_repo_name": "rpgoldman/Theano-PyMC", "max_stars_repo_head_hexsha": "4669be82a00da3bd78f6683c066c3e0073eecb52", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/nextml2015/presentation.tex", "max_issues_repo_name": "rpgoldman/Theano-PyMC", "max_issues_repo_head_hexsha": "4669be82a00da3bd78f6683c066c3e0073eecb52", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/nextml2015/presentation.tex", "max_forks_repo_name": "rpgoldman/Theano-PyMC", "max_forks_repo_head_hexsha": "4669be82a00da3bd78f6683c066c3e0073eecb52", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8269896194, "max_line_length": 522, "alphanum_fraction": 0.7089907193, "num_tokens": 9794}
|
from __future__ import division, unicode_literals
import numpy as np
from traits.api import HasStrictTraits, Array, Float, Instance, Property, Tuple
from tvtk.api import tvtk
from tvtk.common import configure_input_data, is_old_pipeline
VolumeArray = Array(shape=(None, None, None))
# The point data scalars need a name for some Mayavi operations.
POINT_DATA_SCALARS_NAME = 'VolumeData'
def _apply_mask(volume_data, mask_data):
""" Mask out a portion of the data.
"""
mask_image_data = _image_data_from_array(mask_data, volume_data.spacing)
masker = tvtk.ImageMask()
if is_old_pipeline():
masker.set_image_input(volume_data)
masker.set_mask_input(mask_image_data)
else:
masker.set_image_input_data(volume_data)
masker.set_mask_input_data(mask_image_data)
masker.update()
result = masker.output
result.point_data.scalars.name = POINT_DATA_SCALARS_NAME
return masker.output
def _image_data_from_array(array, spacing):
""" Build an ImageData object from a numpy array.
"""
image_data = tvtk.ImageData()
image_data.spacing = spacing
image_data.dimensions = array.shape
image_data.point_data.scalars = array.ravel('F')
image_data.point_data.scalars.name = POINT_DATA_SCALARS_NAME
return image_data
def _resample_data(image_data):
""" Resample data onto a uniform 256^3 grid.
"""
spacing = image_data.spacing
dims = image_data.dimensions
output_spacing = (spacing[0] * (dims[0] / 256),
spacing[1] * (dims[1] / 256),
spacing[2] * (dims[2] / 256))
reslicer = tvtk.ImageReslice(interpolation_mode='cubic',
output_extent=(0, 255, 0, 255, 0, 255),
output_spacing=output_spacing)
configure_input_data(reslicer, image_data)
reslicer.update()
result = reslicer.output
result.point_data.scalars.name = POINT_DATA_SCALARS_NAME
return reslicer.output
class VolumeData(HasStrictTraits):
""" A high level interface to uniform rectilinear volume data which
provides masking and resampling.
"""
# A mask to apply to the data
mask_data = Property(VolumeArray, depends_on='_mask_data')
# The mask data as a fortran array
_mask_data = VolumeArray
# The data itself.
raw_data = Property(VolumeArray, depends_on='_raw_data')
# The data as a fortran array
_raw_data = VolumeArray
# The bounds of the volume
bounds = Tuple(Float, Float, Float)
# The spacing between grid points in each dimension.
spacing = Tuple(Float, Float, Float)
# A resampled/masked version of the data, suitable for rendering
render_data = Property(Instance(tvtk.DataObject))
_render_data = Instance(tvtk.DataObject)
# -------------------------------------------------------------------------
# Public interface
# -------------------------------------------------------------------------
def clear_mask(self):
""" Remove any mask which is currently set.
"""
self.mask_data = np.empty((0, 0, 0), dtype='uint8')
# -------------------------------------------------------------------------
# Traits handlers
# -------------------------------------------------------------------------
def _bounds_default(self):
return tuple(np.array(self.spacing) * np.array(self.raw_data.shape))
def _spacing_default(self):
return (1.0, 1.0, 1.0)
def _get_render_data(self):
if self._render_data is None:
self._render_data = self._prepare_data()
return self._render_data
def _get_mask_data(self):
return self._mask_data
def _set_mask_data(self, value):
self._render_data = None
self._mask_data = np.asfortranarray(value)
def _get_raw_data(self):
return self._raw_data
def _set_raw_data(self, value):
self._render_data = None
self._raw_data = np.asfortranarray(value)
# -------------------------------------------------------------------------
# Private methods
# -------------------------------------------------------------------------
def _prepare_data(self):
image_data = _image_data_from_array(self.raw_data, self.spacing)
resampled_data = _resample_data(image_data)
        if self.mask_data.size > 0:
masked_data = _apply_mask(resampled_data, self.mask_data)
return masked_data
return resampled_data
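# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The array sizes,
# spacing values, and mask below are illustrative assumptions only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    volume = VolumeData(spacing=(1.0, 1.0, 2.0))
    volume.raw_data = np.random.rand(64, 64, 32).astype('float32')
    # Accessing render_data lazily resamples the volume onto a 256^3 grid.
    print(volume.render_data.dimensions)
    # A mask matches the resampled grid; nonzero keeps a voxel, zero masks it.
    mask = np.ones((256, 256, 256), dtype='uint8')
    mask[128:, :, :] = 0
    volume.mask_data = mask
    # Setting the mask invalidates the cached render data, so this recomputes.
    print(volume.render_data.point_data.scalars.name)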
|
{"hexsha": "c19553412ab2fa00003905a819824a80ae18f39f", "size": 4532, "ext": "py", "lang": "Python", "max_stars_repo_path": "ensemble/volren/volume_data.py", "max_stars_repo_name": "enthought/ensemble", "max_stars_repo_head_hexsha": "b63229224b1c0f1c18b04f0c1f2619e8e5e46428", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2016-03-02T16:58:15.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-23T19:53:04.000Z", "max_issues_repo_path": "ensemble/volren/volume_data.py", "max_issues_repo_name": "enthought/ensemble", "max_issues_repo_head_hexsha": "b63229224b1c0f1c18b04f0c1f2619e8e5e46428", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2015-03-25T23:09:07.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-18T12:23:27.000Z", "max_forks_repo_path": "ensemble/volren/volume_data.py", "max_forks_repo_name": "enthought/ensemble", "max_forks_repo_head_hexsha": "b63229224b1c0f1c18b04f0c1f2619e8e5e46428", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-04-14T10:04:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-03T03:51:42.000Z", "avg_line_length": 32.1418439716, "max_line_length": 79, "alphanum_fraction": 0.6085613416, "include": true, "reason": "import numpy", "num_tokens": 990}
|
\documentclass[../../main.tex]{subfiles}
\begin{document}
\subsubsection{At Key}
The operation $atKey$ will return the Value $v$ at some specified Key $k$.
\begin{schema}{AtKey[KV, K]}
m? : KV \\
v! : V \\
k? : K \\
atKey~\_ : KV \cross K \surj V
\where
v! = atKey(m?, k?) @ \\
\t2 let ~~ coll == ((\seq m?) \filter (k?, m?_{k?})) \implies \langle (k?, m?_{k?}) \rangle \iff k? \in \dom m? \\
\ \ \ = (second(head(coll)) \iff k? \mapsto m?_{k?} \in coll) ~\lor \\
\t1 (\emptyset \iff k? \not \in \dom m?)
\end{schema}
In the schema above, $coll$ is the result of filtering for $(k?, m?_{k?})$ within $\seq m?$.
If the mapping exists in the original $m?$, it will also be in the sequence of mappings. This means
we can filter the sequence for the mapping; if it is found, it is returned as $\langle (k?, m?_{k?}) \rangle$.
To return the value itself, $head(coll)$ extracts the mapping, and $second$ then yields the value mapped to $k?$.
\begin{zed}
v! = atKey(m?, k?) = second(head(coll)) = m?_{k?} @ m?_{k?} : V \iff k? \in \dom m?
\end{zed}
The following examples demonstrate the properties of $atKey$:
\begin{argue}
M = \ldata k_{0}v_{k_{0}}, k_{1}v_{k_{1}} \rdata \\
\t1 k_{0} = abc \ \land v_{k_{0}} = 123 & $k_{0}v_{k_{0}} = abc \mapsto 123$ \\
\t1 k_{1} = def \ \land v_{k_{1}} = xyz \mapsto 456 & $k_{1}v_{k_{1}} = def \mapsto xyz \mapsto 456$ \\
atKey(M, abc) = 123 \\
atKey(M, def) = xyz \mapsto 456 \\
atKey(M, foo) = \emptyset
\end{argue}
\end{document}
|
{"hexsha": "81cd4fa15532c8f2813f7fdfb11dfe1a1af947d5", "size": 1516, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/operations/kv/atKey.tex", "max_stars_repo_name": "yetanalytics/dave", "max_stars_repo_head_hexsha": "7a71c2017889862b2fb567edc8196b4382d01beb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-08-17T00:38:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T02:32:37.000Z", "max_issues_repo_path": "docs/operations/kv/atKey.tex", "max_issues_repo_name": "adlnet/dave", "max_issues_repo_head_hexsha": "9339713fac747118e462e4fc7e1ecd54e5d916e3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 95, "max_issues_repo_issues_event_min_datetime": "2018-08-31T18:57:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-15T16:40:01.000Z", "max_forks_repo_path": "docs/operations/kv/atKey.tex", "max_forks_repo_name": "yetanalytics/dave", "max_forks_repo_head_hexsha": "7a71c2017889862b2fb567edc8196b4382d01beb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-09-28T06:48:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-31T16:20:47.000Z", "avg_line_length": 45.9393939394, "max_line_length": 125, "alphanum_fraction": 0.6042216359, "num_tokens": 569}
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# train_ds = data['train'].map(lambda x,y: (resize(x),y)).shuffle(1024).cache().batch(config.batch_size).prefetch(-1)
def get_hardest_k_examples(test_dataset, model, k=32):
    """Return the k highest-loss examples, their losses, true labels, and predictions.
    Expects `test_dataset` to expose `.x` (images) and `.y` (one-hot labels) as arrays.
    """
class_probs = model.predict(test_dataset)
predictions = np.argmax(class_probs, axis=1)
losses = tf.keras.losses.categorical_crossentropy(test_dataset.y, class_probs)
argsort_loss = np.argsort(losses)
highest_k_losses = np.array(losses)[argsort_loss[-k:]]
hardest_k_examples = test_dataset.x[argsort_loss[-k:]]
true_labels = np.argmax(test_dataset.y[argsort_loss[-k:]], axis=1)
return highest_k_losses, hardest_k_examples, true_labels, predictions
def log_high_loss_examples(test_dataset, model, k=32, run=None):
print(f'logging k={k} hardest examples')
losses, hardest_k_examples, true_labels, predictions = get_hardest_k_examples(test_dataset, model, k=k)
run = run or wandb
run.log(
{"high-loss-examples":
[wandb.Image(hard_example, caption = f'true:{label},\npred:{pred}\nloss={loss}')
for hard_example, label, pred, loss in zip(hardest_k_examples, true_labels, predictions, losses)]
})
from IPython.display import display
import warnings
warnings.filterwarnings('ignore')
from pyleaves.utils import set_tf_config
set_tf_config(num_gpus=1)
import wandb
from wandb.keras import WandbCallback
# wandb.login()
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPool2D, ReLU, ELU, LeakyReLU, Flatten, Dense, Add, AveragePooling2D, GlobalAveragePooling2D
from tensorflow.keras.layers.experimental.preprocessing import StringLookup, CategoryEncoding
import pprint
pp = pprint.PrettyPrinter(indent=4)
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(666)
tf.random.set_seed(666)
from typing import List, Tuple, Union, Dict, NamedTuple
import tensorflow_datasets as tfds
from omegaconf import OmegaConf
# from tfrecord_utils.img_utils import resize_repeat
# from boltons.funcutils import partial
# import logging
# logger = logging.getLogger('')
LOG_DIR = '/media/data/jacob/GitHub/evolution_logs'
import os
os.makedirs(LOG_DIR, exist_ok=True)
from paleoai_data.utils.logging_utils import get_logger
logger = get_logger(logdir=LOG_DIR, filename='generation_evolution_logs.log', append=True)
get_ipython().system('nvidia-smi')
# In[2]:
from genetic_algorithm.datasets.plant_village import ClassLabelEncoder, load_and_preprocess_data
from genetic_algorithm import stateful
# ## Data Definitions
# ## Creating and tracking label encoders
# In[3]:
# dataset_name='plant_village'
# data_dir = '/media/data/jacob/tensorflow_datasets'
exp_config = OmegaConf.create({'seed':756, #237,
'batch_size':24,
'input_shape':(224,224,3),
'output_size':38,
'epochs_per_organism':3,
'results_dir':'/media/data_cifs_lrs/projects/prj_fossils/users/jacob/experiments/Nov2020-Jan2021'
})
data_config = OmegaConf.create({'load':{},'preprocess':{}})
data_config['load'] = {'dataset_name':'plant_village',
'split':['train[0%:60%]','train[60%:70%]','train[70%:100%]'],
'data_dir':'/media/data/jacob/tensorflow_datasets'}
data_config['preprocess'] = {'batch_size':exp_config.batch_size,
'target_size':exp_config.input_shape[:2]}
generation_config = OmegaConf.create({
'population_size':5,
'num_generations_per_phase':3,
'fitSurvivalRate': 0.5,
'unfitSurvivalProb':0.2,
'mutationRate':0.1,
'num_phases':5
})
organism_config = OmegaConf.create({'input_shape':exp_config.input_shape,
'output_size':exp_config.output_size,
'epochs_per_organism':exp_config.epochs_per_organism})
# In[4]:
# dataset_name='plant_village'
# data_dir = '/media/data/jacob/tensorflow_datasets'
DEBUG = False  # set to True for a small, fast debugging configuration
if DEBUG:
exp_config = OmegaConf.create({'seed':6227,
'batch_size':16,
'input_shape':(128,128,3),
'output_size':38,
'epochs_per_organism':1,
'results_dir':'/media/data_cifs_lrs/projects/prj_fossils/users/jacob/experiments/Nov2020-Jan2021/debugging_trials'
})
data_config = OmegaConf.create({'load':{},'preprocess':{}})
data_config['load'] = {'dataset_name':'plant_village',
'split':['train[0%:60%]','train[60%:70%]','train[70%:100%]'],
'data_dir':'/media/data/jacob/tensorflow_datasets'}
data_config['preprocess'] = {'batch_size':exp_config.batch_size,
'target_size':exp_config.input_shape[:2]}
generation_config = OmegaConf.create({
'population_size':1,
'num_generations_per_phase':1,
'fitSurvivalRate': 0.5,
'unfitSurvivalProb':0.2,
'mutationRate':0.1,
'num_phases':3
})
organism_config = OmegaConf.create({'input_shape':exp_config.input_shape,
'output_size':exp_config.output_size,
'epochs_per_organism':exp_config.epochs_per_organism})
# In[5]:
config = OmegaConf.create({
'experiment':exp_config,
'data':data_config,
'generation':generation_config,
'organism':organism_config
})
print(config.pretty())
# In[6]:
data, class_encoder = load_and_preprocess_data(data_config)
# In[7]:
if DEBUG:
config.organism.steps_per_epoch = 10
config.organism.validation_steps = 10
else:
config.organism.steps_per_epoch = len(data['train'])
config.organism.validation_steps = len(data['val'])
# In[8]:
config
# # Organism
# An organism contains the following:
#
# 1. phase - This denotes which phase the organism belongs to
# 2. chromosome - A dictionary of genes (hyperparameters)
# 3. model - The `tf.keras` model corresponding to the chromosome
# 4. best_organism - The best organism in the previous **phase**
# In[9]:
VERBOSE = True
import pandas as pd
import json
from box import Box
from bunch import Bunch
# from pprint import pprint as pp
import random
ActivationLayers = Box(ReLU=ReLU, ELU=ELU, LeakyReLU=LeakyReLU)
PoolingLayers = Box(MaxPool2D=MaxPool2D, AveragePooling2D=AveragePooling2D)
class Chromosome(stateful.Stateful):
def __init__(self,
hparams: Dict=None,
name=''):
super().__init__()
self.set_state(hparams)
def get_state(self):
"""Returns the current state of this object.
This method is called during `save`.
"""
return self._state
def set_state(self, state):
"""Sets the current state of this object.
This method is called during `reload`.
# Arguments:
state: Dict. The state to restore for this object.
"""
self._state = state
@property
def deserialized_state(self):
state = copy.deepcopy(self.get_state())
state['activation_type'] = ActivationLayers[state['activation_type']]
state['pool_type'] = PoolingLayers[state['pool_type']]
# state['activation_type'] = [ActivationLayers[act_layer] for act_layer in state['activation_type']]
# state['pool_type'] = [PoolingLayers[pool_layer] for pool_layer in state['pool_type']]
return state
# In[10]:
import copy
class ChromosomeOptions(stateful.Stateful):
"""
Container class for encapsulating variable-length lists of potential gene variants (individual hyperparameters).
To be used as a reservoir from which to sample a complete chromosome made up of 1 variant per gene.
This should be logged for describing the scope of a given AutoML experiment's hyperparameter search space
Gene: The unique identifier of a particular hyperparameter that may reference any of a set of possible variant values.
Variant: The particular value of a gene. Used to refer to the 1 value for a single chromosome instance, or 1 value from a set of gene options.
"""
def __init__(self,
hparam_lists,
phase=0,
seed=None):
# self.__chromosomes = {k:v for k,v in locals().items() if k not in ['self', 'kwargs'] and not k.startswith('__')}
# print(self.__chromosomes)
self.set_state(hparam_lists, phase=phase, seed=seed)
def get_state(self):
"""Returns the current state of this object.
This method is called during `save`.
"""
return self.state
def get_config(self):
config = copy.deepcopy(self.state)
return config
def set_state(self, state, phase=0, seed=None):
"""Sets the current state of this object.
This method is called during `reload`.
# Arguments:
state: Dict. The state to restore for this object.
"""
self.set_seed(seed)
self.phase = phase
self.state = state
def set_seed(self, seed=None):
self.seed = seed
self.rng = np.random.default_rng(seed)
def sample_k_variants_from_gene(self, gene: str, k: int=1):
'''
Randomly sample the list of variants corresponding to the key indicated by the first arg, 'gene'. Produce a random sequence of length k, with the default==1.
Note: If k==1: this automatically returns a single unit from the variants list, which may or may not be a scalar object (e.g. int, str, float)
If k > 1: then the sampled variants will always be returned in a list.
'''
all_variants = self.chromosomes[gene]
variant_idx = self.rng.integers(low=0, high=len(all_variants), size=k)
sampled_variants = [all_variants[idx] for idx in variant_idx.tolist()]
if k==1:
sampled_variants = sampled_variants[0]
return sampled_variants
def generate_chromosome(self, phase: int=None, seed=None):
'''
Primary function for utilizing a ChromosomeOptions object during experimentation.
Running this function will randomly generate a new Chromosome instance for which each genetic variant is randomly sampled from this object's contained data,
in the form of mappings between gene names as keys, and lists of variants as values.
'''
return Chromosome(hparams={gene:self.sample_k_variants_from_gene(gene) for gene in self.chromosomes.keys()})
def generate_k_chromosomes(self, k: int=1, seed=None):
return [self.generate_chromosome(seed=seed) for _ in range(k)]
@property
def chromosomes(self):
return self.state
@property
def deserialized_state(self):
state = copy.deepcopy(self.state)
state['activation_type'] = [ActivationLayers[act_layer] for act_layer in state['activation_type']]
state['pool_type'] = [PoolingLayers[pool_layer] for pool_layer in state['pool_type']]
return state
class ChromosomeSampler:
def __call__(self, phase: int):
if phase==0:
options = ChromosomeOptions({
# 'b_include_layer':[True],
'a_filter_size':[(1,1), (3,3), (5,5), (7,7), (9,9)],
'a_include_BN':[True, False],
'a_output_channels':[8, 16, 32, 64, 128, 256, 512],
'activation_type':['ReLU', 'ELU', 'LeakyReLU'],
'b_filter_size':[(1,1), (3,3), (5,5), (7,7), (9,9)],
'b_include_BN':[True, False],
'b_output_channels':[8, 16, 32, 64, 128, 256, 512],
'include_pool':[True, False],
'pool_type':['MaxPool2D', 'AveragePooling2D'],
'include_skip':[True, False]
},
phase=phase)
else:
options = ChromosomeOptions({
'b_include_layer':[True, False],
'a_filter_size':[(1,1), (3,3), (5,5), (7,7), (9,9)],
'a_include_BN':[True, False],
'a_output_channels':[8, 16, 32, 64, 128, 256, 512],
'activation_type':['ReLU', 'ELU', 'LeakyReLU'],
'b_filter_size':[(1,1), (3,3), (5,5), (7,7), (9,9)],
'b_include_BN':[True, False],
'b_output_channels':[8, 16, 32, 64, 128, 256, 512],
'include_pool':[True, False],
'pool_type':['MaxPool2D', 'AveragePooling2D'],
'include_skip':[True, False]
},
phase=phase)
return options.generate_chromosome(phase=phase)
# In[11]:
phases = []
sampler=ChromosomeSampler()
phases.append(sampler(phase=0))
phases.append(sampler(phase=1))
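# Hedged illustration (added for clarity, not in the original notebook): what a
# sampled chromosome looks like. The exact variants are random; the gene names
# come from the ChromosomeSampler search space above.
example_chromosome = sampler(phase=1)
print(sorted(example_chromosome.get_state().keys()))
# deserialized_state swaps the serialized names for the actual Keras layer
# classes, e.g. 'ReLU' -> tf.keras.layers.ReLU:
print(example_chromosome.deserialized_state['activation_type'])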
# ## Schema for defining, loading, using, and logging configuration for hparam search
#
#
# ### 1. Begin Hooks
#
# a. at_search_begin
# Store hparam search space definitions in a file called `search_space.json`
# b. at_trial_begin
#
# c. at_train_begin
#
# d. at_epoch_begin
#
# e. at_batch_begin
#
# ### 2. End Hooks
#
# a. at_batch_end
#
# b. at_epoch_end
#
# c. at_train_end
#
# d. at_trial_end
#
# e. at_search_end
#
#
# ## 1. INTERESTING REFACTOR IDEA:
# TODO: Refactor chromosome structure to standardize the configuration options for repeated model structures
# ### (3 AM 11/27/20)
#
# e.g. Create a separate ConvOptions(NamedTuple) class to contain all 3 options:
# filter_size
# include_BN
# output_channels
#
# Then in each "ChromosomeOptions" (consider making each of those a chromosome, and upgrading what's now a chromosome to a full Genome)
# store a separate ConvOptions for layer a and layer b, separately.
#
#
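# A hedged sketch of the refactor idea above (illustrative only, not wired into
# the Organism/Chromosome code yet): group the three conv-related genes into a
# NamedTuple so a chromosome holds one ConvOptions per conv layer instead of
# 'a_'/'b_'-prefixed flat keys.
class ConvOptions(NamedTuple):
    filter_size: Tuple[int, int]
    include_BN: bool
    output_channels: int
# e.g. ConvOptions(filter_size=(3, 3), include_BN=True, output_channels=64)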
# ## 2. TODO:
# Consider transferring mutate() method from Organism to Chromosome, while potentially keeping crossover() method as part of organism's namespace. Purpose is to encapsulate functionality as close as possible to the data/abstractions it will operate on
#
#
# ## 3. To Consider:
# How can I quantify the information coverage and computational complexity of a given set of chromosome options?
#
# a. Start with the raw # of permutations of all chromosome options
# b. Adjust by the expected coverage for each variant. E.g. How much of the hyperparameter space are we covering in our naive uniform grid search?
# In[15]:
import gc
class Organism:
def __init__(self,
data: Dict[str,tf.data.Dataset],
config=None,
chromosome=None,
phase=0,
generation_number=0,
organism_id=0,
best_organism=None):
'''
Organism is an actor with a State that can take Action in the environment
config is a . accessible dict object containing model params that will stay constant during evolution
chromosome is a dictionary of genes
phase is the phase that the individual belongs to
best_organism is the best organism of the previous phase
TODO:
1. implement to_json and from_json methods for copies
2. Separate out step where organism is associated with a dataset
'''
self.data = data
self.train_data = data['train']
self.val_data = data['val']
self.test_data = data['test']
self.config = config
self.chromosome = chromosome
self.phase = phase
self.generation_number = generation_number
self.organism_id = organism_id
self.best_organism=best_organism
if phase > 0:
if best_organism is None:
                print(f'phase {phase} gen {generation_number} organism {organism_id}.\nNo previous best model, creating from scratch.')
else:
self.last_model = best_organism.model
self.debug = DEBUG
@property
def name(self):
return f'phase_{self.phase}-gen_{self.generation_number}-organism_{self.organism_id}'
@property
def config(self):
return self._config
@config.setter
def config(self, config=None):
config = config or OmegaConf.create({})
print(config)
config.input_shape = config.input_shape or (224,224,3)
config.output_size = config.output_size or 38
config.epochs_per_organism = config.epochs_per_organism or 5
self._config = config
def get_metrics(self):
return [tf.keras.metrics.TruePositives(name='tp'),
tf.keras.metrics.FalsePositives(name='fp'),
tf.keras.metrics.TrueNegatives(name='tn'),
tf.keras.metrics.FalseNegatives(name='fn'),
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall'),
tf.keras.metrics.AUC(name='auc')]
@property
def chromosome(self):
return self._chromosome.deserialized_state
@chromosome.setter
def chromosome(self, chromosome):
self._chromosome = chromosome
def build_model(self):
'''
This is the function to build the keras model
'''
K.clear_session()
gc.collect()
inputs = Input(shape=self.config.input_shape)
if self.phase != 0:
# Slice the prev best model # Use the model as a layer # Attach new layer to the sliced model
intermediate_model = Model(inputs=self.last_model.input,
outputs=self.last_model.layers[-3].output)
for layer in intermediate_model.layers:
# To make the iteration efficient
layer.trainable = False
inter_inputs = intermediate_model(inputs)
x = Conv2D(filters=self.chromosome['a_output_channels'],
padding='same',
kernel_size=self.chromosome['a_filter_size'],
use_bias=self.chromosome['a_include_BN'])(inter_inputs)
            # Ensure we do not randomly choose another activation: persist the
            # previous phase's choice into this organism's serialized chromosome
            # state (the `chromosome` property returns a deep copy, so assigning
            # through it would silently have no effect).
            self._chromosome.get_state()['activation_type'] = \
                self.best_organism._chromosome.get_state()['activation_type']
else:
# For PHASE 0 only
# input layer
x = Conv2D(filters=self.chromosome['a_output_channels'],
padding='same',
kernel_size=self.chromosome['a_filter_size'],
use_bias=self.chromosome['a_include_BN'])(inputs)
if self.chromosome['a_include_BN']:
x = BatchNormalization()(x)
x = self.chromosome['activation_type']()(x)
if self.chromosome['include_pool']:
x = self.chromosome['pool_type'](strides=(1,1),
padding='same')(x)
        if self.phase != 0 and not self.chromosome['b_include_layer']:
# Except for PHASE0, there is a choice for
# the number of layers that the model wants
if self.chromosome['include_skip']:
y = Conv2D(filters=self.chromosome['a_output_channels'],
kernel_size=(1,1),
padding='same')(inter_inputs)
x = Add()([y,x])
x = GlobalAveragePooling2D()(x)
x = Dense(self.config.output_size, activation='softmax')(x)
        else:
            # PHASE 0, or the second conv layer is included:
            # build the tail conv block
x = Conv2D(filters=self.chromosome['b_output_channels'],
padding='same',
kernel_size=self.chromosome['b_filter_size'],
use_bias=self.chromosome['b_include_BN'])(x)
if self.chromosome['b_include_BN']:
x = BatchNormalization()(x)
x = self.chromosome['activation_type']()(x)
if self.chromosome['include_skip']:
y = Conv2D(filters=self.chromosome['b_output_channels'],
padding='same',
kernel_size=(1,1))(inputs)
x = Add()([y,x])
x = GlobalAveragePooling2D()(x)
x = Dense(self.config.output_size, activation='softmax')(x)
self.model = Model(inputs=[inputs], outputs=[x])
self.model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=self.get_metrics())
def fitnessFunction(self,
train_data,
val_data,
generation_number):
'''
This function is used to calculate the
fitness of an individual.
'''
        print('FITNESS FUNCTION')
print('vars():', vars())
self.run = wandb.init(**self.get_wandb_credentials(phase=self.phase,
generation_number=generation_number),
config=self.config)
self.model.fit(train_data,
steps_per_epoch=self.config.steps_per_epoch,
epochs=self.config.epochs_per_organism,
callbacks=[WandbCallback()],
verbose=1)
_, self.fitness = self.model.evaluate(val_data,
steps=self.config.validation_steps,
verbose=1)
    def crossover(self,
                  partner,
                  generation_number):
        '''
        Make a child from two parent individuals via single-point crossover.
        Crossover operates on the serialized chromosome state (strings rather
        than layer classes) so the child can be wrapped in a fresh Chromosome.
        '''
        self_state = self._chromosome.get_state()
        partner_state = partner._chromosome.get_state()
        child_state = {}
        endpoint = np.random.randint(low=0, high=len(self_state))
        for idx, key in enumerate(self_state):
            if idx <= endpoint:
                child_state[key] = self_state[key]
            else:
                child_state[key] = partner_state[key]
        child = Organism(chromosome=Chromosome(hparams=child_state),
                         data=self.data,
                         config=self.config,
                         phase=self.phase,
                         generation_number=generation_number,
                         organism_id=f'{self.organism_id}+{partner.organism_id}',
                         best_organism=self.best_organism)
        child.build_model()
        child.fitnessFunction(self.train_data,
                              self.val_data,
                              generation_number=generation_number)
        return child
    def mutation(self, generation_number):
        '''
        Mutate one randomly chosen gene by resampling its variant from the
        phase-appropriate search space. (The old module-level `options` /
        `options_phase0` dicts no longer exist; ChromosomeSampler replaces
        them.) Mutation must edit the serialized state in place, since the
        `chromosome` property returns a deep copy.
        '''
        state = self._chromosome.get_state()
        key = list(state.keys())[np.random.randint(0, len(state))]
        resampled = sampler(self.phase).get_state()
        if key in resampled:
            state[key] = resampled[key]
        self.build_model()
        self.fitnessFunction(self.train_data,
                             self.val_data,
                             generation_number=generation_number)
def show(self):
'''
Util function to show the individual's properties.
'''
pp.pprint(self.config)
pp.pprint(self.chromosome)
    def get_wandb_credentials(self, phase: int=None, generation_number: int=None):
        # use `is None` checks: `or` would wrongly discard a legitimate 0 value
        phase = self.phase if phase is None else phase
        generation_number = self.generation_number if generation_number is None else generation_number
if self.debug:
return get_wandb_credentials(phase=phase,
generation_number=generation_number,
entity="jrose",
project=f"vlga-plant_village-DEBUG")
return get_wandb_credentials(phase=phase,
generation_number=generation_number,
entity="jrose",
project=f"vlga-plant_village")
def get_wandb_credentials(phase: int,
generation_number: int,
entity="jrose",
project=f"vlga-plant_village"):
return dict(entity=entity,
project=project,
group='KAGp{}'.format(phase),
job_type='g{}'.format(generation_number))
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
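# Illustrative arithmetic check (added comment): softmax turns raw fitness
# scores into selection probabilities for crossover parents in
# Generation.generate() below, e.g.
#   softmax(np.array([0.1, 0.2, 0.7])) -> approx. [0.255, 0.281, 0.464]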
# # Generation
# This is a class that hold generations of models.
#
# 1. fitSurvivalRate - The fraction of fit individuals kept for the next generation.
# 2. unfitSurvivalProb - The probability of an unfit individual surviving anyway.
# 3. mutationRate - The mutation rate for changing genes in an individual.
# 4. phase - The phase that the generation belongs to.
# 5. population_size - The number of individuals that the generation consists of.
# 6. best_organism - The best organism (individual) from the previous phase.
# In[16]:
class Generation:
def __init__(self,
data,
generation_config,
organism_config,
phase,
previous_best_organism,
verbose: bool=False):
self.data = data
self.config = generation_config
self.organism_config = organism_config
self.population = []
self.generation_number = 0
self.phase = phase
# creating the first population: GENERATION_0
# can be thought of as the setup function
        self.previous_best_organism = previous_best_organism
self.best = {}
self._initialized = False
self.initialize_population(verbose=verbose)
self.verbose = verbose
@property
def config(self):
return self._config
@config.setter
def config(self, config=None):
config = config or OmegaConf.create({})
config.population_size = config.population_size or 5
config.num_generations_per_phase = config.num_generations_per_phase or 3
config.fitSurvivalRate = config.fitSurvivalRate or 0.5
config.unfitSurvivalProb = config.unfitSurvivalProb or 0.2
config.mutationRate = config.mutationRate or 0.1
config.num_phases = config.num_phases or 5
self._config = config
self.__dict__.update(config)
def initialize_population(self, verbose=True):
'''
1. Create self.population_size individual organisms from scratch by randomly sampling an initial set of hyperparameters (a chromosome)
2. As each is instantiated, build its model
3. Assess their fitness one-by-one
4. Sort models by relative fitness so we have a (potentially) new Best Organism (best model)
        5. Increment the generation number to 1
'''
for idx in range(self.population_size):
if verbose:
print('<'*10,' '*5,'>'*10)
print(f'Creating, training then testing organism {idx} out of a maximum {self.population_size} from generation {self.generation_number} and phase {self.phase}')
org = Organism(chromosome=sampler(self.phase), #.get_state(),
data=self.data,
config=self.organism_config,
phase=self.phase,
generation_number=self.generation_number,
organism_id=idx,
best_organism=self.previous_best_organism)
org.build_model()
            # use the validation split for fitness, consistent with crossover/mutation
            org.fitnessFunction(org.data['train'],
                                org.data['val'],
                                generation_number=self.generation_number)
self.population.append(org)
self._initialized = True
self.sortModel(verbose=verbose)
self.generation_number += 1
self.evaluate(run=self.population[0].run)
def sortModel(self, verbose: bool=True):
        '''
        Sort the population by fitness in descending order and update the
        record of the best organism seen so far.
        '''
        previous_best = self.best.get(
            'fitness',
            self.previous_best_organism.fitness if self.previous_best_organism is not None else 0.0)
fitness = [ind.fitness for ind in self.population]
sort_index = np.argsort(fitness)[::-1]
self.population = [self.population[index] for index in sort_index]
if self.best_organism_so_far.fitness > previous_best:
self.best['organism'] = self.best_organism_so_far
self.best['model'] = self.best_organism_so_far.model
self.best['fitness'] = self.best_organism_so_far.fitness
if verbose:
print(f'''NEW BEST MODEL:
Fitness = {self.best["fitness"]:.3f}
Previous Fitness = {previous_best:.3f}
Name = {self.best['organism'].name}
chromosome = {self.best['organism'].chromosome}''')
@property
def best_organism_so_far(self):
if self._initialized:
return self.population[0]
else:
return self.previous_best_organism
@property
def best_fitness(self):
if self._initialized:
return self.population[0].fitness
elif self.previous_best_organism is not None:
return self.previous_best_organism.fitness
else:
return 0.0
def generate(self):
'''
Generate a new generation in the same phase
'''
number_of_fit = int(self.population_size * self.fitSurvivalRate)
new_pop = self.population[:number_of_fit]
for individual in self.population[number_of_fit:]:
if np.random.rand() <= self.unfitSurvivalProb:
new_pop.append(individual)
for index, individual in enumerate(new_pop):
if np.random.rand() <= self.mutationRate:
new_pop[index].mutation(generation_number=self.generation_number)
fitness = [ind.fitness for ind in new_pop]
children=[]
for idx in range(self.population_size-len(new_pop)):
parents = np.random.choice(new_pop, replace=False, size=(2,), p=softmax(fitness))
A=parents[0]
B=parents[1]
child=A.crossover(B, generation_number=self.generation_number)
children.append(child)
self.population = new_pop+children
self.sortModel()
self.generation_number+=1
def evaluate(self, run=None, last=False):
'''
Evaluate the generation
'''
print('EVALUATE')
fitness = [ind.fitness for ind in self.population]
BestOrganism = self.population[0]
if run is None:
run = BestOrganism.run
run.log({'population_size':len(fitness)}, commit=False)
run.log({'Best fitness': fitness[0]}, commit=False)
run.log({'Average fitness': sum(fitness)/len(fitness)})
self.population[0].show()
print('BEST ORGANISM', BestOrganism.name)
k=16
if last:
k=32
model_path = f'best-model-phase_{self.phase}.png'
tf.keras.utils.plot_model(BestOrganism.model, to_file=model_path)
run.log({"best_model": [wandb.Image(model_path, caption=f"Best Model phase_{self.phase}")]})
        # NOTE: log_high_loss_examples expects an object exposing .x/.y arrays;
        # adapt test_data accordingly if it is a tf.data.Dataset.
        log_high_loss_examples(BestOrganism.test_data,
                               BestOrganism.model,
                               k=k,
                               run=run)
return BestOrganism
def run_generation(self):
print('RUN GENERATION')
self.generate()
last = False
if self.generation_number == self.num_generations_per_phase:
last = True
best_organism = self.evaluate(last=last)
return best_organism
    def run_phase(self):
print('RUN PHASE')
while self.generation_number < self.num_generations_per_phase:
best_organism = self.run_generation()
print(f'FINISHED GENERATION {self.generation_number}')
print(vars())
if self.verbose:
print(f'FINISHED generation {self.generation_number}. Best fitness = {best_organism.fitness}')
return self.population[0] #best_organism
# return self.population[0]
# In[ ]:
best_organism = None
for phase in range(config.generation.num_phases):
print("PHASE {}".format(phase))
generation = Generation(data=data,
generation_config=config['generation'],
organism_config=config['organism'],
phase=phase,
previous_best_organism=best_organism,
verbose=VERBOSE)
best_organism = generation.run_phase()
# In[ ]:
### 1. Using tfds.features.ClassLabel
# feature_labels = tfds.features.ClassLabel(names=vocab)
# data = ['Potato___healthy',
# 'Potato___Late_blight',
# 'Raspberry___healthy',
# 'Soybean___healthy',
# 'Squash___Powdery_mildew',
# 'Strawberry___healthy',
# 'Strawberry___Leaf_scorch',
# 'Tomato___Bacterial_spot',
# 'Tomato___Early_blight',
# 'Tomato___healthy']
# data += data[::-1]
# print([feature_labels.str2int(label) for label in data])
# data = train_data
# data_enc = data.map(lambda x,y: (x, feature_labels.int2str(y)))
### 2. Using StringLookup and CategoryEncoding Layers
# layer = StringLookup(vocabulary=vocab, num_oov_indices=0, mask_token=None)
# i_layer = StringLookup(vocabulary=layer.get_vocabulary(), invert=True)
# int_data = layer(data)
# print(len(layer.get_vocabulary()))
# print(len(class_encoder.class_list))
# print(set(layer.get_vocabulary())==set(class_encoder.class_list))
# i_layer = StringLookup(vocabulary=layer.get_vocabulary(), invert=True)
# int_data = layer(data)
# print(layer(data))
# print(i_layer(int_data))
# In[ ]:
# # from tensorflow.keras.layers.experimental.preprocessing import StringLookup, CategoryEncoding
# # data = tf.constant(["a", "b", "c", "b", "c", "a"])
# # # Use StringLookup to build an index of the feature values
# # indexer = StringLookup()
# # indexer.adapt(data)
# # # Use CategoryEncoding to encode the integer indices to a one-hot vector
# # encoder = CategoryEncoding(output_mode="binary")
# # encoder.adapt(indexer(data))
# # # Convert new test data (which includes unknown feature values)
# # test_data = tf.constant(["a", "b", "c", "d", "e", ""])
# # encoded_data = encoder(indexer(test_data))
# # print(encoded_data)
# vocab = ["a", "b", "c", "d"]
# data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
# layer = StringLookup(vocabulary=vocab)
# i_layer = StringLookup(vocabulary=layer.get_vocabulary(), invert=True)
# int_data = layer(data)
# print(layer(data))
# print(i_layer(int_data))
|
{"hexsha": "5c08feb8ea7129bd0a68677b11bf10c47edf7b11", "size": 37878, "ext": "py", "lang": "Python", "max_stars_repo_path": "Notebooks/generation_script.py", "max_stars_repo_name": "JacobARose/genetic_algorithm", "max_stars_repo_head_hexsha": "de4c52637f6b928b96c0306b7da59a054322b56c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Notebooks/generation_script.py", "max_issues_repo_name": "JacobARose/genetic_algorithm", "max_issues_repo_head_hexsha": "de4c52637f6b928b96c0306b7da59a054322b56c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Notebooks/generation_script.py", "max_forks_repo_name": "JacobARose/genetic_algorithm", "max_forks_repo_head_hexsha": "de4c52637f6b928b96c0306b7da59a054322b56c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7647058824, "max_line_length": 257, "alphanum_fraction": 0.57980886, "include": true, "reason": "import numpy", "num_tokens": 8006}
|
module MathFuns
export Eb, deltafun, deltadxfun, dabs
"""
Eb(x,m)
compute ``\\sqrt{(x^2+m)}``
"""
function Eb(x, m2)
sqrt(x^2 + m2)
end
"""
deltafun(x,dϵ=0.02)
compute ``\\delta`` function with the approximation ``\\frac{\\epsilon}{\\pi(\\epsilon^2+x^2)}``
and ``\\epsilon`` is set to be ``0.02`` by default
"""
deltafun(x, dϵ = 1.0) = dϵ / (pi * (dϵ^2 + x^2))
"""
    deltadxfun(x, ϵ=1.0)
compute the derivative of the ``\\delta`` approximation with respect to ``x``,
``\\frac{-2x\\epsilon}{\\pi(x^2+\\epsilon^2)^2}``
"""
function deltadxfun(x, ϵ = 1.0)
(-2 * x * ϵ) / (pi * (x^2 + ϵ^2)^2)
end
"""
    dabs(x)
derivative of ``|x|``: returns ``1.0`` for ``x > 0``, ``-1.0`` for ``x < 0``, and ``0.0`` at ``x = 0``
"""
function dabs(x)
if x > 0.0
return 1.0
elseif x < 0.0
return -1.0
elseif x == 0.0
return 0.0
end
end
end
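# Hedged sanity check (illustrative, not part of the module): the Lorentzian
# approximation deltafun(x, ϵ) integrates to ≈ 1 over a wide interval, and
# dabs agrees with the sign function away from zero. Grid bounds are assumptions.
if abspath(PROGRAM_FILE) == @__FILE__
    using .MathFuns
    xs = range(-500.0, 500.0; length = 100_001)
    w = step(xs)
    println(sum(deltafun.(xs, 1.0)) * w)                 # ≈ 0.9987
    println(dabs(-3.2), " ", dabs(0.0), " ", dabs(3.2))  # -1.0 0.0 1.0
end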
|
{"hexsha": "de299fa9b99cd1efcb3f132e109f798fe64e4dc2", "size": 603, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/artifacts/math.jl", "max_stars_repo_name": "Yangyang-Tan/FRGRealTime.jl", "max_stars_repo_head_hexsha": "6581b783432a5d5d08d00c887b483f9596d12fe3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-05-11T06:52:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-26T13:04:20.000Z", "max_issues_repo_path": "src/artifacts/math.jl", "max_issues_repo_name": "Yangyang-Tan/FRGRealTime.jl", "max_issues_repo_head_hexsha": "6581b783432a5d5d08d00c887b483f9596d12fe3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/artifacts/math.jl", "max_forks_repo_name": "Yangyang-Tan/FRGRealTime.jl", "max_forks_repo_head_hexsha": "6581b783432a5d5d08d00c887b483f9596d12fe3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.8684210526, "max_line_length": 96, "alphanum_fraction": 0.5306799337, "num_tokens": 249}
|
from statsmodels.tsa.seasonal import seasonal_decompose
from pyramid.arima import auto_arima
import pandas as pd
def DecomposeSeriesSeasonal(series_time_index, series, *frequency):
    '''
    decompose a time series into trend, seasonal, and residual components
    parameters:
        series_time_index : index of the time series
        series : time series values
        frequency : optional seasonal period of the series
    returns: statsmodels DecomposeResult
    '''
data = pd.DataFrame(series,index = series_time_index,columns=["Series"])
    # use the additive model if the series contains non-positive values,
    # since the multiplicative model requires strictly positive data
    model = 'multiplicative'
    if min(series) <= 0:
        model = 'additive'
# call seasonal_decompose with optional frequency parameter
if not frequency:
if isinstance(series_time_index, pd.DatetimeIndex):
return seasonal_decompose(data, model=model)
else:
return seasonal_decompose(data, model=model, freq=1)
else:
return seasonal_decompose(data, model=model, freq=frequency[0])
class Arima():
def __init__(self, seasonal, *seasonal_differencing):
'''
initialize ARIMA class
hyperparameters:
            seasonal: boolean indicating whether time series has a seasonal component
            seasonal_differencing: optional hyperparameter giving the seasonal period m
'''
self.seasonal = seasonal
self.seasonal_differencing = seasonal_differencing
self.arima_model = None
def fit(self, train):
'''
fit ARIMA model on training data
parameters:
train : training time series
'''
        # default: m=1, i.e. no intra-year seasonality (annual data)
if not self.seasonal_differencing:
self.arima_model = auto_arima(train, start_p=1, start_q=1,
max_p=5, max_q=5, m=1,
seasonal=self.seasonal,
d=None, D=1, trace=True,
error_action='ignore',
suppress_warnings=True,
stepwise=True)
# specified seasonal differencing parameter
else:
self.arima_model = auto_arima(train, start_p=1, start_q=1,
max_p=5, max_q=5, m=self.seasonal_differencing[0],
seasonal=self.seasonal,
d=None, D=1, trace=True,
error_action='ignore',
suppress_warnings=True,
stepwise=True)
self.arima_model.fit(train)
def predict(self, n_periods):
'''
forecasts the time series n_periods into the future
parameters:
n_periods: number of periods to forecast into the future
returns: time series forecast n_periods into the future
'''
return self.arima_model.predict(n_periods = n_periods)
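# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the library). The synthetic
# monthly series and the seasonal period m=12 are assumptions for the example.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    t = np.arange(120)
    series = 10 + 0.05 * t + 2 * np.sin(2 * np.pi * t / 12) + np.random.normal(0, 0.3, 120)
    # decompose with an explicit seasonal period since the index is not datetime
    result = DecomposeSeriesSeasonal(pd.RangeIndex(120), series, 12)
    print(result.seasonal[:12])
    # fit on all but the last year, then forecast 12 periods ahead
    model = Arima(True, 12)
    model.fit(series[:108])
    print(model.predict(n_periods=12))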
|
{"hexsha": "609e6edec9925df5aea849f920fc5646cad05e83", "size": 2755, "ext": "py", "lang": "Python", "max_stars_repo_path": "Sloth/predict.py", "max_stars_repo_name": "NewKnowledge/sloth", "max_stars_repo_head_hexsha": "ed6c9011962f76a1ad1498a03af8b31ac75c6576", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Sloth/predict.py", "max_issues_repo_name": "NewKnowledge/sloth", "max_issues_repo_head_hexsha": "ed6c9011962f76a1ad1498a03af8b31ac75c6576", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-11-26T23:17:15.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-17T21:39:20.000Z", "max_forks_repo_path": "Sloth/predict.py", "max_forks_repo_name": "NewKnowledge/sloth", "max_forks_repo_head_hexsha": "ed6c9011962f76a1ad1498a03af8b31ac75c6576", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2638888889, "max_line_length": 102, "alphanum_fraction": 0.5742286751, "include": true, "reason": "from statsmodels", "num_tokens": 548}
|
import sys
import cv2
import numpy as np
from imread_from_url import imread_from_url
from mobileHumanPose import MobileHumanPose, YoloV5s
from mobileHumanPose.utils_pose_estimation import draw_skeleton, draw_heatmap, vis_3d_multiple_skeleton
if __name__ == '__main__':
draw_detections = False
# Camera parameters for the deprojection
# TODO: Correct the deprojection function to properly transform the joints to 3D
focal_length = [None, None]
principal_points = [None, None]
pose_model_path='models/mobile_human_pose_working_well_256x256.onnx'
pose_estimator = MobileHumanPose(pose_model_path, focal_length, principal_points)
# Initialize person detector
detector_model_path='models/model_float32.onnx'
person_detector = YoloV5s(detector_model_path, conf_thres=0.5, iou_thres=0.4)
# image = cv2.imread("input.jpg")
image = imread_from_url("https://static2.diariovasco.com/www/pre2017/multimedia/noticias/201412/01/media/DF0N5391.jpg")
# Detect people in the image
boxes, detection_scores = person_detector(image)
# Exit if no person has been detected
if boxes is None:
print("No person was detected")
sys.exit()
    # Simulate depth from the bounding box area: boxes that are small relative
    # to the image are assumed to be farther away
areas = (boxes[:,2] - boxes[:,0]) * (boxes[:,3] - boxes[:,1])
depths = 500/(areas/(image.shape[0]*image.shape[1]))+500
# Draw detected person bounding boxes
pose_img = image.copy()
if draw_detections:
pose_img = person_detector.draw_detections(pose_img, boxes, detection_scores)
    # Initialize the representation images
heatmap_viz_img = image.copy()
img_heatmap = np.empty(image.shape[:2])
pose_3d_list = []
# Estimate the pose for each detected person
for i, bbox in enumerate(boxes):
# Draw the estimated pose
keypoints, pose_3d, person_heatmap, scores = pose_estimator(image, bbox, depths[i])
pose_img = draw_skeleton(pose_img, keypoints, bbox[:2], scores)
# Add the person heatmap to the image heatmap
img_heatmap[bbox[1]:bbox[3],bbox[0]:bbox[2]] += person_heatmap
# Add the 3d pose to the list
pose_3d_list.append(pose_3d)
# Draw heatmap
heatmap_viz_img = draw_heatmap(heatmap_viz_img, img_heatmap)
# Draw 3D pose
vis_kps = np.array(pose_3d_list)
img_3dpos = vis_3d_multiple_skeleton(vis_kps, np.ones_like(vis_kps))
img_3dpos = cv2.resize(img_3dpos[200:-200,150:-150], image.shape[1::-1])
# Combine the images for showing them together
combined_img = np.hstack((heatmap_viz_img, pose_img, img_3dpos))
cv2.imwrite("output.bmp", combined_img)
cv2.namedWindow("Estimated pose", cv2.WINDOW_NORMAL)
cv2.imshow("Estimated pose", combined_img)
cv2.waitKey(0)
|
{"hexsha": "2af0adb0ad0247d429f3dcaef0b39d2670cd416b", "size": 2790, "ext": "py", "lang": "Python", "max_stars_repo_path": "image3DPoseEstimation.py", "max_stars_repo_name": "matthiasseibold/ONNX-Mobile-Human-Pose-3D", "max_stars_repo_head_hexsha": "fc69545981b8be3c5c7a2f13528fc58503887876", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2021-09-26T13:44:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T06:39:51.000Z", "max_issues_repo_path": "image3DPoseEstimation.py", "max_issues_repo_name": "matthiasseibold/ONNX-Mobile-Human-Pose-3D", "max_issues_repo_head_hexsha": "fc69545981b8be3c5c7a2f13528fc58503887876", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-09-27T07:05:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-29T18:35:09.000Z", "max_forks_repo_path": "image3DPoseEstimation.py", "max_forks_repo_name": "matthiasseibold/ONNX-Mobile-Human-Pose-3D", "max_forks_repo_head_hexsha": "fc69545981b8be3c5c7a2f13528fc58503887876", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-10-05T12:43:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T11:05:01.000Z", "avg_line_length": 35.3164556962, "max_line_length": 123, "alphanum_fraction": 0.7107526882, "include": true, "reason": "import numpy", "num_tokens": 753}
|
[STATEMENT]
lemma solution_upd1:
"c \<noteq> 0 \<Longrightarrow> solution (A(p:=(\<lambda>j. A p j / c))) n x = solution A n x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<noteq> (0::'a) \<Longrightarrow> solution (A(p := \<lambda>j. A p j / c)) n x = solution A n x
[PROOF STEP]
apply(cases "p<n")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>c \<noteq> (0::'a); p < n\<rbrakk> \<Longrightarrow> solution (A(p := \<lambda>j. A p j / c)) n x = solution A n x
2. \<lbrakk>c \<noteq> (0::'a); \<not> p < n\<rbrakk> \<Longrightarrow> solution (A(p := \<lambda>j. A p j / c)) n x = solution A n x
[PROOF STEP]
prefer 2
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>c \<noteq> (0::'a); \<not> p < n\<rbrakk> \<Longrightarrow> solution (A(p := \<lambda>j. A p j / c)) n x = solution A n x
2. \<lbrakk>c \<noteq> (0::'a); p < n\<rbrakk> \<Longrightarrow> solution (A(p := \<lambda>j. A p j / c)) n x = solution A n x
[PROOF STEP]
apply(simp add: solution_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>c \<noteq> (0::'a); p < n\<rbrakk> \<Longrightarrow> solution (A(p := \<lambda>j. A p j / c)) n x = solution A n x
[PROOF STEP]
apply(clarsimp simp add: solution_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>c \<noteq> (0::'a); p < n\<rbrakk> \<Longrightarrow> (\<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n)) = (\<forall>i<n. (\<Sum>j = 0..<n. A i j * x j) = A i n)
[PROOF STEP]
apply rule
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n)\<rbrakk> \<Longrightarrow> \<forall>i<n. (\<Sum>j = 0..<n. A i j * x j) = A i n
2. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i<n. (\<Sum>j = 0..<n. A i j * x j) = A i n\<rbrakk> \<Longrightarrow> \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n)
[PROOF STEP]
apply clarsimp
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>i. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n); i < n\<rbrakk> \<Longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n
2. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i<n. (\<Sum>j = 0..<n. A i j * x j) = A i n\<rbrakk> \<Longrightarrow> \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n)
[PROOF STEP]
apply(case_tac "i=p")
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>i. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n); i < n; i = p\<rbrakk> \<Longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n
2. \<And>i. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n); i < n; i \<noteq> p\<rbrakk> \<Longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n
3. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i<n. (\<Sum>j = 0..<n. A i j * x j) = A i n\<rbrakk> \<Longrightarrow> \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n)
[PROOF STEP]
apply (simp add: sum_divide_distrib[symmetric] eq_divide_eq field_simps)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>i. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n); i < n; i \<noteq> p\<rbrakk> \<Longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n
2. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i<n. (\<Sum>j = 0..<n. A i j * x j) = A i n\<rbrakk> \<Longrightarrow> \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n)
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>c \<noteq> (0::'a); p < n; \<forall>i<n. (\<Sum>j = 0..<n. A i j * x j) = A i n\<rbrakk> \<Longrightarrow> \<forall>i. (i = p \<longrightarrow> (\<Sum>j = 0..<n. A p j * x j / c) = A p n / c) \<and> (i \<noteq> p \<longrightarrow> i < n \<longrightarrow> (\<Sum>j = 0..<n. A i j * x j) = A i n)
[PROOF STEP]
apply (simp add: sum_divide_distrib[symmetric] eq_divide_eq field_simps)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 2392, "file": "Gauss-Jordan-Elim-Fun_Gauss_Jordan_Elim_Fun", "length": 11}
|
# encoding: utf-8
# https://github.com/charlesCXK/TorchSSC/blob/master/model/sketch.nyu/network.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from functools import partial
from collections import OrderedDict
from models.config_sketch import config
from models.resnet_sketch import get_resnet50
from xmuda.models.projection_layer import Project2Dto3D
# from .resnet import get_resnet50
def group_weight(weight_group, module, lr):
group_decay = []
group_no_decay = []
for m in module.modules():
if isinstance(m, nn.Linear):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d)):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) \
or isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.GroupNorm):
if m.weight is not None:
group_no_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.Parameter):
group_decay.append(m)
assert len(list(module.parameters())) == len(group_decay) + len(
group_no_decay)
weight_group.append(dict(params=group_decay, lr=lr))
weight_group.append(dict(params=group_no_decay, weight_decay=.0, lr=lr))
return weight_group
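# Hedged usage sketch (illustrative, not in the original file): build optimizer
# parameter groups so conv/linear weights get weight decay while norm-layer
# parameters and biases do not. The toy module and hyperparameters are assumptions.
if __name__ == '__main__':
    _demo = nn.Sequential(nn.Conv3d(1, 4, kernel_size=3), nn.BatchNorm3d(4), nn.ReLU())
    _groups = group_weight([], _demo, lr=0.1)
    _optimizer = torch.optim.SGD(_groups, lr=0.1, momentum=0.9, weight_decay=5e-4)
    print(len(_groups))  # 2: decayed weights, and un-decayed norm params/biases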
class SimpleRB(nn.Module):
def __init__(self, in_channel, norm_layer, bn_momentum):
super(SimpleRB, self).__init__()
self.path = nn.Sequential(
nn.Conv3d(in_channel, in_channel, kernel_size=3, padding=1, bias=False),
norm_layer(in_channel, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(in_channel, in_channel, kernel_size=3, padding=1, bias=False),
norm_layer(in_channel, momentum=bn_momentum),
)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
conv_path = self.path(x)
out = residual + conv_path
out = self.relu(out)
return out
'''
3D Residual Block: a 3x3x3 conv is factorized into three smaller 3D convs
((1,1,3), (1,3,1), (3,1,1)), referred from DDRNet
'''
class Bottleneck3D(nn.Module):
def __init__(self, inplanes, planes, norm_layer, stride=1, dilation=[1, 1, 1], expansion=4, downsample=None,
fist_dilation=1, multi_grid=1,
bn_momentum=0.0003):
super(Bottleneck3D, self).__init__()
        # often, planes = inplanes // 4
self.expansion = expansion
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = norm_layer(planes, momentum=bn_momentum)
self.conv2 = nn.Conv3d(planes, planes, kernel_size=(1, 1, 3), stride=(1, 1, stride),
dilation=(1, 1, dilation[0]), padding=(0, 0, dilation[0]), bias=False)
self.bn2 = norm_layer(planes, momentum=bn_momentum)
self.conv3 = nn.Conv3d(planes, planes, kernel_size=(1, 3, 1), stride=(1, stride, 1),
dilation=(1, dilation[1], 1), padding=(0, dilation[1], 0), bias=False)
self.bn3 = norm_layer(planes, momentum=bn_momentum)
self.conv4 = nn.Conv3d(planes, planes, kernel_size=(3, 1, 1), stride=(stride, 1, 1),
dilation=(dilation[2], 1, 1), padding=(dilation[2], 0, 0), bias=False)
self.bn4 = norm_layer(planes, momentum=bn_momentum)
self.conv5 = nn.Conv3d(planes, planes * self.expansion, kernel_size=(1, 1, 1), bias=False)
self.bn5 = norm_layer(planes * self.expansion, momentum=bn_momentum)
self.relu = nn.ReLU(inplace=False)
self.relu_inplace = nn.ReLU(inplace=True)
self.downsample = downsample
self.dilation = dilation
self.stride = stride
self.downsample2 = nn.Sequential(
nn.AvgPool3d(kernel_size=(1, stride, 1), stride=(1, stride, 1)),
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
norm_layer(planes, momentum=bn_momentum),
)
self.downsample3 = nn.Sequential(
nn.AvgPool3d(kernel_size=(stride, 1, 1), stride=(stride, 1, 1)),
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
norm_layer(planes, momentum=bn_momentum),
)
self.downsample4 = nn.Sequential(
nn.AvgPool3d(kernel_size=(stride, 1, 1), stride=(stride, 1, 1)),
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False),
norm_layer(planes, momentum=bn_momentum),
)
def forward(self, x):
residual = x
out1 = self.relu(self.bn1(self.conv1(x)))
out2 = self.bn2(self.conv2(out1))
out2_relu = self.relu(out2)
out3 = self.bn3(self.conv3(out2_relu))
if self.stride != 1:
out2 = self.downsample2(out2)
out3 = out3 + out2
out3_relu = self.relu(out3)
out4 = self.bn4(self.conv4(out3_relu))
if self.stride != 1:
out2 = self.downsample3(out2)
out3 = self.downsample4(out3)
out4 = out4 + out2 + out3
out4_relu = self.relu(out4)
out5 = self.bn5(self.conv5(out4_relu))
if self.downsample is not None:
residual = self.downsample(x)
out = out5 + residual
out_relu = self.relu(out)
return out_relu
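# Hedged shape sanity check (illustrative, not in the original file): with
# stride=2 the three factorized convs halve every spatial dimension, and
# `expansion` sets the output channel count. Sizes below are assumptions.
if __name__ == '__main__':
    _block = Bottleneck3D(64, 16, nn.BatchNorm3d, stride=2, expansion=4,
                          downsample=nn.Sequential(
                              nn.AvgPool3d(kernel_size=2, stride=2),
                              nn.Conv3d(64, 64, kernel_size=1, bias=False),
                              nn.BatchNorm3d(64)))
    _out = _block(torch.rand(1, 64, 60, 36, 60))
    print(_out.shape)  # torch.Size([1, 64, 30, 18, 30])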
'''
Input: 60*36*60 sketch volume
Latent code: 15*9*15 (two 2x average-pooling stages downsample each spatial dim by 4)
'''
class CVAE(nn.Module):
def __init__(self, norm_layer, bn_momentum, latent_size=16):
super(CVAE, self).__init__()
self.latent_size = latent_size
self.encoder = nn.Sequential(
nn.Conv3d(2, 3, kernel_size=3, padding=1, bias=False),
norm_layer(3, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(3, 16, kernel_size=3, padding=1, bias=False),
norm_layer(16, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(16, 16, kernel_size=3, padding=1, bias=False),
norm_layer(16, momentum=bn_momentum),
nn.ReLU(),
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(16, self.latent_size, kernel_size=3, padding=1, bias=False),
norm_layer(self.latent_size, momentum=bn_momentum),
nn.ReLU(),
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(self.latent_size, self.latent_size, kernel_size=3, padding=1, bias=False),
norm_layer(self.latent_size, momentum=bn_momentum),
nn.ReLU(),
)
self.mean = nn.Conv3d(self.latent_size, self.latent_size, kernel_size=1, bias=True) # predict mean.
self.log_var = nn.Conv3d(self.latent_size, self.latent_size, kernel_size=1, bias=True) # predict log(var).
self.decoder_x = nn.Sequential(
nn.Conv3d(1, 3, kernel_size=3, padding=1, bias=False),
norm_layer(3, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(3, 16, kernel_size=3, padding=1, bias=False),
norm_layer(16, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(16, 16, kernel_size=3, padding=1, bias=False),
norm_layer(16, momentum=bn_momentum),
nn.ReLU(),
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(16, self.latent_size, kernel_size=3, padding=1, bias=False),
norm_layer(self.latent_size, momentum=bn_momentum),
nn.ReLU(),
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(self.latent_size, self.latent_size, kernel_size=3, padding=1, bias=False),
norm_layer(self.latent_size, momentum=bn_momentum),
nn.ReLU(),
)
self.decoder = nn.Sequential(
nn.ConvTranspose3d(self.latent_size*2, self.latent_size, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
norm_layer(self.latent_size, momentum=bn_momentum),
nn.ReLU(inplace=False),
nn.ConvTranspose3d(self.latent_size, self.latent_size, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
norm_layer(self.latent_size, momentum=bn_momentum),
nn.ReLU(inplace=False),
nn.Dropout3d(0.1),
nn.Conv3d(self.latent_size, 2, kernel_size=1, bias=True)
)
def forward(self, x, gt=None):
b, c, h, w, l = x.shape
if self.training:
gt = gt.view(b, 1, h, w, l).float()
for_encoder = torch.cat([x, gt], dim=1)
enc = self.encoder(for_encoder)
pred_mean = self.mean(enc)
pred_log_var = self.log_var(enc)
decoder_x = self.decoder_x(x)
out_samples = []
out_samples_gsnn = []
for i in range(config.samples):
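# Reparameterization trick: z1 = mean + eps * std with eps ~ N(0, I), which keeps
# the sampling step differentiable w.r.t. the predicted mean and log-variance.
# z2 is drawn directly from the prior N(0, I) for the GSNN branch.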
std = pred_log_var.mul(0.5).exp_()
eps = torch.randn([b, self.latent_size, h // 4, w // 4, l // 4]).cuda()
z1 = eps * std + pred_mean
z2 = torch.randn([b, self.latent_size, h // 4, w // 4, l // 4]).cuda()
sketch = self.decoder(torch.cat([decoder_x, z1], dim=1))
out_samples.append(sketch)
sketch_gsnn = self.decoder(torch.cat([decoder_x, z2], dim=1))
out_samples_gsnn.append(sketch_gsnn)
sketch = torch.cat([torch.unsqueeze(out_sample, dim=0) for out_sample in out_samples])
sketch = torch.mean(sketch, dim=0)
sketch_gsnn = torch.cat([torch.unsqueeze(out_sample, dim=0) for out_sample in out_samples_gsnn])
sketch_gsnn = torch.mean(sketch_gsnn, dim=0)
return pred_mean, pred_log_var, sketch_gsnn, sketch
else:
out_samples = []
for i in range(config.samples):
z = torch.randn([b, self.latent_size, h // 4, w // 4, l // 4]).cuda()
decoder_x = self.decoder_x(x)
out = self.decoder(torch.cat([decoder_x, z], dim=1))
out_samples.append(out)
sketch_gsnn = torch.cat([torch.unsqueeze(out_sample, dim=0) for out_sample in out_samples])
sketch_gsnn = torch.mean(sketch_gsnn, dim=0)
return None, None, sketch_gsnn, None
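# A minimal usage sketch (hypothetical shapes; assumes CUDA and a `config` module
# with `samples` set, exactly as the forward pass above does):
#   cvae = CVAE(norm_layer=nn.BatchNorm3d, bn_momentum=0.1, latent_size=16).cuda()
#   x = torch.rand(2, 1, 60, 36, 60).cuda()  # binary sketch volume
#   cvae.eval()
#   _, _, sketch_gsnn, _ = cvae(x)  # inference path samples z ~ N(0, I) only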
class STAGE1(nn.Module):
def __init__(self, class_num, norm_layer, resnet_out=2048, feature=512, ThreeDinit=True,
feature_oper=64,
bn_momentum=0.1, pretrained_model=None, eval=False, freeze_bn=False):
super(STAGE1, self).__init__()
self.business_layer = []
self.oper1 = nn.Sequential(
nn.Conv3d(1, 3, kernel_size=3, padding=1, bias=False),
norm_layer(3, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(3, feature_oper, kernel_size=3, padding=1, bias=False),
norm_layer(feature_oper, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(feature_oper, feature, kernel_size=3, padding=1, bias=False),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
)
self.business_layer.append(self.oper1)
self.completion_layer1 = nn.Sequential(
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, expansion=4, stride=2, downsample=
nn.Sequential(
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(feature, feature,
kernel_size=1, stride=1, bias=False),
norm_layer(feature, momentum=bn_momentum),
# nn.ReLU(),
), norm_layer=norm_layer), # feature --> feature*2
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.business_layer.append(self.completion_layer1)
self.completion_layer2 = nn.Sequential(
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, expansion=8, stride=2, downsample=
nn.Sequential(
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(feature, feature * 2,
kernel_size=1, stride=1, bias=False),
norm_layer(feature * 2, momentum=bn_momentum),
# nn.ReLU(),
), norm_layer=norm_layer),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.business_layer.append(self.completion_layer2)
self.cvae = CVAE(norm_layer=norm_layer, bn_momentum=bn_momentum, latent_size=config.lantent_size)
self.business_layer.append(self.cvae)
self.classify_sketch = nn.ModuleList([
nn.Sequential(
nn.ConvTranspose3d(feature * 2, feature, kernel_size=3, stride=2, padding=1, dilation=1,
output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
),
nn.Sequential(
nn.ConvTranspose3d(feature, feature, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
),
nn.Sequential(
nn.Dropout3d(.1),
nn.Conv3d(feature, 2, kernel_size=1, bias=True)
)]
)
self.business_layer.append(self.classify_sketch)
def forward(self, tsdf, depth_mapping_3d, sketch_gt=None):
'''
extract 3D feature
'''
tsdf = self.oper1(tsdf)
completion1 = self.completion_layer1(tsdf)
completion2 = self.completion_layer2(completion1)
up_sketch1 = self.classify_sketch[0](completion2)
up_sketch1 = up_sketch1 + completion1
up_sketch2 = self.classify_sketch[1](up_sketch1)
pred_sketch_raw = self.classify_sketch[2](up_sketch2)
_, pred_sketch_binary = torch.max(pred_sketch_raw, dim=1, keepdim=True) # (b, 1, 60, 36, 60) binary-voxel sketch
pred_mean, pred_log_var, pred_sketch_gsnn, pred_sketch= self.cvae(pred_sketch_binary.float(), sketch_gt)
return pred_sketch_raw, pred_sketch_gsnn, pred_sketch, pred_mean, pred_log_var
class STAGE2(nn.Module):
def __init__(self, class_num, norm_layer,
full_scene_size,
output_scene_size,
feature_oper=64,
resnet_out=2048, feature=512, ThreeDinit=True,
bn_momentum=0.1, pretrained_model=None, eval=False, freeze_bn=False):
super(STAGE2, self).__init__()
self.business_layer = []
# The eval and training variants were identical, so a single definition suffices
self.downsample = nn.Sequential(
nn.Conv2d(resnet_out, feature, kernel_size=1, bias=False),
nn.BatchNorm2d(feature, momentum=bn_momentum),
nn.ReLU()
)
self.business_layer.append(self.downsample)
self.resnet_out = resnet_out
self.feature = feature
self.ThreeDinit = ThreeDinit
self.pooling = nn.AvgPool3d(kernel_size=3, padding=1, stride=1)
self.business_layer.append(self.pooling)
self.semantic_layer1 = nn.Sequential(
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, expansion=4, stride=2, downsample=
nn.Sequential(
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(feature, feature,
kernel_size=1, stride=1, bias=False),
norm_layer(feature, momentum=bn_momentum),
), norm_layer=norm_layer), # feature --> feature*2
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.business_layer.append(self.semantic_layer1)
self.semantic_layer2 = nn.Sequential(
Bottleneck3D(feature, feature // 4, bn_momentum=bn_momentum, expansion=8, stride=2, downsample=
nn.Sequential(
nn.AvgPool3d(kernel_size=2, stride=2),
nn.Conv3d(feature, feature * 2,
kernel_size=1, stride=1, bias=False),
norm_layer(feature * 2, momentum=bn_momentum),
), norm_layer=norm_layer),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[1, 1, 1]),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[2, 2, 2]),
Bottleneck3D(feature * 2, feature // 2, bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[3, 3, 3]),
)
self.business_layer.append(self.semantic_layer2)
if full_scene_size[0] == output_scene_size[0]:
self.classify_semantic = nn.ModuleList([
nn.Sequential(
nn.ConvTranspose3d(feature * 2, feature, kernel_size=3, stride=2, padding=1, dilation=1,
output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
),
nn.Sequential(
nn.ConvTranspose3d(feature, feature, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
),
nn.Sequential(
nn.Dropout3d(.1),
# nn.Conv3d(feature, class_num, kernel_size=1, bias=True)
nn.ConvTranspose3d(feature, class_num, kernel_size=4, padding=0, stride=4)
)]
)
else:
self.classify_semantic = nn.ModuleList([
nn.Sequential(
nn.ConvTranspose3d(feature * 2, feature, kernel_size=3, stride=2, padding=1, dilation=1,
output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
),
nn.Sequential(
nn.ConvTranspose3d(feature, feature, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
),
nn.Sequential(
nn.Dropout3d(.1),
nn.Conv3d(feature, class_num, kernel_size=1, bias=True)
)]
)
self.business_layer.append(self.classify_semantic)
self.oper_sketch = nn.Sequential(
nn.Conv3d(2, 3, kernel_size=3, padding=1, bias=False),
norm_layer(3, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(3, feature_oper, kernel_size=3, padding=1, bias=False),
norm_layer(feature_oper, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(feature_oper, feature, kernel_size=3, padding=1, bias=False),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
)
self.oper_sketch_cvae = nn.Sequential(
nn.Conv3d(2, 3, kernel_size=3, padding=1, bias=False),
norm_layer(3, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(3, feature_oper, kernel_size=3, padding=1, bias=False),
norm_layer(feature_oper, momentum=bn_momentum),
nn.ReLU(),
nn.Conv3d(feature_oper, feature, kernel_size=3, padding=1, bias=False),
norm_layer(feature, momentum=bn_momentum),
nn.ReLU(inplace=False),
)
self.business_layer.append(self.oper_sketch)
self.business_layer.append(self.oper_sketch_cvae)
self.project2d3d = Project2Dto3D(full_scene_size[0]//4, full_scene_size[1]//4, full_scene_size[2]//4)
# self.project2d3d = Project2Dto3D(output_scene_size[0], output_scene_size[1], output_scene_size[2])
def forward(self, feature2d, depth_mapping_3d, pred_sketch_raw, pred_sketch_gsnn, full_img_size=None):
# reduce the channel of 2D feature map
if self.resnet_out != self.feature:
feature2d = self.downsample(feature2d)
if full_img_size is None:
feature2d = F.interpolate(feature2d, scale_factor=16, mode='bilinear', align_corners=True)
else:
feature2d = F.interpolate(feature2d, size=full_img_size, mode='bilinear', align_corners=True)
'''
project 2D feature to 3D space
'''
b, c, h, w = feature2d.shape
# feature2d = feature2d.view(b, c, h * w).permute(0, 2, 1) # b x h*w x c
#
# zerosVec = torch.zeros(b, 1, c).cuda() # for voxels that could not be projected from the depth map, we assign them zero vector
# segVec = torch.cat((feature2d, zerosVec), 1)
#
# segres = [torch.index_select(segVec[i], 0, depth_mapping_3d[i]) for i in range(b)]
# segres = torch.stack(segres).permute(0, 2, 1).contiguous().view(b, c, 60, 36, 60) # B, (channel), 60, 36, 60
# print(feature2d.shape, depth_mapping_3d.shape)
# print(torch.max(depth_mapping_3d), torch.min(depth_mapping_3d))
segres = self.project2d3d(feature2d, depth_mapping_3d) # mapping at 1:4 resolution
'''
init the 3D feature
'''
if self.ThreeDinit:
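# Feature entries that received no 2D projection are exactly zero; fill them with
# the local average of neighbouring voxels so the 3D stream starts from a dense volume.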
pool = self.pooling(segres)
zero = (segres == 0).float()
pool = pool * zero
segres = segres + pool
'''
extract 3D feature
'''
sketch_proi = self.oper_sketch(pred_sketch_raw)
sketch_proi_gsnn = self.oper_sketch_cvae(pred_sketch_gsnn)
seg_fea = segres + sketch_proi + sketch_proi_gsnn
semantic1 = self.semantic_layer1(seg_fea)
semantic2 = self.semantic_layer2(semantic1)
up_sem1 = self.classify_semantic[0](semantic2)
up_sem1 = up_sem1 + semantic1
up_sem2 = self.classify_semantic[1](up_sem1)
pred_semantic = self.classify_semantic[2](up_sem2)
return pred_semantic, None
'''
main network
'''
class Sketch3DSSC(nn.Module):
def __init__(self, class_num, base_lr,
full_scene_size,
output_scene_size,
optimize_everywhere=False,
feature_oper=64,
resnet_out=2048, feature=512, ThreeDinit=True,
bn_momentum=0.1, pretrained_model=None, eval=False, freeze_bn=False):
super(Sketch3DSSC, self).__init__()
self.business_layer = []
self.full_scene_size = full_scene_size
self.optimize_everywhere = optimize_everywhere
print("Sketch3DSSC_optimize_everywhere", self.optimize_everywhere)
self.nbr_classes = class_num
self.base_lr = base_lr
# Both branches built the same backbone, so collapse the if/else into one call
self.backbone = get_resnet50(num_classes=19, dilation=[1, 1, 1, 2], bn_momentum=config.bn_momentum,
is_fpn=False,
BatchNorm2d=nn.BatchNorm2d)
self.dilate = 2
for m in self.backbone.layer4.children():
m.apply(partial(self._nostride_dilate, dilate=self.dilate))
self.dilate *= 2
self.stage1 = STAGE1(class_num, nn.BatchNorm3d, resnet_out=resnet_out, feature=feature, ThreeDinit=ThreeDinit,
feature_oper=feature_oper,
bn_momentum=bn_momentum, pretrained_model=pretrained_model, eval=eval, freeze_bn=freeze_bn)
self.business_layer += self.stage1.business_layer
self.stage2 = STAGE2(class_num, nn.BatchNorm3d, full_scene_size,
output_scene_size=output_scene_size,
resnet_out=resnet_out, feature=feature, ThreeDinit=ThreeDinit,
feature_oper=feature_oper,
bn_momentum=bn_momentum, pretrained_model=pretrained_model, eval=eval, freeze_bn=freeze_bn)
self.business_layer += self.stage2.business_layer
def forward(self, data):
rgb = data['img']
tsdf = data['tsdf'].unsqueeze(1)
depth_mapping_3d = data['MAPPING_2DRGB_3DGRID'].long()
sketch_gt = data['3D_SKETCH']
h, w = rgb.size(2), rgb.size(3)
feature2d = self.backbone(rgb)
pred_sketch_raw, pred_sketch_gsnn, pred_sketch, pred_mean, pred_log_var = self.stage1(tsdf, depth_mapping_3d, sketch_gt)
# print(h, w)
pred_semantic, _ = self.stage2(feature2d, depth_mapping_3d, pred_sketch_raw, pred_sketch_gsnn, full_img_size=(h, w))
if self.training:
return {'pred_semantic': pred_semantic,
'pred_sketch_raw': pred_sketch_raw,
'pred_sketch_gsnn': pred_sketch_gsnn,
'pred_sketch': pred_sketch,
'pred_mean': pred_mean,
'pred_log_var': pred_log_var}
return {'pred_semantic': pred_semantic,
'pred_sketch_gsnn': pred_sketch_gsnn}
# @staticmethod
def _nostride_dilate(self, m, dilate):
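# Convert stride-2 convolutions in the late backbone stages into stride-1 dilated
# convolutions, preserving the receptive field while keeping a higher-resolution
# feature map (a standard trick for dense-prediction backbones).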
if isinstance(m, nn.Conv2d):
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def weights_initializer(self, feature, conv_init, bn_eps, bn_momentum, **kwargs):
for name, m in feature.named_modules():
if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
conv_init(m.weight, **kwargs)
elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d):
m.eps = bn_eps
m.momentum = bn_momentum
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
return
def weights_init(self):
module_list = self.business_layer
conv_init = nn.init.kaiming_normal_
bn_eps = 1e-5 # Forcing, this could be passed through config file
bn_momentum = 0.1 # Forcing, this could be passed through config file
if isinstance(module_list, list):
for feature in module_list:
self.weights_initializer(feature, conv_init, bn_eps, bn_momentum, mode='fan_in')
else:
self.weights_initializer(module_list, conv_init, bn_eps, bn_momentum, mode='fan_in')
return
def get_parameters(self):
params_list = []
for module in self.business_layer:
params_list = group_weight(params_list, module, self.base_lr)
return params_list
def compute_loss(self, scores, ssc_label, data, class_weights, use_3DSketch_nonempty_mask):
empty_loss_weight = 1
cri_weights = torch.ones(self.nbr_classes).type_as(scores['pred_semantic'])
'''
semantic loss ---------------------------------------------------------
'''
if not self.optimize_everywhere:
if use_3DSketch_nonempty_mask:
nonempty = data['nonempty']
else:
tsdf = data['tsdf_1_1'].cpu().numpy()
# nonempty = (tsdf < 0.1) & (tsdf != 0) & (data['ssc_label_1_4'] != 255)
nonempty = (tsdf < 0.1) & (tsdf != 0) & (ssc_label != 255)
# Indices at which weight equals 1. The array is flattened for nbr of examples in batch
if self.optimize_everywhere:
selectindex = torch.nonzero(torch.ones_like(ssc_label).view(-1)).view(-1) # Occluded voxels equals 1
cri_weights[0] = 0.05
else:
selectindex = torch.nonzero(nonempty.view(-1)).view(-1) # Occluded voxels equals 1
# cri_weights[0] = 0.05
criterion = nn.CrossEntropyLoss(ignore_index=255, reduction='none', weight=cri_weights)
# print(selectindex.shape, selectindex_2.shape)
# Getting labels at indices on which weights equals 1
# filterLabel = torch.index_select(data['ssc_label_1_4'].view(-1), 0, selectindex)
filterLabel = torch.index_select(ssc_label.view(-1), 0, selectindex)
# Selecting indices at which weights equal 1 and flattening into an array of per-class predicted percentages
filterOutput = torch.index_select(scores['pred_semantic'].permute(0, 2, 3, 4, 1).contiguous().view(-1, self.nbr_classes), 0,
selectindex)
loss_semantic = criterion(filterOutput, filterLabel.long())
loss_semantic = torch.mean(loss_semantic) # TODO: indices where label == 255 should not be included in this mean,
# e.g. torch.sum(criterion(filterOutput, filterLabel.long())) / torch.sum(filterLabel != 255)
if not self.training:
losses = {'total': loss_semantic, 'semantic': loss_semantic}
return losses
'''
sketch loss -----------------------------------------------------------
'''
if self.optimize_everywhere:
selectindex = torch.nonzero(torch.ones_like(data['sketch_1_4']).view(-1)).view(-1) # Occluded voxels equals 1
else:
selectindex = torch.nonzero(nonempty.view(-1)).view(-1) # Occluded voxels equals 1
filter_sketch_gt = torch.index_select(data['sketch_1_4'].view(-1), 0, selectindex)
filtersketch_raw = torch.index_select(scores['pred_sketch_raw'].permute(0, 2, 3, 4, 1).contiguous().view(-1, 2),
0, selectindex)
filtersketch = torch.index_select(scores['pred_sketch'].permute(0, 2, 3, 4, 1).contiguous().view(-1, 2),
0, selectindex)
filtersketchGsnn = torch.index_select(scores['pred_sketch_gsnn'].permute(0, 2, 3, 4, 1).contiguous().view(-1, 2),
0, selectindex)
# TODO: sketch labels are binary, so there are no 255 indices and the ignore_index=255 below is not needed
sketch_weights = torch.ones(2).type_as(scores['pred_semantic'])
# sketch_weights[0] = 0.05
if self.optimize_everywhere:
sketch_weights[0] = 0.05
criterion_sketch = nn.CrossEntropyLoss(ignore_index=255, reduction='none', weight=sketch_weights).cuda()
loss_sketch = criterion_sketch(filtersketch, filter_sketch_gt.long())
loss_sketch = torch.mean(loss_sketch) # TODO: indices where label == 255 should not be included in this mean
loss_sketch_gsnn = criterion_sketch(filtersketchGsnn, filter_sketch_gt.long())
loss_sketch_gsnn = torch.mean(loss_sketch_gsnn) # TODO: indices where label == 255 should not be included in this mean
loss_sketch_raw = criterion_sketch(filtersketch_raw, filter_sketch_gt.long())
loss_sketch_raw = torch.mean(loss_sketch_raw) # TODO: indices where label == 255 should not be included in this mean
'''
KLD loss --------------------------------------------------------------
'''
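# Closed-form KL divergence between the predicted posterior N(mean, var) and the
# standard normal prior, averaged over all latent elements.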
KLD = -0.5 * torch.mean(1 + scores['pred_log_var'] - scores['pred_mean'].pow(2) - scores['pred_log_var'].exp())
# TODO: I should do something to have track of each of the losses...
loss = loss_semantic \
+ (loss_sketch + loss_sketch_raw) * config.sketch_weight \
+ loss_sketch_gsnn * config.sketch_weight_gsnn \
+ KLD * config.kld_weight
losses = {'total':loss, 'semantic':loss_semantic, 'sketch':loss_sketch, 'sketch_raw':loss_sketch_raw,
'sketch_gsnn':loss_sketch_gsnn, 'KLD': KLD}
return losses
def get_target(self, data):
'''
Return the target to use for evaluation of the model
'''
return data['3D_LABEL']['1_4'] # .permute(0, 2, 1, 3)
def get_validation_loss_keys(self):
return ['total', 'semantic']
def get_train_loss_keys(self):
return ['total', 'semantic', 'sketch', 'sketch_raw', 'sketch_gsnn', 'KLD']
if __name__ == '__main__':
try:
from apex.parallel import DistributedDataParallel, SyncBatchNorm
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex .")
print('Starting...')
# NOTE: base_lr, full_scene_size and output_scene_size are required arguments; the
# values below are illustrative (NYU-style grids), not taken from any config file
model = Sketch3DSSC(class_num=12, base_lr=0.01, full_scene_size=(240, 144, 240),
output_scene_size=(60, 36, 60), feature=128, eval=True)
# print(model)
print('model loaded...')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
print('model loaded on device...')
model.eval()
print('model passed to eval mode...')
left = torch.rand(1, 3, 480, 640).cuda()
depth_mapping_3d = torch.from_numpy(np.ones((1, 129600)).astype(np.int64)).long().cuda()
tsdf = torch.rand(1, 60, 36, 60).cuda() # forward() adds the channel dimension itself
print('model forward pass...')
# forward() takes a single dict of named tensors rather than positional arguments
out = model({'img': left, 'tsdf': tsdf, 'MAPPING_2DRGB_3DGRID': depth_mapping_3d, '3D_SKETCH': None})
print('model forward pass done...')
|
{"hexsha": "44f6dabf3d3fc6a47dbc5c9bb4615a3778f4c66b", "size": 34321, "ext": "py", "lang": "Python", "max_stars_repo_path": "xmuda/models/sketch.py", "max_stars_repo_name": "anhquancao/xmuda-extend", "max_stars_repo_head_hexsha": "4b670ec2f6766e3a624e81dbe5d97b209c1c4f76", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xmuda/models/sketch.py", "max_issues_repo_name": "anhquancao/xmuda-extend", "max_issues_repo_head_hexsha": "4b670ec2f6766e3a624e81dbe5d97b209c1c4f76", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xmuda/models/sketch.py", "max_forks_repo_name": "anhquancao/xmuda-extend", "max_forks_repo_head_hexsha": "4b670ec2f6766e3a624e81dbe5d97b209c1c4f76", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3981481481, "max_line_length": 146, "alphanum_fraction": 0.6057807173, "include": true, "reason": "import numpy", "num_tokens": 8764}
|
subroutine residual_func(snes_in, stateVector, residualVector, ctx, localerr)
#include <petsc/finclude/petscsnes.h>
use petscsnes
use contexts
use grids
use input
use geometry
use mp
use diffusion
use source
implicit none
SNES:: snes_in
PetscScalar,dimension(:),intent(in):: stateVector
PetscScalar,dimension(:),intent(inout):: residualVector
type(resContext):: ctx
PetscErrorCode:: localerr
real,dimension(:),allocatable:: Efield
integer :: idx, ir, ip, ix, idx_rp1, idx_rm1, idx_pp1, idx_pm1, idx_xp1, idx_xm1
real :: delta_t, t, temp
real :: jacob_rl,jacob_rr,jacob_pl,jacob_pr,jacob_xl,jacob_xr
real :: jacob_rm1,jacob_rp1,jacob_pm1,jacob_pp1,jacob_xm1,jacob_xp1, jacob_c
real :: Dr_l, Dr_r, Dp_l, Dp_r, Dx_l, Dx_r, Ar_l, Ar_r, Ap_l, Ap_r, Ax_l, Ax_r
allocate(Efield(Nr))
select case (efield_option)
case ("none")
Efield(:) = 0.0
case ("inductive")
Efield(:) = stateVector(Nr*Np*Nx+1:Nr*Np*Nx+Nr)
end select
t = ctx%time
delta_t = ctx%delta_t
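! Finite-volume residual: for each cell, accumulate upwinded advective fluxes and
! harmonic-mean diffusive fluxes across both faces in each of the r, p and x
! directions, with zero-inflow / vanishing-ghost-cell treatment at the domain edges.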
do idx = firstLocalRow, lastLocalRow
ir = get_idx_r(idx)
ip = get_idx_p(idx)
ix = get_idx_x(idx)
idx_rp1 = get_idx(ir+1,ip,ix)
idx_rm1 = get_idx(ir-1,ip,ix)
idx_pp1 = get_idx(ir,ip+1,ix)
idx_pm1 = get_idx(ir,ip-1,ix)
idx_xp1 = get_idx(ir,ip,ix+1)
idx_xm1 = get_idx(ir,ip,ix-1)
! Coordinate Jacobian defined in cell center
jacob_c = jacob(rgrid(ir),pgrid(ip),xgrid(ix))
temp = 0.0
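! Face diffusivities below are distance-weighted harmonic means, e.g. at the right r face
! (r_e = rgrid_edge(ir+1), with each D weighted by the Jacobian at its cell centre):
!   D_face = D_i*D_(i+1)*(r_(i+1)-r_i) / ( (r_(i+1)-r_e)*D_i + (r_e-r_i)*D_(i+1) )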
if (ir == 1) then
jacob_rr = jacob(rgrid_edge(ir+1),pgrid(ip),xgrid(ix))
jacob_rp1 = jacob(rgrid(ir+1),pgrid(ip),xgrid(ix))
Ar_r = jacob_rr*Ar(rgrid_edge(ir+1),pgrid(ip),xgrid(ix),t)
Dr_r = jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t)*jacob_rp1*Drr(rgrid(ir+1),pgrid(ip),xgrid(ix),t) * ( rgrid(ir+1) - rgrid(ir) ) / &
( (rgrid(ir+1)-rgrid_edge(ir+1))*jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t) + (rgrid_edge(ir+1)-rgrid(ir))*jacob_rp1*Drr(rgrid(ir+1),pgrid(ip),xgrid(ix),t) )
temp=temp+ Ar_r * stateVector( get_idx(ir_upwind(ir+1,ip,ix),ip,ix) ) / (rgrid_edge(ir+1)-rgrid_edge(ir))
temp=temp- (Dr_r/( (rgrid(ir+1)-rgrid(ir))*(rgrid_edge(ir+1)-rgrid_edge(ir)))) * (stateVector(idx_rp1) - stateVector(idx))
else if (ir == Nr) then
jacob_rl = jacob(rgrid_edge(ir),pgrid(ip),xgrid(ix))
jacob_rr = jacob(rgrid_edge(ir+1),pgrid(ip),xgrid(ix))
jacob_rm1 = jacob(rgrid(ir-1),pgrid(ip),xgrid(ix))
Ar_l = jacob_rl*Ar(rgrid_edge(ir),pgrid(ip),xgrid(ix),t)
Ar_r = jacob_rr*Ar(rgrid_edge(ir+1),pgrid(ip),xgrid(ix),t)
Dr_l = jacob_rm1*Drr(rgrid(ir-1),pgrid(ip),xgrid(ix),t)*jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t) * ( rgrid(ir) - rgrid(ir-1) ) / &
( (rgrid(ir)-rgrid_edge(ir)) * jacob_rm1*Drr(rgrid(ir-1),pgrid(ip),xgrid(ix),t) + (rgrid_edge(ir+1)-rgrid(ir))*jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t) )
! The ghost cell shares the width and diffusivity of the last r cell, so the
! harmonic mean collapses to the last-cell value (rgrid(ir+1) would be out of bounds here)
Dr_r = jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t)
temp=temp- Ar_l * stateVector( get_idx(ir_upwind(ir,ip,ix),ip,ix) ) / (rgrid_edge(ir+1)-rgrid_edge(ir))
temp=temp+ 0.0
! Use a "ghost" cell with vanishing state vector. This cell has a width equal to the last r cell
temp=temp+ (Dr_l/( (rgrid(ir)-rgrid(ir-1))*(rgrid_edge(ir+1)-rgrid_edge(ir)))) * ( stateVector(idx) - stateVector(idx_rm1) )
temp=temp- (Dr_r/( (rgrid(ir)-rgrid(ir-1))*(rgrid_edge(ir+1)-rgrid_edge(ir)))) * (0.0 - stateVector(idx))
else
jacob_rl = jacob(rgrid_edge(ir),pgrid(ip),xgrid(ix))
jacob_rr = jacob(rgrid_edge(ir+1),pgrid(ip),xgrid(ix))
jacob_rm1 = jacob(rgrid(ir-1),pgrid(ip),xgrid(ix))
jacob_rp1 = jacob(rgrid(ir+1),pgrid(ip),xgrid(ix))
Ar_l = jacob_rl*Ar(rgrid_edge(ir),pgrid(ip),xgrid(ix),t)
Ar_r = jacob_rr*Ar(rgrid_edge(ir+1),pgrid(ip),xgrid(ix),t)
Dr_l = jacob_rm1*Drr(rgrid(ir-1),pgrid(ip),xgrid(ix),t)*jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t) * ( rgrid(ir) - rgrid(ir-1) ) / &
( (rgrid(ir)-rgrid_edge(ir)) * jacob_rm1*Drr(rgrid(ir-1),pgrid(ip),xgrid(ix),t) + (rgrid_edge(ir+1)-rgrid(ir))*jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t) )
Dr_r = jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t)*jacob_rp1*Drr(rgrid(ir+1),pgrid(ip),xgrid(ix),t) * ( rgrid(ir+1) - rgrid(ir) ) / &
( (rgrid(ir+1)-rgrid_edge(ir+1))*jacob_c*Drr(rgrid(ir),pgrid(ip),xgrid(ix),t) + (rgrid_edge(ir+1)-rgrid(ir))*jacob_rp1*Drr(rgrid(ir+1),pgrid(ip),xgrid(ix),t) )
temp=temp- Ar_l * stateVector( get_idx(ir_upwind(ir,ip,ix),ip,ix) ) / (rgrid_edge(ir+1)-rgrid_edge(ir))
if (ir_upwind(ir+1,ip,ix) > Nr) then
temp = temp+0.0
else
temp=temp+ Ar_r * stateVector( get_idx(ir_upwind(ir+1,ip,ix),ip,ix) ) / (rgrid_edge(ir+1)-rgrid_edge(ir))
end if
temp=temp+ (Dr_l/( (rgrid(ir)-rgrid(ir-1))*(rgrid_edge(ir+1)-rgrid_edge(ir)))) * ( stateVector(idx) - stateVector(idx_rm1) )
temp=temp- (Dr_r/( (rgrid(ir+1)-rgrid(ir))*(rgrid_edge(ir+1)-rgrid_edge(ir)))) * (stateVector(idx_rp1) - stateVector(idx))
end if
if (ip == 1) then
jacob_pr = jacob(rgrid(ir),pgrid_edge(ip+1),xgrid(ix))
jacob_pp1 = jacob(rgrid(ir),pgrid(ip+1),xgrid(ix))
Ap_r = jacob_pr*Ar(rgrid(ir),pgrid_edge(ip+1),xgrid(ix),t)
Dp_r = jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t)*jacob_pp1*Dpp(rgrid(ir),pgrid(ip+1),xgrid(ix),t) * ( pgrid(ip+1) - pgrid(ip) ) / &
( (pgrid(ip+1)-pgrid_edge(ip+1))*jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t) + (pgrid_edge(ip+1)-pgrid(ip))*jacob_pp1*Dpp(rgrid(ir),pgrid(ip+1),xgrid(ix),t) )
if (ip_upwind(ir,ip+1,ix) < 1) then
temp=temp+ 0.0
else
temp=temp+ Ap_r * stateVector( get_idx(ir,ip_upwind(ir,ip+1,ix),ix) ) / (pgrid_edge(ip+1)-pgrid_edge(ip))
end if
! Use a "ghost" cell with vanishing state vector. This cell has a width equal to the last p cell
temp=temp- (Dp_r/( (pgrid(ip)-pgrid(ip-1))*(pgrid_edge(ip+1)-pgrid_edge(ip)))) * (0.0 - stateVector(idx))
else if (ip == Np) then
jacob_pl = jacob(rgrid(ir),pgrid_edge(ip),xgrid(ix))
jacob_pr = jacob(rgrid(ir),pgrid_edge(ip+1),xgrid(ix))
jacob_pm1 = jacob(rgrid(ir),pgrid(ip-1),xgrid(ix))
! (no jacob_pp1 here: pgrid(ip+1) would be out of bounds at ip == Np)
Ap_l = jacob_pl*Ar(rgrid(ir),pgrid_edge(ip),xgrid(ix),t)
Ap_r = jacob_pr*Ar(rgrid(ir),pgrid_edge(ip+1),xgrid(ix),t)
Dp_l = jacob_pm1*Dpp(rgrid(ir),pgrid(ip-1),xgrid(ix),t)*jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t) * ( pgrid(ip) - pgrid(ip-1) ) / &
( (pgrid(ip)-pgrid_edge(ip))*jacob_pm1*Dpp(rgrid(ir),pgrid(ip-1),xgrid(ix),t) + (pgrid_edge(ip+1)-pgrid(ip))*jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t) )
! The ghost cell shares the width and diffusivity of the last p cell, so the
! harmonic mean collapses to the last-cell value
Dp_r = jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t)
temp=temp- Ap_l * stateVector( get_idx(ir,ip_upwind(ir,ip,ix),ix) ) / (pgrid_edge(ip+1)-pgrid_edge(ip))
if (ip_upwind(ir,ip+1,ix) > Np) then
temp=temp+0.0
else
temp=temp+ Ap_r * stateVector( get_idx(ir,ip_upwind(ir,ip+1,ix),ix) ) / (pgrid_edge(ip+1)-pgrid_edge(ip))
end if
temp=temp+ (Dp_l/( (pgrid(ip)-pgrid(ip-1))*(pgrid_edge(ip+1)-pgrid_edge(ip)))) * ( stateVector(idx) - stateVector(idx_pm1) )
temp=temp- (Dp_r/( (pgrid(ip)-pgrid(ip-1))*(pgrid_edge(ip+1)-pgrid_edge(ip)))) * (0.0 - stateVector(idx))
else
jacob_pl = jacob(rgrid(ir),pgrid_edge(ip),xgrid(ix))
jacob_pr = jacob(rgrid(ir),pgrid_edge(ip+1),xgrid(ix))
jacob_pm1 = jacob(rgrid(ir),pgrid(ip-1),xgrid(ix))
jacob_pp1 = jacob(rgrid(ir),pgrid(ip+1),xgrid(ix))
Ap_l = jacob_pl*Ar(rgrid(ir),pgrid_edge(ip),xgrid(ix),t)
Ap_r = jacob_pr*Ar(rgrid(ir),pgrid_edge(ip+1),xgrid(ix),t)
Dp_l = jacob_pm1*Dpp(rgrid(ir),pgrid(ip-1),xgrid(ix),t)*jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t) * ( pgrid(ip) - pgrid(ip-1) ) / &
( (pgrid(ip)-pgrid_edge(ip))*jacob_pm1*Dpp(rgrid(ir),pgrid(ip-1),xgrid(ix),t) + (pgrid_edge(ip+1)-pgrid(ip))*jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t) )
Dp_r = jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t)*jacob_pp1*Dpp(rgrid(ir),pgrid(ip+1),xgrid(ix),t) * ( pgrid(ip+1) - pgrid(ip) ) / &
( (pgrid(ip+1)-pgrid_edge(ip+1))*jacob_c*Dpp(rgrid(ir),pgrid(ip),xgrid(ix),t) + (pgrid_edge(ip+1)-pgrid(ip))*jacob_pp1*Dpp(rgrid(ir),pgrid(ip+1),xgrid(ix),t) )
temp=temp- Ap_l * stateVector( get_idx(ir,ip_upwind(ir,ip,ix),ix) ) / (pgrid_edge(ip+1)-pgrid_edge(ip))
temp=temp+ Ap_r * stateVector( get_idx(ir,ip_upwind(ir,ip+1,ix),ix) ) / (pgrid_edge(ip+1)-pgrid_edge(ip))
temp=temp+ (Dp_l/( (pgrid(ip)-pgrid(ip-1))*(pgrid_edge(ip+1)-pgrid_edge(ip)))) * ( stateVector(idx) - stateVector(idx_pm1) )
temp=temp- (Dp_r/( (pgrid(ip+1)-pgrid(ip))*(pgrid_edge(ip+1)-pgrid_edge(ip)))) * (stateVector(idx_pp1) - stateVector(idx))
end if
if (ix == 1) then
jacob_xr = jacob(rgrid(ir),pgrid(ip),xgrid_edge((ix+1)))
jacob_xp1 = jacob(rgrid(ir),pgrid(ip),xgrid(ix+1))
Ax_r = jacob_xr*Ar(rgrid(ir),pgrid(ip),xgrid_edge(ix+1),t)
Dx_r = jacob_c*Dxx(rgrid(ir),pgrid(ip),xgrid(ix),t)*jacob_xp1*Dxx(rgrid(ir),pgrid(ip),xgrid(ix+1),t) * ( xgrid(ix+1) - xgrid(ix) ) / &
( (xgrid(ix+1)-xgrid_edge(ix+1))*jacob_c*Dxx(rgrid(ir),pgrid(ip),xgrid(ix),t) + (xgrid_edge(ix+1)-xgrid(ix))*jacob_xp1*Dxx(rgrid(ir),pgrid(ip),xgrid(ix+1),t) )
temp=temp+ Ax_r * stateVector( get_idx(ir,ip,ix_upwind(ir,ip,ix+1)) ) / (xgrid_edge(ix+1)-xgrid_edge(ix))
temp=temp- (Dx_r/( (xgrid(ix+1)-xgrid(ix))*(xgrid_edge(ix+1)-xgrid_edge(ix)))) * (stateVector(idx_xp1) - stateVector(idx))
else if (ix == Nx) then
jacob_xl = jacob(rgrid(ir),pgrid(ip),xgrid_edge(ix))
jacob_xm1 = jacob(rgrid(ir),pgrid(ip),xgrid(ix-1))
Ax_l = jacob_xl*Ar(rgrid(ir),pgrid(ip),xgrid_edge(ix),t)
Dx_l = jacob_xm1*Dxx(rgrid(ir),pgrid(ip),xgrid(ix-1),t)*jacob_c*Dxx(rgrid(ir),pgrid(ip),xgrid(ix),t) * ( xgrid(ix) - xgrid(ix-1) ) / &
( (xgrid(ix)-xgrid_edge(ix))*jacob_xm1*Dxx(rgrid(ir),pgrid(ip),xgrid(ix-1),t) + (xgrid_edge(ix+1)-xgrid(ix))*jacob_c*Dxx(rgrid(ir),pgrid(ip),xgrid(ix),t) )
temp=temp- Ax_l * stateVector( get_idx(ir,ip,ix_upwind(ir,ip,ix)) ) / (xgrid_edge(ix+1)-xgrid_edge(ix))
temp=temp+ (Dx_l/( (xgrid(ix)-xgrid(ix-1))*(xgrid_edge(ix+1)-xgrid_edge(ix)))) * ( stateVector(idx) - stateVector(idx_xm1) )
else
jacob_xl = jacob(rgrid(ir),pgrid(ip),xgrid_edge(ix))
jacob_xr = jacob(rgrid(ir),pgrid(ip),xgrid_edge((ix+1)))
jacob_xm1 = jacob(rgrid(ir),pgrid(ip),xgrid(ix-1))
jacob_xp1 = jacob(rgrid(ir),pgrid(ip),xgrid(ix+1))
! Pre-defining the advection and diffusion coefficients. Will probably save this to arrays later
Ax_l = jacob_xl*Ar(rgrid(ir),pgrid(ip),xgrid_edge(ix),t)
Ax_r = jacob_xr*Ar(rgrid(ir),pgrid(ip),xgrid_edge(ix+1),t)
Dx_l = jacob_xm1*Dxx(rgrid(ir),pgrid(ip),xgrid(ix-1),t)*jacob_c*Dxx(rgrid(ir),pgrid(ip),xgrid(ix),t) * ( xgrid(ix) - xgrid(ix-1) ) / &
( (xgrid(ix)-xgrid_edge(ix))*jacob_xm1*Dxx(rgrid(ir),pgrid(ip),xgrid(ix-1),t) + (xgrid_edge(ix+1)-xgrid(ix))*jacob_c*Dxx(rgrid(ir),pgrid(ip),xgrid(ix),t) )
Dx_r = jacob_c*Dxx(rgrid(ir),pgrid(ip),xgrid(ix),t)*jacob_xp1*Dxx(rgrid(ir),pgrid(ip),xgrid(ix+1),t) * ( xgrid(ix+1) - xgrid(ix) ) / &
( (xgrid(ix+1)-xgrid_edge(ix+1))*jacob_c*Dxx(rgrid(ir),pgrid(ip),xgrid(ix),t) + (xgrid_edge(ix+1)-xgrid(ix))*jacob_xp1*Dxx(rgrid(ir),pgrid(ip),xgrid(ix+1),t) )
temp=temp- Ax_l * stateVector( get_idx(ir,ip,ix_upwind(ir,ip,ix)) ) / (xgrid_edge(ix+1)-xgrid_edge(ix))
temp=temp+ Ax_r * stateVector( get_idx(ir,ip,ix_upwind(ir,ip,ix+1)) ) / (xgrid_edge(ix+1)-xgrid_edge(ix))
temp=temp+ (Dx_l/( (xgrid(ix)-xgrid(ix-1))*(xgrid_edge(ix+1)-xgrid_edge(ix)))) * ( stateVector(idx) - stateVector(idx_xm1) )
temp=temp- (Dx_r/( (xgrid(ix+1)-xgrid(ix))*(xgrid_edge(ix+1)-xgrid_edge(ix)))) * (stateVector(idx_xp1) - stateVector(idx))
end if
! Coordinate jacobian defined on cell faces
! residualVector(idx) = temp + ( (stateVector(idx) - ctx%prevStateVector(idx))/delta_t) - sourcefunc(rgrid(ir),pgrid(ip),xgrid(ix),t)
residualVector(idx) = temp + ( (stateVector(idx) - 0.0) - sourcefunc(rgrid(ir),pgrid(ip),xgrid(ix),t))
end do
end subroutine residual_func
subroutine jacobian_func(snes_in, stateVector, matrix_local, matrix_pc, context, localerr)
#include <petsc/finclude/petscsnes.h>
use petscsnes
use contexts
implicit none
SNES:: snes_in
Vec,intent(in):: stateVector
Mat:: matrix_local, matrix_pc
type(resContext):: context
PetscErrorCode:: localerr
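! NOTE: the body is left empty, so the Jacobian matrices are never assembled here;
! presumably (an assumption, not stated in the source) PETSc is configured to use
! finite-difference or matrix-free Jacobians instead.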
end subroutine jacobian_func
|
{"hexsha": "12ed3e07ab7ebd5674a6e5a28b2bb50c6da09e9c", "size": 13438, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "residual.f90", "max_stars_repo_name": "gjwilkie/nlfp", "max_stars_repo_head_hexsha": "9e566c4566d0657ea50686d79c3ad353a6c82975", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "residual.f90", "max_issues_repo_name": "gjwilkie/nlfp", "max_issues_repo_head_hexsha": "9e566c4566d0657ea50686d79c3ad353a6c82975", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "residual.f90", "max_forks_repo_name": "gjwilkie/nlfp", "max_forks_repo_head_hexsha": "9e566c4566d0657ea50686d79c3ad353a6c82975", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.9406779661, "max_line_length": 171, "alphanum_fraction": 0.6205536538, "num_tokens": 4939}
|
import numpy as np
import flopter.core.constants as c
from flopter.core import normalise as nrm
from abc import ABC, abstractmethod
class LangmuirProbe(ABC):
def __init__(self, g, d_perp):
self.g = g
self.d_perp = d_perp
@abstractmethod
def is_angled(self):
pass
@abstractmethod
def get_collection_area(self, alpha):
pass
@abstractmethod
def get_analytical_iv(self, voltage, v_f, alpha, temp, dens):
pass
@abstractmethod
def get_2d_probe_length(self):
pass
@abstractmethod
def get_2d_probe_height(self):
pass
@abstractmethod
def get_3d_probe_depth(self):
pass
@abstractmethod
def get_2d_collection_length(self, alpha):
pass
@abstractmethod
def calc_exposed_lengths(self, alpha):
pass
@abstractmethod
def get_sheath_exp_param(self, temp, dens, alpha, c_1=0.4, c_2=0.5):
pass
def get_density(self, sat_current, temperature, alpha, gamma_i=1, mass=1, Z=1):
c_s = sound_speed(temperature, gamma_i=gamma_i, mass=mass, Z=Z)
A_coll = self.get_collection_area(alpha)
return electron_density(sat_current, c_s, A_coll, Z=Z)
def get_d_density(self, sat_current, d_sat_current, temperature, d_temperature, alpha, gamma_i=1, mass=1, Z=1):
c_s = sound_speed(temperature, gamma_i=gamma_i, mass=mass, Z=Z)
A_coll = self.get_collection_area(alpha)
n_e = electron_density(sat_current, c_s, A_coll, Z=Z)
d_c_s = d_sound_speed(c_s, temperature, d_temperature)
d_A_coll = np.abs(self.get_collection_area(alpha + np.radians(0.8)) - A_coll)
return d_electron_density(n_e, c_s, d_c_s, A_coll, d_A_coll, sat_current, d_sat_current)
def get_isat(self, temperature, density, alpha, gamma_i=1, mass=1):
c_s = sound_speed(temperature, gamma_i=gamma_i, mass=mass)
A_coll = self.get_collection_area(alpha)
return density * c.ELEM_CHARGE * c_s * A_coll
class AngledTipProbe(LangmuirProbe):
def __init__(self, a, b, L, g, d_perp, theta_f, theta_p):
super().__init__(g, d_perp)
self.a = a
self.b = b
self.L = L
self.theta_f = theta_f
self.theta_p = theta_p
def get_collection_area(self, alpha):
return calc_probe_collection_area(self.a, self.b, self.L, self.g, self.d_perp, alpha, self.theta_p)
def get_2d_collection_length(self, alpha):
d, h_coll = self.calc_exposed_lengths(alpha)
L_tip = self.L / np.cos(self.theta_p)
L_coll = ((L_tip - d) * np.sin(alpha + self.theta_p)) + (h_coll * np.cos(alpha))
return L_coll
def is_angled(self):
return self.theta_p > 0
def get_analytical_iv(self, voltage, v_f, alpha, temp, dens, mass=1, gamma_i=1.0, c_1=0.9, c_2=0.6, print_fl=False):
return analytical_iv_curve(voltage, v_f, temp, dens, alpha, self.get_collection_area(alpha), c_1=c_1, c_2=c_2,
gamma_i=gamma_i, mass=mass, L=self.L, g=self.g, print_fl=print_fl)
def get_2d_probe_length(self):
return self.L
def get_2d_probe_height(self):
return self.L * np.tan(self.theta_p)
def get_3d_probe_depth(self):
return max(self.b, self.a)
def calc_exposed_lengths(self, alpha):
return calc_probe_exposed_lengths(self.g, self.d_perp, alpha, self.theta_p)
def get_sheath_exp_param(self, temp, dens, alpha, c_1=0.4, c_2=0.5, form='bergmann'):
if form == 'bergmann':
return calc_sheath_expansion_param(temp, dens, self.L, self.g, alpha, c_1=c_1, c_2=c_2)
elif form == 'leland':
return calc_new_sheath_expansion_param(temp, dens, self.L, self.g, alpha, self.d_perp, self.theta_p,
c_1=c_1, c_2=c_2)
elif form == 'rotated':
return calc_2d_box_sheath_expansion_param(temp, dens, self.L, self.g, alpha, self.d_perp, self.theta_p,
c_1=c_1, c_2=c_2)
class FlushCylindricalProbe(LangmuirProbe):
def __init__(self, radius, g, d_perp):
super().__init__(g, d_perp)
self.radius = radius
self.theta_p = 0.0
def is_angled(self):
return False
def get_collection_area(self, alpha):
d, h_coll = self.calc_exposed_lengths(alpha)
theta_c = max(2 * np.arccos((self.radius - d) / self.radius), 0)
l_arc_eff = (1 - np.cos((np.pi / 2) - theta_c)) * self.radius
h_r = (self.radius - d) * np.sin(alpha)
A_coll = (
((np.sin(alpha) * self.radius**2) * (np.pi - theta_c + (2 * np.sin(theta_c))))
+ (2 * h_coll * np.cos(alpha) * l_arc_eff)
+ (l_arc_eff * h_r)
)
return A_coll
def get_2d_collection_length(self, alpha):
d, h_coll = self.calc_exposed_lengths(alpha)
L_coll = ((self.get_2d_probe_length() - d) * np.sin(alpha)) + (h_coll * np.cos(alpha))
return L_coll
def get_analytical_iv(self, voltage, v_f, alpha, temp, dens, mass=1, gamma_i=1.0, c_1=0.9, c_2=0.6, print_fl=False):
return analytical_iv_curve(voltage, v_f, temp, dens, alpha, self.get_collection_area(alpha), c_1=c_1, c_2=c_2,
gamma_i=gamma_i, mass=mass, L=(2 * self.radius), g=self.g, print_fl=print_fl)
def get_2d_probe_length(self):
return 2 * self.radius
def get_2d_probe_height(self):
return 0
def get_3d_probe_depth(self):
return 2 * self.radius
def calc_exposed_lengths(self, alpha):
return calc_probe_exposed_lengths(self.g, self.d_perp, alpha, 0.0)
def get_sheath_exp_param(self, temp, dens, alpha, c_1=0.4, c_2=0.5):
return calc_sheath_expansion_param(temp, dens, self.get_2d_probe_length(), self.g, alpha, c_1=c_1, c_2=c_2)
def calc_probe_collection_area(a, b, L, g, d_perp, theta_perp, theta_p, print_fl=False):
# d = max(0, ((d_perp - (g * np.tan(theta_perp)))
# / (np.sin(theta_p) + (np.tan(theta_perp) * np.cos(theta_p)))))
# h_coll = max(0, (g * np.tan(theta_perp) - d_perp) * np.cos(theta_perp))
d, h_coll = calc_probe_exposed_lengths(g, d_perp, theta_perp, theta_p)
if print_fl:
print("d = {}, h_coll = {}".format(d, h_coll))
L_exp = (L / np.cos(theta_p)) - d
return ((a + (0.5 * (b - a) * (L_exp / L))) * L_exp * np.sin(theta_perp + theta_p)) + (h_coll * b)
def calc_probe_exposed_lengths(g, d_perp, theta_perp, theta_p):
d = np.array((d_perp - (g * np.tan(theta_perp)))
/ (np.sin(theta_p) + (np.tan(theta_perp) * np.cos(theta_p)))).clip(min=0)
h_coll = np.array((g * np.tan(theta_perp) - d_perp) * np.cos(theta_perp)).clip(min=0)
return d, h_coll
def calc_probe_collection_A_alt(a, b, L, theta_perp, theta_p):
return (L / np.cos(theta_p)) * (a + b) * 0.5 * np.sin(theta_p + theta_perp)
def analytical_iv_curve(voltage, v_f, temp, dens, alpha, A_coll, c_1=0.9, c_2=0.6, gamma_i=1.0, mass=1, L=1, g=0.5,
print_fl=False):
T_i = temp
T_e = temp
lambda_D = debye_length(T_e, dens)
c_s = np.sqrt((c.ELEM_CHARGE * (T_e + (gamma_i * T_i)))
/ (c.PROTON_MASS * mass))
I_0 = dens * c.ELEM_CHARGE * c_s * A_coll
a = ((c_1 + (c_2 / np.tan(alpha))) / np.sqrt(np.sin(alpha))) * (lambda_D / (L + g))
if print_fl:
print("a = {}, c_s = {}, lambda_d = {}, I_0 = {}".format(a, c_s, lambda_D, I_0))
V = (v_f - voltage) / T_e
I = I_0 * (1 + (a * np.float_power(np.abs(V), .75)) - np.exp(-V))
return I
def debye_length(temp, density):
return np.sqrt((c.EPSILON_0 * temp) / (c.ELEM_CHARGE * density))
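# e.g. temp = 1 eV, density = 1e18 m^-3 gives lambda_D ~ 7.4e-6 m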
def thermal_velocity(T_e, mass=1):
return np.sqrt(c.ELEM_CHARGE * T_e / (c.PROTON_MASS * mass))
def sound_speed(T_e, T_i=None, gamma_i=1, mass=1, Z=1):
if T_i is None:
T_i = T_e
return np.sqrt((Z * c.ELEM_CHARGE * (T_e + (gamma_i * T_i))) / (c.PROTON_MASS * mass))
def d_sound_speed(c_s, T_e, d_T_e):
return np.abs((c_s * d_T_e) / (2 * T_e))
def electron_density(I_sat, c_s, A_coll, k=0.5, Z=1.0):
return I_sat / (k * Z * c.ELEM_CHARGE * c_s * A_coll)
def d_electron_density(n_e, c_s, d_c_s, A_coll, d_A_coll, I_sat, d_I_sat):
return np.abs(n_e) * np.sqrt((d_c_s / c_s)**2 + (d_A_coll / A_coll)**2 + (d_I_sat / I_sat)**2)
def ion_larmor_radius(T_e, B, mu=1, Z=1):
v_therm = thermal_velocity(T_e, mass=mu)
omega = ion_gyrofrequency(B, mu=mu, Z=Z)
return v_therm / omega
def ion_gyrofrequency(B, mu=1, Z=1):
return gyrofrequency(B, mu * c.PROTON_MASS, Z * c.ELEM_CHARGE)
def electron_gyrofrequency(B):
return gyrofrequency(B, c.ELECTRON_MASS, c.ELEM_CHARGE)
def gyrofrequency(B, mass, charge):
return (np.abs(charge) * B) / mass
def estimate_temperature(float_pot, plasma_pot, m_e=1.0, m_i=c.P_E_MASS_RATIO):
"""
Estimates temperature using the difference between the floating and plasma
potentials, using the standard equation (not OML).
:param float_pot: Floating potential (in V)
:param plasma_pot: Plasma potential (in V)
:param m_e: electron mass (in kg)
:param m_i: ion mass (in kg)
:return: Estimate of temperature (in eV)
"""
return (float_pot - plasma_pot) / (np.log(0.6 * np.sqrt(2 * np.pi * m_e / m_i)))
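# Sanity check: with the default mass ratio, log(0.6 * sqrt(2*pi*m_e/m_i)) ~ -3.35,
# so a floating-to-plasma potential difference of -3.35 V yields ~1 eV.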
def calc_sheath_expansion_param(temp, density, L, g, alpha, c_1=0.9, c_2=0.6):
lambda_D = debye_length(temp, density)
a = ((c_1 + (c_2 / np.tan(alpha))) / np.sqrt(np.sin(alpha))) * (lambda_D / (L + g))
return a
def calc_new_sheath_expansion_param(temp, density, L, g, alpha, d_perp, theta_p, c_1=0.4, c_2=0.5):
lambda_D = debye_length(temp, density)
a = ((((c_1 * (np.tan(alpha) + (np.tan(theta_p)))) + c_2) * lambda_D)
/ ((((L + g) * np.tan(alpha)) + (L * np.tan(theta_p)) - d_perp) * np.sqrt(np.sin(alpha))))
return a
def calc_2d_box_sheath_expansion_param(temp, density, L, g, theta, d_perp, theta_p, c_1=0.4, c_2=0.5, delta_0=0.0):
lambda_D = debye_length(temp, density)
L_eff = (L/np.cos(theta_p)) - ((d_perp + delta_0 - (g * np.tan(theta)))
/ ((np.cos(theta_p) * np.tan(theta)) + np.sin(theta_p)))
theta_tot = theta_p + theta
a = ((c_1 + c_2 * (1 / np.tan(theta_tot))) * lambda_D) / (np.sqrt(np.sin(theta_tot)) * (L_eff + (delta_0 / np.tan(theta_tot))))
return a
def decompose_sheath_exp_param(a, theta, L, g, d_perp=0, theta_p=0):
y = a * (L + g) * np.sqrt(np.sin(theta))
x = np.cos(theta) / np.sin(theta)
return x, y
def decompose_new_sheath_exp_param(a, theta, L, g, d_perp, theta_p):
y = (a * np.sqrt(np.sin(theta)) * (((L + g) * np.tan(theta))
+ (L * np.tan(theta_p)) - d_perp))
x = np.tan(theta) + np.tan(theta_p)
return x, y
def decompose_alt_new_sheath_exp_param(a, theta, L, g, d_perp, theta_p):
theta_tot = theta + theta_p
y = (a * np.sqrt(np.sin(theta))
* (L + ((np.cos(theta_p) / np.sin(theta_tot)) * ((g * np.sin(theta)) - (d_perp * np.cos(theta))))))
x = (np.cos(theta_p) * np.cos(theta)) / np.sin(theta_tot)
return x, y
def decompose_2d_box_sheath_exp_param(a, theta, L, g, d_perp, theta_p, delta_0=0.0):
L_eff = (L / np.cos(theta_p)) - ((d_perp + delta_0 - (g * np.tan(theta)))
/ ((np.cos(theta_p) * np.tan(theta)) + np.sin(theta_p)))
y = a * np.sqrt(np.sin(theta + theta_p)) * (L_eff + (delta_0 / np.tan(theta + theta_p)))
x = np.cos(theta + theta_p) / np.sin(theta + theta_p)
return x, y
class MagnumProbes(object):
def __init__(self):
L_small = 3e-3 # m
a_small = 2e-3 # m
b_small = 3e-3 # m
g_small = 2e-3 # m
d_perp_small = 3e-4 # m
theta_f_small = np.radians(72)
L_big = 5e-3 # m
a_big = 4.5e-3 # m
b_big = 6e-3 # m
g_big = 1e-3 # m
d_perp_big = 3e-4 # m
theta_f_big = np.radians(73.3)
L_lang = 5e-3 # m
a_lang = 2e-3 # m
b_lang = 3.34e-3 # m
g_lang = 1e-3 # m
d_perp_lang = 3e-4 # m
theta_f_reg = np.radians(75)
L_round = 4e-3 # m
g_round = 1.5e-3 # m
d_perp_round = 1e-4 # m
theta_p = np.radians(10)
self.probe_s = AngledTipProbe(a_small, b_small, L_small, g_small, d_perp_small, theta_f_small, theta_p)
self.probe_b = AngledTipProbe(a_big, b_big, L_big, g_big, d_perp_big, theta_f_big, theta_p)
self.probe_l = AngledTipProbe(a_lang, b_lang, L_lang, g_lang, d_perp_lang, theta_f_reg, theta_p)
self.probe_r = FlushCylindricalProbe(L_round / 2, g_round, d_perp_round)
self.probe_position = {
'l': 6,
's': -4,
'b': -14,
'r': -24
}
self.position_ind = ['l', 's', 'b', 'r']
def __getitem__(self, item):
item = item.lower()
probes = {
's': self.probe_s,
'l': self.probe_l,
'b': self.probe_b,
'r': self.probe_r,
}
return probes[item]
class MagnumProbesOld(object):
def __init__(self):
L_small = 3e-3 # m
a_small = 2e-3 # m
b_small = 3e-3 # m
g_small = 2e-3 # m
theta_f_small = np.radians(72)
L_large = 5e-3 # m
a_large = 4.5e-3 # m
b_large = 6e-3 # m
g_large = 1e-3 # m
theta_f_large = np.radians(73.3)
L_reg = 5e-3 # m
a_reg = 2e-3 # m
b_reg = 3.34e-3 # m
g_reg = 1e-3 # m
theta_f_reg = np.radians(75)
L_cyl = 4e-3 # m
g_cyl = 5e-4 # m
d_perp = 3e-4 # m
theta_p = np.radians(10)
self.probe_s = AngledTipProbe(a_small, b_small, L_small, g_small, d_perp, theta_f_small, theta_p)
self.probe_l = AngledTipProbe(a_large, b_large, L_large, g_large, d_perp, theta_f_large, theta_p)
self.probe_r = AngledTipProbe(a_reg, b_reg, L_reg, g_reg, d_perp, theta_f_reg, theta_p)
self.probe_c = FlushCylindricalProbe(L_cyl / 2, g_cyl, d_perp)
self.probes = {
's': self.probe_s,
'r': self.probe_r,
'l': self.probe_l,
'c': self.probe_c,
}
self.position = ['s', 'r', 'l', 'c']
|
{"hexsha": "0b77e501578e897f3307196e52c8042a0387c3ff", "size": 14496, "ext": "py", "lang": "Python", "max_stars_repo_path": "flopter/core/lputils.py", "max_stars_repo_name": "jackleland/flopter", "max_stars_repo_head_hexsha": "8f18f81470b456884108dc33baee836a672409c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-05-31T11:44:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-29T11:03:25.000Z", "max_issues_repo_path": "flopter/core/lputils.py", "max_issues_repo_name": "jackleland/flopter", "max_issues_repo_head_hexsha": "8f18f81470b456884108dc33baee836a672409c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-07-09T20:26:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-30T15:38:26.000Z", "max_forks_repo_path": "flopter/core/lputils.py", "max_forks_repo_name": "jackleland/flopter", "max_forks_repo_head_hexsha": "8f18f81470b456884108dc33baee836a672409c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-10T02:34:57.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-10T02:34:57.000Z", "avg_line_length": 36.0597014925, "max_line_length": 131, "alphanum_fraction": 0.59375, "include": true, "reason": "import numpy", "num_tokens": 4668}
|
# Train neural network to map RGB images to RAM output
import os
import utils
import numpy as np
from models import *
import argparse
from keras import optimizers
description = "Train RGB2RAM models"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--game_name', type=str, default='Breakout')
parser.add_argument('--model', choices=['FF', 'CNN1', 'CNN2', 'LSTM'], default='FF')
parser.add_argument('--save_images', dest='save_data', default=False, action='store_true')
parser.add_argument('--num_epochs', type=int, default=60)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--train_split', type=float, default=0.8)
args = parser.parse_args()
# Network parameters
if args.model == 'FF':
model_type = FFModel
elif args.model == 'CNN1':
model_type = CNNModel1
elif args.model == 'CNN2':
model_type = CNNModel2
elif args.model == 'LSTM':
model_type = LSTMModel
save_data = args.save_data # default: False
num_epochs = args.num_epochs # default: 60
train_split = args.train_split # default: 0.8
batch_size = args.batch_size # default: 8
layer_sizes = [32, 64] # reqd only for FFNN
seq_length = 10 # reqd only for LSTM
np.random.seed(1337)
if not os.path.exists("data/{}-v4/".format(args.game_name)):
utils.get_datasets(args.game_name)
x_train, y_train, x_test, y_test = utils.load_data(args.game_name, model_type, train_split, save_data)
# Normalization
"""
mean_train, sigma_train = np.mean(x_train, axis=0), np.std(x_train, axis=0)
x_train = (x_train - mean_train)
x_test = (x_test - mean_train)
"""
if model_type == LSTMModel:
x_train = x_train[:(x_train.shape[0]-(x_train.shape[0] % seq_length))]
y_train = y_train[:(y_train.shape[0]-(y_train.shape[0] % seq_length))]
x_test = x_test[:(x_test.shape[0]-(x_test.shape[0] % seq_length))]
y_test = y_test[:(y_test.shape[0]-(y_test.shape[0] % seq_length))]
x_train = x_train.reshape((-1, seq_length, 84, 84, 1))
y_train = y_train.reshape((-1, seq_length, 128))
x_test = x_test.reshape((-1, seq_length, 84, 84, 1))
y_test = y_test.reshape((-1, seq_length, 128))
print(x_train.shape, y_train.shape)
model = model_type(layer_sizes=layer_sizes, model_type=model_type, seq_length=seq_length).build()
model.summary()
# sgd = optimizers.SGD(learning_rate=0.0001, momentum=0.0, nesterov=False)
model.compile(loss='mse', optimizer='adam', metrics=['mse','mae'])
history = model.fit(x_train,
y_train,
validation_data=(x_test, y_test),
epochs=num_epochs,
batch_size=batch_size,
shuffle=True)
utils.save_model(model, model_type, model_path=os.path.join('./saved_model/', args.game_name))
utils.plot_history(history, '{}_{}'.format(args.game_name, args.model))
|
{"hexsha": "73d00fb4407336e61d4afd06f0372b3b8b950f5e", "size": 2789, "ext": "py", "lang": "Python", "max_stars_repo_path": "rgb2ram/train.py", "max_stars_repo_name": "Nidhi-K/keras-rl", "max_stars_repo_head_hexsha": "535662d41dc51a56ead22a612b67a9e2dcb7b532", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-21T20:26:41.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-21T20:26:41.000Z", "max_issues_repo_path": "rgb2ram/train.py", "max_issues_repo_name": "Nidhi-K/keras-rl", "max_issues_repo_head_hexsha": "535662d41dc51a56ead22a612b67a9e2dcb7b532", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rgb2ram/train.py", "max_forks_repo_name": "Nidhi-K/keras-rl", "max_forks_repo_head_hexsha": "535662d41dc51a56ead22a612b67a9e2dcb7b532", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-21T20:27:35.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-21T20:27:35.000Z", "avg_line_length": 34.8625, "max_line_length": 102, "alphanum_fraction": 0.7031193976, "include": true, "reason": "import numpy", "num_tokens": 752}
|
# SPDX-FileCopyrightText: 2022 Daniel Laidig <daniel@laidig.info>
#
# SPDX-License-Identifier: MIT
import copy
import numpy as np
def shuffled(items, seed=None, prefixHead=None, prefixTail=None):
if seed is not None:
r = np.random.RandomState(seed)
else:
r = np.random
itemsCopy = copy.copy(items)
r.shuffle(itemsCopy)
# optional: move some items that start with a specific string to the head or tail of the list
if prefixHead is not None or prefixTail is not None:
head = []
regular = []
tail = []
for item in itemsCopy:
if prefixHead is not None and item.startswith(prefixHead):
head.append(item)
elif prefixTail is not None and item.startswith(prefixTail):
tail.append(item)
else:
regular.append(item)
itemsCopy = head + regular + tail
return itemsCopy
def shuffledAnswerLetters(seed=None, skip='i', similar='ou,ha,bd,mn,dt,pb,cz,pq,qg'):
letters = [chr(i) for i in range(ord('a'), ord('z')+1) if chr(i) not in skip]
shuffledLetters = shuffled(letters, seed=seed)
pairs = similar.split(',')
assert all([len(p) == 2 for p in pairs])
keep = []
keptPairs = []
move = []
for letter in shuffledLetters:
moved = False
for pair in keptPairs:
if letter in pair:
move.append(letter)
moved = True
break
if moved:
continue
for pair in pairs:
if letter in pair:
keptPairs.append(pair)
keep.append(letter)
output = keep + move
assert len(output) == len(letters)
assert set(output) == set(letters)
return output
def precision(value, decimals=3):
return ('{0:.' + str(int(decimals)) + 'f}').format(round(value, decimals))
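# e.g. precision(3.14159, 2) -> '3.14'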
def nodotzero(res):
ires = int(res)
if res == ires:
return f'{ires:d}'
else:
return np.format_float_positional(res)
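# e.g. nodotzero(2.0) -> '2', nodotzero(2.5) -> '2.5'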
def _interpolate_hsv(d, min_, max_):
saturation = 100
value = 50
hue_range = (0, 110)
d = int(round(d))
if d <= min_:
hue = hue_range[1] / 360
elif d >= max_:
hue = hue_range[0]
else:
hue_values = sorted(np.arange(*hue_range, int(hue_range[1] / max_)), reverse=True)
hue = hue_values[d] / 360
return hue, saturation/100, value/100
def _rgb2hex(color):
r = int(round(color[0] * 255))
g = int(round(color[1] * 255))
b = int(round(color[2] * 255))
return f'{r:02X}{g:02X}{b:02X}'
def green2red(value, green=0, red=1):
import colorsys
hsv_color = _interpolate_hsv(value, green, red)
rgb_color = colorsys.hsv_to_rgb(*hsv_color)
color = _rgb2hex(rgb_color)
return str(color)
def debug(text):
print(text)
return ''
|
{"hexsha": "5d12fc71548656987670fb1dcb30b097a8d8db5d", "size": 2858, "ext": "py", "lang": "Python", "max_stars_repo_path": "textemplate/filters.py", "max_stars_repo_name": "dlaidig/textemplate", "max_stars_repo_head_hexsha": "cfd9781801bbdf6b3a3cc902f0a2eb51228072c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "textemplate/filters.py", "max_issues_repo_name": "dlaidig/textemplate", "max_issues_repo_head_hexsha": "cfd9781801bbdf6b3a3cc902f0a2eb51228072c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "textemplate/filters.py", "max_forks_repo_name": "dlaidig/textemplate", "max_forks_repo_head_hexsha": "cfd9781801bbdf6b3a3cc902f0a2eb51228072c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.852173913, "max_line_length": 97, "alphanum_fraction": 0.5902729181, "include": true, "reason": "import numpy", "num_tokens": 751}
|
import numpy as np
import pandas as pd
from players_data import *
from players_query import *
from players_func import *
|
{"hexsha": "5496d9d1d2e1fd99f2575eb4aa9f3b66390bdae1", "size": 122, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/players/players.py", "max_stars_repo_name": "PhyProg/NBA-League-Data-Analysis", "max_stars_repo_head_hexsha": "3be282101da64d194949fb94ca6491559cd6d7ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/players/players.py", "max_issues_repo_name": "PhyProg/NBA-League-Data-Analysis", "max_issues_repo_head_hexsha": "3be282101da64d194949fb94ca6491559cd6d7ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/players/players.py", "max_forks_repo_name": "PhyProg/NBA-League-Data-Analysis", "max_forks_repo_head_hexsha": "3be282101da64d194949fb94ca6491559cd6d7ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.4285714286, "max_line_length": 27, "alphanum_fraction": 0.8032786885, "include": true, "reason": "import numpy", "num_tokens": 25}
|
#ifndef SE2_COMMON_CONFIG_HPP
#define SE2_COMMON_CONFIG_HPP
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <string>
#include <sstream>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <stdexcept>
#include <functional>
#include <random>
#include <Eigen/Geometry>
#include <visualization_msgs/Marker.h>
#include "arc_utilities/eigen_helpers.hpp"
#include "arc_utilities/eigen_helpers_conversions.hpp"
#include "arc_utilities/pretty_print.hpp"
#include "arc_utilities/voxel_grid.hpp"
#include "arc_utilities/simple_rrt_planner.hpp"
#include "uncertainty_planning_core/simple_pid_controller.hpp"
#include "uncertainty_planning_core/simple_uncertainty_models.hpp"
#include "uncertainty_planning_core/uncertainty_contact_planning.hpp"
#include "uncertainty_planning_core/simple_robot_models.hpp"
#include "uncertainty_planning_core/simple_samplers.hpp"
#include "uncertainty_planning_core/uncertainty_planning_core.hpp"
#include "fast_kinematic_simulator/fast_kinematic_simulator.hpp"
#include "fast_kinematic_simulator/simulator_environment_builder.hpp"
#include "uncertainty_planning_examples/config_common.hpp"
namespace se2_common_config
{
inline uncertainty_planning_core::PLANNING_AND_EXECUTION_OPTIONS GetDefaultOptions()
{
uncertainty_planning_core::PLANNING_AND_EXECUTION_OPTIONS options;
options.clustering_type = uncertainty_contact_planning::CONVEX_REGION_SIGNATURE;
options.planner_time_limit = 120.0;
options.goal_bias = 0.1;
options.step_size = 15.0 * 0.125;
options.step_duration = 10.0;
options.goal_probability_threshold = 0.51;
options.goal_distance_threshold = 2.0 * 0.125;
options.connect_after_first_solution = 0.0;
options.signature_matching_threshold = 0.75;
options.distance_clustering_threshold = 15.0 * 0.125;
options.feasibility_alpha = 0.75;
options.variance_alpha = 0.75;
options.edge_attempt_count = 50u;
options.num_particles = 24u;
options.use_contact = true;
options.use_reverse = true;
options.use_spur_actions = true;
options.max_exec_actions = 1000u;
options.max_policy_exec_time = 300.0;
options.num_policy_simulations = 10u;
options.num_policy_executions = 0u;
options.policy_action_attempt_count = 100u;
options.debug_level = 0;
options.planner_log_file = "/tmp/se2_planner_log.txt";
options.policy_log_file = "/tmp/se2_policy_log.txt";
options.planned_policy_file = "/tmp/se2_planned_policy.policy";
options.executed_policy_file = "/dev/null";
return options;
}
inline config_common::TASK_CONFIG_PARAMS GetDefaultExtraOptions()
{
return config_common::TASK_CONFIG_PARAMS(0.125, 10.0, 0.0, 0.125, "se2_maze");
}
inline config_common::TASK_CONFIG_PARAMS GetExtraOptions()
{
return config_common::GetOptions(GetDefaultExtraOptions());
}
inline uncertainty_planning_core::PLANNING_AND_EXECUTION_OPTIONS GetOptions()
{
return uncertainty_planning_core::GetOptions(GetDefaultOptions());
}
inline simple_robot_models::SE2_ROBOT_CONFIG GetDefaultRobotConfig(const config_common::TASK_CONFIG_PARAMS& options)
{
        const double kp = 0.5;   // previously tried: 1.0, 0.1
        const double ki = 0.0;
        const double kd = 0.0;   // previously tried: 0.01
const double i_clamp = 0.0;
const double velocity_limit = 1.0;
const double angular_velocity_limit = velocity_limit * 0.125;
const double max_sensor_noise = options.sensor_error;
const double max_angular_sensor_noise = max_sensor_noise * 0.125;
const double max_actuator_noise = options.actuator_error;
const double max_angular_actuator_noise = max_actuator_noise * 0.125;
const simple_robot_models::SE2_ROBOT_CONFIG robot_config(kp, ki, kd, i_clamp, velocity_limit, max_sensor_noise, max_actuator_noise, kp, ki, kd, i_clamp, angular_velocity_limit, max_angular_sensor_noise, max_angular_actuator_noise);
return robot_config;
}
inline Eigen::Matrix<double, 3, 1> MakeConfig(const double x, const double y, const double zr)
{
Eigen::Matrix<double, 3, 1> config;
config << x, y, zr;
return config;
}
inline std::pair<Eigen::Matrix<double, 3, 1>, Eigen::Matrix<double, 3, 1>> GetStartAndGoal()
{
// Define the goals of the plan
const Eigen::Matrix<double, 3, 1> start = MakeConfig(7.75, 7.75, 0.0);
const Eigen::Matrix<double, 3, 1> goal = MakeConfig(0.75, 0.75, 0.0);
return std::make_pair(start, goal);
}
inline std::shared_ptr<EigenHelpers::VectorVector4d> GetRobotPoints()
{
std::shared_ptr<EigenHelpers::VectorVector4d> robot_points(new EigenHelpers::VectorVector4d());
const std::vector<double> x_pos = {-0.1875, -0.0625, 0.0625, 0.1875, 0.3125, 0.4375, 0.5625, 0.6875, 0.8125, 0.9375, 1.0625, 1.1875, 1.3125, 1.4375};
const std::vector<double> y_pos = {-0.1875, -0.0625, 0.0625, 0.1875, 0.3125, 0.4375, 0.5625, 0.6875, 0.8125, 0.9375, 1.0625, 1.1875, 1.3125, 1.4375};
const std::vector<double> z_pos = {-0.4375, -0.3125, -0.1875, -0.0625, 0.0625, 0.1875, 0.3125, 0.4375};
for (size_t xpdx = 0; xpdx < x_pos.size(); xpdx++)
{
for (size_t ypdx = 0; ypdx < y_pos.size(); ypdx++)
{
if (xpdx <= 3 || ypdx <= 3)
{
for (size_t zpdx = 0; zpdx < z_pos.size(); zpdx++)
{
robot_points->push_back(Eigen::Vector4d(x_pos[xpdx], y_pos[ypdx], z_pos[zpdx], 1.0));
}
}
}
}
return robot_points;
}
inline simple_robot_models::SimpleSE2Robot GetRobot(const simple_robot_models::SE2_ROBOT_CONFIG& robot_config)
{
// Make the actual robot
const Eigen::Matrix<double, 3, 1> initial_config = Eigen::Matrix<double, 3, 1>::Zero();
const simple_robot_models::SimpleSE2Robot robot(GetRobotPoints(), initial_config, robot_config);
return robot;
}
inline uncertainty_planning_core::SE2SamplerPtr GetSampler()
{
const double env_resolution = 0.125;
const double env_min_x = 0.0 + (env_resolution);
const double env_max_x = 10.0 - (env_resolution);
const double env_min_y = 0.0 + (env_resolution);
const double env_max_y = 10.0 - (env_resolution);
// Make the sampler
return uncertainty_planning_core::SE2SamplerPtr(new simple_samplers::SimpleSE2BaseSampler<uncertainty_planning_core::PRNG>(std::pair<double, double>(env_min_x, env_max_x), std::pair<double, double>(env_min_y, env_max_y)));
}
inline uncertainty_planning_core::SE2SimulatorPtr GetSimulator(const config_common::TASK_CONFIG_PARAMS& options, const int32_t debug_level)
{
const int32_t real_debug_level = std::max(0, debug_level - 10);
const simulator_environment_builder::EnvironmentComponents environment_components = simulator_environment_builder::BuildCompleteEnvironment(options.environment_id, options.environment_resolution);
const fast_kinematic_simulator::SolverParameters solver_params = fast_kinematic_simulator::GetDefaultSolverParameters();
return fast_kinematic_simulator::MakeSE2Simulator(environment_components.GetEnvironment(), environment_components.GetEnvironmentSDF(), environment_components.GetSurfaceNormalsGrid(), solver_params, options.simulation_controller_frequency, real_debug_level);
}
}
#endif // SE2_COMMON_CONFIG_HPP
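// Usage sketch (hypothetical planner executable, not part of this header):
//   const auto options = se2_common_config::GetOptions();
//   const auto extra_options = se2_common_config::GetExtraOptions();
//   const auto robot_config = se2_common_config::GetDefaultRobotConfig(extra_options);
//   const auto robot = se2_common_config::GetRobot(robot_config);
//   const auto sampler = se2_common_config::GetSampler();
//   const auto simulator = se2_common_config::GetSimulator(extra_options, options.debug_level);
//   const auto start_and_goal = se2_common_config::GetStartAndGoal();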
|
{"hexsha": "44466a4615d8f2e5bcadddbb4e95d243127c702c", "size": 7651, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/uncertainty_planning_examples/se2_common_config.hpp", "max_stars_repo_name": "UM-ARM-Lab/uncertainty_planning_examples", "max_stars_repo_head_hexsha": "0be4bf50db1539e8f79d9225d387270e67bcc2a2", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2019-06-23T05:12:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T02:12:02.000Z", "max_issues_repo_path": "include/uncertainty_planning_examples/se2_common_config.hpp", "max_issues_repo_name": "UM-ARM-Lab/uncertainty_planning_examples", "max_issues_repo_head_hexsha": "0be4bf50db1539e8f79d9225d387270e67bcc2a2", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/uncertainty_planning_examples/se2_common_config.hpp", "max_forks_repo_name": "UM-ARM-Lab/uncertainty_planning_examples", "max_forks_repo_head_hexsha": "0be4bf50db1539e8f79d9225d387270e67bcc2a2", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-04-17T03:08:47.000Z", "max_forks_repo_forks_event_max_datetime": "2017-08-04T13:08:59.000Z", "avg_line_length": 46.6524390244, "max_line_length": 265, "alphanum_fraction": 0.7038295648, "num_tokens": 1991}
|
[STATEMENT]
lemma matching_sel_symm:
assumes "matching_sel f"
shows "sel_symm f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sel_symm f
[PROOF STEP]
unfolding sel_symm_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x y. f x y = f y x
[PROOF STEP]
proof (standard, standard)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y. f x y = f y x
[PROOF STEP]
fix x y
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y. f x y = f y x
[PROOF STEP]
show "f x y = f y x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f x y = f y x
[PROOF STEP]
proof(cases "\<exists>e\<in>arcs G. (head G e) = x \<and> (tail G e) = y")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<exists>e\<in>arcs G. head G e = x \<and> tail G e = y \<Longrightarrow> f x y = f y x
2. \<not> (\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y) \<Longrightarrow> f x y = f y x
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y
goal (2 subgoals):
1. \<exists>e\<in>arcs G. head G e = x \<and> tail G e = y \<Longrightarrow> f x y = f y x
2. \<not> (\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y) \<Longrightarrow> f x y = f y x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y
goal (1 subgoal):
1. f x y = f y x
[PROOF STEP]
using assms symmetric_arcs sel_sym
[PROOF STATE]
proof (prove)
using this:
\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y
matching_sel f
?x \<in> arcs G \<Longrightarrow> \<exists>y. head G ?x = tail G y \<and> tail G ?x = head G y
\<lbrakk>tail G ?e\<^sub>1 = head G ?e\<^sub>2; head G ?e\<^sub>1 = tail G ?e\<^sub>2\<rbrakk> \<Longrightarrow> sel ?e\<^sub>1 = sel ?e\<^sub>2
goal (1 subgoal):
1. f x y = f y x
[PROOF STEP]
unfolding matching_sel_def
[PROOF STATE]
proof (prove)
using this:
\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y
\<forall>x y. (\<exists>e. tail G e = x \<and> head G e = y \<and> f x y = sel e) \<or> (\<nexists>e. tail G e = x \<and> head G e = y) \<and> f x y = 1
?x \<in> arcs G \<Longrightarrow> \<exists>y. head G ?x = tail G y \<and> tail G ?x = head G y
\<lbrakk>tail G ?e\<^sub>1 = head G ?e\<^sub>2; head G ?e\<^sub>1 = tail G ?e\<^sub>2\<rbrakk> \<Longrightarrow> sel ?e\<^sub>1 = sel ?e\<^sub>2
goal (1 subgoal):
1. f x y = f y x
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
f x y = f y x
goal (1 subgoal):
1. \<not> (\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y) \<Longrightarrow> f x y = f y x
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> (\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y) \<Longrightarrow> f x y = f y x
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> (\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y)
goal (1 subgoal):
1. \<not> (\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y) \<Longrightarrow> f x y = f y x
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> (\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<not> (\<exists>e\<in>arcs G. head G e = x \<and> tail G e = y)
goal (1 subgoal):
1. f x y = f y x
[PROOF STEP]
by (metis assms symmetric_arcs matching_sel_def not_arc_sel_1 sel_sym)
[PROOF STATE]
proof (state)
this:
f x y = f y x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
f x y = f y x
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1620, "file": "Query_Optimization_QueryGraph", "length": 18}
|
function [mu, sigma, map] = correction_step(mu, sigma, z, map);
% Updates the belief, i.e., mu and sigma after observing landmarks,
% and augments the map with newly observed landmarks.
% The employed sensor model measures the range and bearing of a landmark
% mu: state vector containing robot pose and poses of landmarks obeserved so far.
% Current robot pose = mu(1:3)
% Note that the landmark poses in mu are stacked in the order by which they were observed
% sigma: the covariance matrix of the system.
% z: struct array containing the landmark observations.
% Each observation z(i) has an id z(i).id, a range z(i).range, and a bearing z(i).bearing
% The vector 'map' contains the ids of all landmarks observed so far by the robot in the order
% by which they were observed, NOT in ascending id order.
% For computing sigma
global scale;
% Number of measurements in this time step
m = size(z, 2);
% Measurement noise
Q = 0.01*eye(2);
n = length(mu);
% zm, S, and the bearing accumulators are (re)initialized per measurement
% inside the loop below.
for i = 1:m
% If the landmark is observed for the first time:
if (isempty(find(map == z(i).id)))
% Add new landmark to the map
[mu, sigma, map] = add_landmark_to_map(mu, sigma, z(i), map, Q);
        % The new landmark has been added; move on to the next measurement
        continue;
endif
n=length(mu);
sigma_x_z = zeros(n,2);
% Compute sigma points from the predicted mean and covariance
% This corresponds to line 6 on slide 32
sigma_points = compute_sigma_points(mu, sigma);
% Normalize!
sigma_points(3,:) = normalize_angle(sigma_points(3,:));
% Compute lambda
n = length(mu);
num_sig = size(sigma_points,2);
lambda = scale - n;
% extract the current location of the landmark for each sigma point
% Use this for computing an expected measurement, i.e., applying the h function
landmarkIndex = find(map==(z(i).id));
landmarkXs = sigma_points(2*landmarkIndex + 2, :);
landmarkYs = sigma_points(2*landmarkIndex + 3, :);
    % Compute z_points (2 x 2n+1): the predicted range/bearing measurement
    % h(sigma_point) for every sigma point
    % This corresponds to line 7 on slide 32
    dx = landmarkXs - sigma_points(1,:);
    dy = landmarkYs - sigma_points(2,:);
    z_points = [sqrt(dx.^2 + dy.^2);
                normalize_angle(atan2(dy, dx) - sigma_points(3,:))];
% setup the weight vector for mean and covariance
wm = [lambda/scale, repmat(1/(2*scale), 1, 2*n)];
wc = wm;
    % Compute zm, line 8 on slide 32
    % zm is the recovered expected measurement mean from z_points:
    % a 2x1 vector [expected_range; expected_bearing].
    % The expected bearing is a weighted circular mean, computed by
    % summing the weighted sines/cosines of the predicted bearings.
    zm = zeros(2,1);
    xbar = 0;
    ybar = 0;
    for j=1:2*n+1
        zm(1,1) = zm(1,1) + wm(j)*z_points(1,j);
        xbar = xbar + wm(j)*cos(z_points(2,j));
        ybar = ybar + wm(j)*sin(z_points(2,j));
    endfor
    zm(2,1) = atan2(ybar,xbar);
    % Compute the innovation covariance matrix S (2x2), line 9 on slide 32.
    % Normalize the bearing after computing the difference; the measurement
    % noise Q is added once, outside the sum.
    S = zeros(2);
    for j=1:2*n+1
        zdiff_j = z_points(:,j) - zm;
        zdiff_j(2) = normalize_angle(zdiff_j(2));
        S = S + wc(j)*(zdiff_j*zdiff_j');
    endfor
    S = S + Q;
    % Compute Sigma_x_z, line 10 on slide 32
    % (equivalent to sigma times the Jacobian H transposed in the EKF).
    % sigma_x_z (nx2, zeroed above) accumulates the state-measurement cross
    % covariance; normalize the bearing after computing the difference.
    for j=1:2*n+1
        zdiff_j = z_points(:,j) - zm;
        zdiff_j(2) = normalize_angle(zdiff_j(2));
        sigma_x_z = sigma_x_z + wc(j)*(sigma_points(:,j) - mu)*zdiff_j';
    endfor
% TODO: Compute the Kalman gain, line 11 on slide 32
K = sigma_x_z/S;
% Get the actual measurement as a vector (for computing the difference to the observation)
z_actual = [z(i).range; z(i).bearing];
    % Update mu and sigma, line 12 + 13 on slide 32;
    % normalize the relative bearing before applying the correction
    zdiff = z_actual - zm;
    zdiff(2) = normalize_angle(zdiff(2));
    mu = mu + K*zdiff;
    sigma = sigma - K*S*K';
    % Normalize the robot heading mu(3)
    mu(3) = normalize_angle(mu(3));
endfor
end
|
{"author": "kiran-mohan", "repo": "SLAM-Algorithms-Octave", "sha": "e0254ad38cfca2170b2af68c96c183df77c76252", "save_path": "github-repos/MATLAB/kiran-mohan-SLAM-Algorithms-Octave", "path": "github-repos/MATLAB/kiran-mohan-SLAM-Algorithms-Octave/SLAM-Algorithms-Octave-e0254ad38cfca2170b2af68c96c183df77c76252/3_UKF_SLAM/octave/correction_step_bkup.m"}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from models import JobDescription
from models.runner import goes
from populartwitterbot import Bot
from grapher import draw
import time
import random
from StringIO import StringIO
from netcdf import netcdf as nc
import numpy as np
import os
import glob
import pytz
import urllib
import matplotlib.pyplot as plt
import json
import logging
import logging.handlers  # RotatingFileHandler lives in logging.handlers
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
short = (lambda f, start=2, end=-2:
".".join((f.split('/')[-1]).split('.')[start:end]))
get_datetime = lambda f: datetime.strptime(short(f, 1), '%Y.%j.%H%M%S')
gmt = pytz.timezone('GMT')
local = pytz.timezone('America/Argentina/Buenos_Aires')
localize = lambda dt: (gmt.localize(dt)).astimezone(local)
def is_a_broked_file(filename):
try:
with nc.loader(filename) as root:
data = nc.getvar(root, 'data')
data[:]
return False
except Exception:
return True
class Presenter(object):
def __init__(self):
self.logger = logging.getLogger("solarbot")
self.logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(
"log_solarbot.out", maxBytes=20, backupCount=5)
self.logger.addHandler(handler)
if 'CONFIG' in os.environ:
CONFIG = os.environ['CONFIG']
else:
with open('config.json') as f:
CONFIG = f.read()
self.config = json.loads(CONFIG)
self.twitter = Bot(self.config.items()[0])
config = self.config['solarbot']
self.noaaclass = config['noaaclass']
self.job = config['job']
self.places = config['places']
if not os.path.exists(self.noaaclass['folder']):
os.makedirs(self.noaaclass['folder'])
self.tags = ['raspberrypi', 'noaa', 'goes', 'satellite',
'solarradiation', 'python', 'argentina',
'heliosat2']
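    # Minimal config.json sketch inferred from the keys read in __init__ and
    # demonstrate(); every value below is a placeholder, not from the project:
    #   {"twitter": {"...api credentials for Bot..."},
    #    "solarbot": {"noaaclass": {"folder": "goes_data"},
    #                 "job": {"data": "goes_data/*.nc",
    #                         "product": "products",
    #                         "temporal_cache": "temp_cache"},
    #                 "places": {"pergamino": [-33.9, -60.6]}}}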
def upload_media(self, image):
with open(image, 'rb') as photo:
result = StringIO(photo.read())
return result
def tweet(self, status, images):
time.sleep(10)
medias = map(lambda i: self.upload_media(i), images)
params = {'status': status}
if not images:
self.twitter.update_status(status=status)
else:
params['media'] = medias[0]
self.twitter._post('/statuses/update_with_media',
params=params)
self.logger.info("%s (%s)" % (status, len(status)))
def say(self, status, screen_name):
self.twitter.send_direct_message(screen_name=screen_name, text=status)
self.logger.info("%s (%s) [%s]" % (status, len(status), screen_name))
def indexes(self, lat, lon, place):
diff = np.sqrt((lat - place[0]) ** 2 + (lon - place[1]) ** 2)
return diff==diff.min()
def graph_important_point(self, places):
with nc.loader(self.job['data']) as root:
lat, lon = nc.getvar(root, 'lat'), nc.getvar(root, 'lon')
data = np.zeros(lat.shape[1:])
for place in places.items():
data[self.indexes(lat[0], lon[0], place[1])] = 1
y, x = lat.shape[1:]
plt.figure(figsize=(x/20, y/20))
img = plt.imshow(data)
img.set_clim(0, data.max())
plt.title('inta')
plt.colorbar()
plt.axis('off')
plt.savefig('location.png', bbox_inches=0)
def get_area(self, lat, lon, places):
to_string = lambda refs: '|'.join(map(lambda s: ','.join(s),
refs))
refs = [[str(lat[0, y, x]), str(lon[0, y, x])]
for x in [0, -1] for y in [0, -1]]
refs[2], refs[3] = refs[3], refs[2]
refs.append(refs[0])
refs_str = to_string(refs)
area_map = ("http://maps.googleapis.com/maps/api/staticmap?"
"center=%s&zoom=7&size=400x400&maptype=roadmap&"
"sensor=false&path=color:red|weight:5|"
"fillcolor:white|%s" % (to_string([refs[0]]), refs_str))
self.graph_important_point(places)
urllib.urlretrieve(area_map, 'area_map.png')
self.logger.info(area_map)
return area_map
def getlastradiation(self, filepattern, places):
radiations = []
with nc.loader('static.nc') as static:
lat, lon = nc.getvar(static, 'lat')[:], nc.getvar(static, 'lon')[:]
self.get_area(lat, lon, places)
idxs = map(lambda (p, c): (p, self.indexes(lat[0], lon[0], c)),
places.items())
shape = lat.shape[1:]
inside = lambda i, d: 0 < np.where(i)[d][0] < shape[d]
idxs = filter(lambda (p, i): inside(i, 0) and inside(i, 1), idxs)
with nc.loader(filepattern) as root:
data = nc.getvar(root, 'globalradiation')
radiations = map(lambda (p, c): (p, [
float(data[-1][c]),
float(data[-1][c])]), idxs)
return dict(radiations)
def solarenergy_showcase(self):
filepattern = '%s/goes13.*.BAND_01.nc' % self.job['product']
radiations = self.getlastradiation(filepattern, self.places)
dt = get_datetime(self.files[-1])
dt_here = localize(dt)
dt_str = str(dt_here).split(' ')[-1]
self.logger.info(dt_str)
radiations = map(lambda t: "%s: %.2f" % (t[0], t[1][0]),
radiations.items())
users = ['ecolell', 'gersolar']
radiations = ', '.join(radiations)
for u in users:
self.say(u'[%s] Irradiancias (W/[m².sr]): [%s]' %
(dt_str, radiations), u)
filename = draw(filepattern, 'map.png', str(dt_here))
self.tweet('Acabamos de estimar la irradiancia solar de las '
'%s para el area de Pergamino.' % dt_str,
['area_map.png'])
tag = random.choice(self.tags)
self.tweet(u'[%s] Irradiancia en W/(m².sr) a partir del '
'modelo de @gersolar. #%s' % (dt_str, tag),
filename)
    def remove_broken_files(self, files):
        size = lambda f: os.stat(f).st_size
        median_size = np.median(np.array(map(size, files)))
        broken = filter(lambda f: size(f) < median_size, files)
        self.logger.info(str(broken))
        map(os.remove, broken)
def demonstrate(self):
diff = lambda dt, h: (dt - timedelta(hours=h))
decimal = (lambda dt, h: diff(dt, h).hour +
diff(dt, h).minute / 60. + diff(dt, h).second / 3600.)
should_download = lambda dt: decimal(dt, 4) >= 6 and decimal(dt, 4) <= 19
filenames = []
uptime = goes.noaaclass.next_up_datetime()
if uptime < pytz.utc.localize(datetime.utcnow()):
self.noaaclass["datetime_filter"] = should_download
try:
filenames = goes.download(**(self.noaaclass))
except Exception, e:
self.logger.info('Download skipped: %s' % (str(e)))
else:
self.tweet("The NOAA CLASS is down, the system will be back "
"at %s" % str(uptime))
        self.remove_broken_files(filenames)
get_temporals = (lambda:
glob.glob('%s/*.nc' % self.job['temporal_cache']))
map(os.remove, get_temporals())
map(os.remove, glob.glob('%s/*.nc' % self.job['product']))
self.files = sorted(glob.glob(self.job['data']))
        in_the_month = lambda f: get_datetime(f) >= datetime.utcnow() - timedelta(days=30)
        self.files = filter(in_the_month, self.files)
name = lambda f: f.split('/')[-1]
temps = get_temporals()
last_temp = sorted(map(name, temps))[-1] if temps else ''
last_data = name(self.files[-1]) if self.files else ''
self.logger.info("%s %s" % (last_temp, last_data))
if len(self.files) >= 28 and last_temp != last_data:
begin = datetime.now()
job = JobDescription(**(self.job))
job.run()
end = datetime.now()
self.logger.info('Elapsed time %.2f seconds.' %
(end - begin).total_seconds())
self.solarenergy_showcase()
def run():
presenter = Presenter()
presenter.demonstrate()
time.sleep(10)
return presenter
if __name__ == '__main__':
run()
|
{"hexsha": "e938abfca370fdc21dc6dc8c958fa02dda427f39", "size": 8600, "ext": "py", "lang": "Python", "max_stars_repo_path": "solarbot/bot.py", "max_stars_repo_name": "limiear/solarbot", "max_stars_repo_head_hexsha": "2596f0b2bddceacf9d04bae134e30cec5c6fa965", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "solarbot/bot.py", "max_issues_repo_name": "limiear/solarbot", "max_issues_repo_head_hexsha": "2596f0b2bddceacf9d04bae134e30cec5c6fa965", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2015-04-13T04:13:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T00:48:59.000Z", "max_forks_repo_path": "solarbot/bot.py", "max_forks_repo_name": "limiear/solarbot", "max_forks_repo_head_hexsha": "2596f0b2bddceacf9d04bae134e30cec5c6fa965", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7387387387, "max_line_length": 89, "alphanum_fraction": 0.5590697674, "include": true, "reason": "import numpy", "num_tokens": 2115}
|
@testset "481.magical-string.jl" begin
let
res = [1, 1, 1, 2, 3, 3, 4, 4, 4, 5]
for i in 1:10
@test magical_string(i) == res[i]
end
end
end
|
{"hexsha": "f4fa5429c8ba5f50aadc726c5bc42d704f237cb4", "size": 192, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/problems/481.magical-string.jl", "max_stars_repo_name": "jmmshn/LeetCode.jl", "max_stars_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-10-27T18:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T13:27:49.000Z", "max_issues_repo_path": "test/problems/481.magical-string.jl", "max_issues_repo_name": "jmmshn/LeetCode.jl", "max_issues_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2020-11-01T07:26:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T11:57:53.000Z", "max_forks_repo_path": "test/problems/481.magical-string.jl", "max_forks_repo_name": "jmmshn/LeetCode.jl", "max_forks_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2020-10-30T11:52:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T10:35:11.000Z", "avg_line_length": 24.0, "max_line_length": 53, "alphanum_fraction": 0.453125, "num_tokens": 76}
|
"""
Use the basinhopping algorithm to find best alpha, speed, and frequency
that produces the best spatial correlation for a given canonical network
"""
# number stuff imports
import h5py
import numpy as np
import pandas as pd
from scipy.optimize import basinhopping
from scipy.stats import pearsonr
from sklearn.linear_model import LinearRegression
import sys
import os
import time
# spectrome imports
from spectrome.brain import Brain
from spectrome.utils import functions, path
from spectrome.forward import eigenmode
# Limit number of threads
# os.environ["OMP_NUM_THREADS"] = "2"
# os.environ["MKL_NUM_THREADS"] = "2"
# os.environ["NUMEXPR_NUM_THREADS"] = "2"
# hcp template connectome directory
hcp_dir = "../data"
HCP_brain = Brain.Brain()
HCP_brain.add_connectome(hcp_dir)
HCP_brain.reorder_connectome(HCP_brain.connectome, HCP_brain.distance_matrix)
HCP_brain.bi_symmetric_c()
HCP_brain.reduce_extreme_dir()
# Load Pablo's Yeo 2017 canonical network maps
com_dk = np.load("../data/com_dk.npy", allow_pickle=True).item()
DK_df_normalized = pd.read_csv("../data/DK_dictionary_normalized.csv").set_index(
"Unnamed: 0"
)
# binarize:
ub, lb = 1, 0 # define binary boundaries
DKfc_binarized = pd.DataFrame(
[], index=DK_df_normalized.index, columns=DK_df_normalized.columns
)
for name in DK_df_normalized.index:
u = np.mean(np.nan_to_num(DK_df_normalized.loc[name].values))
s = np.std(np.nan_to_num(DK_df_normalized.loc[name].values))
threshold = u - s * 0.1
DKfc_binarized.loc[name] = np.where(
DK_df_normalized.loc[name].values > threshold, ub, lb
)
def laplacian_corr(x, Brain, FC_networks, network_name):
# start = time.time()
# w = 2 * np.pi * x[0]
# Laplacian, Brain already prep-ed with connectomes outside of function:
Brain.decompose_complex_laplacian(alpha=x[0], k=x[1], num_ev=86)
canon_network = np.nan_to_num(FC_networks.loc[network_name].values)
# compute max correlation for optimization
corrs = np.zeros([Brain.norm_eigenmodes.shape[1], 1])
for e in np.arange(0, len(corrs)):
corrs[e] = -pearsonr(np.squeeze(canon_network), Brain.norm_eigenmodes[:, e])[0]
# end = time.time()
# print(end - start)
return np.min(corrs)
class BH_bounds(object):
def __init__(self, xmax=[5, 600], xmin=[0, 0.1]):
self.xmax = np.array(xmax)
self.xmin = np.array(xmin)
def __call__(self, **kwargs):
x = kwargs["x_new"]
tmax = bool(np.all(x <= self.xmax))
tmin = bool(np.all(x >= self.xmin))
return tmax and tmin
allx0 = np.array(
[
[0.5, 5],
[1, 100],
[0.8, 50],
[0.8, 200],
[0.5, 400],
[3, 15],
[5, 250],
[2, 150],
[2, 300],
[1, 500],
]
)
bnds = BH_bounds()
print(
"Starting optimization for {} initial condition {}".format(
str(sys.argv[1]), str(sys.argv[2])
)
)
opt_res = basinhopping(
laplacian_corr,
x0=allx0[int(sys.argv[2]), :],
minimizer_kwargs={"args": (HCP_brain, DK_df_normalized, str(sys.argv[1]))},
niter=1500,
T=0.1,
stepsize=2,
accept_test=bnds,
seed=24,
niter_success=100,
disp=True,
)
opt_alpha = opt_res["x"][0]
opt_phi = opt_res["x"][1]
# print('optimized output: {}'.format(opt_res))
# Recreate the forward solution:
# w_opt = 2 * np.pi * opt_freq
HCP_brain.decompose_complex_laplacian(alpha=opt_alpha, k=opt_phi)
canon_network = np.nan_to_num(DK_df_normalized.loc[str(sys.argv[1])].values)
# compute max correlation for optimization
corrs = np.squeeze(np.zeros([HCP_brain.norm_eigenmodes.shape[1], 1]))
for e in np.arange(0, len(corrs)):
prcorr = pearsonr(np.squeeze(canon_network), HCP_brain.norm_eigenmodes[:, e])[
0
]
corrs[e] = prcorr
# print(prcorr)
ntw_opt_corr = np.round(corrs, 3)
max_opt_corr = np.max(ntw_opt_corr)
ordered_corr = np.argsort(-ntw_opt_corr)
# print(ordered_corr)
print("basinhop:{}".format(opt_res["fun"]))
print("forward max:{}".format(max_opt_corr))
assert ntw_opt_corr[ordered_corr[1]] <= ntw_opt_corr[ordered_corr[0]]
assert max_opt_corr == -np.round(opt_res["fun"], 3)
# Linear Regression for 10 K's and save in a dictionary:
# K = 11
# if str(sys.argv[3]) == 'dice':
# # create empty list of dicts:
# LinReg = []
# keys = ['num','coef','r2score','ordereigs']
# for k in np.arange(1,K):
# selected_eigs = HCP_brain.norm_eigenmodes[:,ordered_dice[0:k]]
# canon_network = np.nan_to_num(DK_df_normalized.loc[str(sys.argv[1])].values).reshape(-1,1)
# regr = LinearRegression()
# regr.fit(canon_network, selected_eigs)
# c = regr.coef_
# r2 = regr.score(canon_network, selected_eigs)
# reg_results = {keys[0]:k, keys[1]:c, keys[2]:r2, keys[3]:ordered_dice[0:k]}
# LinReg.append(reg_results)
# print('For K = {}, chosen eigs: {}, coefficients: {} , residual error: {}'.format(k, ordered_dice[0:k], c, r2))
# opt_res['LinRegResults'] = LinReg
# file_name = str(sys.argv[1]) + str(sys.argv[2]) + "_BH_dice.h5"
# file_path = os.path.join(hcp_dir, file_name)
# path.save_hdf5(file_path, opt_res)
# print("Optimal result: " , opt_res['x'])
# elif str(sys.argv[3]) == 'corr':
# # create empty list of dicts:
# LinReg = []
# keys = ['num','coef','r2score','ordereigs']
# for k in np.arange(1,K):
# selected_eigs = HCP_brain.norm_eigenmodes[:,ordered_corr[0:k]]
# canon_network = np.nan_to_num(DK_df_normalized.loc[str(sys.argv[1])].values).reshape(-1,1)
# regr = LinearRegression()
# regr.fit(canon_network, selected_eigs)
# c = regr.coef_
# r2 = regr.score(canon_network, selected_eigs)
# reg_results = {keys[0]:k, keys[1]:c, keys[2]:r2, keys[3]:ordered_corr[0:k]}
# LinReg.append(reg_results)
# print('For K = {}, chosen eigs: {}, coefficients: {} , residual error: {}'.format(k, ordered_corr[0:k], c, r2))
# opt_res['LinRegResults'] = LinReg
file_name = str(sys.argv[1]) + str(sys.argv[2]) + "_BH_pearson.h5"
file_path = os.path.join(hcp_dir, file_name)
path.save_hdf5(file_path, opt_res)
print("Optimal result: ", opt_res["x"])
|
{"hexsha": "79243e463e8170e6acfb7c93d438d57b861978dd", "size": 6209, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/laplacian_pearson_basinhopping.py", "max_stars_repo_name": "axiezai/complex_laplacian", "max_stars_repo_head_hexsha": "e84574a7d9c051a95b5d37aa398765aeb5f85fa4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/laplacian_pearson_basinhopping.py", "max_issues_repo_name": "axiezai/complex_laplacian", "max_issues_repo_head_hexsha": "e84574a7d9c051a95b5d37aa398765aeb5f85fa4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/laplacian_pearson_basinhopping.py", "max_forks_repo_name": "axiezai/complex_laplacian", "max_forks_repo_head_hexsha": "e84574a7d9c051a95b5d37aa398765aeb5f85fa4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-19T19:04:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-19T19:04:18.000Z", "avg_line_length": 32.170984456, "max_line_length": 121, "alphanum_fraction": 0.6587212111, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1842}
|
# coding=utf-8
import cv2
import numpy as np
import os
import pickle
from fnmatch import fnmatch
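# Pipeline (as implemented below): extract spatio-temporal cube features
# from the training frames, learn sparse combinations S with
# training_algorithm, then flag test cubes whose reconstruction error under
# every learned combination R_i exceeds a threshold (testing_algorithm).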
def main():
S = []
project_path = os.path.dirname(os.path.realpath(__file__))
if os.path.exists(project_path + r'\check_point_model\check_point_S'):
S = pickle.load(open(project_path + r"\check_point_model\check_point_S", 'rb'))
else:
train_frames_path = project_path + r"\all_train_frames"
S = preprocess_and_training(train_frames_path)
pickle.dump(S, open(project_path + r'\check_point_model\check_point_S', 'wb'))
test_frames_path = project_path + r'\all_test_frames'
import_and_test_abnormal(test_frames_path, S)
def import_and_test_abnormal(test_frames_path, S):
test_folder_list = []
for path, subdirs, files in os.walk(test_frames_path):
for name in subdirs:
test_folder_list.append(str(os.path.join(path, name)))
feature_list = []
for folder in test_folder_list:
feature_list = framesToFeatures(folder, "*.jpg")
file_list, result = testing_algorithm(feature_list, S, 0.00001915625)
if (len(result) == 0):
print "Normal"
else:
for res in result:
print res
continue_key = raw_input("Press enter to show the abnormal frames : ")
key_str = "1"
while (key_str == "1"):
if (continue_key == ""):
show_image(folder, file_list)
key_str = raw_input("Press Enter to continue or 1 to replay : ")
def preprocess_and_training(train_frames_path):
i = 0
file_list1 = []
for path, subdirs, files in os.walk(train_frames_path):
for name in subdirs:
file_list1.append(str(os.path.join(path, name)))
feature_list = []
for folder in file_list1:
feature_list.append(framesToFeatures(folder, "*.jpg"))
i += 1
# Training for Sparse Combination Learning
S = []
B = []
feature_list = [i[:100] for i in feature_list]
# feature_list = [i for i in feature_list]
for set in feature_list:
S_temp, B_temp = training_algorithm(set)
S += S_temp
B += B_temp
return S
def training_algorithm(X):
Xc = X
S = []
B = []
gamma = []
i = 1
while (len(Xc) > 10):
# Create the initial dictionary Si using kmeans
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
flags = cv2.KMEANS_RANDOM_CENTERS
compactness, labels, centers = cv2.kmeans(np.array(Xc, dtype="float32"), 10, None, criteria, 10, flags)
centers = [(sum(val) / len(val)) for val in centers]
Si = [centers]
# Reset Gamma and Beta i for next Vector generation
gamma = []
Bi = []
epoch = 0
start = 1
while (start == 1 or start == 2 or deltaL < 0):
if (start == 2):
start = 0
if (start == 1):
deltaL = 0
start = 2
L2 = 0
L1 = 0
Bi = optimise_beta(Si, Xc)
Si = np.subtract(np.array(Si), (0.0001 * deltaL))
gamma = optimise_gamma(Si, Xc, Bi, 0.04)
L1 = L2
L2 = evaluate_L(Si, Xc, Bi, gamma)
deltaL = L2 - L1
epoch += 1
S.append(Si)
B.append(Bi)
change_index = 0
for val in range(len(gamma)):
if (gamma[val] == 0):
del Xc[val - change_index]
change_index += 1
i += 1
return S, B
def optimise_beta(Si, Xc):
# Using equation 6 optimise beta value
beta = []
Si = np.array(Si)
Si_transpose = np.transpose(Si)
m = 0.00000003
# print Si.shape
# print Si_transpose.shape
for xj in Xc:
numpy_xj = np.array([xj])
Si_T_Si = np.dot(Si_transpose, Si)
        # Note: det() can be zero for singular matrices,
        # e.g. [[4, 2], [10, 5]]; regularize with m*I in that case
if (np.linalg.det(Si_T_Si) == 0):
Si_T_Si = np.add(Si_T_Si, m * np.eye(10, 10))
inverse_sit = np.linalg.inv(Si_T_Si)
dot_in_si = np.dot(inverse_sit, Si_transpose)
itr_beta = np.dot(dot_in_si, numpy_xj)
beta.append(itr_beta)
return beta
def optimise_gamma(Si, Xc, Bi, lamda):
gamma = []
Si = np.array(Si)
for xj in range(len(Xc)):
if ((((np.linalg.norm(np.subtract(np.array([Xc[xj]]), np.dot(Si, np.array(Bi[xj]))))) ** 2) ** 2) < lamda):
gamma.append(1)
else:
gamma.append(0)
return gamma
def evaluate_L(Si, Xc, Bi, gamma):
L = 0
Si = np.array(Si)
temp_l = []
for xj in range(len(Xc)):
l_iter_val = gamma[xj] * (
((np.linalg.norm(np.subtract(np.array([Xc[xj]]), np.dot(Si, np.array(Bi[xj]))))) ** 2) ** 2)
temp_l.append(l_iter_val)
return sum(temp_l)
def framesToFeatures(frames_path, pattern="*.jpg"):
print(frames_path)
features = []
file_list = []
for path, subdirs, files in os.walk(frames_path):
for name in files:
if fnmatch(name, pattern):
file_list.append(str(os.path.join(path, name)))
numOfFiles = len(file_list)
i = 0
time = 0
number_cubes = 0
# reading 5 images at a time
while (numOfFiles - i >= 5):
time += 1
        # convert the frames to grayscale
img1 = cv2.cvtColor(cv2.imread(file_list[i]), cv2.COLOR_BGR2GRAY);
i += 1;
img2 = cv2.cvtColor(cv2.imread(file_list[i]), cv2.COLOR_BGR2GRAY);
i += 1;
img3 = cv2.cvtColor(cv2.imread(file_list[i]), cv2.COLOR_BGR2GRAY);
i += 1;
img4 = cv2.cvtColor(cv2.imread(file_list[i]), cv2.COLOR_BGR2GRAY);
i += 1;
img5 = cv2.cvtColor(cv2.imread(file_list[i]), cv2.COLOR_BGR2GRAY);
i += 1;
image_set = [img1, img2, img3, img4, img5]
# Create 3 different scale for each image
re_img_2020_set = []
re_img_4030_set = []
re_img_160120_set = []
for image in image_set:
img_2020 = cv2.resize(image, (20, 20))
img_4030 = cv2.resize(image, (40, 30))
img_160120 = cv2.resize(image, (160, 120))
re_img_2020_set.append(img_2020)
re_img_4030_set.append(img_4030)
re_img_160120_set.append(img_160120)
resize_image_set = [re_img_2020_set, re_img_4030_set, re_img_160120_set]
# Collect non-overlaping patches form all the scale
patches_all = [[], [], []]
i1 = 0
for images_set in resize_image_set:
for resize_img in images_set:
patch_list = []
patch = []
for start in range(0, len(resize_img[0]), 10):
count = 1
for row in resize_img:
patch.append(row[start:start + 10])
if (count == 10):
count = 0
patch_list.append(patch)
patch = []
count += 1
patches_all[i1].append(patch_list)
i1 += 1
# Generate cubes and list of all cubes
cubes = []
for resolution_patch_set in patches_all:
for i1 in range(len(resolution_patch_set[0])):
p_one = resolution_patch_set[0][i1];
p_two = resolution_patch_set[1][i1];
p_three = resolution_patch_set[2][i1];
p_four = resolution_patch_set[3][i1];
p_five = resolution_patch_set[4][i1];
cubes.append([p_one, p_two, p_three, p_four, p_five])
number_cubes += len(cubes)
# features=[]
for cub in cubes:
            # The dx/dy arguments give the derivative order in each direction;
            # 0 means no derivative in that direction (typically 0, 1, or 2).
            # Calculate the x, y and t derivatives for each cube
sobelx = cv2.Sobel(np.array(cub), cv2.CV_64F, 1, 0, ksize=-1)
sobely = cv2.Sobel(np.array(cub), cv2.CV_64F, 0, 1, ksize=-1)
sobelt = cv2.Sobel(np.array(zip(*cub)), cv2.CV_64F, 0, 1, ksize=-1)
sobelt = zip(*sobelt)
feature = []
# feature=np.array(feature)
# Concatinate all the x,y,t values at each pixel to generate 1500 dimension feature
for time_value in range(5):
for y_value in range(10):
for x_value in range(10):
feature.append(sobelx[time_value][y_value][x_value])
feature.append(sobely[time_value][y_value][x_value])
feature.append(sobelt[time_value][y_value][x_value])
features.append(feature)
print "--------------Done Feature Extraction------------"
print "Number of cubes generated : ", number_cubes
print "Number of feature generated : ", len(features)
print "Length of each feature : ", len(features[0])
print "-------------------------------------------------"
return features
def show_image(folder, file_list_no, pattern="*.png"):
file_list = []
for path, subdirs, files in os.walk(folder):
for name in files:
# if fnmatch(name, pattern):
file_list.append(str(os.path.join(path, name)))
numOfFiles = len(file_list)
# print file_list
file_to_print = []
for f_no in file_list_no:
if (f_no == 0):
file_to_print.append(file_list[0])
file_to_print.append(file_list[1])
file_to_print.append(file_list[2])
file_to_print.append(file_list[3])
file_to_print.append(file_list[4])
file_to_print.append(file_list[5])
elif (f_no <= numOfFiles):
file_to_print.append(file_list[f_no - 1])
if (f_no - 1 + 1 < numOfFiles):
file_to_print.append(file_list[f_no - 1 + 1])
if (f_no - 1 + 2 < numOfFiles):
file_to_print.append(file_list[f_no - 1 + 2])
if (f_no - 1 + 3 < numOfFiles):
file_to_print.append(file_list[f_no - 1 + 3])
if (f_no - 1 + 4 < numOfFiles):
file_to_print.append(file_list[f_no - 1 + 4])
if (f_no - 1 + 5 < numOfFiles):
file_to_print.append(file_list[f_no - 1 + 5])
else:
break
for file in file_to_print:
img = cv2.imread(file) # read a picture using OpenCV
cv2.imshow('image', img) # Display the picture
cv2.waitKey(150) # wait for closing
cv2.destroyAllWindows()
def testing_algorithm(x, S, T):
# print S
R = getR(S)
# print R
return_list = []
file_list = []
# print xs
i = 0
time = 0
flag = 0
for xi in x:
i += 1
flag = 0
mean = []
for Ri in R:
val = np.linalg.norm(np.dot(np.array(Ri), np.array([xi]))) ** 2
mean.append(val)
if (val < T):
flag = 1
break
if (i == 208):
i = 0
min_mean = min(mean)
if ((str("Abnormal at time" + str(time) + " seconds.") not in return_list) and min_mean > 0.0000000014):
return_list.append(str("Abnormal at time" + str(time) + " seconds."))
file_list.append(time)
print "time:small : ", time, min_mean
time += 5
mean = []
return file_list, return_list
def getR(S):
R = [];
m = 0.00000003
for Si in S:
Si = np.array(Si);
Si_transpose = np.transpose(Si);
Si_T_Si = np.dot(Si_transpose, Si)
if (np.linalg.det(Si_T_Si) == 0):
Si_T_Si = np.add(Si_T_Si, m * np.eye(10, 10))
Ri = np.subtract(np.dot(Si, np.dot(np.linalg.inv(Si_T_Si), Si_transpose)), np.identity(len(Si)));
R.append(Ri);
return R;
def search_threshold(candidate_t):
    pass
if __name__ == '__main__':
    main()
|
{"hexsha": "6c6d799ea4afef1fa95d9b8579308cce202ea457", "size": 12018, "ext": "py", "lang": "Python", "max_stars_repo_path": "AbnormalEventDetection/src/project_train_test.py", "max_stars_repo_name": "Scott1123/ComputerVisionLab", "max_stars_repo_head_hexsha": "a588d328a6c9a4519bdaa81382968cd07711d648", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AbnormalEventDetection/src/project_train_test.py", "max_issues_repo_name": "Scott1123/ComputerVisionLab", "max_issues_repo_head_hexsha": "a588d328a6c9a4519bdaa81382968cd07711d648", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AbnormalEventDetection/src/project_train_test.py", "max_forks_repo_name": "Scott1123/ComputerVisionLab", "max_forks_repo_head_hexsha": "a588d328a6c9a4519bdaa81382968cd07711d648", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5433070866, "max_line_length": 116, "alphanum_fraction": 0.5464303545, "include": true, "reason": "import numpy", "num_tokens": 3221}
|
# Copyright (c) 2020 fortiss GmbH
#
# Authors: Patrick Hart, Julian Bernhard, Klemens Esterle, and
# Tobias Kessler
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import numpy as np
from bark.core.models.behavior import BehaviorModel, BehaviorMPContinuousActions
from bark.core.models.dynamic import SingleTrackModel
from bark_ml.commons.py_spaces import Discrete
class BehaviorDiscreteML(BehaviorMPContinuousActions):
def __init__(self,
params=None):
BehaviorMPContinuousActions.__init__(
self,
params)
self._min_max_acc = params["ML"]["BehaviorDiscreteML"][
"MinMaxAcc", "", [-3., 3.]]
self._acc_d_steps = params["ML"]["BehaviorDiscreteML"][
"AccDiscretizationSteps", "", 10]
self._min_max_steer = params["ML"]["BehaviorDiscreteML"][
"MinMaxSteeringRate", "", [-.2, .2]]
self._steer_d_steps = params["ML"]["BehaviorDiscreteML"][
"SteeringRateDiscretizationSteps", "", 5]
# add motion primitives
for acc in np.linspace(
self._min_max_acc[0], self._min_max_acc[1], self._acc_d_steps):
for steering_rate in np.linspace(
self._min_max_steer[0], self._min_max_steer[1], self._steer_d_steps):
super().AddMotionPrimitive(
np.array([acc, steering_rate], dtype=np.float32))
@property
def action_space(self):
return Discrete(self.GetNumMotionPrimitives(None))
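# Usage sketch (assumes BARK's ParameterServer and a gym-like Discrete
# exposing `.n`; both are assumptions, not shown in this module):
#   from bark.runtime.commons.parameters import ParameterServer
#   behavior = BehaviorDiscreteML(ParameterServer())
#   # default grid: 10 acceleration steps x 5 steering-rate steps = 50 primitives
#   n_actions = behavior.action_space.n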
|
{"hexsha": "3864d48073624ab1cf7374ffbd7cbac671dc282e", "size": 1429, "ext": "py", "lang": "Python", "max_stars_repo_path": "bark_ml/behaviors/discrete_behavior.py", "max_stars_repo_name": "GAIL-4-BARK/bark-ml", "max_stars_repo_head_hexsha": "c61c897842c2184ee842428e451bae3be2cd7242", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-25T12:38:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-29T11:55:45.000Z", "max_issues_repo_path": "bark_ml/behaviors/discrete_behavior.py", "max_issues_repo_name": "SebastianGra/bark-ml_MCTS_RL", "max_issues_repo_head_hexsha": "8334f141d02bdc012a0bc6ac00d679018e0f46f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-05-05T13:53:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-13T15:58:51.000Z", "max_forks_repo_path": "bark_ml/behaviors/discrete_behavior.py", "max_forks_repo_name": "SebastianGra/bark-ml_MCTS_RL", "max_forks_repo_head_hexsha": "8334f141d02bdc012a0bc6ac00d679018e0f46f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-27T13:08:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T13:08:46.000Z", "avg_line_length": 34.0238095238, "max_line_length": 80, "alphanum_fraction": 0.7081875437, "include": true, "reason": "import numpy", "num_tokens": 370}
|
# -*- coding: utf-8 -*-
"""
decode
======
"""
# import standard libraries
import os
import subprocess
from pathlib import Path
# import third-party libraries
import matplotlib.pyplot as plt
import numpy as np
# import my libraries
import test_pattern_generator2 as tpg
import plot_utility as pu
# information
__author__ = 'Toru Yoshihara'
__copyright__ = 'Copyright (C) 2021 - Toru Yoshihara'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Toru Yoshihara'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
def decode_mp4(in_fname="./captured_video/capture_sample.mp4"):
"""
decode video
return single 10bit data
"""
stem_name = Path(Path(in_fname).name).stem
out_fname = f'./decoded_png/{stem_name}.png'
cmd = "ffmpeg"
ops = [
'-i', in_fname, '-vframes', '1',
str(out_fname), '-y'
]
args = [cmd] + ops
print(" ".join(args))
subprocess.run(args)
return out_fname
def extract_gray_patch_from_tp(img_name):
gray_st_pos_h = 64
gray_pos_v = 826
gray_ed_pos_h = 1858
gray_patch_num = 65
max_level = 1023
step = (max_level + 1) // (gray_patch_num - 1)
level_list = [x * step for x in range(gray_patch_num - 1)] + [max_level]
pos_h_list = np.uint16(
np.round(np.linspace(gray_st_pos_h, gray_ed_pos_h, gray_patch_num)))
pos_v_list = np.ones_like(pos_h_list) * gray_pos_v
pos_list = np.dstack([pos_h_list, pos_v_list]).reshape((gray_patch_num, 2))
img_float = tpg.img_read_as_float(img_name)
img_10bit = np.uint16(np.round(img_float * 1023))
buf = ""
for idx, pos in enumerate(pos_list):
rgb = img_10bit[pos[1], pos[0]]
line_str = f"{level_list[idx]},{rgb[0]},{rgb[1]},{rgb[2]}\n"
print(line_str.rstrip())
buf += line_str
stem_name = Path(Path(img_name).name).stem
csv_name = f"./csv/{stem_name}.csv"
with open(csv_name, 'wt') as f:
f.write(buf)
return csv_name
def plot_captured_data_main(csv_file, graph_title):
data = np.loadtxt(csv_file, delimiter=',')
x = data[..., 0]
rr = data[..., 1]
gg = data[..., 2]
bb = data[..., 3]
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 8),
graph_title=graph_title,
graph_title_size=None,
xlabel="Source Code Value (10bit)",
ylabel="Captured Code Value (10bit)",
axis_label_size=None,
legend_size=17,
xlim=[-30, 1060],
ylim=[-30, 1060],
xtick=[x * 128 for x in range(8)] + [1023],
ytick=[x * 128 for x in range(8)] + [1023],
xtick_size=None, ytick_size=None,
linewidth=2,
minor_xtick_num=None,
minor_ytick_num=None,
return_figure=True)
ax1.plot(x, x, '-o', color='k', label="Expected value")
ax1.plot(x, rr, '-o', color=pu.RED, label="R data")
ax1.plot(x, gg, '-o', color=pu.GREEN, label="G data")
ax1.plot(x, bb, '-o', color=pu.SKY, label="B data")
plt.legend(loc='upper left')
stem_name = Path(Path(csv_file).name).stem
graph_name = f"./graph/{stem_name}.png"
plt.savefig(graph_name, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def plot_captured_data(
in_fname="./captured_video/capture_sample.mp4",
graph_title="Code value of the captured AVIF"):
"""
* decode video
* capture 0, 16, 32, ..., 1023 CV
* save captured data
* plot captured data
"""
decoded_img_name = decode_mp4(in_fname=in_fname)
csv_name = extract_gray_patch_from_tp(img_name=decoded_img_name)
    plot_captured_data_main(csv_name, graph_title)
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# plot_captured_data(in_fname="./captured_video/capture_sample.mp4")
# plot_captured_data(in_fname="./captured_video/decklink_output.mp4")
# plot_captured_data(in_fname="./captured_video/yuv444_10bit_rav1e.mp4")
# plot_captured_data(in_fname="./captured_video/yuv420_10bit_svt.mp4")
# GTX1060
# plot_captured_data(
# in_fname="./captured_video/GTX1060_AVIF.mp4",
# graph_title="AVIF (10 bit, YUV444, Full Range, GTX 1060 Super)")
# plot_captured_data(
# in_fname="./captured_video/GTX1060_YouTube.mp4",
# graph_title="YouTube (GTX 1060 Super)")
# plot_captured_data(
# in_fname="./captured_video/GTX1060_MPC-BE_madVR.mp4",
# graph_title="MPC-BE with madVR (GTX 1060 Super)")
# plot_captured_data(
# in_fname="./captured_video/GTX1060_Movies_and_TV_0.1-1000.mp4",
# graph_title="Movies & TV (GTX 1060 Super (0.1-1000 cd/m2))")
# plot_captured_data(
# in_fname="./captured_video/GTX1060_Movies_and_TV_10-10000.mp4",
# graph_title="Movies & TV (GTX 1060 Super (10-10000 cd/m2))")
# plot_captured_data(
# in_fname="./captured_video/GTX1060_YouTube.mp4",
# graph_title="YouTube (GTX 1060 Super)")
# plot_captured_data(
# in_fname="./captured_video/GTX1060_VLC.mp4",
# graph_title="VLC (GTX 1060 Super)")
# Ryzen
# plot_captured_data(
# in_fname="./captured_video/Ryzen_4500U_AVIF.mp4",
# graph_title="AVIF (10 bit, YUV444, Full Range, RYZEN_4500U")
# plot_captured_data(
# in_fname="./captured_video/Ryzen_4500U_VLC.mp4",
# graph_title="VLC (Ryzen 4500U)")
# plot_captured_data(
# in_fname="./captured_video/Ryzen_4500U_Movies_and_TV_0.1-1000.mp4",
# graph_title="Movies & TV (Ryzen 4500U)")
# plot_captured_data(
# in_fname="./captured_video/Ryzen_4500U_MPC-BE_madVR.mp4",
# graph_title="MPC-BE with madVR (Ryzen 4500U)")
# plot_captured_data(
# in_fname="./captured_video/Ryzen_4500U_YouTube.mp4",
# graph_title="YouTube (Ryzen 4500U)")
# Extra. changed the color accuracy mode on GTX1060 Super
# plot_captured_data(
# in_fname="./captured_video/GTX1060_AVIF_Ref-Mode.mp4",
# graph_title="AVIF (GTX 1060 Super, Reference-mode)")
# plot_captured_data(
# in_fname="./captured_video/GTX1060_MPC-BE_madVR_Ref-mode.mp4",
# graph_title="MPC-BE with madVR (GTX 1060 Super, Reference-mode)")
# plot_captured_data(
# in_fname="./captured_video/GTX1060_AVIF_RenderingIntent-ABS.mp4",
# graph_title="AVIF (GTX 1060 Super, Rendering Intent-Absolute)")
plot_captured_data(
in_fname="./captured_video/GTX1060_MPC-BE_madVR_RI-ABS.mp4",
graph_title="MPC-BE with madVR (GTX 1060 Super, RI-Absolute)")
|
{"hexsha": "02cbde70055dc77cf2d5cee65a78db42d6df007b", "size": 6603, "ext": "py", "lang": "Python", "max_stars_repo_path": "2021/03_investigate_avif/decode_captured_mp4.py", "max_stars_repo_name": "toru-ver4/sample_code", "max_stars_repo_head_hexsha": "9165b4cb07a3cb1b3b5a7f6b3a329be081bddabe", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2019-11-12T23:34:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T13:21:03.000Z", "max_issues_repo_path": "2021/03_investigate_avif/decode_captured_mp4.py", "max_issues_repo_name": "toru-ver4/sample_code", "max_issues_repo_head_hexsha": "9165b4cb07a3cb1b3b5a7f6b3a329be081bddabe", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 101, "max_issues_repo_issues_event_min_datetime": "2019-08-12T01:20:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T12:17:01.000Z", "max_forks_repo_path": "2021/03_investigate_avif/decode_captured_mp4.py", "max_forks_repo_name": "toru-ver4/sample_code", "max_forks_repo_head_hexsha": "9165b4cb07a3cb1b3b5a7f6b3a329be081bddabe", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-08T09:48:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T15:35:51.000Z", "avg_line_length": 34.0360824742, "max_line_length": 79, "alphanum_fraction": 0.6534908375, "include": true, "reason": "import numpy", "num_tokens": 2001}
|
"""
This is only meant to demonstrate the agreement of the `sweep` implementation
with `scipy.signal.chirp`. Can be used for unit testing of our sine sweep
implementation down the road, but it also shows that the current `sine_sweep`
function could be reworked to only call `scipy.signal.sweep` with minimal effort.
"""
import sys, os
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, my_path + '/../')
from scipy.signal import chirp
import pyExSi as es
import numpy as np
def test_chirp_vs_scipy(plot=False):
if plot:
import matplotlib.pyplot as plt
ts = np.linspace(0, 1, 1000)
f0 = 10
f1 = 100
t1 = ts[-1]
method = 'linear'
phi = 0
phi_scipy = (phi - np.pi / 2) / np.pi * 180
results = []
for method in ['linear', 'logarithmic']:
s_scipy = chirp(ts, f0, t1, f1, phi=phi_scipy, method=method)
s_own = es.sine_sweep(ts, phi, freq_start=f0, freq_stop=f1, mode=method)
results.append((s_scipy, s_own))
if plot:
fig, ax = plt.subplots(2, 1)
ax[0].set_title(f'time signal, mode={method}')
ax[0].plot(ts, s_scipy, label='scipy')
ax[0].plot(ts, s_own, '--', label='own')
S_scipy = np.fft.rfft(s_scipy) / len(ts) * 2
S_own = np.fft.rfft(s_own) / len(ts) * 2
freq_s = np.fft.rfftfreq(len(ts), ts[1] - ts[0])
ax[1].set_title(f'amplitude spectrum, mode={method}')
ax[1].plot(freq_s, np.abs(S_scipy), label='scipy')
ax[1].plot(freq_s, np.abs(S_own), '--', label='own')
ax[1].axvline(x=f0, color='k')
ax[1].axvline(x=f1, color='k')
ax[1].set_xlim(f0 - 10, f1 + 5)
ax[1].legend()
plt.tight_layout()
if plot:
plt.show()
for r in results:
np.testing.assert_allclose(*r, atol=1e-10)
if __name__ == "__main__":
    test_chirp_vs_scipy(plot=True)
|
{"hexsha": "51ac30767695e86c679e82bac29b397415ac4c95", "size": 2016, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_scipy_chirp.py", "max_stars_repo_name": "nilswagner/pyExSi", "max_stars_repo_head_hexsha": "6fc0117eb2dbc9f8e2d390bdcf31987a939b25c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-21T07:46:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T06:56:31.000Z", "max_issues_repo_path": "tests/test_scipy_chirp.py", "max_issues_repo_name": "nilswagner/pyExSi", "max_issues_repo_head_hexsha": "6fc0117eb2dbc9f8e2d390bdcf31987a939b25c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-12-22T11:29:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-07T06:28:24.000Z", "max_forks_repo_path": "tests/test_scipy_chirp.py", "max_forks_repo_name": "nilswagner/pyExSi", "max_forks_repo_head_hexsha": "6fc0117eb2dbc9f8e2d390bdcf31987a939b25c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-22T10:34:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-14T19:17:12.000Z", "avg_line_length": 30.0895522388, "max_line_length": 81, "alphanum_fraction": 0.5942460317, "include": true, "reason": "import numpy,from scipy", "num_tokens": 611}
|
/-
Take funtion as argument and use it in implementation.
-/
def apply_nat_to_nat (f : ℕ → ℕ) (n : ℕ) : ℕ :=
f n
#eval apply_nat_to_nat nat.succ 1
#eval apply_nat_to_nat nat.pred 1
/-
Make idea completely general using polymorphism
-/
def apply {α β : Type} (f : α → β) (a : α) : β :=
f a
#eval apply nat.succ 1
#eval apply nat.pred 1
#eval apply string.length "I love logic!"
/-
Return function as a result
-/
def apply_twice {α : Type} (f : α → α ) : α → α :=
λ a, f (f a)
#reduce (apply_twice nat.succ)
#reduce apply_twice nat.pred
def double (n : ℕ) := 2 * n
def square (n : ℕ) := n ^ 2
#eval apply_twice nat.succ 3 -- application is left associative
#eval apply_twice nat.pred 3
#eval (apply_twice double) 3
def square_twice := apply_twice square
def double_twice := apply_twice double
#eval square_twice 5
/-
That's composition of a function with itself,
but we can also compose different functions.
Here's a special case.
-/
def compose_1 {α : Type} (g : α → α ) (f : α → α ): α → α :=
λ a, g (f a)
def double_inc := compose_1 double nat.succ
#reduce double_inc
#eval double_inc 3
-- Define and try out inc_double (first double then increment)
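-- One possible solution (sketch): apply double first, then nat.succ.
def inc_double := compose_1 nat.succ double
#eval inc_double 3 -- expect 7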
def is_even (n : ℕ) : bool := n % 2 = 0
#eval is_even 6
def compose {α β γ : Type} (g : β → γ) (f : α → β) : α → γ :=
λ (a : α), g (f a)
def even_length := compose is_even string.length
#eval even_length "I love logic!!!!!!!"
def even_length' := is_even ∘ string.length -- math notation
/-
Functions are objects, too, and there are associated
operations that apply to functions. Composition is one
such operation. Differentiation is another example of
a function of functions.
-/
/-
Exercise: apply_n
-/
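-- A sketch of one reading of the exercise (assumption: apply f n times):
def apply_n {α : Type} : ℕ → (α → α) → α → α
| 0 f a := a
| (n+1) f a := f (apply_n n f a)
#eval apply_n 3 nat.succ 0 -- expect 3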
/-
List map.
-/
def list_map {α β : Type} : (α → β) → list α → list β
| f [] := []
| f (h :: t) := list.cons (f h) (list_map f t)
-- exercise box_map
-- exercise option_map
-- exercise tree_map
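-- A sketch for option_map (option comes from Lean's core library);
-- box_map and tree_map are analogous once box and tree are defined.
def option_map {α β : Type} : (α → β) → option α → option β
| f none := none
| f (some a) := some (f a)
#eval option_map nat.succ (some 3) -- expect (some 4)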
|
{"author": "kevinsullivan", "repo": "dm.s20", "sha": "6f90ecb3881c602cdd1e3f12aad458bcdabd250a", "save_path": "github-repos/lean/kevinsullivan-dm.s20", "path": "github-repos/lean/kevinsullivan-dm.s20/dm.s20-6f90ecb3881c602cdd1e3f12aad458bcdabd250a/instructor/higher_order_funcs/higher_order_intro.lean"}
|
from gensim.models.word2vec import Word2Vec
import fileDispose
import numpy as np
def get_word2vec(corpus,size=500,window=3,sg=1,epochs=50,save_flag=False):
"""
获取word2vec词向量
:param corpus: 输入词库,如glove
:param size: 生成词向量维度
:param window: 词窗大小如glove
:param sg: 是否使用skip-grams模式,为0时使用CBOW模式
:param epochs: 训练次数
:param save_flag: 是否保存
:return: word2vec模型
"""
    model = Word2Vec(size=size, window=window, sg=sg, iter=epochs, min_count=0, alpha=0.005)  # `window` context words on each side; sg=1 selects skip-gram
model.build_vocab(corpus)
model.train(corpus, total_examples=model.corpus_count, epochs=model.iter)
if save_flag:
model.save('./Data/train/word2vec_model')
return model
def get_most_similar(model,word,topn=10,re_flag=False):
"""
获取相似单词
:param model: 生成的word2vec模型
:param word:相似度输入单词
:param topn:前n个单词
:param 是否有返回列表
:return:相似词列表
"""
sim_words = model.most_similar(positive=[word], topn=topn)
for word, similarity in sim_words:
print(word, similarity)
if re_flag:
return sim_words
def fit_vec(word2vec_model,dictionary):
"""
整理word2vec生成向量矩阵
:param word2vec_model:已生成word2vec模型
:param dictionary: 自己生成的word2id dictionary
:return: word_vec
[[word1_vec][word2_vec][word3_vec]...[wordn_vec]]
"""
word_vec = []
for word in dictionary:
word_vec.append(word2vec_model[word])
return word_vec
if __name__ == '__main__':
word2id = fileDispose.getFile('word2id.json')
list1 = fileDispose.getFile('total_list.json')
w2v_list = fileDispose.embedding_fit(list1)
print(w2v_list[:10])
w2v_model = get_word2vec(w2v_list)
wordvec = fit_vec(w2v_model,word2id)
print(wordvec[:2])
np.savetxt('./Data/train/word2vec_vec.txt',wordvec)
|
{"hexsha": "e455d8c39dc17e40eacdd4fe5933ba6bb94386c1", "size": 1832, "ext": "py", "lang": "Python", "max_stars_repo_path": "word2vec_embedding.py", "max_stars_repo_name": "fanglala997/test", "max_stars_repo_head_hexsha": "a6268ba9af517342c719dc99384ab6f7e42ef584", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "word2vec_embedding.py", "max_issues_repo_name": "fanglala997/test", "max_issues_repo_head_hexsha": "a6268ba9af517342c719dc99384ab6f7e42ef584", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "word2vec_embedding.py", "max_forks_repo_name": "fanglala997/test", "max_forks_repo_head_hexsha": "a6268ba9af517342c719dc99384ab6f7e42ef584", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1714285714, "max_line_length": 143, "alphanum_fraction": 0.6954148472, "include": true, "reason": "import numpy", "num_tokens": 636}
|
from ppadb.client import Client
from PIL import Image
from numpy import array, uint8
from time import sleep
adb = Client(host='127.0.0.1', port=5037)
devices = adb.devices()
if len(devices) == 0:
print('no device attached')
quit()
device = devices[0]
sleep(30)
# device.shell('input touchscreen swipe 534 1464 534 1464 350')
while True:
image = device.screencap()
with open('screen.png', 'wb') as f:
f.write(image)
image = Image.open('screen.png')
image = array(image, dtype=uint8)
    # Fail-safe: the checked row goes black on the game-over screen
    failsafe = [list(i[:3]) for i in image[1820]][0]
    r, g, b = [int(v) for v in failsafe]
    print(r, g, b)
    if r + g + b < 10:
        print('You Lost!')
        raise SystemExit
# 1,440x3,168 pixels
# 220 just under ninja
row = [list(i[:3]) for i in image[1820]]
transitions = []
ignore = True
black = True
cherry = False
tap = False
for i, pixel in enumerate(row):
r, g, b = [int(i) for i in pixel]
# print(r, g, b)
if ignore and (r + g + b) != 0:
continue
ignore = False
# Hasn't found cherry yet and then it does find one
if not cherry and r == 225 and g == 13 and b == 13:
# Tap screen to make ninja
# go down and hit cherry
# need to tap again to
# put it right way up again
tap = True
cherry = True
if black and (r + g + b) != 0:
black = not black
transitions.append(i)
continue
if not black and (r + g + b) == 0:
black = not black
transitions.append(i)
continue
# End of first pillar, start of second and end of second
# print(transitions)
start, t1, t2 = transitions
gap = t1 - start
t = t2 - t1
distance = (gap + t / 2) * .98
print(round(distance))
# fails 381.71
    if cherry and distance > 419:
device.shell(f'input touchscreen swipe 534 1464 534 1464 {round(distance) - 100}')
device.shell("sleep 0.46; input tap 534 1464; input tap 534 1464")
else:
device.shell(f'input touchscreen swipe 534 1464 534 1464 {int(distance)}')
sleep(3)
|
{"hexsha": "4bc15ba04a229b3e3a473a78ebfac65ae2f23aaf", "size": 2095, "ext": "py", "lang": "Python", "max_stars_repo_path": "StickHeroBot.py", "max_stars_repo_name": "Ben-Donnelly/ADBAutomation", "max_stars_repo_head_hexsha": "8f1305c4128cc560b22480a221bf788837094886", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "StickHeroBot.py", "max_issues_repo_name": "Ben-Donnelly/ADBAutomation", "max_issues_repo_head_hexsha": "8f1305c4128cc560b22480a221bf788837094886", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "StickHeroBot.py", "max_forks_repo_name": "Ben-Donnelly/ADBAutomation", "max_forks_repo_head_hexsha": "8f1305c4128cc560b22480a221bf788837094886", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1616161616, "max_line_length": 85, "alphanum_fraction": 0.6128878282, "include": true, "reason": "from numpy", "num_tokens": 659}
|
# input ---------------------------------------------------------------
input_data_tab<-function(){
tabItem(tabName = "input_data_tab",
fluidRow(
box(width=12,title="",
includeMarkdown("save_data.md"))
),
fluidRow(
box(width=12,title="",
numericInput(inputId="id_n",label="id_n",value=1, min = NA, max = NA, step = NA)%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="id_fecha",label="id_fecha",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="op_tipo",label="op_tipo",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="activo_tipo",label="activo_tipo",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="activo_código",label="activo_código",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="activo_mercado",label="activo_mercado",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textAreaInput(inputId="at_desc",label="at_desc",placeholder="Change placeholder", width = "1000px", height = "100px")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="at_conclusión",label="at_conclusión",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="at_señal_entrada_esperada",label="at_señal_entrada_esperada",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="agenda_fecha",label="agenda_fecha",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
),
textInput(inputId="agenda_op_tipo",label="agenda_op_tipo",placeholder="Change placeholder")%>%
shinyInput_label_embed(
icon("info") %>%
bs_embed_tooltip(title = "Change this help text for input")
))
),fluidRow(
actionButton("save", "Save")
)
)
}
# browse tab function -------------------------------------------------
browse_data_tab<-function(){
tabItem(tabName = "browse_data_tab",
fluidRow(
box(width=12,title="",
includeMarkdown("browse_data.md"))
),
fluidRow(
box(width=12,title="Browse Data",
withSpinner(DT::dataTableOutput('data')),
downloadButton("download_data")
)
)
)
}
# Dashboard -----------------------------------------------------------
dashboardPage(
dashboardHeader(title = "Quality Limpieza"),
dashboardSidebar(width = 150,
sidebarMenu(id = "tabs",
menuItem("Alta", tabName = "input_data_tab", icon=icon("file-alt")),
menuItem("Busqueda",tabName = "browse_data_tab",icon = icon("archive")))),
dashboardBody(
#tags$head(includeScript("tracking.js")),
useShinyjs(),
#shinyjs::inlineCSS(appCSS),
tags$head(tags$style(HTML(".shiny-split-layout > div {overflow: visible;}"))),
tabItems(
input_data_tab(),
browse_data_tab()),
use_bs_tooltip())
)
|
{"hexsha": "90982073806ab12064ede89dc2c1375bf38ed40e", "size": 4245, "ext": "r", "lang": "R", "max_stars_repo_path": "ui.r", "max_stars_repo_name": "qualitylimpieza/qualitylimpieza", "max_stars_repo_head_hexsha": "8394aed84cee434664da2a5941eb773cbfc1412a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ui.r", "max_issues_repo_name": "qualitylimpieza/qualitylimpieza", "max_issues_repo_head_hexsha": "8394aed84cee434664da2a5941eb773cbfc1412a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ui.r", "max_forks_repo_name": "qualitylimpieza/qualitylimpieza", "max_forks_repo_head_hexsha": "8394aed84cee434664da2a5941eb773cbfc1412a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6728971963, "max_line_length": 120, "alphanum_fraction": 0.5684334511, "num_tokens": 905}
|
import pandas as pd
import re
import nltk
# word tokenization
#nltk.download('punkt')
from nltk.tokenize import word_tokenize
# Stemming
from nltk.stem import PorterStemmer
ps = PorterStemmer()
# Lemmatization
#nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
# stopwords
#nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
def preprocessData(data):
def clean(data):
        data = re.sub('[^A-Za-z" "]+', '',
                      data)  # keep only letters, spaces and quotation marks ("")
        data = re.sub('[""]+', '', data)  # then strip the quotation marks ("")
return data
def token_stem_stop(string):
stem_rew = " "
tokens = word_tokenize(string) # word tokenization
for word in tokens:
if word.lower() not in stop_words: # removing stop words
stem_word = ps.stem(word) # stemming
stem_rew = stem_rew + " " + stem_word
return str(stem_rew)
def classDesignation(score):
if score <= 0.2:
return 0
elif score <= 0.4:
return 1
elif score <= 0.6:
return 2
elif score <= 0.8:
return 3
else:
return 4
data['phrase'] = data['phrase'].apply(clean)
data['tokstem'] = data['phrase'].apply(token_stem_stop)
data['class'] = data['label'].apply(classDesignation)
return data
if __name__ == '__main__':
td = pd.read_csv('data/train.csv')
testd = pd.read_csv('data/test.csv')
vd = pd.read_csv('data/val.csv')
preprocessData(td).to_csv("data/pt_data.csv", encoding='utf-8', index=False)
preprocessData(testd).to_csv("data/ptest_data.csv", encoding='utf-8', index=False)
preprocessData(vd).to_csv("data/pval_data.csv", encoding='utf-8', index=False)
|
{"hexsha": "36f1fb5ea60408eb0eaf4d4710439d602ae57124", "size": 2108, "ext": "py", "lang": "Python", "max_stars_repo_path": "a4/clean_data.py", "max_stars_repo_name": "zhangtravis/IntSys-Education", "max_stars_repo_head_hexsha": "407e0e1f60b57a922b84f6813378a0bddc178c3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "a4/clean_data.py", "max_issues_repo_name": "zhangtravis/IntSys-Education", "max_issues_repo_head_hexsha": "407e0e1f60b57a922b84f6813378a0bddc178c3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "a4/clean_data.py", "max_forks_repo_name": "zhangtravis/IntSys-Education", "max_forks_repo_head_hexsha": "407e0e1f60b57a922b84f6813378a0bddc178c3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8767123288, "max_line_length": 128, "alphanum_fraction": 0.6404174573, "include": true, "reason": "import numpy", "num_tokens": 539}
|
import numpy as np
import mpmath as mp
mp.mp.dps = 300
mp.mp.pretty = False
def enumerate_classes(inverse_hash,limit):
letters = list(inverse_hash.keys())
unique_letters = []
for letter in letters:
if inverse_hash[letter] not in unique_letters:
unique_letters.append(letter)
enumerations = ["𝟙"]+unique_letters
def inverse(string):
new_string = []
for x in string[::-1]:
new_string.append(inverse_hash[x])
return ''.join(new_string)
def check_powers(string):
for power in range(2,int(len(string)/len(enumerations[-1])+8)):
for letter in enumerations:
if string == letter*power:
return letter
return string
def reverse(string):
return string[::-1]
def cyclic(string):
n = len(string)
new_string = string
for i in range(n):
new_temp_string = [x for x in new_string]
new_temp_string.insert(0,new_string[-1])
new_temp_string.pop()
new_string = ''.join(new_temp_string)
for other_string in enumerations:
if other_string == new_string or inverse(new_string) == other_string:
# if string == "AbaB":
# print(enumerations)
# print(inverse(new_string))
# print('did return')
return new_string
return new_string
checklist = [lambda x: x, cyclic,inverse, check_powers]
def recursive_add(string):
current_enumeration = []
all_current_enumerations = []
for letter in letters:
all_current_enumerations.append(string+letter)
for letter in letters:
if letter == inverse_hash[string[-1]] or letter == inverse_hash[string[0]]:
continue
result = ''.join(f'{string}{letter}')
skip = False
for check in checklist:
if check(result) in enumerations:
skip=True
break
if not skip:
current_enumeration.append(result)
for result in current_enumeration:
enumerations.append(result)
if current_enumeration and len(current_enumeration[-1])>limit:
return
for result in all_current_enumerations:
recursive_add(result)
recursive_add("A")
return enumerations
def convert_string_to_index(string):
letters = [x for x in string]
indices = []
i=0
while i <= len(letters)-1:
power = 1
letter = letters[i]
index = [letter]
j=i+1
while j <= len(letters)-1:
if letters[i] == letters[j]:
power+=1
else:
break
j+=1
index.append(power)
i=j
indices.append(index)
return indices
#convert_string_to_index("AAAbABBbbb")
#print(enumerate_classes({"A": "a", "a": "A", "B": "b", "b":"B"},4))
def reduce_conjugacy_class(string):
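    """Freely and cyclically reduce a word in the free group on A, B:
    cancel adjacent inverse pairs, then cancel inverse pairs at the two
    ends of the word; the empty word is returned as "𝟙".
    """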
inverse_hash = {"A": "a", "a": "A", "B": "b", "b":"B"}
if len(string)>=2:
current_length = len(string)
next_length = 0
while next_length < current_length:
current_length = len(string)
reduced_string = []
i = 0
while i < len(string)-1:
if string[i] == inverse_hash[string[i+1]]:
i+=2
else:
reduced_string.append(string[i])
i+=1
if len(string)>=2 and string[-1] != inverse_hash[string[-2]]:
reduced_string.append(string[-1])
next_length = len(reduced_string)
string = ''.join(reduced_string)
if len(string) == 1:
break
next_length = len(reduced_string)
did_reduce = True
conjugacy_left = ["A","a","B","b"]
while did_reduce:
did_reduce=False
if len(string)>=2:
for element in conjugacy_left:
new_string = [x for x in string]
if new_string[0] == inverse_hash[element] and new_string[-1] == element:
new_string.remove(new_string[0])
new_string.remove(new_string[-1])
if len(string)> len(new_string):
did_reduce = True
string = ''.join(new_string)
break
string = ''.join(string)
if not string:
string = "𝟙"
return string
#print(reduce_conjugacy_class("ABab"))
def k_smallest_lengths_add(k_smallest_lengths, new_length, difference_precision=0.1):
k = len(k_smallest_lengths)
smallest_index_larger_than = 0
while smallest_index_larger_than < k and k_smallest_lengths[smallest_index_larger_than] < new_length + difference_precision:
smallest_index_larger_than+=1
if smallest_index_larger_than < k and not abs(k_smallest_lengths[smallest_index_larger_than-1] - new_length) < difference_precision:
k_lengths_temp = list(k_smallest_lengths)
k_lengths_temp.pop()
k_lengths_temp.insert(smallest_index_larger_than,new_length)
k_smallest_lengths = np.array(k_lengths_temp)
if smallest_index_larger_than == k-1:
if k_smallest_lengths[smallest_index_larger_than-1] > new_length + difference_precision:
k_smallest_lengths[-1] = new_length
#print(k_smallest_lengths)
return k_smallest_lengths
# k_smallest_lengths = [np.inf,np.inf]
# k_smallest_lengths = k_smallest_lengths_add(k_smallest_lengths, 0)
# k_smallest_lengths = k_smallest_lengths_add(k_smallest_lengths, 3.85)
def compute_translation_matrix_torus(x):
[A,B,a_minus,a_plus,b_minus,b_plus,e_minus,e_plus] = x
alpha1 = edge_matrix(e_plus,e_minus)*mp.inverse(triangle_matrix(A))*edge_matrix(b_minus, b_plus)*triangle_matrix(B)
alpha2 = triangle_matrix(B)*edge_matrix(a_plus,a_minus)*mp.inverse(triangle_matrix(A))*edge_matrix(e_minus,e_plus)
return [alpha1,alpha2]
def a_to_x_coordinate_torus(x):
[A,B,a_minus,a_plus,b_minus,b_plus,e_minus,e_plus] = x
qe_plus = compute_q_plus(A,b_minus, B, a_minus)
qe_minus = compute_q_plus(B,b_plus, A, a_plus)
A_t = compute_t(a_minus, b_minus, e_minus, a_plus, b_plus, e_plus)
B_t = compute_t(e_plus, a_plus, b_plus, e_minus, a_minus, b_minus)
qb_plus = compute_q_plus(A, a_minus, B, e_minus)
qb_minus = compute_q_plus(B, a_plus, A, e_plus)
qa_plus = compute_q_plus(A,e_minus, B, b_minus)
qa_minus = compute_q_plus(B,e_plus, A, b_plus)
y = np.array([A_t,B_t,qa_minus, qa_plus, qb_minus, qb_plus, qe_minus, qe_plus])
return y
def get_length(matrix):
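    """Return the translation length log(|λ_max| / |λ_min|), computed from
    the moduli of the matrix eigenvalues, along with the eigenvalues.
    """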
#eigenvalues = np.linalg.eigvals(matrix)
eigenvalues = mp.eig(mp.matrix(matrix))[0]
absolute_eigenvalues = [abs(e) for e in eigenvalues]
absolute_eigenvalues = [absolute_eigenvalues[i] for i in np.argsort(absolute_eigenvalues)]
smallest_eigenvalue = absolute_eigenvalues[0]
largest_eigenvalue = absolute_eigenvalues[-1]
length = mp.log(largest_eigenvalue/smallest_eigenvalue)
return length, eigenvalues
def edge_matrix(q_plus,q_minus):
coefficient = mp.power((q_plus/q_minus), (1/3))
matrix = mp.matrix([[0,0,q_minus],[0,-1,0],[1/q_plus, 0, 0]])
return coefficient*matrix
def triangle_matrix(t):
coefficient = 1/mp.power(t, (1/3))
matrix = mp.matrix([[0,0,1],[0,-1,-1],[t,t+1,1]])
return matrix*coefficient
def string_fraction_to_float(string):
if '/' in string:
string = string.rsplit('/')
return float(string[0])/float(string[1])
return float(string)
def integer_to_script(value, up=True):
value = str(value)
return_value = []
if up:
superscripts = {"-": "⁻","0": "⁰", "1": "¹","2": "²","3": "³","4": "⁴","5": "⁵","6": "⁶","7": "⁷", "8": "⁸","9": "⁹"}
for digit in value:
return_value.append(superscripts[digit])
else:
subscripts = {"0": "₀", "1": "₁", "2": "₂","3": "₃","4": "₄", "5": "₅","6": "₆", "7": "₇", "8": "₈", "9":"₉"}
for digit in value:
return_value.append(subscripts[digit])
return "".join(return_value)
def beziercurve(P0,P1,P2):
return lambda t : (1-t)**2*P0+2*(1-t)*t*P1+t**2*P2
def outitude_edge_params(A,B,a_minus,a_plus, b_minus, b_plus, e_minus, e_plus):
return A*(e_plus*a_plus+e_minus*b_minus-e_minus*e_plus) + B*(e_plus*b_plus+e_minus*a_minus - e_minus*e_plus)
def compute_m_inverse(r0, r2, c0, c2, e03, e23):
C = np.array([r0, r2, np.cross(c0, c2)])
A = np.array([[1 / e03, 0, 0], [0, 1 / e23, 0], [0, 0, 1 / (e03 * e23)]])
B = np.array([c2, c0, np.cross(r2, r0)]).T
#m_inverse = np.matmul(A, B)
m_inverse = np.linalg.inv(C)
return m_inverse
def compute_c3(m_inverse, e03, e23, A023):
c3 = np.matmul(m_inverse, np.array([[e03], [e23], [A023]]))
c3 = c3.T.flatten()
return c3
def compute_r3(c0, c2, c3, e30, e32):
A = np.array([c0, c2, c3])
r3 = np.matmul(np.linalg.inv(A), np.array([[e30], [e32], [0]]))
r3 = r3.T.flatten()
return r3
def compute_outitude_sign(c0,c1,c2,c3):
D = [c1,c2,c3]
D_prime = [c0,c1,c3]
C = [c0,c1,c2]
C_prime = [c0,c2,c3]
return np.linalg.det(D) + np.linalg.det(D_prime) - np.linalg.det(C) - np.linalg.det(C_prime)
def compute_t(a_minus, b_minus, e_minus, a_plus, b_plus, e_plus):
return a_minus*b_minus*e_minus/(a_plus*b_plus*e_plus)
def compute_q_plus(A, d_minus, B, a_minus):
return A*d_minus/(B*a_minus)
def compute_all_until_r3c3(r0, r2, c0, c2, e03, e23, e30, e32, A023):
m_inverse = compute_m_inverse(r0, r2, c0, c2, e03, e23)
c3 = compute_c3(m_inverse, e03, e23, A023)
r3 = compute_r3(c0, c2, c3, e30, e32)
return (r3, c3)
|
{"hexsha": "85452c9f4b0b624193e60ba82e45e6cff9d81222", "size": 10208, "ext": "py", "lang": "Python", "max_stars_repo_path": "helper_functions/add_new_triangle_functions.py", "max_stars_repo_name": "sepehrsaryazdi/cpsvis", "max_stars_repo_head_hexsha": "fe57797091a9bf5c116da9827a19cf7e48b45e98", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "helper_functions/add_new_triangle_functions.py", "max_issues_repo_name": "sepehrsaryazdi/cpsvis", "max_issues_repo_head_hexsha": "fe57797091a9bf5c116da9827a19cf7e48b45e98", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "helper_functions/add_new_triangle_functions.py", "max_forks_repo_name": "sepehrsaryazdi/cpsvis", "max_forks_repo_head_hexsha": "fe57797091a9bf5c116da9827a19cf7e48b45e98", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.800623053, "max_line_length": 136, "alphanum_fraction": 0.5918887147, "include": true, "reason": "import numpy,from scipy,import mpmath", "num_tokens": 2726}
|
import numpy as np
from collections import namedtuple
INPUT = open("advent2018_day18_input.txt", "r").read().split("\n")
OPEN_GROUND = 0
TREE = 1
LUMBERYARD = 2
PRINT_DICT = {OPEN_GROUND: '.',
TREE: '|',
LUMBERYARD: '#'}
READ_DICT = {v: k for k, v in PRINT_DICT.items()}
np.set_printoptions(threshold=np.inf, linewidth=2000, formatter={'int': lambda x: PRINT_DICT[x] if x in PRINT_DICT.keys() else "%d" % x})
Coord = namedtuple('Coord', ['y', 'x'])
def make_field(input):
width = len(input[0])
height = len(input)
    np_field = np.fromfunction(shape=(height, width), dtype=int, function=np.vectorize(lambda y, x: READ_DICT[input[y][x]]))
return np_field
def compute_next_step(old_field):
def _compute_next_step(y, x):
coord = Coord(x=x, y=y)
max_y, max_x = old_field.shape
        window = old_field[max(coord.y-1, 0):min(coord.y+2, max_y+1), max(coord.x-1, 0):min(coord.x+2, max_x+1)]
        if old_field[coord] == OPEN_GROUND:
            num_trees = (window == TREE).sum()
            return TREE if num_trees >= 3 else OPEN_GROUND
        elif old_field[coord] == TREE:
            num_lumberyards = (window == LUMBERYARD).sum()
            return LUMBERYARD if num_lumberyards >= 3 else TREE
        elif old_field[coord] == LUMBERYARD:
            num_trees = (window == TREE).sum()
            # Don't count the lumberyard in the middle
            num_lumberyards = (window == LUMBERYARD).sum() - 1
return LUMBERYARD if (num_trees >= 1 and num_lumberyards >= 1) else OPEN_GROUND
return _compute_next_step
ALTINPUT = """
.#.#...|#.
.....#|##|
.|..|...#.
..|#.....#
#.#|||#|#|
...#.||...
.|....|...
||...#|.#|
|.||||..|.
...#.|..|.""".split("\n")[1:]
np_field = make_field(INPUT)
old_field = np_field.copy()
for i in range(1, 1000000000):
np_field = np.fromfunction(np.vectorize(compute_next_step(np_field)), np_field.shape, dtype=int)
if i == 10:
num_trees = (np_field == TREE).sum()
num_lumberyards = (np_field == LUMBERYARD).sum()
print("After 10 minutes, Trees: %d Lumberyards: %d Total value: %d" %
(num_trees, num_lumberyards, num_trees * num_lumberyards))
# Dumped the value each iteration and discovered it repeated every 28 cycles once it settles
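    # States are compared only on iterations sharing 1000000000's residue
    # mod 28, so an exact repeat means the current grid is also the grid at
    # minute 1000000000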
if i % 28 == (1000000000 % 28):
if np.array_equal(np_field, old_field):
num_trees = (np_field == TREE).sum()
num_lumberyards = (np_field == LUMBERYARD).sum()
print("After 1000000000 minutes, Trees: %d Lumberyards: %d Total value: %d" %
(num_trees, num_lumberyards, num_trees * num_lumberyards))
break
else:
old_field = np_field.copy()
|
{"hexsha": "7e71900c6e39c3820f5752afbac504175b0176b0", "size": 2816, "ext": "py", "lang": "Python", "max_stars_repo_path": "advent2018_day18.py", "max_stars_repo_name": "coandco/advent2018", "max_stars_repo_head_hexsha": "5d51780cbcf425857f99c1f6b2c648a3e5852581", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "advent2018_day18.py", "max_issues_repo_name": "coandco/advent2018", "max_issues_repo_head_hexsha": "5d51780cbcf425857f99c1f6b2c648a3e5852581", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "advent2018_day18.py", "max_forks_repo_name": "coandco/advent2018", "max_forks_repo_head_hexsha": "5d51780cbcf425857f99c1f6b2c648a3e5852581", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1025641026, "max_line_length": 138, "alphanum_fraction": 0.5848721591, "include": true, "reason": "import numpy", "num_tokens": 796}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author : windz
Date : 2021-10-13 14:18:24
LastEditTime : 2021-10-13 16:28:30
LastEditors : windz
FilePath : /flair/script/get_unique_isoform.py
Description : get isoform unique to a_bed
'''
import concurrent.futures
import pyranges as pr
import numpy as np
import click
from tqdm import tqdm
MAX_DIFFERENCE = 10
class Cluster:
'''A cluster of transcript.
'''
def __init__(self, transcript):
        # TODO: replace the mean with a density estimate
self.chrom = str(transcript.Chromosome)
self.start = [transcript.Start]
self.end = [transcript.End]
self.strand = transcript.Strand
self.splice_sites = [transcript.splice_sites]
self.splice_sites_values = transcript.splice_sites
self.read_id = [transcript.Name]
self.count = 1
def add_transcript(self, transcript):
'''Add transcript to cluster
'''
self.start.append(transcript.Start)
self.end.append(transcript.End)
self.splice_sites.append(transcript.splice_sites)
self.splice_sites_values = np.percentile(self.splice_sites, 50, interpolation='nearest', axis=0)
self.count += 1
self.read_id.append(transcript.Name)
def in_cluster(self, transcript):
'''Determine if the transcript belong to cluster
'''
# ignore intronless transcript
if transcript.Strand != self.strand and len(transcript.splice_sites) == 0:
return False
# splice_sites = np.percentile(self.splice_sites, 50, interpolation='nearest', axis=0)
splice_sites = self.splice_sites_values
if len(splice_sites) == len(transcript.splice_sites):
            # splice sites must differ by less than MAX_DIFFERENCE
res = (abs(splice_sites - transcript.splice_sites) < MAX_DIFFERENCE).all()
return res
else:
return False
def __call__(self):
'''Return isoform info of the cluster
'''
chrom = self.chrom
start = np.percentile(self.start, 50, interpolation='nearest', axis=0)
end = np.percentile(self.end, 50, interpolation='nearest', axis=0)
strand = self.strand
count = str(self.count)
splice_sites = np.percentile(self.splice_sites, 50, interpolation='nearest', axis=0)
splice_sites = ','.join(list(map(str, splice_sites)))
read_id = ','.join(self.read_id)
return chrom, start, end, strand, count, splice_sites, read_id
def get_splice_sites(df):
'''get splice site from BED12 files
'''
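    # a/b hold genomic exon start/end coordinates; flattening the pairs and
    # dropping the first and last entries keeps only the internal splice
    # sites (exon/intron boundaries)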
a = df.Start+df.BlockStarts
b = df.Start+df.BlockSizes+df.BlockStarts
splice_sites = np.array(list(zip(a,b))).flatten()
return splice_sites[1:-1]
def read_bed(infile):
bed_file = pr.read_bed(infile, as_df=True)
bed_file['BlockSizes'] = bed_file['BlockSizes'].map(lambda x: np.fromstring(x, sep=',', dtype='int'))
bed_file['BlockStarts'] = bed_file['BlockStarts'].map(lambda x: np.fromstring(x, sep=',', dtype='int'))
bed_file['splice_sites'] = bed_file.apply(get_splice_sites, axis=1)
return bed_file
@click.command()
@click.option('--a_bed', required=True)
@click.option('--b_bed', required=True)
@click.option('--outfile', required=True)
@click.option('--max_difference', required=False, default = 10)
def main(a_bed: str, b_bed: str, outfile: str, max_difference: int):
global MAX_DIFFERENCE
MAX_DIFFERENCE = max_difference
# load BED12 file
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as e:
a_bed = e.submit(read_bed, a_bed).result()
b_bed = e.submit(read_bed, b_bed).result()
isoform_dict = {}
for chrom in a_bed['Chromosome'].unique():
isoform_dict[str(chrom)] = []
for isoform in a_bed.itertuples():
isoform = Cluster(isoform)
isoform_dict[str(isoform.chrom)].append(isoform)
for b_isoform in tqdm(b_bed.itertuples(), desc='Step1: search isoforms unique to a_bed', total=len(b_bed)):
chrom = str(b_isoform.Chromosome)
for a_isoform in isoform_dict[chrom]:
if a_isoform.in_cluster(b_isoform):
a_isoform.count = -1
with open(outfile, 'w') as out:
for chrom in tqdm(isoform_dict, desc='Step2: write isoforms'):
for isoform in isoform_dict[chrom]:
isoform = isoform()
if int(isoform[4]) != -1:
print('\t'.join(list(map(str, isoform))), file=out)
if __name__ == "__main__":
main()
|
{"hexsha": "d578a2866109598ce51542aff1d82ecbc8f629d3", "size": 4550, "ext": "py", "lang": "Python", "max_stars_repo_path": "script/get_unique_isoform.py", "max_stars_repo_name": "WeipengMO/flair", "max_stars_repo_head_hexsha": "e6c9990bcfdd1d2e585bab1f45b7f8dc68b21fbc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script/get_unique_isoform.py", "max_issues_repo_name": "WeipengMO/flair", "max_issues_repo_head_hexsha": "e6c9990bcfdd1d2e585bab1f45b7f8dc68b21fbc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script/get_unique_isoform.py", "max_forks_repo_name": "WeipengMO/flair", "max_forks_repo_head_hexsha": "e6c9990bcfdd1d2e585bab1f45b7f8dc68b21fbc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2116788321, "max_line_length": 111, "alphanum_fraction": 0.6424175824, "include": true, "reason": "import numpy", "num_tokens": 1131}
|
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_FUNCTION_ATAN2_HPP_INCLUDED
#define BOOST_SIMD_FUNCTION_ATAN2_HPP_INCLUDED
#if defined(DOXYGEN_ONLY)
namespace boost { namespace simd
{
/*!
@ingroup group-trigonometric
    Function object implementing atan2 capabilities:
    a quadrant-aware atan2 function.
@par Semantic:
For every parameters @c x and @c y of same floating type
@code
auto r = atan2(y, x);
@endcode
@par Notes
- For any real arguments @c x and @c y not both equal to zero, <tt>atan2(y, x)</tt>
is the angle in radians between the positive x-axis of a plane and the point
given by the coordinates <tt>(x, y)</tt>.
- It is also the angle in \f$[-\pi,\pi[\f$ for which
\f$x/\sqrt{x^2+y^2}\f$ and \f$y/\sqrt{x^2+y^2}\f$
are respectively the sine and the cosine.
- Following IEEE norms, we should have:
- If y is \f$\pm0\f$ and x is negative or -0,\f$\pm\pi\f$ is returned
- If y is \f$\pm0\f$ and x is positive or +0, \f$\pm0\f$ is returned
- If y is \f$\pm\infty\f$ and x is finite, \f$\pm\pi/2\f$ is returned
- If y is \f$\pm\infty\f$ and x is \f$-\infty\f$,\f$\pm3\pi/4\f$ is returned
- If y is \f$\pm\infty\f$ and x is \f$+\infty\f$, \f$\pm\pi/4\f$ is returned
- If x is \f$\pm0\f$ and y is negative, \f$-\pi/2\f$ is returned
- If x is \f$\pm0\f$ and y is positive, \f$+\pi/2\f$ is returned
- If x is \f$-\infty\f$ and y is finite and positive, \f$+\pi\f$ is returned
- If x is \f$-\infty\f$ and y is finite and negative, \f$-\pi\f$ is returned
- If x is \f$+\infty\f$ and y is finite and positive, +0 is returned
- If x is \f$+\infty\f$ and y is finite and negative, -0 is returned
- If either x is Nan or y is Nan, Nan is returned
The pedantic_ decorator ensures all these conditions, but the regular version
will return a NaN if x and y are both either null or infinite, result which in fact
is not more absurd than the IEEE choices. It will be conforming in all other cases.
@par Decorators
- std_ provides access to std::atan2
- pedantic_ ensures the respect of all IEEE limits
@see atan, atand, atanpi
**/
Value atan2(Value const &y, Value const &x);
} }
#endif
#include <boost/simd/function/scalar/atan2.hpp>
#include <boost/simd/function/simd/atan2.hpp>
#endif
|
{"hexsha": "929e4e0999a49f0d65496945ca163797411e51d4", "size": 2733, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/function/atan2.hpp", "max_stars_repo_name": "nickporubsky/boost-simd-clone", "max_stars_repo_head_hexsha": "b81dfcd9d6524a131ea714f1eebb5bb75adddcc7", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-02-20T11:21:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-12T13:45:09.000Z", "max_issues_repo_path": "include/boost/simd/function/atan2.hpp", "max_issues_repo_name": "nickporubsky/boost-simd-clone", "max_issues_repo_head_hexsha": "b81dfcd9d6524a131ea714f1eebb5bb75adddcc7", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/function/atan2.hpp", "max_forks_repo_name": "nickporubsky/boost-simd-clone", "max_forks_repo_head_hexsha": "b81dfcd9d6524a131ea714f1eebb5bb75adddcc7", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-11-17T15:30:36.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-01T02:06:25.000Z", "avg_line_length": 35.0384615385, "max_line_length": 100, "alphanum_fraction": 0.6037321625, "num_tokens": 815}
|
import glob
import pandas as pd
import geopandas as gpd
import shared
import numpy as np
xwalk = pd.read_csv('data/GeogXWalk2010_Blocks_MAZ_TAZ.csv')
maz_controls = pd.read_csv("data/maz_controls.csv")
buildings = glob.glob("cache/*buildings_match_controls.csv")
juris_names = [b.replace("_buildings_match_controls.csv", "").
replace("cache/", "") for b in buildings]
buildings = [pd.read_csv(b) for b in buildings]
for i in range(len(buildings)):
buildings[i]["juris_name"] = juris_names[i]
buildings = pd.concat(buildings)
# the foreign key apn has to have the transformation we apply to the parcel
# apn below
# FIXME this appends the whole juris name to the apn to make it unique
# instead this should be 4 character abbreviations
buildings["apn"] = buildings.juris_name.str.cat(
buildings.apn.astype("str"), sep="-")
buildings.loc[buildings.building_type == 'RT', 'building_type'] = 'HT'
buildings.loc[buildings.building_type == 'RC', 'building_type'] = 'REC'
buildings.loc[buildings.building_type == 0.0, 'building_type'] = ''
buildings = buildings.loc[~buildings.building_type.isin(['VAC', 'VA',
'VT', 'VP'])]
buildings.drop("building_id", axis=1, inplace=True)
buildings["maz_id"] = buildings.maz_id.astype("int")
# there are at least 2 reasons right now to have a dummy building per maz which
# does not technically have a parcel link - 1) for group quarters and 2) for
# jobs in mazs which have no building. instead of just randomly selecting a
# parcel to add a building record to, we leave them associated with each maz
def add_dummy_buildings_per_maz(buildings):
dummy_df = pd.DataFrame({"maz_id": maz_controls.MAZ_ORIGINAL})
dummy_df["name"] = "MAZ-level dummy building"
dummy_df['maz_building_id'] = [
"MAZBLDG-" + str(d) for d in dummy_df.maz_id.values]
df = pd.concat([buildings, dummy_df])
return df
buildings = add_dummy_buildings_per_maz(buildings)
buildings.loc[buildings.name == 'MAZ-level dummy building', 'apn'] = \
buildings.loc[buildings.name == 'MAZ-level dummy building',
'maz_building_id'].str.replace('BLDG', 'PCL')
buildings.reset_index(drop=True, inplace=True)
buildings.index += 1
buildings.drop('geometry', axis=1).to_csv(
"cache/merged_buildings.csv", index_label="building_id")
buildings[['geometry']].to_csv(
"cache/buildings_geometry.csv", index_label="building_id")
print "Finished writing buildings"
parcels = glob.glob("cache/*moved_attribute_parcels.csv")
juris_names = [p.replace("_moved_attribute_parcels.csv", "").
replace("cache/", "") for p in parcels]
parcels = [gpd.read_geocsv(p) for p in parcels]
for i in range(len(parcels)):
parcels[i]["juris_name"] = juris_names[i]
parcels = gpd.GeoDataFrame(pd.concat(parcels))
# FIXME this appends the whole juris name to the apn to make it unique
# instead this should be 4 character abbreviations
parcels["apn"] = parcels.juris_name.str.cat(
parcels.apn.astype("str"), sep="-")
maz_pcls = xwalk.groupby('MAZ_ORIGINAL').TAZ_ORIGINAL.first()
mazpcl_dummies = buildings.loc[buildings.name == 'MAZ-level dummy building',
['apn', 'maz_id']]
mazpcl_dummies['taz_id'] = mazpcl_dummies.maz_id.map(maz_pcls)
for col in parcels.columns[~parcels.columns.isin(mazpcl_dummies.columns)]:
mazpcl_dummies[col] = np.nan
parcels = pd.concat([parcels, mazpcl_dummies])
parcels["maz_id"] = parcels.maz_id.astype("int")
parcels["taz_id"] = parcels.taz_id.fillna(-1).astype("int")
parcels[['apn', 'county_id', 'geometry', 'maz_id', 'taz_id', 'orig_apn',
'juris_name']].to_csv("cache/merged_parcels.csv", index=False)
|
{"hexsha": "a743dc0f41e1a13d517eb62ed694002138f8f53e", "size": 3712, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/merge_cities.py", "max_stars_repo_name": "oaklandanalytics/parcel_cutting_board", "max_stars_repo_head_hexsha": "c134ab3c239090e7acb04d1257186763bf437640", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/merge_cities.py", "max_issues_repo_name": "oaklandanalytics/parcel_cutting_board", "max_issues_repo_head_hexsha": "c134ab3c239090e7acb04d1257186763bf437640", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/merge_cities.py", "max_forks_repo_name": "oaklandanalytics/parcel_cutting_board", "max_forks_repo_head_hexsha": "c134ab3c239090e7acb04d1257186763bf437640", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7078651685, "max_line_length": 79, "alphanum_fraction": 0.7077047414, "include": true, "reason": "import numpy", "num_tokens": 1021}
|
import os
import pathlib
import tempfile
import numpy
import pytest
from pandas.util.testing import assert_frame_equal, assert_dict_equal, assert_index_equal
from pandas import DataFrame
from tfs import read_tfs, write_tfs, TfsDataFrame
from tfs.handler import TfsFormatError
CURRENT_DIR = pathlib.Path(__file__).parent
def test_tfs_read_pathlib_input(_tfs_file_pathlib: pathlib.Path):
test_file = read_tfs(_tfs_file_pathlib, index="NAME")
assert len(test_file.headers) > 0
assert len(test_file.columns) > 0
assert len(test_file.index) > 0
assert len(str(test_file)) > 0
assert isinstance(test_file.index[0], str)
with pytest.raises(AttributeError):
test_var = test_file.Not_HERE
with pytest.raises(KeyError):
test_var = test_file["Not_HERE"]
def test_tfs_read_str_input(_tfs_file_str: str):
test_file = read_tfs(_tfs_file_str, index="NAME")
assert len(test_file.headers) > 0
assert len(test_file.columns) > 0
assert len(test_file.index) > 0
assert len(str(test_file)) > 0
assert isinstance(test_file.index[0], str)
with pytest.raises(AttributeError):
test_var = test_file.Not_HERE
with pytest.raises(KeyError):
test_var = test_file["Not_HERE"]
def tfs_indx_pathlib_input(_tfs_file_pathlib: pathlib.Path):
test_file = read_tfs(_tfs_file_pathlib)
assert test_file.indx["BPMYB.5L2.B1"] == test_file.set_index("NAME")["BPMYB.5L2.B1"]
def tfs_indx_str_input(_tfs_file_str: str):
test_file = read_tfs(_tfs_file_str)
assert test_file.indx["BPMYB.5L2.B1"] == test_file.set_index("NAME")["BPMYB.5L2.B1"]
def test_tfs_write_read(_dataframe: TfsDataFrame, _test_file: str):
write_tfs(_test_file, _dataframe)
assert pathlib.Path(_test_file).is_file()
new = read_tfs(_test_file)
assert_frame_equal(_dataframe, new, check_exact=False) # float precision can be an issue
assert_dict_equal(_dataframe.headers, new.headers, compare_keys=True)
def test_tfs_write_read_pandasdf(_pddataframe: DataFrame, _test_file: str):
write_tfs(_test_file, _pddataframe)
assert pathlib.Path(_test_file).is_file()
new = read_tfs(_test_file)
assert_frame_equal(_pddataframe, new,
check_exact=False, # float precision can be an issue
check_frame_type=False, # read df is TfsDF
)
def test_write_read_spaces_in_strings(_test_file: str):
df = TfsDataFrame(
data=["This is", "a test", 'with spaces'],
columns=["A"]
)
write_tfs(_test_file, df)
new = read_tfs(_test_file)
assert_frame_equal(df, new)
def test_tfs_write_read_autoindex(_dataframe: TfsDataFrame, _test_file: str):
df = _dataframe.set_index("a")
df1 = _dataframe.set_index("a")
write_tfs(_test_file, df, save_index=True)
assert_frame_equal(df, df1)
df_read = read_tfs(_test_file)
assert_index_equal(df.index, df_read.index, check_exact=False)
assert_dict_equal(_dataframe.headers, df_read.headers, compare_keys=True)
def test_tfs_read_write_read_pathlib_input(_tfs_file_pathlib: pathlib.Path, _test_file: str):
original = read_tfs(_tfs_file_pathlib)
write_tfs(_test_file, original)
new = read_tfs(_test_file)
assert_frame_equal(original, new)
assert_dict_equal(original.headers, new.headers, compare_keys=True)
def test_tfs_read_write_read_str_input(_tfs_file_str: str, _test_file: str):
original = read_tfs(_tfs_file_str)
write_tfs(_test_file, original)
new = read_tfs(_test_file)
assert_frame_equal(original, new)
assert_dict_equal(original.headers, new.headers, compare_keys=True)
def test_tfs_write_empty_columns_dataframe(_test_file: str):
df = TfsDataFrame(
index=range(3),
columns=[],
data=numpy.random.rand(3, 0),
headers={"Title": "Tfs Title", "Value": 3.3663},
)
write_tfs(_test_file, df, save_index=True)
assert pathlib.Path(_test_file).is_file()
new = read_tfs(_test_file)
assert_frame_equal(df, new)
assert_dict_equal(df.headers, new.headers, compare_keys=True)
def test_tfs_write_empty_index_dataframe(_test_file: str):
df = TfsDataFrame(
index=[],
columns=["a", "b", "c"],
data=numpy.random.rand(0, 3),
headers={"Title": "Tfs Title", "Value": 3.3663},
)
write_tfs(_test_file, df)
assert pathlib.Path(_test_file).is_file()
new = read_tfs(_test_file)
assert_frame_equal(df, new)
assert_dict_equal(df.headers, new.headers, compare_keys=True)
def test_header_print():
headers = {"param": 3, "other": "hello"}
df = TfsDataFrame(headers=headers)
print_out = str(df)
assert "Headers" in print_out
for key, val in headers.items():
assert key in print_out
assert str(val) in print_out
def test_fail_on_non_unique_columns():
df = TfsDataFrame(columns=["A", "B", "A"])
with pytest.raises(TfsFormatError):
write_tfs('', df)
def test_fail_on_non_unique_index():
df = TfsDataFrame(index=["A", "B", "A"])
with pytest.raises(TfsFormatError):
write_tfs('', df)
def test_fail_on_spaces_columns():
df = TfsDataFrame(columns=["allowed", "not allowed"])
with pytest.raises(TfsFormatError):
write_tfs('', df)
def test_fail_on_spaces_headers():
df = TfsDataFrame(headers={"allowed": 1, "not allowed": 2})
with pytest.raises(TfsFormatError):
write_tfs('', df)
@pytest.fixture()
def _tfs_file_pathlib() -> pathlib.Path:
return CURRENT_DIR / "inputs" / "file_x.tfs"
@pytest.fixture()
def _tfs_file_str() -> str:
return os.path.join(os.path.dirname(__file__), "inputs", "file_x.tfs")
@pytest.fixture()
def _test_file() -> str:
with tempfile.TemporaryDirectory() as cwd:
yield os.path.join(cwd, "test_file.tfs")
@pytest.fixture()
def _dataframe() -> TfsDataFrame:
return TfsDataFrame(
index=range(3),
columns="a b c d e".split(),
data=numpy.random.rand(3, 5),
headers={"Title": "Tfs Title", "Value": 3.3663},
)
@pytest.fixture()
def _pddataframe() -> DataFrame:
return DataFrame(
index=range(3),
columns="a b c d e".split(),
data=numpy.random.rand(3, 5),
)
|
{"hexsha": "e46cb77e03e95ce3322247893b08e4655e38879d", "size": 6260, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_handler.py", "max_stars_repo_name": "nikitakuklev/tfs", "max_stars_repo_head_hexsha": "7c515aa3c4dd311622380efb48ea98b557d04c01", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_handler.py", "max_issues_repo_name": "nikitakuklev/tfs", "max_issues_repo_head_hexsha": "7c515aa3c4dd311622380efb48ea98b557d04c01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_handler.py", "max_forks_repo_name": "nikitakuklev/tfs", "max_forks_repo_head_hexsha": "7c515aa3c4dd311622380efb48ea98b557d04c01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3896713615, "max_line_length": 93, "alphanum_fraction": 0.6958466454, "include": true, "reason": "import numpy", "num_tokens": 1677}
|
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import
from pyvisa.testsuite import BaseTestCase
from pyvisa import util
try:
# noinspection PyPackageRequirements
import numpy as np
except ImportError:
np = None
class TestParser(BaseTestCase):
def test_parse_binary(self):
s = b'#A@\xe2\x8b<@\xe2\x8b<@\xe2\x8b<@\xe2\x8b<@\xde\x8b<@\xde\x8b<@\xde\x8b<' \
b'@\xde\x8b<@\xe0\x8b<@\xe0\x8b<@\xdc\x8b<@\xde\x8b<@\xe2\x8b<@\xe0\x8b<'
e = [0.01707566, 0.01707566, 0.01707566, 0.01707566, 0.01707375,
0.01707375, 0.01707375, 0.01707375, 0.01707470, 0.01707470,
0.01707280, 0.01707375, 0.01707566, 0.01707470]
p = util.parse_binary(s, is_big_endian=False, is_single=True)
for a, b in zip(p, e):
self.assertAlmostEqual(a, b)
p = util.from_ieee_block(s, datatype='f', is_big_endian=False)
for a, b in zip(p, e):
self.assertAlmostEqual(a, b)
def test_ieee_integer(self):
values = list(range(99))
containers = (list, tuple) #+ ((np.asarray,) if np else ())
for fmt in 'bBhHiIfd':
for endi in (True, False):
for cont in containers:
conv = cont(values)
msg = 'fmt=%s, endianness=%s, container=%s' % (fmt, endi, cont.__name__)
try:
block = util.to_ieee_block(conv, fmt, endi)
parsed = util.from_ieee_block(block, fmt, endi, cont)
except Exception as e:
raise Exception(msg + '\n' + repr(e))
self.assertEqual(conv, parsed, msg)
def test_ieee_noninteger(self):
values = [val + 0.5 for val in range(99)]
containers = (list, tuple) #+ ((np.asarray,) if np else ())
for fmt in 'fd':
for endi in (True, False):
for cont in containers:
conv = cont(values)
msg = 'fmt=%s, endianness=%s, container=%s' % (fmt, endi, cont.__name__)
try:
block = util.to_ieee_block(conv, fmt, endi)
parsed = util.from_ieee_block(block, fmt, endi, cont)
except Exception as e:
raise Exception(msg + '\n' + repr(e))
self.assertEqual(conv, parsed, msg)
|
{"hexsha": "85ff8c70c74e1a33446aae581434ed0b041e3594", "size": 2451, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyvisa/testsuite/test_util.py", "max_stars_repo_name": "ap98nb26u/pyvisa", "max_stars_repo_head_hexsha": "6c36592c1bc26fc49785a43160cd6f27623a50fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-09T23:39:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-09T23:39:30.000Z", "max_issues_repo_path": "pyvisa/testsuite/test_util.py", "max_issues_repo_name": "ap98nb26u/pyvisa", "max_issues_repo_head_hexsha": "6c36592c1bc26fc49785a43160cd6f27623a50fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyvisa/testsuite/test_util.py", "max_forks_repo_name": "ap98nb26u/pyvisa", "max_forks_repo_head_hexsha": "6c36592c1bc26fc49785a43160cd6f27623a50fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1803278689, "max_line_length": 92, "alphanum_fraction": 0.5414116687, "include": true, "reason": "import numpy", "num_tokens": 677}
|
import numpy as np
import cv2
# https://stackoverflow.com/questions/22937589/how-to-add-noise-gaussian-salt-and-pepper-etc-to-image-in-python-with-opencv
class Noiser(object):
def __init__(self, cfg):
self.cfg = cfg
def apply(self, img):
"""
:param img: word image with big background
"""
p = []
funcs = []
if self.cfg.noise.gauss.enable:
p.append(self.cfg.noise.gauss.fraction)
funcs.append(self.apply_gauss_noise)
if self.cfg.noise.uniform.enable:
p.append(self.cfg.noise.uniform.fraction)
funcs.append(self.apply_uniform_noise)
if self.cfg.noise.salt_pepper.enable:
p.append(self.cfg.noise.salt_pepper.fraction)
funcs.append(self.apply_sp_noise)
if self.cfg.noise.poisson.enable:
p.append(self.cfg.noise.poisson.fraction)
funcs.append(self.apply_poisson_noise)
if len(p) == 0:
return img
noise_func = np.random.choice(funcs, p=p)
return noise_func(img)
def apply_gauss_noise(self, img):
"""
Gaussian-distributed additive noise.
"""
row, col, channel = img.shape
mean = 0
stddev = np.sqrt(15)
gauss_noise = np.zeros((row, col, channel))
cv2.randn(gauss_noise, mean, stddev)
out = img + gauss_noise
return out
def apply_uniform_noise(self, img):
"""
Apply zero-mean uniform noise
"""
row, col, channel = img.shape
alpha = 0.05
gauss = np.random.uniform(0 - alpha, alpha, (row, col, channel))
gauss = gauss.reshape(row, col, channel)
out = img + img * gauss
return out
def apply_sp_noise(self, img):
"""
Salt and pepper noise. Replaces random pixels with 0 or 255.
"""
row, col, channel = img.shape
s_vs_p = 0.5
amount = np.random.uniform(0.004, 0.01)
out = np.copy(img)
# Salt mode
num_salt = np.ceil(amount * img.size * s_vs_p)
        coords = tuple(np.random.randint(0, i - 1, int(num_salt))
                       for i in img.shape)
        out[coords] = 255.
        # Pepper mode
        num_pepper = np.ceil(amount * img.size * (1. - s_vs_p))
        coords = tuple(np.random.randint(0, i - 1, int(num_pepper))
                       for i in img.shape)
        out[coords] = 0
return out
def apply_poisson_noise(self, img):
"""
Poisson-distributed noise generated from the data.
"""
vals = len(np.unique(img))
vals = 2 ** np.ceil(np.log2(vals))
if vals < 0:
return img
noisy = np.random.poisson(img * vals) / float(vals)
return noisy
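# Minimal usage sketch (assumption: `cfg` can be any object exposing the
# attributes read in `apply`, e.g. built from SimpleNamespace; the real
# project presumably supplies its own config object):
if __name__ == '__main__':
    from types import SimpleNamespace as NS
    cfg = NS(noise=NS(
        gauss=NS(enable=True, fraction=1.0),
        uniform=NS(enable=False, fraction=0.0),
        salt_pepper=NS(enable=False, fraction=0.0),
        poisson=NS(enable=False, fraction=0.0)))
    img = np.random.randint(0, 255, (32, 32, 3)).astype(np.float32)
    noisy = Noiser(cfg).apply(img)
    print(noisy.shape, noisy.dtype)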
|
{"hexsha": "d4c1650103c160474be62086a7944c599d6a9d66", "size": 2788, "ext": "py", "lang": "Python", "max_stars_repo_path": "textrenderer/noiser.py", "max_stars_repo_name": "light1003/text_renderer", "max_stars_repo_head_hexsha": "cd727dee9538067fc8f9f2e05265938007b71bc1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-26T05:47:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-26T05:47:12.000Z", "max_issues_repo_path": "textrenderer/noiser.py", "max_issues_repo_name": "light1003/text_renderer", "max_issues_repo_head_hexsha": "cd727dee9538067fc8f9f2e05265938007b71bc1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "textrenderer/noiser.py", "max_forks_repo_name": "light1003/text_renderer", "max_forks_repo_head_hexsha": "cd727dee9538067fc8f9f2e05265938007b71bc1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-28T08:22:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-28T08:22:19.000Z", "avg_line_length": 28.4489795918, "max_line_length": 123, "alphanum_fraction": 0.5613342898, "include": true, "reason": "import numpy", "num_tokens": 686}
|
import copy
import numpy as np
from .util import is_ccw
from .. import util
from .. import grouping
from .. import constants
try:
import networkx as nx
except BaseException as E:
# create a dummy module which will raise the ImportError
# or other exception only when someone tries to use networkx
from ..exceptions import ExceptionModule
nx = ExceptionModule(E)
def vertex_graph(entities):
"""
Given a set of entity objects generate a networkx.Graph
that represents their vertex nodes.
Parameters
--------------
entities : list
Objects with 'closed' and 'nodes' attributes
Returns
-------------
graph : networkx.Graph
Graph where node indexes represent vertices
closed : (n,) int
Indexes of entities which are 'closed'
"""
graph = nx.Graph()
closed = []
for index, entity in enumerate(entities):
if entity.closed:
closed.append(index)
else:
graph.add_edges_from(entity.nodes,
entity_index=index)
return graph, np.array(closed)
def vertex_to_entity_path(vertex_path,
graph,
entities,
vertices=None):
"""
Convert a path of vertex indices to a path of entity indices.
Parameters
----------
vertex_path : (n,) int
Ordered list of vertex indices representing a path
graph : nx.Graph
Vertex connectivity
entities : (m,) list
Entity objects
vertices : (p, dimension) float
Vertex points in space
Returns
----------
entity_path : (q,) int
Entity indices which make up vertex_path
"""
def edge_direction(a, b):
"""
Given two edges, figure out if the first needs to be
reversed to keep the progression forward.
[1,0] [1,2] -1 1
[1,0] [2,1] -1 -1
[0,1] [1,2] 1 1
[0,1] [2,1] 1 -1
Parameters
------------
a : (2,) int
b : (2,) int
Returns
------------
a_direction : int
b_direction : int
"""
if a[0] == b[0]:
return -1, 1
elif a[0] == b[1]:
return -1, -1
elif a[1] == b[0]:
return 1, 1
elif a[1] == b[1]:
return 1, -1
else:
constants.log.debug(
'edges not connected!\n'
'vertex path %s\n'
'entity path: %s\n'
'entity[a]: %s\n'
'entity[b]: %s',
vertex_path,
entity_path,
entities[ea].points,
entities[eb].points)
return None, None
if vertices is None or vertices.shape[1] != 2:
ccw_direction = 1
else:
ccw_check = is_ccw(vertices[np.append(vertex_path,
vertex_path[0])])
ccw_direction = (ccw_check * 2) - 1
# make sure vertex path is correct type
vertex_path = np.asanyarray(vertex_path, dtype=np.int64)
# we will be saving entity indexes
entity_path = []
# loop through pairs of vertices
for i in np.arange(len(vertex_path) + 1):
# get two wrapped vertex positions
vertex_path_pos = np.mod(np.arange(2) + i, len(vertex_path))
vertex_index = vertex_path[vertex_path_pos]
entity_index = graph.get_edge_data(*vertex_index)['entity_index']
entity_path.append(entity_index)
# remove duplicate entities and order CCW
entity_path = grouping.unique_ordered(entity_path)[::ccw_direction]
# check to make sure there is more than one entity
if len(entity_path) == 1:
# apply CCW reverse in place if necessary
if ccw_direction < 0:
index = entity_path[0]
entities[index].reverse()
return entity_path
# traverse the entity path and reverse entities in place to
# align with this path ordering
round_trip = np.append(entity_path, entity_path[0])
round_trip = zip(round_trip[:-1], round_trip[1:])
for ea, eb in round_trip:
da, db = edge_direction(entities[ea].end_points,
entities[eb].end_points)
if da is not None:
entities[ea].reverse(direction=da)
entities[eb].reverse(direction=db)
entity_path = np.array(entity_path)
return entity_path
def closed_paths(entities, vertices):
"""
Paths are lists of entity indices.
We first generate vertex paths using graph cycle algorithms,
and then convert them to entity paths.
This will also change the ordering of entity.points in place
so a path may be traversed without having to reverse the entity.
Parameters
-------------
entities : (n,) entity objects
Entity objects
vertices : (m, dimension) float
Vertex points in space
Returns
-------------
entity_paths : sequence of (n,) int
Ordered traversals of entities
"""
# get a networkx graph of entities
graph, closed = vertex_graph(entities)
    # add entities that are closed as single-entity paths
entity_paths = np.reshape(closed, (-1, 1)).tolist()
# look for cycles in the graph, or closed loops
vertex_paths = nx.cycles.cycle_basis(graph)
# loop through every vertex cycle
for vertex_path in vertex_paths:
# a path has no length if it has fewer than 2 vertices
if len(vertex_path) < 2:
continue
# convert vertex indices to entity indices
entity_paths.append(
vertex_to_entity_path(vertex_path,
graph,
entities,
vertices))
return entity_paths
def discretize_path(entities, vertices, path, scale=1.0):
"""
Turn a list of entity indices into a path of connected points.
Parameters
-----------
entities : (j,) entity objects
Objects like 'Line', 'Arc', etc.
vertices: (n, dimension) float
Vertex points in space.
path : (m,) int
Indexes of entities
scale : float
Overall scale of drawing used for
numeric tolerances in certain cases
Returns
-----------
discrete : (p, dimension) float
Connected points in space that lie on the
path and can be connected with line segments.
"""
# make sure vertices are numpy array
vertices = np.asanyarray(vertices)
path_len = len(path)
if path_len == 0:
raise ValueError('Cannot discretize empty path!')
if path_len == 1:
# case where we only have one entity
discrete = np.asanyarray(entities[path[0]].discrete(
vertices,
scale=scale))
else:
# run through path appending each entity
discrete = []
for i, entity_id in enumerate(path):
# the current (n, dimension) discrete curve of an entity
current = entities[entity_id].discrete(vertices, scale=scale)
# check if we are on the final entity
if i >= (path_len - 1):
# if we are on the last entity include the last point
discrete.append(current)
else:
# slice off the last point so we don't get duplicate
# points from the end of one entity and the start of another
discrete.append(current[:-1])
# stack all curves to one nice (n, dimension) curve
discrete = np.vstack(discrete)
    # make sure 2D curves are counterclockwise
if vertices.shape[1] == 2 and not is_ccw(discrete):
        # reversing will make the array non C-contiguous
discrete = np.ascontiguousarray(discrete[::-1])
return discrete
class PathSample:
def __init__(self, points):
# make sure input array is numpy
self._points = np.array(points)
# find the direction of each segment
self._vectors = np.diff(self._points, axis=0)
# find the length of each segment
self._norms = util.row_norm(self._vectors)
# unit vectors for each segment
nonzero = self._norms > constants.tol_path.zero
self._unit_vec = self._vectors.copy()
self._unit_vec[nonzero] /= self._norms[nonzero].reshape((-1, 1))
# total distance in the path
self.length = self._norms.sum()
# cumulative sum of section length
# note that this is sorted
self._cum_norm = np.cumsum(self._norms)
def sample(self, distances):
# return the indices in cum_norm that each sample would
# need to be inserted at to maintain the sorted property
positions = np.searchsorted(self._cum_norm, distances)
positions = np.clip(positions, 0, len(self._unit_vec) - 1)
offsets = np.append(0, self._cum_norm)[positions]
# the distance past the reference vertex we need to travel
projection = distances - offsets
        # find out which direction we need to project
direction = self._unit_vec[positions]
# find out which vertex we're offset from
origin = self._points[positions]
# just the parametric equation for a line
resampled = origin + (direction * projection.reshape((-1, 1)))
return resampled
def truncate(self, distance):
"""
Return a truncated version of the path.
Only one vertex (at the endpoint) will be added.
"""
position = np.searchsorted(self._cum_norm, distance)
offset = distance - self._cum_norm[position - 1]
if offset < constants.tol_path.merge:
truncated = self._points[:position + 1]
else:
vector = util.unitize(np.diff(
self._points[np.arange(2) + position],
axis=0).reshape(-1))
vector *= offset
endpoint = self._points[position] + vector
truncated = np.vstack((self._points[:position + 1],
endpoint))
assert (util.row_norm(np.diff(
truncated, axis=0)).sum() -
distance) < constants.tol_path.merge
return truncated
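# A small worked example (illustrative points): for the L-shaped path
# [[0, 0], [1, 0], [1, 1]] the cumulative segment lengths are [1, 2], so
# PathSample(points).sample([0.0, 1.0, 2.0]) returns the three corners,
# and sample([0.5]) returns [[0.5, 0.0]], halfway along the first segment.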
def resample_path(points,
count=None,
step=None,
step_round=True):
"""
Given a path along (n,d) points, resample them such that the
distance traversed along the path is constant in between each
of the resampled points. Note that this can produce clipping at
corners, as the original vertices are NOT guaranteed to be in the
new, resampled path.
ONLY ONE of count or step can be specified
Result can be uniformly distributed (np.linspace) by specifying count
Result can have a specific distance (np.arange) by specifying step
Parameters
----------
points: (n, d) float
Points in space
count : int,
Number of points to sample evenly (aka np.linspace)
step : float
Distance each step should take along the path (aka np.arange)
Returns
----------
resampled : (j,d) float
Points on the path
"""
points = np.array(points, dtype=np.float64)
# generate samples along the perimeter from kwarg count or step
if (count is not None) and (step is not None):
raise ValueError('Only step OR count can be specified')
if (count is None) and (step is None):
raise ValueError('Either step or count must be specified')
sampler = PathSample(points)
if step is not None and step_round:
if step >= sampler.length:
return points[[0, -1]]
count = int(np.ceil(sampler.length / step))
if count is not None:
samples = np.linspace(0, sampler.length, count)
elif step is not None:
samples = np.arange(0, sampler.length, step)
resampled = sampler.sample(samples)
check = util.row_norm(points[[0, -1]] - resampled[[0, -1]])
assert check[0] < constants.tol_path.merge
if count is not None:
assert check[1] < constants.tol_path.merge
return resampled
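# A minimal sketch of the two resampling modes (assumed example values):
#
#   points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
#   resample_path(points, count=5)    # 5 points evenly spaced in arc length
#   resample_path(points, step=0.5)   # a point every 0.5 units of arc length
#
# With the default step_round=True the step is converted to a point count
# spanning the whole path, so the final sample lands on the endpoint.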
def split(path):
"""
Split a Path2D into multiple Path2D objects where each
one has exactly one root curve.
Parameters
--------------
path : trimesh.path.Path2D
Input geometry
Returns
-------------
split : list of trimesh.path.Path2D
Original geometry as separate paths
"""
# avoid a circular import by referencing class of path
Path2D = type(path)
# save the results of the split to an array
split = []
# get objects from cache to avoid a bajillion
# cache checks inside the tight loop
paths = path.paths
discrete = path.discrete
polygons_closed = path.polygons_closed
enclosure_directed = path.enclosure_directed
for root_index, root in enumerate(path.root):
# get a list of the root curve's children
connected = list(enclosure_directed[root].keys())
# add the root node to the list
connected.append(root)
# store new paths and entities
new_paths = []
new_entities = []
for index in connected:
nodes = paths[index]
# add a path which is just sequential indexes
new_paths.append(np.arange(len(nodes)) +
len(new_entities))
# save the entity indexes
new_entities.extend(nodes)
# store the root index from the original drawing
metadata = copy.deepcopy(path.metadata)
metadata['split_2D'] = root_index
# we made the root path the last index of connected
new_root = np.array([len(new_paths) - 1])
# prevents the copying from nuking our cache
with path._cache:
# create the Path2D
split.append(Path2D(
entities=copy.deepcopy(path.entities[new_entities]),
vertices=copy.deepcopy(path.vertices),
metadata=metadata))
# add back expensive things to the cache
split[-1]._cache.update(
{'paths': new_paths,
'polygons_closed': polygons_closed[connected],
'discrete': [discrete[c] for c in connected],
'root': new_root})
# set the cache ID
split[-1]._cache.id_set()
return np.array(split)
|
{"hexsha": "f4417b85cccfdf3a6ec10f73179b1f5574129d65", "size": 14481, "ext": "py", "lang": "Python", "max_stars_repo_path": "trimesh/path/traversal.py", "max_stars_repo_name": "jpmaterial/trimesh", "max_stars_repo_head_hexsha": "4f493ff0a96a14e62eb7c748964fd8f4e44064c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1882, "max_stars_repo_stars_event_min_datetime": "2015-04-21T06:51:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:16:12.000Z", "max_issues_repo_path": "trimesh/path/traversal.py", "max_issues_repo_name": "jpmaterial/trimesh", "max_issues_repo_head_hexsha": "4f493ff0a96a14e62eb7c748964fd8f4e44064c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1400, "max_issues_repo_issues_event_min_datetime": "2016-01-22T14:05:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:13:55.000Z", "max_forks_repo_path": "trimesh/path/traversal.py", "max_forks_repo_name": "jpmaterial/trimesh", "max_forks_repo_head_hexsha": "4f493ff0a96a14e62eb7c748964fd8f4e44064c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 465, "max_forks_repo_forks_event_min_datetime": "2015-05-29T21:27:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T17:45:46.000Z", "avg_line_length": 32.5415730337, "max_line_length": 76, "alphanum_fraction": 0.593052966, "include": true, "reason": "import numpy,import networkx", "num_tokens": 3242}
|
# -*- coding:utf-8 -*-
from preprocessing import Tokenizer
import random
import csv
import json
import numpy as np
import sentencepiece as spm
from konlpy.tag import Okt
import torch
from torch.utils.data import Dataset, DataLoader
class BertLMDataset(Dataset):
def __init__(self, dataset, tokenizer: Tokenizer, vocab_size=5000):
self.tokenizer = tokenizer
        # load the data
with open(dataset, 'r', encoding='utf-8') as f:
self.data = json.load(f)
        # preprocess the data (str tokens to int ids)
for i, d in enumerate(self.data):
self.data[i]['content'] = tokenizer.tokens_to_ids(d['content'])
        # load the token vocabulary used for random masking
self.total_tokens = tokenizer.get_tokens(vocab_prefix=f'vocab_{vocab_size}', for_masking=True)
def __getitem__(self, item):
tokens = self.data[item]['content']
masked_tokens, candi_index, answers = self._masking(tokens)
masked_tokens = torch.LongTensor(masked_tokens)
mask = np.zeros_like(masked_tokens)
mask[candi_index] = 1 # ex) [0, 1, 1, 0, 0, 1, ...]
mask = torch.from_numpy(mask).long()
sparse_answers = np.zeros_like(masked_tokens)
sparse_answers[candi_index] = answers # ex) [0, 32, 5, 0, 0, 12, ...]
sparse_answers = torch.from_numpy(sparse_answers).long()
return masked_tokens, mask, sparse_answers
def _masking(self, tokens):
sep_idx = tokens.index(self.tokenizer.token_to_id('[SEP]'))
t_tokens = tokens[1:sep_idx]
k = int(len(t_tokens) * 0.15)
        candi_index = list(range(1, len(t_tokens)+1))  # +1 because [CLS] was excluded
random.shuffle(candi_index)
candi_index = candi_index[:k]
        random_token_index = candi_index[:int(k * 0.1)]  # replace with a random token
        # correct_token_index = candi_index[int(k * 0.1):int(k * 0.2)]  # keep the original token
        mask_token_index = candi_index[int(k * 0.2):]  # replace with the [MASK] token
masked_tokens = np.array(tokens)
        answers = masked_tokens[candi_index]  # label tokens for the masked positions
for idx in random_token_index:
masked_tokens[idx] = self.tokenizer.token_to_id(random.choice(self.total_tokens))
masked_tokens[mask_token_index] = self.tokenizer.token_to_id('[MASK]')
return masked_tokens, candi_index, answers
def __len__(self):
return len(self.data)
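# A quick check of the masking split implemented above (the numbers follow
# directly from the slicing): with k = 10 candidate positions,
# candi_index[:1] (10%) get a random token, the commented-out slice
# candi_index[1:2] (10%) would keep the original token, and
# candi_index[2:] (80%) become [MASK] -- the standard BERT 80/10/10 scheme.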
class BertClsDataset(Dataset):
def __init__(self, dataset, tokenizer: Tokenizer, max_num_seq=20, inference=False, vocab_size=5000, is_train=True):
self.max_num_seq = max_num_seq
self.inference = inference
self.is_train = is_train
self.tokenizer = tokenizer
self.total_tokens = tokenizer.get_tokens(vocab_prefix=f'vocab_{vocab_size}', for_masking=True)
        # load the data
with open(dataset, 'r', encoding='utf-8') as f:
self.data = json.load(f)
        # preprocess the data (str tokens to int ids)
for i, d in enumerate(self.data):
doc = d['content']
n_doc = []
for sub_doc in doc:
n_doc.append(self.tokenizer.tokens_to_ids(sub_doc))
# n_doc.append(list(map(self.tokenizer.PieceToId, sub_doc.split())))
self.data[i]['content'] = n_doc
def __getitem__(self, item):
doc = self.data[item]['content']
        if not self.inference and len(doc) > self.max_num_seq:  # too many sentences: pick a random window
sp = random.choice(list(range(len(doc) - self.max_num_seq)))
doc = doc[sp:sp + self.max_num_seq]
if self.is_train:
            for i, sub_doc in enumerate(doc):
doc[i] = self._masking(sub_doc, mask_rate=0.3)
doc = torch.LongTensor(doc)
label = self.data[item]['label']
return doc, label
def _masking(self, tokens, mask_rate=0.1):
sep_idx = list(tokens).index(self.tokenizer.token_to_id('[SEP]'))
t_tokens = tokens[1:sep_idx]
k = int(len(t_tokens) * mask_rate)
        candi_index = list(range(1, len(t_tokens)+1))  # +1 because [CLS] was excluded
random.shuffle(candi_index)
candi_index = candi_index[:k]
        random_token_index = candi_index[:int(k * 0.2)]  # replace with a random token
        mask_token_index = candi_index[int(k * 0.8):]  # replace with the [UNK] token
masked_tokens = np.array(tokens)
for idx in random_token_index:
masked_tokens[idx] = self.tokenizer.token_to_id(random.choice(self.total_tokens))
masked_tokens[mask_token_index] = self.tokenizer.token_to_id('[UNK]')
return masked_tokens
def __len__(self):
return len(self.data)
if __name__ == '__main__':
    # NOTE: BertClsDataset requires a Tokenizer instance; the constructor
    # call below is a placeholder -- adapt it to preprocessing.Tokenizer
    tokenizer = Tokenizer()
    dataset = BertClsDataset('bertcls_val_v5000_t128.json', tokenizer)
    data_loader = DataLoader(dataset, batch_size=1, shuffle=False)
for i, (doc, label) in enumerate(data_loader):
print(doc.shape)
print(doc)
print(label)
if i > 0:
break
|
{"hexsha": "fe3df81cb837bc5953fd3a5d8a012b03a26b4a30", "size": 4993, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets.py", "max_stars_repo_name": "sdh9446/document-classification-BERT", "max_stars_repo_head_hexsha": "1b5a0eb72bbd9b67693209e6735af71ab382516a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets.py", "max_issues_repo_name": "sdh9446/document-classification-BERT", "max_issues_repo_head_hexsha": "1b5a0eb72bbd9b67693209e6735af71ab382516a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets.py", "max_forks_repo_name": "sdh9446/document-classification-BERT", "max_forks_repo_head_hexsha": "1b5a0eb72bbd9b67693209e6735af71ab382516a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7132352941, "max_line_length": 120, "alphanum_fraction": 0.61225716, "include": true, "reason": "import numpy", "num_tokens": 1328}
|
[STATEMENT]
lemma intro_bind_refine_id:
assumes "m \<le> (SPEC ((=) m'))"
assumes "f m' \<le> \<Down>R m''"
shows "bind m f \<le> \<Down>R m''"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. m \<bind> f \<le> \<Down> R m''
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
m \<le> SPEC ((=) m')
f m' \<le> \<Down> R m''
goal (1 subgoal):
1. m \<bind> f \<le> \<Down> R m''
[PROOF STEP]
apply (simp add: pw_le_iff refine_pw_simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>nofail m \<and> (\<forall>x. inres m x \<longrightarrow> x = m'); nofail m'' \<longrightarrow> nofail (f m') \<and> (\<forall>x. inres (f m') x \<longrightarrow> (\<exists>s'. (x, s') \<in> R \<and> inres m'' s'))\<rbrakk> \<Longrightarrow> nofail m'' \<longrightarrow> (\<forall>x. inres m x \<longrightarrow> nofail (f x)) \<and> (\<forall>x. (\<exists>y. inres m y \<and> inres (f y) x) \<longrightarrow> (\<exists>s'. (x, s') \<in> R \<and> inres m'' s'))
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 451, "file": "Refine_Monadic_Refine_Basic", "length": 4}
|
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class Conv1dSame(nn.Module):
"""
Add PyTorch compatible support for Tensorflow/Keras padding option: padding='same'.
Discussions regarding feature implementation:
https://discuss.pytorch.org/t/converting-tensorflow-model-to-pytorch-issue-with-padding/84224
https://github.com/pytorch/pytorch/issues/3867#issuecomment-598264120
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1):
super().__init__()
self.cut_last_element = (
kernel_size % 2 == 0 and stride == 1 and dilation % 2 == 1
)
        # nn.Conv1d expects an integer padding, so cast the ceil result
        self.padding = int(np.ceil((1 - stride + dilation * (kernel_size - 1)) / 2))
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size,
padding=self.padding,
stride=stride,
dilation=dilation,
)
def forward(self, x):
if self.cut_last_element:
return self.conv(x)[:, :, :-1]
else:
return self.conv(x)
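# A minimal sanity check (assumed example sizes): with stride 1 the output
# length matches the input length, including the even-kernel case that
# triggers cut_last_element.
#
#   conv = Conv1dSame(in_channels=4, out_channels=8, kernel_size=4)
#   y = conv(torch.randn(2, 4, 100))   # -> torch.Size([2, 8, 100])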
def hard_sigmoid(x):
return torch.clip(0.2 * x + 0.5, 0, 1)
class ActivationLSTMCell(nn.Module):
"""
LSTM Cell using variable gating activation, by default hard sigmoid
If gate_activation=torch.sigmoid this is the standard LSTM cell
Uses recurrent dropout strategy from https://arxiv.org/abs/1603.05118 to match Keras implementation.
"""
def __init__(
self, input_size, hidden_size, gate_activation=hard_sigmoid, recurrent_dropout=0
):
super(ActivationLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.gate_activation = gate_activation
self.recurrent_dropout = recurrent_dropout
self.weight_ih = nn.Parameter(torch.randn(4 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.randn(4 * hidden_size, hidden_size))
self.bias_ih = nn.Parameter(torch.randn(4 * hidden_size))
self.bias_hh = nn.Parameter(torch.randn(4 * hidden_size))
self.init_weights()
def init_weights(self):
with torch.no_grad():
for param in [self.weight_hh, self.weight_ih]:
for idx in range(4):
mul = param.shape[0] // 4
torch.nn.init.xavier_uniform_(param[idx * mul : (idx + 1) * mul])
def forward(self, input, state):
if state is None:
hx = torch.zeros(
input.shape[0], self.hidden_size, device=input.device, dtype=input.dtype
)
cx = torch.zeros(
input.shape[0], self.hidden_size, device=input.device, dtype=input.dtype
)
else:
hx, cx = state
gates = (
torch.mm(input, self.weight_ih.t())
+ self.bias_ih
+ torch.mm(hx, self.weight_hh.t())
+ self.bias_hh
)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = self.gate_activation(ingate)
forgetgate = self.gate_activation(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = self.gate_activation(outgate)
        if self.recurrent_dropout > 0:
            # only drop during training, as with any dropout layer
            cellgate = F.dropout(cellgate, p=self.recurrent_dropout,
                                 training=self.training)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, (hy, cy)
class CustomLSTM(nn.Module):
"""
LSTM to be used with custom cells
"""
def __init__(self, cell, *cell_args, bidirectional=True, **cell_kwargs):
super(CustomLSTM, self).__init__()
self.cell_f = cell(*cell_args, **cell_kwargs)
self.bidirectional = bidirectional
if self.bidirectional:
self.cell_b = cell(*cell_args, **cell_kwargs)
def forward(self, input, state=None):
# Forward
state_f = state
outputs_f = []
for i in range(len(input)):
out, state_f = self.cell_f(input[i], state_f)
outputs_f += [out]
outputs_f = torch.stack(outputs_f)
if not self.bidirectional:
return outputs_f, None
# Backward
state_b = state
outputs_b = []
l = input.shape[0] - 1
for i in range(len(input)):
out, state_b = self.cell_b(input[l - i], state_b)
outputs_b += [out]
outputs_b = torch.flip(torch.stack(outputs_b), dims=[0])
output = torch.cat([outputs_f, outputs_b], dim=-1)
# Keep second argument for consistency with PyTorch LSTM
return output, None
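# A minimal usage sketch (assumed shapes): CustomLSTM consumes time-major
# input of shape (seq_len, batch, input_size); with bidirectional=True the
# output feature dimension is 2 * hidden_size.
#
#   lstm = CustomLSTM(ActivationLSTMCell, 8, 16, recurrent_dropout=0.1)
#   out, _ = lstm(torch.randn(5, 3, 8))   # -> torch.Size([5, 3, 32])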
|
{"hexsha": "8efb51c0c256eddc50181aa8198f0f1edee52dc8", "size": 4648, "ext": "py", "lang": "Python", "max_stars_repo_path": "project/utils.py", "max_stars_repo_name": "asifreal/Magnet", "max_stars_repo_head_hexsha": "b71302997fcf71b2e7b5ed7ec6d714babf35cb0e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project/utils.py", "max_issues_repo_name": "asifreal/Magnet", "max_issues_repo_head_hexsha": "b71302997fcf71b2e7b5ed7ec6d714babf35cb0e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project/utils.py", "max_forks_repo_name": "asifreal/Magnet", "max_forks_repo_head_hexsha": "b71302997fcf71b2e7b5ed7ec6d714babf35cb0e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5034965035, "max_line_length": 104, "alphanum_fraction": 0.6028399312, "include": true, "reason": "import numpy", "num_tokens": 1102}
|
import torch
import torch.nn.functional as F
import numpy as np
import utils
# U update
def update_U(model, eval_loader, z_dim, device):
model.eval()
FF = []
with torch.no_grad():
for batch_idx, (x, y, _) in enumerate(eval_loader):
x = x.to(device)
y = y.to(device)
# Forward
shared, _ = model.encode([x, y])
FF.append(torch.cat(shared, 1))
FF = torch.cat(FF, 0)
# The projection step, i.e., subtract the mean
FF = FF - torch.mean(FF, 0, True)
h=[]
for i in range(2):
h.append(FF[:,i*z_dim:(i+1)*z_dim])
FF = torch.stack(h, dim=2)
# The SVD step
S, _, T = torch.svd(torch.sum(FF, dim=2))
U = torch.mm(S, T.t())
U = U*(FF.shape[0])**0.5
return U
# Compute correlation of x1 and x2
def compute_corr(x1, x2):
# Subtract the mean
x1_mean = torch.mean(x1, 0, True)
x1 = x1 - x1_mean
x2_mean = torch.mean(x2, 0, True)
x2 = x2 - x2_mean
# Compute the cross correlation
sigma1 = torch.sqrt(torch.mean(x1.pow(2)))
sigma2 = torch.sqrt(torch.mean(x2.pow(2)))
corr = torch.abs(torch.mean(x1*x2))/(sigma1*sigma2)
return corr
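# Sanity check (illustrative tensors): perfectly dependent inputs give a
# correlation of ~1, while independent noise gives a value near 0.
#
#   x = torch.randn(1000, 1)
#   compute_corr(x, 2 * x)                  # -> ~1.0
#   compute_corr(x, torch.randn(1000, 1))   # -> ~0.0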
# The loss function for matching and reconstruction
def loss_matching_recons(s, x_hat, x, U_batch):
l = torch.nn.MSELoss(reduction='sum')
# Matching loss
match_err = l(torch.cat(s, 1), U_batch.repeat(1, 2))/s[0].shape[0]
# Reconstruction loss
recons_err = l(x_hat[0], x[0])
recons_err += l(x_hat[1], x[1])
recons_err /= s[0].shape[0]
return match_err, recons_err
# The loss function for independence regularization
def loss_independence(phi_z1, tau_c1, phi_z2, tau_c2):
# Correlation
corr = compute_corr(phi_z1, tau_c1) + compute_corr(phi_z2, tau_c2)
return corr
# Training function
def train(model, mmcca1, mmcca2, U, train_loader_b1, train_loader_b2,
corr_iter, args, optimizer, device):
model.train()
mmcca1.train()
mmcca2.train()
for batch_idx, (x, y, idxes) in enumerate(train_loader_b1):
x = x.to(device)
y = y.to(device)
# Forward with batch1
shared, private, recons = model([x, y])
# Get a batch2
try:
x_b2, y_b2, _ = next(corr_iter)
except StopIteration:
corr_iter = iter(train_loader_b2)
x_b2, y_b2, _ = next(corr_iter)
x_b2 = x_b2.to(device)
y_b2 = y_b2.to(device)
# Forward with batch2
shared_b2, private_b2 = model.encode([x_b2, y_b2])
# Using batch1
match_err, recons_err = loss_matching_recons(shared, recons,
[x, y], U[idxes, :])
# Using batch2
# Independence regularizer loss
phi_z1, tau_c1 = mmcca1(shared_b2[0], private_b2[0])
phi_z2, tau_c2 = mmcca2(shared_b2[1], private_b2[1])
corr = loss_independence(phi_z1, tau_c1, phi_z2, tau_c2)
# Compute the overall loss, note that we use the gradient reversal trick
# and that's why we have a 'minus' for the last term
loss = match_err + args.beta*recons_err - args._lambda*corr
# Update all the parameters
optimizer.zero_grad()
loss.backward()
optimizer.step()
return corr_iter
# Evaluate on the training set
def eval_train(model, mmcca1, mmcca2, itr, U, eval_loader, args, device):
model.eval()
mmcca1.eval()
mmcca2.eval()
match_err = 0
recons_err = 0
    # for independence computation over the whole set
    s0, s1, p0, p1 = [], [], [], []
with torch.no_grad():
for batch_idx, (x, y, idxes) in enumerate(eval_loader):
x = x.to(device)
y = y.to(device)
# Forward
shared, private, recons = model([x, y])
s0.append(shared[0])
s1.append(shared[1])
p0.append(private[0])
p1.append(private[1])
# Matching and reconstruction loss
m_e, r_e = loss_matching_recons(shared, recons, [x, y], U[idxes, :])
match_err += m_e.item()*x.shape[0]
recons_err += r_e.item()*x.shape[0]
s0 = torch.cat(s0, 0)
s1 = torch.cat(s1, 0)
p0 = torch.cat(p0, 0)
p1 = torch.cat(p1, 0)
phi_z1, tau_c1 = mmcca1(s0, p0)
phi_z2, tau_c2 = mmcca2(s1, p1)
# Correlation over the whole set
corr = loss_independence(phi_z1, tau_c1, phi_z2, tau_c2)
match_err /= len(eval_loader.dataset)
recons_err /= len(eval_loader.dataset)
print('====> Iteration: {} total = {:.4f}, match = {:.4f}, recons = {:.4f}, corr = {:.7f}'.format(
itr, match_err + args.beta*recons_err + args._lambda*corr.item(),
match_err, recons_err, corr.item()))
return match_err, recons_err, corr.item()
|
{"hexsha": "c881bd0e77c0261cf35b020e4ccb27bbe85c14b1", "size": 4862, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "llvqi/multiview_and_self-supervision", "max_stars_repo_head_hexsha": "1ae8ba14a250fd4a235b64cdd535e93c0ac06608", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-13T12:02:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T14:44:32.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "llvqi/multiview_and_self-supervision", "max_issues_repo_head_hexsha": "1ae8ba14a250fd4a235b64cdd535e93c0ac06608", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-15T00:38:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T00:38:07.000Z", "max_forks_repo_path": "train.py", "max_forks_repo_name": "llvqi/multiview_and_self-supervision", "max_forks_repo_head_hexsha": "1ae8ba14a250fd4a235b64cdd535e93c0ac06608", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-11-06T12:07:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T14:44:59.000Z", "avg_line_length": 26.861878453, "max_line_length": 102, "alphanum_fraction": 0.5859728507, "include": true, "reason": "import numpy", "num_tokens": 1422}
|
using DifferentiableStateSpaceModels
using Test, LinearAlgebra
# BLAS threading is still an issue in Julia 1.7
# This has no effect with MKL
DifferentiableStateSpaceModels.set_blas_threads()
println("Running Testsuite with Threads.nthreads() = $(Threads.nthreads()) BLAS.vendor = $(BLAS.vendor()), and BLAS.num_threads = $(BLAS.get_num_threads()) \n")
# Delete the .function_cache
# e.g. set ENV["DSSM_TEST_DELETE_CACHE"] = "false" (can be set globally) to turn this off
get(ENV, "DSSM_TEST_DELETE_CACHE", "true") == "true" &&
rm(default_model_cache_location(); force = true, recursive = true)
include("make_perturbation_model.jl")
include("first_order_perturbation.jl")
include("second_order_perturbation.jl")
# include("first_order_sequence.jl")
# include("second_order_sequence.jl")
# include("rbc_estimation.jl")
include("sgu.jl")
include("FVGQ20.jl")
include("symbolic_utils.jl")
include("utils.jl")
|
{"hexsha": "054f2d501a999b02de49f5834ad3b328dc4f6bd8", "size": 919, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "HighDimensionalEconLab/DifferentiableStateSpaceModels.jl", "max_stars_repo_head_hexsha": "da7700a84fdabc07edc73728d09905e5d7cb31be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2021-07-02T20:09:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T18:03:29.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "HighDimensionalEconLab/DifferentiableStateSpaceModels.jl", "max_issues_repo_head_hexsha": "da7700a84fdabc07edc73728d09905e5d7cb31be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2021-06-14T22:12:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T17:27:31.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "HighDimensionalEconLab/DifferentiableStateSpaceModels.jl", "max_forks_repo_head_hexsha": "da7700a84fdabc07edc73728d09905e5d7cb31be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-03T18:27:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-03T18:27:39.000Z", "avg_line_length": 38.2916666667, "max_line_length": 160, "alphanum_fraction": 0.7660500544, "num_tokens": 247}
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo that runs object detection on camera frames using OpenCV.
TEST_DATA=../all_models
Run face detection model:
python3 detect.py \
--model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
Run coco model:
python3 detect.py \
--model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
--labels ${TEST_DATA}/coco_labels.txt
"""
import argparse
import collections
import common
import cv2
import numpy as np
import os
from PIL import Image
import re
import tflite_runtime.interpreter as tflite
Object = collections.namedtuple('Object', ['id', 'score', 'bbox'])
def load_labels(path):
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
return {int(num): text.strip() for num, text in lines}
class BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])):
"""Bounding box.
    Represents a rectangle whose sides are either vertical or horizontal, parallel
to the x or y axis.
"""
__slots__ = ()
def get_output(interpreter, score_threshold, top_k, image_scale=1.0):
"""Returns list of detected objects."""
boxes = common.output_tensor(interpreter, 0)
class_ids = common.output_tensor(interpreter, 1)
scores = common.output_tensor(interpreter, 2)
count = int(common.output_tensor(interpreter, 3))
def make(i):
ymin, xmin, ymax, xmax = boxes[i]
return Object(
id=int(class_ids[i]),
score=scores[i],
bbox=BBox(xmin=np.maximum(0.0, xmin),
ymin=np.maximum(0.0, ymin),
xmax=np.minimum(1.0, xmax),
ymax=np.minimum(1.0, ymax)))
return [make(i) for i in range(top_k) if scores[i] >= score_threshold]
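# Each detection returned above looks like (illustrative values):
#   Object(id=0, score=0.87,
#          bbox=BBox(xmin=0.12, ymin=0.30, xmax=0.45, ymax=0.91))
# with box coordinates normalised to [0, 1] and clipped to the image.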
def main():
default_model_dir = '../all_models'
default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
#default_model = 'mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'
#default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
default_labels = 'coco_labels.txt'
#default_labels = 'imagenet_labels.txt'
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='.tflite model path',
default=os.path.join(default_model_dir,default_model))
parser.add_argument('--labels', help='label file path',
default=os.path.join(default_model_dir, default_labels))
parser.add_argument('--top_k', type=int, default=10,
help='number of categories with highest score to display')
parser.add_argument('--camera_idx', type=int, help='Index of which video source to use. ', default = 0)
parser.add_argument('--threshold', type=float, default=0.3,
help='classifier score threshold')
args = parser.parse_args()
print('Loading {} with {} labels.'.format(args.model, args.labels))
interpreter = common.make_interpreter(args.model)
#print(interpreter)
interpreter.allocate_tensors()
labels = load_labels(args.labels)
    #cap = cv2.VideoCapture(args.camera_idx)
    # NOTE: the camera index argument is overridden here with a fixed test video
    filepath = "/home/pi/testPJ/vtest.avi"
cap = cv2.VideoCapture(filepath)
#print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    center_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / 2
center_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) / 2
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im_rgb)
common.set_input(interpreter, pil_im)
interpreter.invoke()
objs = get_output(interpreter, score_threshold=args.threshold, top_k=args.top_k)
        cv2_im = append_objs_to_img(cv2_im, objs, labels, center_width, center_height)
        drawing_line(cv2_im, center_width, center_height)
cv2.imshow('frame', cv2_im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def append_objs_to_img(cv2_im, objs, labels, center_width, center_height):
top_left_count = 0
top_right_count = 0
bottom_left_count = 0
bottom_right_count = 0
height, width, channels = cv2_im.shape
for obj in objs:
#print(obj)
if labels.get(obj.id, obj.id) == "person":
x0, y0, x1, y1 = list(obj.bbox)
x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
percent = int(100 * obj.score)
label = '{}% {}'.format(percent, labels.get(obj.id, obj.id))
#print(labels.get(obj.id, obj.id))
cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), (0, 255, 0), 2)
cv2_im = cv2.putText(cv2_im, label, (x0, y0+30),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
            # tally which quadrant the detection's centre falls in
            if center_height >= (y0+y1)/2 and center_width >= (x0+x1)/2:
                top_left_count += 1
            if center_height >= (y0+y1)/2 and center_width < (x0+x1)/2:
                top_right_count += 1
            if center_height < (y0+y1)/2 and center_width >= (x0+x1)/2:
                bottom_left_count += 1
            if center_height < (y0+y1)/2 and center_width < (x0+x1)/2:
                bottom_right_count += 1
#print('person_count:{}'.format(count))
    cv2_im = cv2.putText(cv2_im, 'person_count:{}'.format(top_left_count), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
    cv2_im = cv2.putText(cv2_im, 'person_count:{}'.format(top_right_count), (int(center_width)+10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
    cv2_im = cv2.putText(cv2_im, 'person_count:{}'.format(bottom_left_count), (10, int(center_height)+30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
    cv2_im = cv2.putText(cv2_im, 'person_count:{}'.format(bottom_right_count), (int(center_width)+10, int(center_height)+30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
return cv2_im
def drawing_line(cv2_im, center_width, center_height):
    cv2.line(cv2_im, (0, int(center_height)), (int(center_width)*2, int(center_height)), (255, 255, 255))
    cv2.line(cv2_im, (int(center_width), 0), (int(center_width), int(center_height)*2), (255, 255, 255))
if __name__ == '__main__':
main()
|
{"hexsha": "9c415eb9aa2aff6c9688bc508233b096fc8bf06a", "size": 7003, "ext": "py", "lang": "Python", "max_stars_repo_path": "opencv/[video]person_count_rev0.1.py", "max_stars_repo_name": "sity0825/examples-camera", "max_stars_repo_head_hexsha": "63d7844211d7141eee75de15043b895248bf2d91", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "opencv/[video]person_count_rev0.1.py", "max_issues_repo_name": "sity0825/examples-camera", "max_issues_repo_head_hexsha": "63d7844211d7141eee75de15043b895248bf2d91", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opencv/[video]person_count_rev0.1.py", "max_forks_repo_name": "sity0825/examples-camera", "max_forks_repo_head_hexsha": "63d7844211d7141eee75de15043b895248bf2d91", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4797687861, "max_line_length": 173, "alphanum_fraction": 0.6560045695, "include": true, "reason": "import numpy", "num_tokens": 1900}
|
/**
* @project zapdos
* @file include/http/AsioCompat.hpp
* @author S Roychowdhury < sroycode at gmail dot com >
* @version 1.0.0
*
* @section LICENSE
*
* Copyright (c) 2018-2020 S Roychowdhury
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* @section DESCRIPTION
*
* AsioCompat.hpp : Asio compatibility for Web Server ( Modified from eidheim/Simple-Web-Server )
*
*/
#ifndef _ZPDS_HTTP_ASIO_COMPAT_HPP_
#define _ZPDS_HTTP_ASIO_COMPAT_HPP_
#include <memory>
#include <boost/asio.hpp>
#include <boost/asio/steady_timer.hpp>
namespace zpds {
namespace http {
namespace asio = boost::asio;
namespace error = asio::error;
using error_code = boost::system::error_code;
namespace errc = boost::system::errc;
using system_error = boost::system::system_error;
namespace make_error_code = boost::system::errc;
#if (BOOST_ASIO_VERSION >= 101300)
using io_context = asio::io_context;
using io_whatever = asio::io_context;
using resolver_results = asio::ip::tcp::resolver::results_type;
using async_connect_endpoint = asio::ip::tcp::endpoint;
template <typename handler_type>
inline void post(io_context &context, handler_type &&handler)
{
asio::post(context, std::forward<handler_type>(handler));
}
inline void restart(io_context &context) noexcept
{
context.restart();
}
inline asio::ip::address make_address(const std::string &str) noexcept
{
return asio::ip::make_address(str);
}
template <typename socket_type, typename duration_type>
std::unique_ptr<asio::steady_timer> make_steady_timer(socket_type &socket, std::chrono::duration<duration_type> duration)
{
return std::unique_ptr<asio::steady_timer>(new asio::steady_timer(socket.get_executor(), duration));
}
template <typename handler_type>
void async_resolve(asio::ip::tcp::resolver &resolver, const std::pair<std::string, std::string> &host_port, handler_type &&handler)
{
resolver.async_resolve(host_port.first, host_port.second, std::forward<handler_type>(handler));
}
inline asio::executor_work_guard<io_context::executor_type> make_work_guard(io_context &context)
{
return asio::make_work_guard(context);
}
#else
using io_context = asio::io_service;
using io_whatever = asio::io_service;
using resolver_results = asio::ip::tcp::resolver::iterator;
using async_connect_endpoint = asio::ip::tcp::resolver::iterator;
template <typename handler_type>
inline void post(io_context &context, handler_type &&handler)
{
context.post(std::forward<handler_type>(handler));
}
inline void restart(io_context &context) noexcept
{
context.reset();
}
inline asio::ip::address make_address(const std::string &str) noexcept
{
return asio::ip::address::from_string(str);
}
template <typename socket_type, typename duration_type>
std::unique_ptr<asio::steady_timer> make_steady_timer(socket_type &socket, std::chrono::duration<duration_type> duration)
{
return std::unique_ptr<asio::steady_timer>(new asio::steady_timer(socket.get_io_service(), duration));
}
template <typename handler_type>
void async_resolve(asio::ip::tcp::resolver &resolver, const std::pair<std::string, std::string> &host_port, handler_type &&handler)
{
resolver.async_resolve(asio::ip::tcp::resolver::query(host_port.first, host_port.second), std::forward<handler_type>(handler));
}
inline io_context::work make_work_guard(io_context &context)
{
return io_context::work(context);
}
#endif
} // namespace http
} // namespace zpds
#endif // _ZPDS_HTTP_ASIO_COMPAT_HPP_
|
{"hexsha": "fa9b98c7e865321b4f16a8dad924d547e9de3cb6", "size": 4437, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/http/AsioCompat.hpp", "max_stars_repo_name": "sroycode/zapdos", "max_stars_repo_head_hexsha": "8818ef109e072dcbe990914d9a2a6d70ef190d3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-11-11T21:09:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-25T12:46:41.000Z", "max_issues_repo_path": "include/http/AsioCompat.hpp", "max_issues_repo_name": "vnaad/zapdos", "max_issues_repo_head_hexsha": "8818ef109e072dcbe990914d9a2a6d70ef190d3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-08-02T09:12:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-02T09:12:57.000Z", "max_forks_repo_path": "include/http/AsioCompat.hpp", "max_forks_repo_name": "vnaad/zapdos", "max_forks_repo_head_hexsha": "8818ef109e072dcbe990914d9a2a6d70ef190d3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0731707317, "max_line_length": 131, "alphanum_fraction": 0.7696641875, "num_tokens": 1066}
|
import numpy as np
from otk import sdb
from otk.sdb import demoscenes, webex
scene = demoscenes.make_primitives()
eye_to_world = sdb.lookat(scene.eye, scene.center)
projection = sdb.Perspective(np.pi/3, scene.z_near, scene.z_far)
#projection = sdb.Orthographic(scene.z_far*np.tan(np.pi/6), scene.z_far)
webex.gen_html('primitives.html', scene.sdb_glsl, eye_to_world, projection, 100, 1e-2, (1, 1, 1, 1))
|
{"hexsha": "53b4aaf29f8862c988a0e9dee49c23cb6407b764", "size": 404, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/sdb/make_primitives_html.py", "max_stars_repo_name": "draustin/otk", "max_stars_repo_head_hexsha": "c6e91423ec79b85b380ee9385f6d27c91f92503d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-05-17T14:26:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T04:52:54.000Z", "max_issues_repo_path": "examples/sdb/make_primitives_html.py", "max_issues_repo_name": "uamhforever/otk", "max_issues_repo_head_hexsha": "c6e91423ec79b85b380ee9385f6d27c91f92503d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2020-04-10T22:50:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-18T04:54:19.000Z", "max_forks_repo_path": "examples/sdb/make_primitives_html.py", "max_forks_repo_name": "uamhforever/otk", "max_forks_repo_head_hexsha": "c6e91423ec79b85b380ee9385f6d27c91f92503d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-14T04:52:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T04:52:45.000Z", "avg_line_length": 44.8888888889, "max_line_length": 100, "alphanum_fraction": 0.7648514851, "include": true, "reason": "import numpy", "num_tokens": 132}
|
import cv2
import numpy as np
class pySaliencyImage:
    def __init__(self):
        pass
    # -------------------- Color extraction --------------------
    def SMExtractRGBI(self, inputImage):
        # convert the image to a float array scaled to [0, 1]
        src = np.float32(inputImage) * 1./255
        # split the image into its B, G, R channels
        (B, G, R) = cv2.split(src)
        # extract the intensity of the image by converting to grayscale
        I = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        return R, G, B, I
    # intensity feature map
    def IFMGetFM(self, I):  # receives the grayscale image
        return self.FMGaussianPyrCSD(I)
    # build a Gaussian pyramid and take center-surround differences
    def FMGaussianPyrCSD(self, src):
        GaussianMaps = self.FMCreateGaussianPyr(src)  # grayscale image plus its downscaled copies
        dst = self.FMCenterSurroundDiff(GaussianMaps)
        return dst
    # -------------------- Feature maps --------------------
    # build the Gaussian pyramid
    def FMCreateGaussianPyr(self, src):
        dst = list()
        dst.append(src)  # append the grayscale image to the list
        for i in range(1, 9):  # starts at 1 and ends at 8
            nowdst = cv2.pyrDown(dst[i-1])  # halve the size and resolution of the image
            dst.append(nowdst)  # append the new downscaled image to the list
        return dst
    # center-surround differences
    def FMCenterSurroundDiff(self, GaussianMaps):
        dst = list()
        for s in range(2, 5):  # starts at 2 and ends at 4
            now_size = GaussianMaps[s].shape  # dimensions of the current pyramid level
            now_size = (now_size[1], now_size[0])  # (width, height)
            tmp = cv2.resize(GaussianMaps[s+3], now_size, interpolation=cv2.INTER_LINEAR)  # source, target size, resampling
            nowdst = cv2.absdiff(GaussianMaps[s], tmp)  # difference between GaussianMaps[s] and tmp
            dst.append(nowdst)  # append to the list
            tmp = cv2.resize(GaussianMaps[s+4], now_size, interpolation=cv2.INTER_LINEAR)
            nowdst = cv2.absdiff(GaussianMaps[s], tmp)
            dst.append(nowdst)
        return dst
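# A minimal usage sketch (assumed input file): extract the six
# center-surround intensity maps from an image on disk.
#
#   sm = pySaliencyImage()
#   img = cv2.imread('input.jpg')       # hypothetical path
#   R, G, B, I = sm.SMExtractRGBI(img)
#   intensity_maps = sm.IFMGetFM(I)     # list of 6 center-surround difference maps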
|
{"hexsha": "026db44797ff0cd338994ef018ea7f0925eaf17d", "size": 2061, "ext": "py", "lang": "Python", "max_stars_repo_path": "pySaliencyImage.py", "max_stars_repo_name": "ErickJuarez/SaliencyMaps", "max_stars_repo_head_hexsha": "2d41af2c2a95e81c3fccceebc3ffd292760b185a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pySaliencyImage.py", "max_issues_repo_name": "ErickJuarez/SaliencyMaps", "max_issues_repo_head_hexsha": "2d41af2c2a95e81c3fccceebc3ffd292760b185a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pySaliencyImage.py", "max_forks_repo_name": "ErickJuarez/SaliencyMaps", "max_forks_repo_head_hexsha": "2d41af2c2a95e81c3fccceebc3ffd292760b185a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8, "max_line_length": 127, "alphanum_fraction": 0.7166424066, "include": true, "reason": "import numpy", "num_tokens": 640}
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal35.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma lfind_hyp_test : (@eq natural (plus (mult (Zero) (Zero)) (Zero)) (mult (Zero) (Succ (Zero)))).
Admitted.
QuickChick lfind_hyp_test.
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal35_mult_succ_78_plus_commut/lfindlfind_hyp_test.v"}
|
__author__ = 'lucabasa'
__version__ = '1.0.0'
__status__ = 'development'
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, roc_auc_score, f1_score, classification_report
def report_oof(df_train, oof):
    # sklearn metrics take (y_true, y_pred), so pass the targets first
    preds = (oof > 0.5).astype(int)
    acc = accuracy_score(df_train.target, preds)
    f1 = f1_score(df_train.target, preds)
    roc = roc_auc_score(df_train.target, oof)
    print(f'Oof accuracy: \t {acc}')
    print(f'Oof f1 score: \t {f1}')
    print(f'Oof area under the roc curve: \t {roc}')
    print('Classification report: ')
    print(classification_report(df_train.target, preds))
def clean_cols(data, col_list):
df = data.copy()
for col in col_list:
try:
del df[col]
except KeyError:
pass
return df
def general_processing(train, test):
# cleaning up unused columns
to_drop = ['id', 'target']
train = clean_cols(train, to_drop)
test = clean_cols(test, to_drop)
train['wheezy-copper-turtle-magic'] = train['wheezy-copper-turtle-magic'].astype('category')
test['wheezy-copper-turtle-magic'] = test['wheezy-copper-turtle-magic'].astype('category')
return train, test
def plot_results(oof, preds, df_train, save_name):
if not save_name.endswith('.png'):
save_name += '.png'
pd.Series(oof).hist(bins=50, label='oof', alpha=0.7)
pd.Series(preds).hist(bins=50, label='prediction')
plt.grid(False)
plt.legend()
plt.savefig('plots/preds_' + save_name)
plt.close()
err = df_train.copy()
err['oof'] = oof
plt.figure(figsize=(15,8))
sns.scatterplot(data=err, y='oof', x='wheezy-copper-turtle-magic', hue='target', alpha=0.7)
plt.axhline(y=0.5, color='r', linestyle='--')
plt.savefig('plots/oof_' + save_name)
plt.close()
def subs(df_train, df_test, oof, preds, save_name, n_folds, sel='var', sample=250):
train = df_train[['id', 'target']].copy()
test = df_test[['id']].copy()
train[save_name] = oof
test[save_name] = preds
    train.to_csv(f'oof_preds/{save_name}_{sel}_{n_folds}_{sample}_oof.csv', index=False)
    test.to_csv(f'oof_preds/{save_name}_{sel}_{n_folds}_{sample}_preds.csv', index=False)
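# Example wiring (illustrative names; oof and preds are assumed to come
# out of a cross-validation loop defined elsewhere):
#
#   report_oof(df_train, oof)
#   plot_results(oof, preds, df_train, 'baseline')
#   subs(df_train, df_test, oof, preds, 'baseline', n_folds=5)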
|
{"hexsha": "d68e449fcba69fbeeee42fc86964731b68fa5985", "size": 2300, "ext": "py", "lang": "Python", "max_stars_repo_path": "instantgratification/utility.py", "max_stars_repo_name": "lucabasa/kaggle_competitions", "max_stars_repo_head_hexsha": "15296375dc303218093aa576533fb809a4540bb8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-31T19:33:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-31T19:33:30.000Z", "max_issues_repo_path": "instantgratification/utility.py", "max_issues_repo_name": "lucabasa/kaggle_competitions", "max_issues_repo_head_hexsha": "15296375dc303218093aa576533fb809a4540bb8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-08-23T21:00:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-23T21:07:45.000Z", "max_forks_repo_path": "instantgratification/utility.py", "max_forks_repo_name": "lucabasa/kaggle_competitions", "max_forks_repo_head_hexsha": "15296375dc303218093aa576533fb809a4540bb8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4871794872, "max_line_length": 96, "alphanum_fraction": 0.6586956522, "include": true, "reason": "import numpy", "num_tokens": 671}
|
c subroutine qrbd (ipass,q,e,nn,v,mdv,nrv,c,mdc,ncc)
c c.l.lawson and r.j.hanson, jet propulsion laboratory, 1973 jun 12
c to appear in 'solving least squares problems', prentice-hall, 1974
c qr algorithm for singular values of a bidiagonal matrix.
c
c the bidiagonal matrix
c
c (q1,e2,0... )
c ( q2,e3,0... )
c d= ( . )
c ( . 0)
c ( .en)
c ( 0,qn)
c
c is pre and post multiplied by
c elementary rotation matrices
c ri and pi so that
c
c rk...r1*d*p1**(t)...pk**(t) = diag(s1,...,sn)
c
c to within working accuracy.
c
c 1. ei and qi occupy e(i) and q(i) as input.
c
c 2. rm...r1*c replaces 'c' in storage as output.
c
c 3. v*p1**(t)...pm**(t) replaces 'v' in storage as output.
c
c 4. si occupies q(i) as output.
c
c 5. the si's are nonincreasing and nonnegative.
c
c this code is based on the paper and 'algol' code..
c ref..
c 1. reinsch,c.h. and golub,g.h. 'singular value decomposition
c and least squares solutions' (numer. math.), vol. 14,(1970).
c
subroutine qrbd (ipass,q,e,nn,v,mdv,nrv,c,mdc,ncc)
logical wntv ,havers,fail
dimension q(nn),e(nn),v(mdv,nn),c(mdc,ncc)
zero=0.
one=1.
two=2.
c
n=nn
ipass=1
if (n.le.0) return
n10=10*n
wntv=nrv.gt.0
havers=ncc.gt.0
fail=.false.
nqrs=0
e(1)=zero
dnorm=zero
do 10 j=1,n
10 dnorm=amax1(abs(q(j))+abs(e(j)),dnorm)
do 200 kk=1,n
k=n+1-kk
c
c test for splitting or rank deficiencies..
c first make test for last diagonal term, q(k), being small.
20 if(k.eq.1) go to 50
if(diff(dnorm+q(k),dnorm)) 50,25,50
c
c since q(k) is small we will make a special pass to
c transform e(k) to zero.
c
25 cs=zero
sn=-one
do 40 ii=2,k
i=k+1-ii
f=-sn*e(i+1)
e(i+1)=cs*e(i+1)
call g1 (q(i),f,cs,sn,q(i))
c transformation constructed to zero position (i,k).
c
if (.not.wntv) go to 40
do 30 j=1,nrv
30 call g2 (cs,sn,v(j,i),v(j,k))
c accumulate rt. transformations in v.
c
40 continue
c
c the matrix is now bidiagonal, and of lower order
c since e(k) .eq. zero..
c
50 do 60 ll=1,k
l=k+1-ll
if(diff(dnorm+e(l),dnorm)) 55,100,55
55 if(diff(dnorm+q(l-1),dnorm)) 60,70,60
60 continue
c this loop can't complete since e(1) = zero.
c
go to 100
c
c cancellation of e(l), l.gt.1.
70 cs=zero
sn=-one
do 90 i=l,k
f=-sn*e(i)
e(i)=cs*e(i)
if(diff(dnorm+f,dnorm)) 75,100,75
75 call g1 (q(i),f,cs,sn,q(i))
if (.not.havers) go to 90
do 80 j=1,ncc
80 call g2 (cs,sn,c(i,j),c(l-1,j))
90 continue
c
c test for convergence..
100 z=q(k)
if (l.eq.k) go to 170
c
c shift from bottom 2 by 2 minor of b**(t)*b.
x=q(l)
y=q(k-1)
g=e(k-1)
h=e(k)
f=((y-z)*(y+z)+(g-h)*(g+h))/(two*h*y)
g=sqrt(one+f**2)
if (f.lt.zero) go to 110
t=f+g
go to 120
110 t=f-g
120 f=((x-z)*(x+z)+h*(y/t-h))/x
c
c next qr sweep..
cs=one
sn=one
lp1=l+1
do 160 i=lp1,k
g=e(i)
y=q(i)
h=sn*g
g=cs*g
call g1 (f,h,cs,sn,e(i-1))
f=x*cs+g*sn
g=-x*sn+g*cs
h=y*sn
y=y*cs
if (.not.wntv) go to 140
c
c accumulate rotations (from the right) in 'v'
do 130 j=1,nrv
130 call g2 (cs,sn,v(j,i-1),v(j,i))
140 call g1 (f,h,cs,sn,q(i-1))
f=cs*g+sn*y
x=-sn*g+cs*y
if (.not.havers) go to 160
do 150 j=1,ncc
150 call g2 (cs,sn,c(i-1,j),c(i,j))
c apply rotations from the left to
c right hand sides in 'c'..
c
160 continue
e(l)=zero
e(k)=f
q(k)=x
nqrs=nqrs+1
if (nqrs.le.n10) go to 20
c return to 'test for splitting'.
c
fail=.true.
c ..
c cutoff for convergence failure. 'nqrs' will be 2*n usually.
170 if (z.ge.zero) go to 190
q(k)=-z
if (.not.wntv) go to 190
do 180 j=1,nrv
180 v(j,k)=-v(j,k)
190 continue
c convergence. q(k) is made nonnegative..
c
200 continue
if (n.eq.1) return
do 210 i=2,n
if (q(i).gt.q(i-1)) go to 220
210 continue
if (fail) ipass=2
return
c ..
c every singular value is in order..
220 do 270 i=2,n
t=q(i-1)
k=i-1
do 230 j=i,n
if (t.ge.q(j)) go to 230
t=q(j)
k=j
230 continue
if (k.eq.i-1) go to 270
q(k)=q(i-1)
q(i-1)=t
if (.not.havers) go to 250
do 240 j=1,ncc
t=c(i-1,j)
c(i-1,j)=c(k,j)
240 c(k,j)=t
250 if (.not.wntv) go to 270
do 260 j=1,nrv
t=v(j,i-1)
v(j,i-1)=v(j,k)
260 v(j,k)=t
270 continue
c end of ordering algorithm.
c
if (fail) ipass=2
return
end
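c
c     illustrative driver (added for exposition; not part of the
c     original library). it assumes the companion routines g1, g2 and
c     the function diff from the same lawson-hanson package are
c     compiled and linked. it factors a small 3 by 3 bidiagonal
c     matrix and prints the singular values returned in q.
c
      program qrbdex
      dimension q(3),e(3),v(3,3),c(3,1)
      data q/3.,2.,1./, e/0.,0.5,0.25/
      do 20 i=1,3
      do 10 j=1,3
   10 v(i,j)=0.
   20 v(i,i)=1.
      c(1,1)=1.
      c(2,1)=0.
      c(3,1)=0.
      call qrbd (ipass,q,e,3,v,3,3,c,3,1)
      print *, 'ipass =',ipass
      print *, 'singular values:',(q(i),i=1,3)
      end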
|
{"hexsha": "d9a12a24413e7ac0623abde98f4656a09535cb78", "size": 4634, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "iraf.v2161/math/llsq/original_f/qrbd.f", "max_stars_repo_name": "ysBach/irafdocgen", "max_stars_repo_head_hexsha": "b11fcd75cc44b01ae69c9c399e650ec100167a54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-01T15:19:09.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-02T16:48:42.000Z", "max_issues_repo_path": "math/llsq/qrbd.f", "max_issues_repo_name": "kirxkirx/iraf", "max_issues_repo_head_hexsha": "fcd7569b4e0ddbea29f7dbe534a25759e0c31883", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-30T13:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-02T19:40:25.000Z", "max_forks_repo_path": "math/llsq/qrbd.f", "max_forks_repo_name": "kirxkirx/iraf", "max_forks_repo_head_hexsha": "fcd7569b4e0ddbea29f7dbe534a25759e0c31883", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1722488038, "max_line_length": 72, "alphanum_fraction": 0.5548122572, "num_tokens": 1942}
|
"""K nearest neighbors.
Probably should be an odd number of K so that there cannot be a tie
Groups applications by distance to other points."""
# we use Euclidean distance, which is very slow for large datasets
# it can be threaded reasonably well but remains slow
# uses breast cancer data from https://archive.ics.uci.edu/ml/datasets.html
import numpy as np
from sklearn import model_selection, neighbors
import pandas as pd
df = pd.read_csv('breast-cancer-wisconsin-data.txt')
df = df.replace('?', np.nan).dropna(axis=0, how='any')
#drops missing data
#df.replace('?', -99999, inplace=True)
#changes missing data to be clear outliers
df.drop(columns=['id'], inplace=True)
X = np.array(df.drop(columns=['class']))
y = np.array(df['class'])
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.15)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy)
example_measure = np.array([[10,2,1,4,1,2,3,2,1], [6,3,5,7,8,4,2,4,7]])
example_measure = example_measure.reshape(len(example_measure),-1)
prediction = clf.predict(example_measure)
print(prediction)
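#
# illustrative sketch (added): the brute-force Euclidean k-NN that the header
# comments describe. knn_predict is a hypothetical helper, not part of
# scikit-learn; it makes the O(n_train) distance cost per query explicit.
from collections import Counter

def knn_predict(X_train, y_train, query, k=5):
    # distance from the query to every training row, then a majority vote
    # among the k nearest labels (odd k avoids ties for two classes)
    distances = np.linalg.norm(X_train.astype(float) - query, axis=1)
    nearest = np.argsort(distances)[:k]
    return Counter(y_train[nearest]).most_common(1)[0][0]

print(knn_predict(X_train, y_train, X_test[0].astype(float)))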
|
{"hexsha": "3e437362b4a577c91f3882189a67182ef125a360", "size": 1219, "ext": "py", "lang": "Python", "max_stars_repo_path": "ML_Tutorials/Breast_cancer_kneighbors.py", "max_stars_repo_name": "Ryandry1st/Machine-Learning", "max_stars_repo_head_hexsha": "f5de4c699edb350c8996bf6b14aae28e20cbccf3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ML_Tutorials/Breast_cancer_kneighbors.py", "max_issues_repo_name": "Ryandry1st/Machine-Learning", "max_issues_repo_head_hexsha": "f5de4c699edb350c8996bf6b14aae28e20cbccf3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-06-25T21:16:05.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-02T20:22:08.000Z", "max_forks_repo_path": "ML_Tutorials/Breast_cancer_kneighbors.py", "max_forks_repo_name": "Ryandry1st/Machine-Learning", "max_forks_repo_head_hexsha": "f5de4c699edb350c8996bf6b14aae28e20cbccf3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2564102564, "max_line_length": 90, "alphanum_fraction": 0.7235438884, "include": true, "reason": "import numpy", "num_tokens": 320}
|
import math
import numpy as np
def calc_dist(p1, p2):
'''
Calculate distance between point 1 and point 2
'''
return np.sqrt((p2[0] - p1[0]) ** 2 +
(p2[1] - p1[1]) ** 2)
def phi(x):
'Cumulative distribution function for the standard normal distribution'
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
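# e.g. phi(0.0) == 0.5 and phi(1.96) is roughly 0.975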
class StateMachine:
def __init__(self, tick, difficulty=1):
'''
        tick: The number of ticks between state transitions
state: Integer value of state which determines behaviour
Possible States:
0: Idle/Initialised
1: Aggressive Mode
2: Defensive Mode
clock: internal clock of movement
position: stores current player position
'''
self.state = 0
self.difficulty = difficulty - 6
self.tick = tick
self.clock = 0
self.life = 3
self.position = None
self.movement = 0
self.nearest_bullet = 0
def state_check(self, entities):
# Increment Clock tick
self.clock = (self.clock+1) % self.tick
self.life = entities['player'][2]
self.position = entities['player'][:2]
self.nearest_bullet = min(entities['bullets'],key=lambda x: calc_dist(self.position, x), default=(-1, -1))
if self.clock == 0:
# If Clock reaches limit, check environment and change state of AI Model
return self.state_change(entities)
else:
# Take an action according to the state and entities
return self.action(entities)
def state_change(self,entities):
nearest_boss_loc = min(list(map(lambda x: calc_dist(self.position, x), entities['bosses'])), default=1000)
nearest_mob_loc = min(list(map( lambda x: calc_dist(self.position, x) ,entities['mobs'])), default=1000)
if self.state == 0:
# If mobs are far away and have a lot of life, go to aggressive behaviour
if self.life > 1 and (nearest_mob_loc > 400 or nearest_boss_loc > 500 ):
self.state = 1
# If mobs are far but life is low, enter defensive behaviour
elif self.life == 1 and (nearest_mob_loc < 400 or nearest_boss_loc < 500 ):
self.state = 2
elif self.state == 1:
#if mobs are moderately far away and lives not 1
if self.life > 1 or (nearest_mob_loc > 300 or nearest_boss_loc > 400 ):
self.state = 0
            # if life is 1 but enemy position is dire, maintain aggression
            elif self.life == 1 and (nearest_mob_loc < 200 or nearest_boss_loc < 300):
self.state = 1
        else:
            # recovered and enemies are far: return to balanced behaviour
            if self.life > 1 and (nearest_mob_loc > 300 or nearest_boss_loc > 400):
                self.state = 0
            elif self.life == 1 and (nearest_mob_loc < 200 or nearest_boss_loc < 300):
                self.state = 1
return self.action(entities)
def get_state(self):
'''
Returns internal state of the AI
'''
return self.state
def action(self,entities):
# with the current state, take actions according to the current policy
if self.state == 0:
return self.balanced(entities)
elif self.state == 1:
return self.aggressive(entities)
else:
return self.defensive(entities)
    def dodging_decision_control(self):
        # dodge relative to the nearest tracked bullet; with a small
        # probability take a uniformly random action instead
        prob = 0.05
        if self.clock != 0:
            direction = self.nearest_bullet[0] - self.position[0]
            rand = np.random.rand()
            if self.nearest_bullet == (-1, -1) or rand < prob:
                # no bullet tracked, or the random draw hit: pick any action
                self.movement = np.random.choice([1, 2, 3, 4, 5], p=[0.325, 0.325, 0.05, 0.15, 0.15])
            elif direction >= 0 and rand > prob:
                # bullet is to the right: take an action from the left set
                self.movement = np.random.choice([1, 3, 4], p=[0.65, 0.05, 0.3])
            elif direction < 0 and rand > prob:
                # bullet is to the left: take an action from the right set
                self.movement = np.random.choice([2, 3, 5], p=[0.65, 0.05, 0.3])
        # if the current position is beyond the set boundaries, move back
        # towards the centre of the bounds
        if self.position[0] > 550:
            self.movement = np.random.choice([1, 3, 4], p=[0.65, 0.05, 0.3])
        elif self.position[0] < 50:
            self.movement = np.random.choice([2, 3, 5], p=[0.65, 0.05, 0.3])
return self.movement
def balanced(self,entities):
# {'mobs': enemy1, 'bosses': enemy2, 'bullets': eb, 'enemy_player': ep, 'player': (curr_x, curr_y)}
# (self.shoot, self.move_left, self.move_right, lambda: 1, self.move_shoot(True),
# self.move_shoot(False))
        # If there is an enemy above but no bullets above, shoot
if len(entities['bullets'])==0 and (len(entities['mobs']) != 0 or len(entities['bosses']) != 0
or entities['enemy_player'] != 'None'):
return np.random.choice([0 , 3, 4, 5],p = [0.4 ,0.1 ,0.25,0.25])
# Determine if the ai will take a random action or take a smart decision
rand = np.random.rand()
nearest_bullet_loc = min(list(map(lambda x: calc_dist(self.position,x),entities['bullets'])),default=1000)
# If bullet is nearby, move away
if nearest_bullet_loc < 300 and rand > (0.5-0.05*self.difficulty):
return self.dodging_decision_control()
nearest_boss_loc = min(list(map(lambda x: calc_dist(self.position,x), entities['bosses'])),default=1000)
nearest_mob_loc = min(list(map(lambda x: calc_dist(self.position,x),entities['mobs'])),default=1000)
        # if a mob is close by, perform a shooting action
if (nearest_mob_loc < 400 or nearest_boss_loc < 500 or entities['enemy_player'] != 'None') and rand < (0.85-0.05*self.difficulty):
return np.random.choice([ 0, 3, 4, 5], p = [0.2, 0.1, 0.35, 0.35])
else:
return np.random.choice([ 0, 1, 2, 3, 4, 5], p=[0.2, 0.1, 0.15, 0.05, 0.25, 0.25])
def aggressive(self, entities):
# {'mobs': enemy1, 'bosses': enemy2, 'bullets': eb, 'enemy_player': ep, 'player': (curr_x, curr_y, life)}
# (self.shoot, self.move_left, self.move_right, lambda: 1, self.move_shoot(True),
# self.move_shoot(False))
        # If there is an enemy above but no bullets above, shoot
if len(entities['bullets'])==0 and (len(entities['mobs']) != 0 or len(entities['bosses']) != 0
or entities['enemy_player'] != 'None'):
return np.random.choice([0 , 3, 4, 5],p = [0.4 ,0.1 ,0.25,0.25])
# Determine if the ai will take a random action or take a smart decision
rand = np.random.rand()
nearest_bullet_loc = min(list(map(lambda x: calc_dist(self.position,x) ,entities['bullets'])),default=1000)
# If bullet is nearby, move away
if nearest_bullet_loc < 200 and rand < (0.5-0.05*self.difficulty):
return self.dodging_decision_control()
nearest_boss_loc = min(list(map(lambda x: calc_dist(self.position,x), entities['bosses'])),default=1000)
nearest_mob_loc = min(list(map(lambda x: calc_dist(self.position,x) ,entities['mobs'])),default=1000)
        # if a mob is close by, perform a shooting action
if (nearest_mob_loc < 400 or nearest_boss_loc < 500 or entities['enemy_player'] != 'None') and rand < (0.8-0.05*self.difficulty):
return np.random.choice([0, 3, 4, 5], p=[0.2, 0.1, 0.35, 0.35])
else:
return np.random.choice([0, 1, 2, 3, 4, 5], p=[0.2, 0.1, 0.15, 0.05, 0.25, 0.25])
def defensive(self,entities):
# {'mobs': enemy1, 'bosses': enemy2, 'bullets': eb, 'enemy_player': ep, 'player': (curr_x, curr_y, life)}
# (self.shoot, self.move_left, self.move_right, lambda: 1, self.move_shoot(True),
# self.move_shoot(False))
        # If there is an enemy above but no bullets above, shoot
if len(entities['bullets'])==0 and (len(entities['mobs']) != 0 or len(entities['bosses']) != 0
or entities['enemy_player'] != 'None'):
return np.random.choice([0, 3, 4, 5], p=[0.4, 0.1, 0.25, 0.25])
# Determine if the ai will take a random action or take a smart decision
rand = np.random.rand()
nearest_bullet_loc = min(list(map(lambda x: calc_dist(self.position,x) ,entities['bullets'])),default=1000)
# If bullet is nearby, move away
if nearest_bullet_loc < 400 and rand < (0.85-0.05*self.difficulty):
return self.dodging_decision_control()
nearest_boss_loc = min(list(map(lambda x: calc_dist(self.position,x) ,entities['bosses'])),default=1000)
nearest_mob_loc = min(list(map(lambda x: calc_dist(self.position,x) ,entities['mobs'])),default=1000)
        # if a mob is close by, perform a shooting action
if (nearest_mob_loc < 400 or nearest_boss_loc < 500 or entities['enemy_player'] != 'None') and rand < (0.35-0.04*self.difficulty):
return np.random.choice([0, 3, 4, 5], p=[0.2, 0.1, 0.35, 0.35])
else:
return np.random.choice([0, 1, 2, 3, 4, 5], p=[0.2, 0.1, 0.15, 0.05, 0.25, 0.25])
if __name__ == '__main__':
machine = StateMachine(1)
# e1 = {'mobs': [(400,300)], 'bosses': [(400,300)], 'bullets': [(350,200)],
# 'enemy_player': 'None', 'player': (350, 200 , 1)}
e1 = {'mobs': [], 'bosses': [], 'bullets': [], 'enemy_player': 'None',
'player': (350, 200, 2)}
e2 = {'mobs': [], 'bosses': [], 'bullets': [], 'enemy_player': 'None',
'player': (350, 200, 3)}
e3 = {'mobs': [(300, 400)], 'bosses': [], 'bullets': [], 'enemy_player': 'None',
'player': (350, 200, 1)}
# for i in range(18):
print('state', machine.state)
print(machine.state_check(e1))
print('state', machine.state)
print(machine.state_check(e2))
print('state', machine.state)
print(machine.state_check(e3))
print('state', machine.state)
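    # illustrative sketch (added): drive the machine for several ticks, as the
    # commented-out loop above hints at; with tick=3 the state is re-evaluated
    # on every third call to state_check
    machine2 = StateMachine(3)
    for _ in range(9):
        action = machine2.state_check(e3)
        print('tick action:', action, 'state:', machine2.state)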
|
{"hexsha": "a3b6751ac70de2fbbba96c61d1c10206440db1a9", "size": 10392, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym_invaders/gym_game/envs/classes/Game/Sprites/statemachine.py", "max_stars_repo_name": "Jh123x/Orbital", "max_stars_repo_head_hexsha": "6f8f2da4fd26ef1d77c0c6183230c3a5e6bf0bb9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-05-15T11:17:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-30T01:11:41.000Z", "max_issues_repo_path": "gym_invaders/gym_game/envs/classes/Game/Sprites/statemachine.py", "max_issues_repo_name": "Jh123x/Orbital", "max_issues_repo_head_hexsha": "6f8f2da4fd26ef1d77c0c6183230c3a5e6bf0bb9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-05-16T10:45:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-27T07:17:02.000Z", "max_forks_repo_path": "gym_invaders/gym_game/envs/classes/Game/Sprites/statemachine.py", "max_forks_repo_name": "Jh123x/Orbital", "max_forks_repo_head_hexsha": "6f8f2da4fd26ef1d77c0c6183230c3a5e6bf0bb9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.2511848341, "max_line_length": 138, "alphanum_fraction": 0.5849692071, "include": true, "reason": "import numpy", "num_tokens": 2911}
|
import math
import numpy as np
from multiprocessing import Pool
class paramAdapter(object):
"""This object stores the variables required to implement an adaptive
step size and number of leapfrog steps as detailed in "Adaptive Hamiltonian
and Riemann Manifold Monte Carlo Samplers" by Wang, Mohamed, and de Freitas.
This method performs Bayesian inference on these paramaters assuming a
uniform prior between specified values. Over time, the probability of
a new state being proposed decreases so that the values will converge to
specific values.
In order to more rapidly search through the grid of possible step sizes
and leapfrog steps this object uses parallel processing so that all available
computing resources are used.
"""
def __init__(self,e1,L1,el,eu,Ll,Lu,m,k,a=4,delta=0.1,cores=4):
""" Creates a paramAdapter object.
Arguments:
* e1: starting step size
* L1: starting number of leapfrog steps
* el: lower step size bound
* eu: upper step size bound
* Ll: lower leapfrog bound
* Lu: upper leapfrog bound
* m: number of averaging steps
* k: iterations before proposal probability starts decreasing
* a: constant, 4 in paper
* delta: constant, 0.1 in paper
* cores: number of cores to use in processing
"""
self.currentE=e1
self.currentL=L1
self.eGrid=np.linspace(el,eu,num=500)
self.lGrid=np.array(range(Ll,Lu+1))
self.delta=delta
kappa=0.2
self.sigma=np.diag([1/((kappa*(eu-el))**2),1/((kappa*(Lu-Ll))**2)])
self.previousGamma=[]
self.allSD=[]
self.k=k
self.K=None
self.m=m
self.currentData=[]
self.allData=[]
self.maxR=1
self.a=a
self.i=-2
self.previous_state=None
self.current_state=None
np.random.seed(10)
self.cores=cores
def calck(self,gammaI,gammaJ):
""" Calculates the covariance k between two states
Arguments:
* gammaI: state 1
* gammaJ: state 2
Returns:
        * k: covariance between gammaI and gammaJ
"""
k=np.exp(-0.5*(np.matmul(np.transpose(gammaI),np.matmul(self.sigma,gammaJ))))
return(k)
def calcUCB(self,testGamma):
""" Calculates a varraint of the upper confidence bound for a test
state.
Arguments:
* testGamma: the test state
* s: a scaling factor
* inverse: inverse of the covariance matrix
* inverseR: inverse of the covariance matrix time the data
* p: the decay value
* rootBeta: a constant based on the number of variables in the state
Returns:
* ucb: upper confidence bound
"""
k=[None]*self.inverse.shape[0]
for gamma,index in zip(self.previousGamma,range(len(self.previousGamma))):
k[index]=self.calck(gamma,testGamma)
mean=np.matmul(np.transpose(k),self.inverseR)*self.s
variance=np.matmul(self.inverse,k)
variance=np.matmul(np.transpose(k),variance)
        variance=self.calck(testGamma,testGamma)-variance
ucb=mean+variance*self.p*self.rootbeta
return(ucb)
def reset(self):
"""Resets the adapter"""
self.previousGamma=[]
self.allSD=[]
self.K=None
self.currentData=[]
self.allData=[]
self.maxR=1
self.i=-2
self.previous_state=None
self.current_state=None
def processChunk(self,eList,lList):
"""Processes a chunk of the e, L combinations.
Arguments:
* eList: list of step sizes to check
* lList: list of leapfrog steps to check
Returns:
* best: a tuple of the form ((best e, best L), ucb) where the e and
L selected are those with the highest ucb, which is also included
"""
best=((eList[0],lList[0]),0)
for e in eList:
for L in lList:
ucb=self.calcUCB([e,L])
if(ucb>best[1]):
best=((e,L),ucb)
return(best)
def update(self,state):
""" Steps the adapter forward by one step
Arguments:
* state: the newest state proposed by the HMC algorithm
Returns:
* currentE: the new step size
* currentL: the new number of leapfrog steps
"""
self.previous_state,self.current_state=self.current_state,state
#Calculate the square jumping distance scaled by L^(-0.5)
if(self.previous_state is not None):
val=0
for old, new in zip(self.previous_state, self.current_state):
val+=np.sum(np.square(new-old))/(self.currentL**(0.5))
print("SJD:",str(val))
self.currentData.append(val)
#Update E and L if this is not just an averaging step
if(self.i%self.m==0 and self.i>0):
u=np.random.uniform(low=0,high=1)
self.p=max(self.i/self.m-self.k+1,1)**(-0.5)
if(u<self.p): #Over time the probability of updating will decay
                mean=np.mean(self.currentData)
                sd=np.std(self.currentData)
                self.currentData=[]
                self.allData.append(mean)
                self.allSD.append(sd)
                # track the largest observed reward for the scaling constant
                newPoint=mean
                self.maxR=max(self.maxR,newPoint)
#Update the covariance matrix
self.previousGamma.append((self.currentE,self.currentL))
size=len(self.previousGamma)
newK=np.ones([size,size])
                if(size>1): # no previous K to copy on the first update
newK[:size-1,:size-1]=self.K
for gamma,index in zip(self.previousGamma,range(len(self.previousGamma))):
k=self.calck(gamma,self.previousGamma[-1])
newK[-1,index]=k
newK[index,-1]=k
self.K=newK
                self.s=self.a/self.maxR #update scaling constant
sigmaNu=np.mean(self.allSD) #Variance of noise
#calculate inverse and other values only once
                try: #In case the covariance matrix is singular
                    self.inverse=np.linalg.inv(self.K+(sigmaNu**2)*np.eye(self.K.shape[0]))
                except np.linalg.LinAlgError:
                    self.inverse=np.linalg.inv(self.K+0.1*np.eye(self.K.shape[0]))
self.inverseR=np.matmul(self.inverse,self.allData)
self.rootbeta=2*np.log((self.i/self.m+1)**(3)*math.pi**2/(3*self.delta))
                self.rootbeta=self.rootbeta**(0.5) # square root of beta
#Evenly split up search space between cores
increment=len(self.lGrid)//self.cores
eList=[]
lList=[]
for x in range(self.cores-1):
temp=self.lGrid[x*increment:(x+1)*increment]
eList.append(self.eGrid)
lList.append(temp)
temp=self.lGrid[(self.cores-1)*increment:]
eList.append(self.eGrid)
lList.append(temp)
#Start parallel searches, take best result found
best=((self.eGrid[0],self.lGrid[0]),0)
with Pool(processes=self.cores) as pool:
for i in pool.starmap(self.processChunk,zip(eList,lList)):
print(i[1])
if(i[1]>best[1]):
best=i
#Pick the state with the highest upper confidence bound
self.currentE=np.float32(best[0][0])
self.currentL=np.int64(best[0][1])
if(size==50):
self.K=self.K[1:,1:]
self.previousGamma=self.previousGamma[1:]
self.allData=self.allData[1:]
self.allSD=self.allSD[1:]
self.i+=1
return(self.currentE,self.currentL)
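#
# illustrative sketch (added): exercising the adapter on synthetic states.
# All numbers are arbitrary; in real use update() is called once per HMC
# iteration with the sampler's current position.
if __name__ == '__main__':
    adapter = paramAdapter(e1=0.1, L1=10, el=0.01, eu=0.5, Ll=5, Lu=20,
                           m=5, k=2, cores=2)
    state = [np.zeros(3)]
    for step in range(12):
        # stand-in for an HMC proposal: a small random walk
        state = [s + 0.1*np.random.normal(size=s.shape) for s in state]
        e, L = adapter.update(state)
    print('adapted step size and leapfrog count:', e, L)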
|
{"hexsha": "072c295edd78600cd6927fd83dcb0fdd3a182bd1", "size": 8405, "ext": "py", "lang": "Python", "max_stars_repo_path": "bayesianNetwork2.0/paramAdapter.py", "max_stars_repo_name": "brkronheim/BNNs-for-SUSY", "max_stars_repo_head_hexsha": "1f845e7cd5437970cfd6b2bd0b4af6c26354ce78", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bayesianNetwork2.0/paramAdapter.py", "max_issues_repo_name": "brkronheim/BNNs-for-SUSY", "max_issues_repo_head_hexsha": "1f845e7cd5437970cfd6b2bd0b4af6c26354ce78", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bayesianNetwork2.0/paramAdapter.py", "max_forks_repo_name": "brkronheim/BNNs-for-SUSY", "max_forks_repo_head_hexsha": "1f845e7cd5437970cfd6b2bd0b4af6c26354ce78", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6905829596, "max_line_length": 91, "alphanum_fraction": 0.5368233195, "include": true, "reason": "import numpy", "num_tokens": 1849}
|
#coverage:ignore
import os
from uuid import uuid4
import scipy.optimize
import jax.numpy as jnp
from jax.config import config
from jax import jit, grad
import h5py
import numpy
import numpy.random
import numpy.linalg
from scipy.optimize import minimize
from .adagrad import adagrad
# set mkl thread count for numpy einsum/tensordot calls
# leave one CPU unused so we can still access this computer
os.environ["MKL_NUM_THREADS"] = "{}".format(os.cpu_count() - 1)
config.update("jax_enable_x64", True)
def thc_objective_jax(xcur, norb, nthc, eri):
"""
Loss function for THC factorization using jax numpy
0.5 sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
G(pqrs) = sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
:param xcur: Current parameters for eta and Z
:param norb: number of orbitals
:param nthc: thc-basis dimension
:param eri: two-electron repulsion integrals in chemist notation
:return:
"""
etaPp = xcur[:norb * nthc].reshape(nthc, norb) # leaf tensor nthc x norb
MPQ = xcur[norb * nthc:norb * nthc + nthc * nthc].reshape(
nthc, nthc) # central tensor
CprP = jnp.einsum("Pp,Pr->prP", etaPp,
etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
Iapprox = jnp.einsum('pqU,UV,rsV->pqrs',
CprP,
MPQ,
CprP,
optimize=[(0, 1), (0, 1)])
deri = eri - Iapprox
res = 0.5 * jnp.sum((deri)**2)
return res
def thc_objective_grad_jax(xcur, norb, nthc, eri):
"""
Gradient for THC least-squares objective jax compatible
:param xcur: Current parameters for eta and Z
:param norb: number of orbitals
:param nthc: thc-basis dimension
:param eri: two-electron repulsion integrals in chemist notation
:param verbose: optional (False) for print iteration residual and inf norm
"""
etaPp = xcur[:norb * nthc].reshape(nthc, norb) # leaf tensor nthc x norb
MPQ = xcur[norb * nthc:norb * nthc + nthc * nthc].reshape(
nthc, nthc) # central tensor
# m indexes the nthc and p,q,r,s are orbital indices
CprP = jnp.einsum("Pp,Pr->prP", etaPp,
etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
Iapprox = jnp.einsum('pqU,UV,rsV->pqrs',
CprP,
MPQ,
CprP,
optimize=[(0, 1), (0, 1)])
deri = eri - Iapprox
# O(norb^5)
dL_dZab = -jnp.einsum(
'pqrs,pqA,rsB->AB', deri, CprP, CprP, optimize=[(0, 1), (0, 1)])
# O(norb^5)
dL_dX_GT = -2 * jnp.einsum('Tqrs,Gq,Gv,rsv->GT',
deri,
etaPp,
MPQ,
CprP,
optimize=[(0, 3), (1, 2), (0, 1)])
dL_dX_GT -= 2 * jnp.einsum('pqTs,pqu,uG,Gs->GT',
deri,
CprP,
MPQ,
etaPp,
optimize=[(0, 1), (0, 2), (0, 1)])
return jnp.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
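# illustrative check (added): because thc_objective_jax is pure jax, `grad`
# (imported above) can validate the hand-coded gradient, e.g. for any
# parameter vector x of length norb*nthc + nthc*nthc:
#     assert jnp.allclose(grad(thc_objective_jax)(x, norb, nthc, eri),
#                         thc_objective_grad_jax(x, norb, nthc, eri))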
def thc_objective(xcur, norb, nthc, eri, verbose=False):
"""
Loss function for THC factorization
0.5 sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
G(pqrs) = sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
:param xcur: Current parameters for eta and Z
:param norb: number of orbitals
:param nthc: thc-basis dimension
:param eri: two-electron repulsion integrals in chemist notation
:param verbose: optional (False) for print iteration residual and inf norm
:return:
"""
etaPp = xcur[:norb * nthc].reshape(nthc, norb) # leaf tensor nthc x norb
MPQ = xcur[norb * nthc:norb * nthc + nthc * nthc].reshape(
nthc, nthc) # central tensor
CprP = numpy.einsum("Pp,Pr->prP", etaPp,
etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
Iapprox = numpy.einsum('pqU,UV,rsV->pqrs',
CprP,
MPQ,
CprP,
optimize=['einsum_path', (0, 1), (0, 1)])
deri = eri - Iapprox
res = 0.5 * numpy.sum((deri)**2)
if verbose:
print("res, max, lambda = {}, {}".format(res,
numpy.max(numpy.abs(deri))))
return res
def thc_objective_regularized(xcur,
norb,
nthc,
eri,
penalty_param,
verbose=False):
"""
Loss function for THC factorization
0.5 sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
G(pqrs) = sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
:param xcur: Current parameters for eta and Z
:param norb: number of orbitals
:param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param penalty_param: weight of the lambda regularization term
    :param verbose: optional (False) for print iteration residual and inf norm
:return:
"""
etaPp = xcur[:norb * nthc].reshape(nthc, norb) # leaf tensor nthc x norb
MPQ = xcur[norb * nthc:norb * nthc + nthc * nthc].reshape(
nthc, nthc) # central tensor
CprP = jnp.einsum("Pp,Pr->prP", etaPp,
etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
Iapprox = jnp.einsum('pqU,UV,rsV->pqrs',
CprP,
MPQ,
CprP,
optimize=[(0, 1), (0, 1)])
deri = eri - Iapprox
SPQ = etaPp.dot(
etaPp.T) # (nthc x norb) x (norb x nthc) -> (nthc x nthc) metric
cP = jnp.diag(jnp.diag(
SPQ)) # grab diagonal elements. equivalent to np.diag(np.diagonal(SPQ))
# no sqrts because we have two normalized THC vectors (index by mu and nu)
# on each side.
MPQ_normalized = cP.dot(MPQ).dot(cP) # get normalized zeta in Eq. 11 & 12
lambda_z = jnp.sum(jnp.abs(MPQ_normalized)) * 0.5
res = 0.5 * jnp.sum((deri)**2) + penalty_param * (lambda_z**2)
if verbose:
print("res, max, lambda**2 = {}, {}".format(res, lambda_z**2))
return res
def thc_objective_grad(xcur, norb, nthc, eri, verbose=False):
"""
Gradient for THC least-squares objective
:param xcur: Current parameters for eta and Z
:param norb: number of orbitals
:param nthc: thc-basis dimension
:param eri: two-electron repulsion integrals in chemist notation
:param verbose: optional (False) for print iteration residual and inf norm
"""
etaPp = numpy.array(xcur[:norb * nthc]).reshape(
nthc, norb) # leaf tensor nthc x norb
MPQ = numpy.array(xcur[norb * nthc:norb * nthc + nthc * nthc]).reshape(
nthc, nthc) # central tensor
# m indexes the nthc and p,q,r,s are orbital indices
CprP = numpy.einsum("Pp,Pr->prP", etaPp,
etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
Iapprox = numpy.einsum('pqU,UV,rsV->pqrs',
CprP,
MPQ,
CprP,
optimize=['einsum_path', (0, 1), (0, 1)])
deri = eri - Iapprox
res = 0.5 * numpy.sum((deri)**2)
if verbose:
print("res, max, lambda = {}, {}".format(res,
numpy.max(numpy.abs(deri))))
# O(norb^5)
dL_dZab = -numpy.einsum('pqrs,pqA,rsB->AB',
deri,
CprP,
CprP,
optimize=['einsum_path', (0, 1), (0, 1)])
# O(norb^5)
dL_dX_GT = -2 * numpy.einsum(
'Tqrs,Gq,Gv,rsv->GT',
deri,
etaPp,
MPQ,
CprP,
optimize=['einsum_path', (0, 3), (1, 2), (0, 1)])
dL_dX_GT -= 2 * numpy.einsum(
'pqTs,pqu,uG,Gs->GT',
deri,
CprP,
MPQ,
etaPp,
optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
return numpy.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
def thc_objective_and_grad(xcur, norb, nthc, eri, verbose=False):
"""
Loss function for THC factorization
0.5 sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
G(pqrs) = sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
:param xcur: Current parameters for eta and Z
:param norb: number of orbitals
:param nthc: thc-basis dimension
:param eri: two-electron repulsion integrals in chemist notation
:param verbose: optional (False) for print iteration residual and inf norm
:return:
"""
etaPp = xcur[:norb * nthc].reshape(nthc, norb) # leaf tensor nthc x norb
MPQ = xcur[norb * nthc:norb * nthc + nthc * nthc].reshape(
nthc, nthc) # central tensor
CprP = numpy.einsum("Pp,Pr->prP", etaPp,
etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
Iapprox = numpy.einsum('pqU,UV,rsV->pqrs',
CprP,
MPQ,
CprP,
optimize=['einsum_path', (0, 1), (0, 1)])
deri = eri - Iapprox
res = 0.5 * numpy.sum((deri)**2)
# O(norb^5)
dL_dZab = -numpy.einsum('pqrs,pqA,rsB->AB',
deri,
CprP,
CprP,
optimize=['einsum_path', (0, 1), (0, 1)])
# O(norb^4 * nthc)
dL_dX_GT = -2 * numpy.einsum(
'Tqrs,Gq,Gv,rsv->GT',
deri,
etaPp,
MPQ,
CprP,
optimize=['einsum_path', (0, 3), (1, 2), (0, 1)])
dL_dX_GT -= 2 * numpy.einsum(
'pqTs,pqu,uG,Gs->GT',
deri,
CprP,
MPQ,
etaPp,
optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
return res, numpy.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
def cp_ls_cholesky_factor_objective(beta_gamma,
norb,
nthc,
cholesky_factor,
calcgrad=False):
"""cholesky_factor is reshaped into (norb, norb, num_cholesky)
Cholesky factor B_{ab,x}
    Least-squares fit objective ||B_{ab,x} - sum_{r} beta_{a,r} beta_{b,r} gamma_{x,r}||
This function provides the objective function value and gradient with
respect to beta and gamma
"""
# compute objective
num_cholfactors = cholesky_factor.shape[-1]
beta_bR = beta_gamma[:norb * nthc].reshape((norb, nthc))
gamma_yR = beta_gamma[norb * nthc:norb * nthc +
nthc * num_cholfactors].reshape(
(num_cholfactors, nthc))
beta_abR = numpy.einsum('aR,bR->abR', beta_bR, beta_bR)
chol_approx = numpy.einsum('abR,XR->abX', beta_abR, gamma_yR)
delta = cholesky_factor - chol_approx
fval = 0.5 * numpy.sum((delta)**2)
if calcgrad:
# compute grad
# \partial O / \partial beta_{c,s}
grad_beta = -2 * numpy.einsum('Cbx,bS,xS->CS',
delta,
beta_bR,
gamma_yR,
optimize=['einsum_path', (0, 2), (0, 1)])
grad_gamma = -numpy.einsum('abY,aS,bS->YS',
delta,
beta_bR,
beta_bR,
optimize=['einsum_path', (1, 2), (0, 1)])
grad = numpy.hstack((grad_beta.ravel(), grad_gamma.ravel()))
return fval, grad
else:
return fval
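#
# illustrative sketch (added): a finite-difference check of the analytic THC
# gradient on a tiny random problem. Sizes are arbitrary; a real calculation
# would pass molecular two-electron integrals for eri.
if __name__ == '__main__':
    norb, nthc = 4, 6
    rng = numpy.random.default_rng(0)
    # build an eri tensor with the required permutational symmetry
    chol = rng.normal(size=(norb, norb, 3))
    chol = chol + chol.transpose(1, 0, 2)
    eri = numpy.einsum('pqx,rsx->pqrs', chol, chol)
    x = 0.1 * rng.normal(size=norb * nthc + nthc * nthc)
    f0, grad0 = thc_objective_and_grad(x, norb, nthc, eri)
    eps = 1e-6
    xp = x.copy()
    xp[0] += eps
    fd = (thc_objective(xp, norb, nthc, eri) - f0) / eps
    print('analytic vs finite-difference d/dx[0]:', grad0[0], fd)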
|
{"hexsha": "ecb0af3217d5cb5652376ff498589d53c67fba8e", "size": 11686, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/openfermion/resource_estimates/thc/utils/thc_objectives.py", "max_stars_repo_name": "cvmxn1/OpenFermion", "max_stars_repo_head_hexsha": "cf53c063d0f124a02ff8776bb7f8afb110d4bde6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/openfermion/resource_estimates/thc/utils/thc_objectives.py", "max_issues_repo_name": "cvmxn1/OpenFermion", "max_issues_repo_head_hexsha": "cf53c063d0f124a02ff8776bb7f8afb110d4bde6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/openfermion/resource_estimates/thc/utils/thc_objectives.py", "max_forks_repo_name": "cvmxn1/OpenFermion", "max_forks_repo_head_hexsha": "cf53c063d0f124a02ff8776bb7f8afb110d4bde6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7797619048, "max_line_length": 80, "alphanum_fraction": 0.5143761766, "include": true, "reason": "import numpy,import scipy,from scipy,import jax,from jax", "num_tokens": 3599}
|
import os
import shutil
import tempfile
import zipfile
import h5py
import numpy
import six
from PIL import Image
from numpy.testing import assert_raises
from fuel import config
from fuel.converters.dogs_vs_cats import convert_dogs_vs_cats
from fuel.datasets.dogs_vs_cats import DogsVsCats
from fuel.streams import DataStream
from fuel.schemes import SequentialScheme
def setup():
config._old_data_path = config.data_path
config.data_path = tempfile.mkdtemp()
_make_dummy_data(config.data_path[0])
def _make_dummy_data(output_directory):
data = six.BytesIO()
Image.new('RGB', (1, 1)).save(data, 'JPEG')
image = data.getvalue()
output_files = [os.path.join(output_directory,
'dogs_vs_cats.{}.zip'.format(set_))
for set_ in ['train', 'test1']]
with zipfile.ZipFile(output_files[0], 'w') as zip_file:
zif = zipfile.ZipInfo('train/')
zip_file.writestr(zif, "")
for i in range(25000):
zip_file.writestr('train/cat.{}.jpeg'.format(i), image)
with zipfile.ZipFile(output_files[1], 'w') as zip_file:
zif = zipfile.ZipInfo('test1/')
zip_file.writestr(zif, "")
for i in range(12500):
zip_file.writestr('test1/{}.jpeg'.format(i), image)
def teardown():
shutil.rmtree(config.data_path[0])
config.data_path = config._old_data_path
del config._old_data_path
def test_dogs_vs_cats():
_test_conversion()
_test_dataset()
def _test_conversion():
convert_dogs_vs_cats(config.data_path[0], config.data_path[0])
output_file = "dogs_vs_cats.hdf5"
output_file = os.path.join(config.data_path[0], output_file)
with h5py.File(output_file, 'r') as h5:
assert numpy.all(h5['targets'][:25000] == 0)
assert numpy.all(h5['targets'][25000:] == 1)
assert numpy.all(numpy.array(
[img for img in h5['image_features'][:]]) == 0)
assert numpy.all(h5['image_features_shapes'][:, 0] == 3)
assert numpy.all(h5['image_features_shapes'][:, 1:] == 1)
def _test_dataset():
train = DogsVsCats(('train',))
assert train.num_examples == 25000
assert_raises(ValueError, DogsVsCats, ('valid',))
test = DogsVsCats(('test',))
stream = DataStream.default_stream(
test, iteration_scheme=SequentialScheme(10, 10))
data = next(stream.get_epoch_iterator())[0][0]
assert data.dtype.kind == 'f'
test_dogs_vs_cats.setup = setup
test_dogs_vs_cats.teardown = teardown
|
{"hexsha": "8ccee6169137d3e6cc819ac97b167bc6b7aa3c54", "size": 2509, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_dogs_vs_cats.py", "max_stars_repo_name": "zaimusho/fuel", "max_stars_repo_head_hexsha": "a5ae89c2c77c7865544e2c68dff3207a62085dd6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 767, "max_stars_repo_stars_event_min_datetime": "2015-05-21T03:32:01.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-10T13:34:41.000Z", "max_issues_repo_path": "tests/test_dogs_vs_cats.py", "max_issues_repo_name": "zaimusho/fuel", "max_issues_repo_head_hexsha": "a5ae89c2c77c7865544e2c68dff3207a62085dd6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 274, "max_issues_repo_issues_event_min_datetime": "2015-05-16T19:18:45.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-06T07:13:38.000Z", "max_forks_repo_path": "tests/test_dogs_vs_cats.py", "max_forks_repo_name": "leprophec/fuel", "max_forks_repo_head_hexsha": "a5ae89c2c77c7865544e2c68dff3207a62085dd6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 258, "max_forks_repo_forks_event_min_datetime": "2015-05-21T04:55:25.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-23T07:34:27.000Z", "avg_line_length": 30.2289156627, "max_line_length": 68, "alphanum_fraction": 0.6707851734, "include": true, "reason": "import numpy,from numpy", "num_tokens": 644}
|
import sys
from pathlib import Path
import numpy as np
import pandas as pd
sys.path.append("./")
from brainrender import Scene, settings
from brainrender.actors import Points
from data.dbase.db_tables import Probe
from myterial import blue_grey, grey_darker
settings.SHOW_AXES = False
CONFIGURATION = "longcolumn"
probes = Probe.fetch()
save_fld = Path(r"D:\Dropbox (UCL)\Rotation_vte\Locomotion\analysis\ephys")
scene = Scene(screenshots_folder=save_fld)
regions = ["CUN", "PPN"]
regions_meshes = scene.add_brain_region(*regions, alpha=0.3, silhouette=False)
scene.slice(plane="frontal", actors=[scene.root])
for probe in probes[1:]:
# get and visualize the probe from the reconstruction file.
mouse = probe["mouse_id"][-3:]
rec_file = list(
Path(
r"D:\Dropbox (UCL)\Rotation_vte\Locomotion\reconstructed_probe_location"
).glob(mouse + "_atlas_space_0.npy")
)
if len(rec_file) == 0:
continue
    # reversed so that the first point sits at the bottom of the probe,
    # matching its orientation in the brain
    probe_points = np.load(rec_file[0])[::-1]
scene.add(Points(probe_points[::5], colors=grey_darker, radius=15))
# get and visualize the probe's recording sites
mouse = probe["mouse_id"]
rsites = pd.DataFrame(
(
Probe.RecordingSite
& f'mouse_id="{mouse}"'
& f'probe_configuration="{CONFIGURATION}"'
).fetch()
)
rsites = rsites.loc[rsites.brain_region.isin(regions)]
track = np.vstack(rsites.registered_brain_coordinates.values)
colors = [
color
if region in regions
else (blue_grey if region not in ("unknown", "OUT") else "k")
for color, region in zip(
rsites.color.values, rsites.brain_region.values
)
]
pts = scene.add(Points(track, colors=colors, radius=30))
scene.add_silhouette(pts, lw=2)
scene.render(camera="frontal", interactive=False, zoom=1.5)
scene.screenshot(name="probes_3d_1")
scene.render(camera="sagittal", interactive=False, zoom=1.5)
scene.screenshot(name="probes_3d_2")
scene.render(zoom=1.0)
scene.screenshot(name="probes_3d_3")
|
{"hexsha": "a2ef216168c9fd7046f92c094443872b44a37486", "size": 2161, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/ephys/probe_3d_probes.py", "max_stars_repo_name": "FedeClaudi/LocomotionControl", "max_stars_repo_head_hexsha": "1281f7894825096ad212407351463a2105c5152a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/ephys/probe_3d_probes.py", "max_issues_repo_name": "FedeClaudi/LocomotionControl", "max_issues_repo_head_hexsha": "1281f7894825096ad212407351463a2105c5152a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/ephys/probe_3d_probes.py", "max_forks_repo_name": "FedeClaudi/LocomotionControl", "max_forks_repo_head_hexsha": "1281f7894825096ad212407351463a2105c5152a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0649350649, "max_line_length": 84, "alphanum_fraction": 0.6793151319, "include": true, "reason": "import numpy", "num_tokens": 577}
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import contextlib
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
logger = logging.getLogger(__name__)
class ExtractedFeaturesDataset(FairseqDataset):
def __init__(
self,
path,
split,
min_length=3,
max_length=None,
labels=None,
label_dict=None,
shuffle=True,
sort_by_length=True,
):
super().__init__()
self.min_length = min_length
self.max_length = max_length
self.shuffle = shuffle
self.sort_by_length = sort_by_length
self.label_dict = label_dict
if labels is not None:
assert label_dict is not None
self.sizes = []
self.offsets = []
self.labels = []
path = os.path.join(path, split)
data_path = path
self.data = np.load(data_path + ".npy", mmap_mode="r")
offset = 0
skipped = 0
if not os.path.exists(path + f".{labels}"):
labels = None
        # labels is None for the train split and "phn" for the valid split
with open(data_path + ".lengths", "r") as len_f, open(
path + f".{labels}", "r"
) if labels is not None else contextlib.ExitStack() as lbl_f:
for line in len_f:
length = int(line.rstrip())
lbl = None if labels is None else next(lbl_f).rstrip().split()
                if length >= min_length and (
                    max_length is None or length <= max_length
                ):
                    # keep only samples whose (post-PCA) feature length is at
                    # least min_length
                    self.sizes.append(length)
                    self.offsets.append(offset)
                    if lbl is not None:
                        self.labels.append(lbl)
                else:
                    skipped += 1
                offset += length
self.sizes = np.asarray(self.sizes)
self.offsets = np.asarray(self.offsets)
logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples")
def __getitem__(self, index):
offset = self.offsets[index]
end = self.sizes[index] + offset
feats = torch.from_numpy(self.data[offset:end].copy()).float()
res = {"id": index, "features": feats}
if len(self.labels) > 0:
res["target"] = self.label_dict.encode_line(
self.labels[index],
line_tokenizer=lambda x: x,
append_eos=False,
)
return res
def __len__(self):
return len(self.sizes)
def collater(self, samples):
if len(samples) == 0:
return {}
features = [s["features"] for s in samples]
sizes = [len(s) for s in features]
target_size = max(sizes)
collated_features = features[0].new_zeros(
len(features), target_size, features[0].size(-1)
)
padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False)
for i, (f, size) in enumerate(zip(features, sizes)):
collated_features[i, :size] = f
padding_mask[i, size:] = True
res = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {"features": collated_features, "padding_mask": padding_mask},
}
if len(self.labels) > 0:
target = data_utils.collate_tokens(
[s["target"] for s in samples],
pad_idx=self.label_dict.pad(),
left_pad=False,
)
res["target"] = target
return res
def num_tokens(self, index):
return self.size(index)
def size(self, index):
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
if self.sort_by_length:
order.append(self.sizes)
return np.lexsort(order)[::-1]
else:
return order[0]
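#
# illustrative sketch (added): the zero-padding and padding_mask that collater
# produces for two variable-length feature sequences (shapes arbitrary; no
# dataset files needed).
if __name__ == "__main__":
    feats = [torch.randn(5, 2), torch.randn(3, 2)]
    batch = feats[0].new_zeros(len(feats), 5, feats[0].size(-1))
    mask = torch.BoolTensor(batch.shape[:-1]).fill_(False)
    for i, f in enumerate(feats):
        batch[i, : f.size(0)] = f
        mask[i, f.size(0):] = True
    print(mask)  # second row is True (padded) from index 3 onward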
|
{"hexsha": "22071509f43bfe32d32f4745a69a9fa43744705d", "size": 4274, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/wav2vec/unsupervised/data/extracted_features_dataset.py", "max_stars_repo_name": "Matrix-Zheng/fairseq", "max_stars_repo_head_hexsha": "4b5267aaedc2d1e05111bd7a9bfc947fba74e2b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/wav2vec/unsupervised/data/extracted_features_dataset.py", "max_issues_repo_name": "Matrix-Zheng/fairseq", "max_issues_repo_head_hexsha": "4b5267aaedc2d1e05111bd7a9bfc947fba74e2b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/wav2vec/unsupervised/data/extracted_features_dataset.py", "max_forks_repo_name": "Matrix-Zheng/fairseq", "max_forks_repo_head_hexsha": "4b5267aaedc2d1e05111bd7a9bfc947fba74e2b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.074829932, "max_line_length": 87, "alphanum_fraction": 0.5552175948, "include": true, "reason": "import numpy", "num_tokens": 975}
|
%%%%%%%%%%%%%%%%%%%% author.tex %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sample root file for your "contribution" to a contributed volume
%
% Use this file as a template for your own input.
%
%%%%%%%%%%%%%%%% Springer %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% RECOMMENDED %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[graybox]{svmult}
% choose options for [] as required from the list
% in the Reference Guide
\usepackage{mathptmx} % selects Times Roman as basic font
\usepackage{helvet} % selects Helvetica as sans-serif font
\usepackage{courier} % selects Courier as typewriter font
\usepackage{type1cm} % activate if the above 3 fonts are
% not available on your system
%
\usepackage{makeidx} % allows index generation
\usepackage{url} % links
\usepackage{graphicx} % standard LaTeX graphics tool
% when including figure files
\usepackage{multicol} % used for the two-column index
\usepackage[bottom]{footmisc}% places footnotes at page bottom
\usepackage{algorithm}
\usepackage[noend]{algpseudocode}
% see the list of further useful packages
% in the Reference Guide
\makeindex % used for the subject index
% please use the style svind.ist with
% your makeindex program
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\title*{Accelerated Load Balancing of Unstructured Meshes}
% Use \titlerunning{Short Title} for an abbreviated version of
% your contribution title if the original one is too long
\author{
Gerrett Diamond,
Lucas Davis,
and Cameron W. Smith
}
\institute{
Gerrett Diamond \email{diamog@rpi.edu}
\and Lucas Davis \email{davisl3@rpi.edu}
\and Cameron W. Smith \email{smithc11@rpi.edu}
\at Rensselaer Polytechnic Institute, Troy, NY
}
\authorrunning{G.Diamond et al.}
\maketitle
\abstract{
Unstructured mesh applications running on large, parallel, distributed memory
systems require the computational work related to mesh entities to be evenly
distributed across processes in order to achieve maximal performance.
To efficiently balance meshes on systems with accelerators the balancing
procedure must utilize the accelerator.
This work presents algorithms and speedup results using OpenCL and Kokkos to
accelerate critical portions of the EnGPar diffusive load balancer.
}
\section{Introduction} \label{sec:intro}
%\begin{itemize}
% \item briefly motivate dynamic load balancing
% \item quantify how GPUs are providing the majority of computing performance (\# of systems with GPUs in top 10 systems of top500, graph500, HPCG)
% \item end with a sentence that says what engpar does (diffusion) and how we are
%extending it to run on GPUs
%\end{itemize}
While common partitioning techniques such as multilevel or geometric methods are
good for creating an initial distribution of load, those techniques are less
applicable to simulations where the mesh and load change.
These evolving simulations require dynamic load balancing techniques that are
quick to improve the partition.
Diffusive load balancing methods allow quick partition refinement for the
relatively small changes to imbalance that are seen in adaptive mesh
simulations.
EnGPar's diffusive balancer has been shown to quickly produce high quality
partitions at up to 512Ki processes~\cite{engparSC17}.
\section{EnGPar Dynamic Load Balancing} \label{sec:engpar}
%\begin{itemize}
% \item multi-graph, high-level diffusion algorithm (targeting, selection, migration)
% \item indicate that we will accelerate selection via BFS for distance computation and coloring for cavity selection
%\end{itemize}
EnGPar is a partition improvement tool that utilizes a multi-hypergraph,
called the N-graph, to describe the portions of the mesh that require load balancing.
The N-graph consists of vertices which represent the primary dimension entities of the
mesh. The vertices are connected by hyperedges created from the secondary dimensions of
the mesh that require load balancing.
EnGPar's diffusive algorithm is an iterative local refinement strategy.
In each iteration the target criteria is improved until the imbalance is under a
given tolerance or the imbalance cannot be improved further.
Each iteration consists of three steps: targeting, selection, and migration.
The targeting step gathers metrics on the part and its neighbors
in order to determine which neighboring parts to send weight to and how much
weight to send.
The selection step is where graph vertices on the boundary are chosen to be sent
to neighboring parts in order to satisfy the weights determined by the targeting
phase.
Finally, the migration step sends the graph entities that were selected to the
destination parts and the graph is reconstructed.
In this work, we target accelerating distance computation and cavity selection.
These two procedures consume up to 50\% of the total execution time and are well
suited to acceleration as they do not require inter-process communications~\cite{engparSC17}.
Distance computation is performed during selection by ordering hyperedges on the
boundary based on their distance from the center of the part from furthest to
closest.
EnGPar computes this distance with two breadth first traversals of the graph.
The first traversal starts at the part boundary and works its way in while
marking the depth of visited hyperedges.
The second traversal starts from a hyperedge with the largest depth and works
its way out to the boundary while marking the distance from the starting point.
Cavity selection determines if a cavity, defined as a hyperedge and the vertices
that are connected by it, on the part boundary should be sent to one of the
neighboring parts.
A cavity is selected for migration if (1) the part that the hyperedge is shared
with is a target part, (2) the target part has not been sent more weight than
the limit, and (3) the size of the cavity is small.
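In pseudocode, the per-cavity test reads roughly as follows (identifier names
are illustrative, not EnGPar's actual API):
\begin{algorithmic}[1]
\Procedure{trySelect}{cavity $c$, targets $T$, sent, limit}
\State $p \gets$ owner of the remote copy of $c$'s hyperedge
\If{$p \in T$ \textbf{and} sent[$p$] $<$ limit[$p$] \textbf{and} $|c| \le$ maxCavitySize}
\State mark the vertices of $c$ for migration to $p$
\State sent[$p$] $\gets$ sent[$p$] $+$ weight($c$)
\EndIf
\EndProcedure
\end{algorithmic}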
\section{Accelerating Distance Computation} \label{sec:dist}
Distance computation's breadth first traversal is accelerated with an OpenCL
data-parallel ND-Range kernel for execution on GPUs and many-core processors.
The host process calls the kernel once for each frontier in the traversal.
The kernel implements a `pull' based approach by iterating over the graph
vertices pinned to each hyperedge twice.
The first iteration determines which vertices were updated in the previous
kernel call.
If such a vertex is found, the second iteration updates the distances of the
remaining vertices.
The baseline OpenCL implementation uses a compressed sparse row (CSR)
hypergraph representation and a pull based traversal.
In Figure~\ref{fig:bfs} the performance of optimized implementations
relative to the baseline `csr' implementation are shown.
`scg' in the name of an implementation indicates use of the
Sell-C-$\sigma$ data structure~\cite{sellCSigma}, `int' indicates use of four
byte ints instead of eight byte ints, and `unroll' indicates manual vertex loop
unrolling.
Runs were executed on graphs created from meshes of the 2014 RPI
Formula Hybrid suspension upright with up to 28M (million) tetrahedron
(DOI:~\url{10.5281/zenodo.1194576}).
All tests were executed on an NVIDIA 1080ti using CUDA 9.2.
The chunk size of the `scg' tests was fixed at 64; given the uniform degree of
the element to vertex adjacencies, there was little performance
difference between different chunk size settings.
The given results are the average of three runs and include data transfers to
and from the GPU.
The OpenCL JIT compilation is not included in the timing as this one-time
cost would be amortized across an entire run.
\begin{figure}
\centering
\includegraphics[width=0.8\textwidth]{images/bfsPerformance.png}
\caption{
Performance of breadth first traversal implementations.
}
\label{fig:bfs}
\end{figure}
On the 28M mesh, the `scg\_int\_unroll' implementation is 11 times
faster than the serial C++ push implementation, and 4.78 times faster than
the `csr' implementation.
The performance boost given by loop unrolling and use of Sell-C-$\sigma$ are the
result of improved memory coalescing.
Reducing the integer size by half improves performance by 24\% for the 28
million element mesh.
\section{Accelerating Cavity Selection} \label{sec:select}
Accelerating the selection of cavities requires simultaneously evaluating many
cavities.
The current single threaded selection procedure evaluates cavities in order of
their descending distance from the topological center.
Since the ordered selection exposes no concurrency an alternative application of
the topological distance is needed.
The proposed approach applies a parallel topological distance sorting after a
coloring based parallel cavity evaluation has executed.
Critical to concurrent cavity evaluation is avoiding race conditions when
deciding which part to migrate a given graph vertex.
Hyperedge coloring ensures that any two hyperedges that share a common vertex
will be assigned a different color, and thus entire sets of like-colored
hyperedges can be evaluated concurrently.
The Kokkos-kernels graph coloring procedure~\cite{kokkosColoring}
is used to color the hyperedges of the EnGPar hypergraph.
This procedure is driven by a symmetric adjacency matrix.
To color hyperedges we must create the hyperedge-to-vertex-to-hyperedge
graph; the dual of the hypergraph.
The dual graph has one vertex for each hyperedge, and an edge between two
hyperedges if they share at least one common vertex.
The construction of the dual is listed in Algorithm~\ref{alg:dual}.
It starts by making a set, using a Kokkos \texttt{unordered\_map}, that stores
hyperedge-to-hyperedge adjacencies ($l.$\ref{alg:mapStart}-$l.$\ref{alg:mapEnd}).
A parallel reduction and prefix sum then compute the degree list \verb|deg|
($l.$\ref{alg:degStart}-$l.$\ref{alg:degEnd}).
The hyperedge list, \verb|edgeList|, is then filled with a parallel loop over
the hyperedges.
This loop utilizes a Kokkos atomic read-then-increment operation
($l.$\ref{alg:atomic}) to determine the position of the hyperedge in the list of
adjacent hyperedges.
The resulting CSR, \verb|deg| and \verb|edgeList|, are passed directly to
the Kokkos coloring procedure.
\algloopdefx[PFor]{PFor}[1]{\textbf{parallel for} #1 \textbf{do}}
\algblock[Name]{Start}{End}
\algblockdefx[NAME]{START}{END}
[1][a]{\textbf{parallel for} #1 \textbf{do}}
\begin{algorithm}
\caption{Dual Graph Converter}
\label{alg:dual}
\small
\begin{multicols}{2}
\begin{algorithmic}[1]
\Procedure{dual}{$G=(V,E)$}
\State $n = 0$ \label{alg:mapStart}
\PFor{$v \in V$}
\ForAll{$(i,v) \in E$}
\ForAll{$(j,v) \in E\setminus\{(i,v)\}$}
\State $n$++
\EndFor
\EndFor
\State //n is an upper bound on the set's size
\State set of int pair $m$ (n)
\PFor{$v \in V$}
\ForAll{$(i,v) \in E$}
\ForAll{$(j,v) \in E\setminus\{(i,v)\}$}
\State m.insert( $(i,j)$ )
\EndFor
\EndFor \label{alg:mapEnd}
\algstore{convert}
\end{algorithmic}
\columnbreak
\begin{algorithmic}[1]
\algrestore{convert}
\State $N=|E|$
\State deg = [$N+1$] \label{alg:degStart}
\PFor {$k\in m$}
\State deg[$k$.first+1]++
\PFor{$i=0,1,\ldots,N$}
\State deg[$i$] = sum(deg[$0:i$]) \label{alg:degEnd}
\State edgeList = [deg[N]]
\State degreeCount = [N]
%\PFor {$k\in m$}
\START[$k\in m$]
\State e = deg[k.first]
\State i = degreeCount[k.first]++ \label{alg:atomic}
\State edgeList[e+i] = k.second
\END
\EndProcedure
\end{algorithmic}
\end{multicols}
\end{algorithm}
The speedup of the dual and coloring procedures relative to serial
implementations is shown in Figure~\ref{fig:coloringSpeedup}.
Tests were executed on the same system and series of graphs used in
Section~\ref{sec:dist}.
Construction of the dual has a nearly flat speedup relative to the graphs.
Conversely, the speedup of Kokkos coloring improves with graph size.
Profiling of these tests is required to determine how effectively GPU resources
are utilized and identify bottlenecks.
\begin{figure}
\centering
\includegraphics[width=.7\textwidth]{images/Parallel_Speedup.png}
\caption{The ratio of serial execution time to parallel for dual graph
construction and graph coloring.}
\label{fig:coloringSpeedup}
\end{figure}
%\begin{itemize}
% \item Figure out a better title for this section
% \item flow control strong scaling case with 1.3B tets: \\
%64Ki \url{https://zenodo.org/record/833519#.WztuxXWYV1M} \\
%128Ki \url{https://zenodo.org/record/834946#.Wztu-HWYV1M} \\
%256Ki \url{https://zenodo.org/record/835483#.WztvCXWYV1M} \\
%512Ki \url{https://zenodo.org/record/835742#.WztvG3WYV1M}
% \item the number of elements per-process may be too small for nodes with large GPUs to run efficiently (data transfer may become more costly than running the selection on the CPU!) - especially as we approach 512Ki parts
% \item we will have to run the condense tool to create an 2Ki (640k elms/part),
% 4Ki (320k), 8Ki (160k), 16Ki (80k), and 32Ki (40k) meshes -
% fun3d uses between 75M and 2.3M elements per GPU on summitdev (from siampp18
% presentation)
% \item run on ORNL titan or summit (if accessible)
% \item compare runtimes versus results from SC17 paper~\cite{engparSC17}
% \item use mesh vertex = graph vertex and mesh edge = graph edge for tests -
% will need to run MPI only engpar to establish the baseline performance
% \item plot the time spent in MPI only selection vs kokkos coloring selection
% vs part size - the comparison must start and end at equivalent points in the
% code - start before selection and end just before migration (or whatever the
% next stage) begins
% \item plot the breakdown of time spent in coloring selection vs part size -
% data transfer, color computation, selection (possibly broken into cavity
% selection and filtering for distance), tranferring the selection list
% back to the host
% \item I expect there will be some reduction in partition quality using the
% coloring based selection since we won't have as fine-grained control over
% the process as the MPI only procedure. As long as the quality reduction is
% controlled and performance is better we should be OK.
%\end{itemize}
\section{Closing Remarks} \label{sec:closing}
Speedup results of two critical procedures used by EnGPar were demonstrated.
Parallel distance computation is over an order of magnitude faster than the
serial implementation and is a direct replacement for the existing procedure.
Parallel coloring for the selection process demonstrates high performance
across a range of mesh sizes.
Ongoing work is focused on fully integrating these advances into the production
version for evaluation of overall performance and partition quality.
\begin{acknowledgement}
This research was supported by the U.S. Department of Energy, Office of Science,
Office of Advanced Scientific Computing Research, under award DE-AC52-07NA27344
(FASTMath SciDAC Institute) and by the National Science Foundation under Grant
No. ACI 1533581 (SI2-SSE: Fast Dynamic Load Balancing Tools for Extreme Scale
Systems).
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
\end{acknowledgement}
\bibliographystyle{acm}
\bibliography{scorec-refs/scorec-refs}
\end{document}
|
{"hexsha": "65a5387a26bfaeaee1be1837ac4895847d262da7", "size": 15557, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "IMR_18/engpar_imr18.tex", "max_stars_repo_name": "SCOREC/EnGPar-Docs", "max_stars_repo_head_hexsha": "e99ac24b81842e2638f34420abce7cf981efbca1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "IMR_18/engpar_imr18.tex", "max_issues_repo_name": "SCOREC/EnGPar-Docs", "max_issues_repo_head_hexsha": "e99ac24b81842e2638f34420abce7cf981efbca1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "IMR_18/engpar_imr18.tex", "max_forks_repo_name": "SCOREC/EnGPar-Docs", "max_forks_repo_head_hexsha": "e99ac24b81842e2638f34420abce7cf981efbca1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.8328530259, "max_line_length": 223, "alphanum_fraction": 0.7563797647, "num_tokens": 3811}
|
import cv2
import numpy as np
import time
import packages.dcci as dcci
import img_resources as imr
import timeit
# Init
window_name = "UpScaling"
# img = cv2.imread(img_files.pixel_art[0], cv2.IMREAD_GRAYSCALE)
# def time_results(fn):
# time_start = time.clock()
# output = fn
# time_stop = time.clock()
# time_passed = time_stop - time_start
# return output, time_passed
setup = '''
import cv2
import packages.dcci as dcci
import img_resources as imr
img = cv2.imread(imr.pixel_art[1], cv2.IMREAD_GRAYSCALE)
'''
N_dc=100
N_other=10000
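# The DCCI implementation is expected to run much slower than OpenCV's
# built-in resize, hence the much smaller repetition count for its timing run.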
dc = timeit.timeit('img2 = dcci.Dccix2(img)', setup=setup, number=N_dc)
print(f"DCCI took {dc/N_dc}s")
bc = timeit.timeit('img2 = cv2.resize(img, (0,0), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)', setup=setup, number=N_other)
print(f"Bicubic took {bc/N_other}s")
lenc = timeit.timeit('img2 = cv2.resize(img, (0,0), fx=2, fy=2, interpolation=cv2.INTER_LANCZOS4)', setup=setup, number=N_other)
print(f"Lenczos took {lenc/N_other}s")
# dcci_img, dcci_time = time_results(dcci.Dccix2(np.float64(img)))
# bicubic_img, bicubic_time = time_results()
# lenczos_img, lenczos_time = time_results(cv2.resize(img, (0,0), fx=2, fy=2, interpolation=cv2.INTER_LANCZOS4))
# # Return time results
# print('DCCI took {}'.format(dcci_time))
# print('Bicubic took {}'.format(bicubic_time))
# print('lenczos took {}'.format(lenczos_time))
# cv2.imshow("Original TEST", img)
# cv2.imshow("DCCI TEST", dcci_img)
# cv2.imshow("Bicubic TEST", bicubic_img)
# cv2.imshow("lenczos TEST", lenczos_img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
|
{"hexsha": "e3620b63453632bc8341278827732aed80d72d2b", "size": 1580, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "revent-studio/organa_hqx", "max_stars_repo_head_hexsha": "81a4027da856c6484b78c53e2478d73a010ce798", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2017-10-30T22:56:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T02:11:38.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "VeprUA/organa_hqx", "max_issues_repo_head_hexsha": "81a4027da856c6484b78c53e2478d73a010ce798", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2017-11-02T00:16:10.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-12T00:39:44.000Z", "max_forks_repo_path": "test.py", "max_forks_repo_name": "VeprUA/organa_hqx", "max_forks_repo_head_hexsha": "81a4027da856c6484b78c53e2478d73a010ce798", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-13T13:01:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-13T13:01:28.000Z", "avg_line_length": 31.6, "max_line_length": 128, "alphanum_fraction": 0.7234177215, "include": true, "reason": "import numpy", "num_tokens": 499}
|
import abc
import numpy as np
import mlalgorithms.checks as checks
class IModel(abc.ABC):
@abc.abstractmethod
def train(self, train_samples, train_labels, **kwargs):
"""
Train current model.
:param train_samples: array-like, sparse matrix.
Training data.
:param train_labels: array-like, sparse matrix.
        Target values. Will be cast to train_samples' dtype if necessary.
:param kwargs: dict, optional(default={}).
Additional keyword arguments.
"""
raise NotImplementedError("Called abstract class method!")
@abc.abstractmethod
def predict(self, samples, **kwargs):
"""
Makes predictions based on the transmitted data.
User must override this method.
:param samples: array-like, sparse matrix.
Data for prediction.
:param kwargs: dict, optional(default={}).
Additional keyword arguments.
:return: array.
Returns predicted values.
"""
raise NotImplementedError("Called abstract class method!")
class SimpleModel(IModel):
def __init__(self, model=None):
"""
Constructor of abstract model class which initialize model for working.
:param model: object.
Instance of model class.
"""
if type(self) is IModel:
raise Exception("IModel is an abstract class and cannot be "
"instantiated directly")
self.model = model
def train(self, train_samples, train_labels, **kwargs):
"""
Train current model.
:param train_samples: array-like, sparse matrix.
Training data.
:param train_labels: array-like, sparse matrix.
        Target values. Will be cast to train_samples' dtype if necessary.
:param kwargs: dict, optional(default={}).
Additional keyword arguments.
"""
checks.check_equality(len(train_samples), len(train_labels),
message="Samples and labels have different "
"sizes")
self.model.fit(train_samples, train_labels, **kwargs)
def predict(self, samples, **kwargs):
"""
Makes predictions based on the transmitted data.
:param samples: array-like, sparse matrix.
Data for prediction.
:param kwargs: dict, optional(default={}).
Additional keyword arguments.
:return: array.
Returns predicted values.
"""
predictions = []
for sample in samples:
prediction = self.model.predict(np.array(sample).reshape(1, -1))[0]
predictions.append(prediction)
return predictions
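# Minimal usage sketch (illustrative; assumes a scikit-learn-style estimator
# such as sklearn.linear_model.LinearRegression is available):
# from sklearn.linear_model import LinearRegression
# model = SimpleModel(LinearRegression())
# model.train([[0], [1], [2]], [0, 1, 2])
# print(model.predict([[3], [4]]))  # approximately [3.0, 4.0]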
|
{"hexsha": "22c62d9a43099bc91d8597b106b8663bbb162f00", "size": 2830, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlalgorithms/models/model.py", "max_stars_repo_name": "danila19991/tinkoff-web-service", "max_stars_repo_head_hexsha": "ccc8ac4e8dae6aae5e1c843ddf8730f6216e8450", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlalgorithms/models/model.py", "max_issues_repo_name": "danila19991/tinkoff-web-service", "max_issues_repo_head_hexsha": "ccc8ac4e8dae6aae5e1c843ddf8730f6216e8450", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2018-08-18T15:16:22.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-04T20:28:57.000Z", "max_forks_repo_path": "mlalgorithms/models/model.py", "max_forks_repo_name": "robot-lab/tinkoff-web-service", "max_forks_repo_head_hexsha": "ccc8ac4e8dae6aae5e1c843ddf8730f6216e8450", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4791666667, "max_line_length": 79, "alphanum_fraction": 0.596819788, "include": true, "reason": "import numpy", "num_tokens": 523}
|
module CPUTime
export
CPUtime_us,
CPUtic,
CPUtoq,
CPUtoc,
@CPUtime,
@CPUelapsed
function CPUtime_us()
rusage = Libc.malloc(4*sizeof(Clong) + 14*sizeof(UInt64)) # sizeof(uv_rusage_t); this is different from sizeof(rusage)
ccall(:uv_getrusage, Cint, (Ptr{Nothing},), rusage)
utime = UInt64(1000000)*unsafe_load(convert(Ptr{Clong}, rusage + 0*sizeof(Clong))) + # user CPU time
unsafe_load(convert(Ptr{Clong}, rusage + 1*sizeof(Clong)))
stime = UInt64(1000000)*unsafe_load(convert(Ptr{Clong}, rusage + 2*sizeof(Clong))) + # system CPU time
unsafe_load(convert(Ptr{Clong}, rusage + 3*sizeof(Clong)))
ttime = utime + stime # total CPU time
Libc.free(rusage)
return ttime
end
function CPUtic()
t0 = CPUtime_us()
task_local_storage(:CPUTIMERS, (t0, get(task_local_storage(), :CPUTIMERS, ())))
return t0
end
function CPUtoq()
t1 = CPUtime_us()
timers = get(task_local_storage(), :CPUTIMERS, ())
if timers === ()
error("CPUtoc() without CPUtic()")
end
t0 = timers[1]::UInt64
task_local_storage(:CPUTIMERS, timers[2])
(t1-t0)/1e6
end
function CPUtoc()
t = CPUtoq()
println("elapsed CPU time: ", t, " seconds")
return t
end
# print elapsed CPU time, return expression value
macro CPUtime(ex)
quote
local t0 = CPUtime_us()
local val = $(esc(ex))
local t1 = CPUtime_us()
println("elapsed CPU time: ", (t1-t0)/1e6, " seconds")
val
end
end
# print nothing, return elapsed CPU time
macro CPUelapsed(ex)
quote
local t0 = CPUtime_us()
local val = $(esc(ex))
(CPUtime_us()-t0)/1e6
end
end
end # module
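# Usage sketch (illustrative):
#   using CPUTime
#   CPUtic(); sum(rand(10^7)); t = CPUtoq()   # CPU seconds for the summation
#   @CPUtime sort(rand(10^6))                 # prints elapsed CPU time, returns the result
#   t = @CPUelapsed sort(rand(10^6))          # returns elapsed CPU seconds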
|
{"hexsha": "4a843750088e960572638276f911332e4027c22a", "size": 1746, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/CPUTime.jl", "max_stars_repo_name": "JuliaTagBot/CPUTime.jl", "max_stars_repo_head_hexsha": "b2968b0e90ded508273c93da89ec0341ed5f3334", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/CPUTime.jl", "max_issues_repo_name": "JuliaTagBot/CPUTime.jl", "max_issues_repo_head_hexsha": "b2968b0e90ded508273c93da89ec0341ed5f3334", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/CPUTime.jl", "max_forks_repo_name": "JuliaTagBot/CPUTime.jl", "max_forks_repo_head_hexsha": "b2968b0e90ded508273c93da89ec0341ed5f3334", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0597014925, "max_line_length": 123, "alphanum_fraction": 0.6162657503, "num_tokens": 528}
|
// Copyright Nick Thompson, 2017
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_TEST_MODULE Gauss Kronrod_quadrature_test
#include <complex>
#include <boost/config.hpp>
#include <boost/detail/workaround.hpp>
#if !defined(BOOST_NO_CXX11_DECLTYPE) && !defined(BOOST_NO_CXX11_TRAILING_RESULT_TYPES) && !defined(BOOST_NO_SFINAE_EXPR)
#include <boost/math/concepts/real_concept.hpp>
#include <boost/test/included/unit_test.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
#include <boost/math/quadrature/gauss_kronrod.hpp>
#include <boost/math/special_functions/sinc.hpp>
#include <boost/multiprecision/cpp_bin_float.hpp>
#include <boost/multiprecision/cpp_dec_float.hpp>
#include <boost/multiprecision/debug_adaptor.hpp>
#ifdef BOOST_HAS_FLOAT128
#include <boost/multiprecision/complex128.hpp>
#endif
#if !defined(TEST1) && !defined(TEST1A) && !defined(TEST2) && !defined(TEST3)
# define TEST1
# define TEST1A
# define TEST2
# define TEST3
#endif
#ifdef _MSC_VER
#pragma warning(disable:4127) // Conditional expression is constant
#endif
using std::expm1;
using std::atan;
using std::tan;
using std::log;
using std::log1p;
using std::asinh;
using std::atanh;
using std::sqrt;
using std::isnormal;
using std::abs;
using std::sinh;
using std::tanh;
using std::cosh;
using std::pow;
using std::exp;
using std::sin;
using std::cos;
using std::string;
using boost::math::quadrature::gauss_kronrod;
using boost::math::constants::pi;
using boost::math::constants::half_pi;
using boost::math::constants::two_div_pi;
using boost::math::constants::two_pi;
using boost::math::constants::half;
using boost::math::constants::third;
using boost::math::constants::catalan;
using boost::math::constants::ln_two;
using boost::math::constants::root_two;
using boost::math::constants::root_two_pi;
using boost::math::constants::root_pi;
using boost::multiprecision::cpp_bin_float_quad;
using boost::multiprecision::cpp_dec_float_50;
using boost::multiprecision::debug_adaptor;
using boost::multiprecision::number;
//
// Error rates depend only on the number of points in the approximation, not the type being tested,
// define all our expected errors here:
//
enum
{
test_ca_error_id,
test_ca_error_id_2,
test_three_quad_error_id,
test_three_quad_error_id_2,
test_integration_over_real_line_error_id,
test_right_limit_infinite_error_id,
test_left_limit_infinite_error_id
};
template <unsigned Points>
double expected_error(unsigned)
{
return 0; // placeholder, all tests will fail
}
template <>
double expected_error<15>(unsigned id)
{
switch (id)
{
case test_ca_error_id:
return 1e-7;
case test_ca_error_id_2:
return 2e-5;
case test_three_quad_error_id:
return 1e-8;
case test_three_quad_error_id_2:
return 3.5e-3;
case test_integration_over_real_line_error_id:
return 6e-3;
case test_right_limit_infinite_error_id:
case test_left_limit_infinite_error_id:
return 1e-5;
}
return 0; // placeholder, all tests will fail
}
template <>
double expected_error<17>(unsigned id)
{
switch (id)
{
case test_ca_error_id:
return 1e-7;
case test_ca_error_id_2:
return 2e-5;
case test_three_quad_error_id:
return 1e-8;
case test_three_quad_error_id_2:
return 3.5e-3;
case test_integration_over_real_line_error_id:
return 6e-3;
case test_right_limit_infinite_error_id:
case test_left_limit_infinite_error_id:
return 1e-5;
}
return 0; // placeholder, all tests will fail
}
template <>
double expected_error<21>(unsigned id)
{
switch (id)
{
case test_ca_error_id:
return 1e-12;
case test_ca_error_id_2:
return 3e-6;
case test_three_quad_error_id:
return 2e-13;
case test_three_quad_error_id_2:
return 2e-3;
case test_integration_over_real_line_error_id:
return 6e-3; // doesn't get any better with more points!
case test_right_limit_infinite_error_id:
case test_left_limit_infinite_error_id:
return 5e-8;
}
return 0; // placeholder, all tests will fail
}
template <>
double expected_error<31>(unsigned id)
{
switch (id)
{
case test_ca_error_id:
return 6e-20;
case test_ca_error_id_2:
return 3e-7;
case test_three_quad_error_id:
return 1e-19;
case test_three_quad_error_id_2:
return 6e-4;
case test_integration_over_real_line_error_id:
return 6e-3; // doesn't get any better with more points!
case test_right_limit_infinite_error_id:
case test_left_limit_infinite_error_id:
return 5e-11;
}
return 0; // placeholder, all tests will fail
}
template <>
double expected_error<41>(unsigned id)
{
switch (id)
{
case test_ca_error_id:
return 1e-26;
case test_ca_error_id_2:
return 1e-7;
case test_three_quad_error_id:
return 3e-27;
case test_three_quad_error_id_2:
return 3e-4;
case test_integration_over_real_line_error_id:
return 5e-5; // doesn't get any better with more points!
case test_right_limit_infinite_error_id:
case test_left_limit_infinite_error_id:
return 1e-15;
}
return 0; // placeholder, all tests will fail
}
template <>
double expected_error<51>(unsigned id)
{
switch (id)
{
case test_ca_error_id:
return 5e-33;
case test_ca_error_id_2:
return 1e-8;
case test_three_quad_error_id:
return 1e-32;
case test_three_quad_error_id_2:
return 3e-4;
case test_integration_over_real_line_error_id:
return 1e-14;
case test_right_limit_infinite_error_id:
case test_left_limit_infinite_error_id:
return 3e-19;
}
return 0; // placeholder, all tests will fail
}
template <>
double expected_error<61>(unsigned id)
{
switch (id)
{
case test_ca_error_id:
return 5e-34;
case test_ca_error_id_2:
return 5e-9;
case test_three_quad_error_id:
return 4e-34;
case test_three_quad_error_id_2:
return 1e-4;
case test_integration_over_real_line_error_id:
return 1e-16;
case test_right_limit_infinite_error_id:
case test_left_limit_infinite_error_id:
return 3e-23;
}
return 0; // placeholder, all tests will fail
}
template<class Real, unsigned Points>
void test_linear()
{
std::cout << "Testing linear functions are integrated properly by gauss_kronrod on type " << boost::typeindex::type_id<Real>().pretty_name() << "\n";
Real tol = boost::math::tools::epsilon<Real>() * 10;
Real error;
auto f = [](const Real& x)->Real
{
return 5*x + 7;
};
Real L1;
Real Q = gauss_kronrod<Real, Points>::integrate(f, (Real) 0, (Real) 1, 0, 0, &error, &L1);
BOOST_CHECK_CLOSE_FRACTION(Q, 9.5, tol);
BOOST_CHECK_CLOSE_FRACTION(L1, 9.5, tol);
Q = gauss_kronrod<Real, Points>::integrate(f, (Real) 1, (Real) 0, 0, 0, &error, &L1);
BOOST_CHECK_CLOSE_FRACTION(Q, -9.5, tol);
BOOST_CHECK_CLOSE_FRACTION(L1, 9.5, tol);
Q = gauss_kronrod<Real, Points>::integrate(f, (Real) 0, (Real) 0, 0, 0, &error, &L1);
BOOST_CHECK_CLOSE(Q, Real(0), tol);
}
template<class Real, unsigned Points>
void test_quadratic()
{
std::cout << "Testing quadratic functions are integrated properly by Gauss Kronrod on type " << boost::typeindex::type_id<Real>().pretty_name() << "\n";
Real tol = boost::math::tools::epsilon<Real>() * 10;
Real error;
auto f = [](const Real& x)->Real { return 5*x*x + 7*x + 12; };
Real L1;
Real Q = gauss_kronrod<Real, Points>::integrate(f, 0, 1, 0, 0, &error, &L1);
BOOST_CHECK_CLOSE_FRACTION(Q, (Real) 17 + half<Real>()*third<Real>(), tol);
BOOST_CHECK_CLOSE_FRACTION(L1, (Real) 17 + half<Real>()*third<Real>(), tol);
}
// Examples taken from
//http://crd-legacy.lbl.gov/~dhbailey/dhbpapers/quadrature.pdf
template<class Real, unsigned Points>
void test_ca()
{
std::cout << "Testing integration of C(a) on type " << boost::typeindex::type_id<Real>().pretty_name() << "\n";
Real tol = expected_error<Points>(test_ca_error_id);
Real L1;
Real error;
auto f1 = [](const Real& x)->Real { return atan(x)/(x*(x*x + 1)) ; };
Real Q = gauss_kronrod<Real, Points>::integrate(f1, 0, 1, 0, 0, &error, &L1);
Real Q_expected = pi<Real>()*ln_two<Real>()/8 + catalan<Real>()*half<Real>();
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
BOOST_CHECK_CLOSE_FRACTION(L1, Q_expected, tol);
auto f2 = [](Real x)->Real { Real t0 = x*x + 1; Real t1 = sqrt(t0); return atan(t1)/(t0*t1); };
Q = gauss_kronrod<Real, Points>::integrate(f2, 0 , 1, 0, 0, &error, &L1);
Q_expected = pi<Real>()/4 - pi<Real>()/root_two<Real>() + 3*atan(root_two<Real>())/root_two<Real>();
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
BOOST_CHECK_CLOSE_FRACTION(L1, Q_expected, tol);
tol = expected_error<Points>(test_ca_error_id_2);
auto f5 = [](Real t)->Real { return t*t*log(t)/((t*t - 1)*(t*t*t*t + 1)); };
Q = gauss_kronrod<Real, Points>::integrate(f5, 0, 1, 0);
Q_expected = pi<Real>()*pi<Real>()*(2 - root_two<Real>())/32;
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
}
template<class Real, unsigned Points>
void test_three_quadrature_schemes_examples()
{
std::cout << "Testing integral in 'A Comparison of Three High Precision Quadrature Schemes' on type " << boost::typeindex::type_id<Real>().pretty_name() << "\n";
Real tol = expected_error<Points>(test_three_quad_error_id);
Real Q;
Real Q_expected;
// Example 1:
auto f1 = [](const Real& t)->Real { return t*boost::math::log1p(t); };
Q = gauss_kronrod<Real, Points>::integrate(f1, 0 , 1, 0);
Q_expected = half<Real>()*half<Real>();
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
// Example 2:
auto f2 = [](const Real& t)->Real { return t*t*atan(t); };
Q = gauss_kronrod<Real, Points>::integrate(f2, 0 , 1, 0);
Q_expected = (pi<Real>() -2 + 2*ln_two<Real>())/12;
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, 2 * tol);
// Example 3:
auto f3 = [](const Real& t)->Real { return exp(t)*cos(t); };
Q = gauss_kronrod<Real, Points>::integrate(f3, 0, half_pi<Real>(), 0);
Q_expected = boost::math::expm1(half_pi<Real>())*half<Real>();
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
// Example 4:
auto f4 = [](Real x)->Real { Real t0 = sqrt(x*x + 2); return atan(t0)/(t0*(x*x+1)); };
Q = gauss_kronrod<Real, Points>::integrate(f4, 0 , 1, 0);
Q_expected = 5*pi<Real>()*pi<Real>()/96;
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
tol = expected_error<Points>(test_three_quad_error_id_2);
// Example 5:
auto f5 = [](const Real& t)->Real { return sqrt(t)*log(t); };
Q = gauss_kronrod<Real, Points>::integrate(f5, 0 , 1, 0);
Q_expected = -4/ (Real) 9;
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
// Example 6:
auto f6 = [](const Real& t)->Real { return sqrt(1 - t*t); };
Q = gauss_kronrod<Real, Points>::integrate(f6, 0 , 1, 0);
Q_expected = pi<Real>()/4;
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
}
template<class Real, unsigned Points>
void test_integration_over_real_line()
{
std::cout << "Testing integrals over entire real line in 'A Comparison of Three High Precision Quadrature Schemes' on type " << boost::typeindex::type_id<Real>().pretty_name() << "\n";
Real tol = expected_error<Points>(test_integration_over_real_line_error_id);
Real Q;
Real Q_expected;
Real L1;
Real error;
auto f1 = [](const Real& t)->Real { return 1/(1+t*t);};
Q = gauss_kronrod<Real, Points>::integrate(f1, -boost::math::tools::max_value<Real>(), boost::math::tools::max_value<Real>(), 0, 0, &error, &L1);
Q_expected = pi<Real>();
BOOST_CHECK_CLOSE_FRACTION(Q, Q_expected, tol);
BOOST_CHECK_CLOSE_FRACTION(L1, Q_expected, tol);
}
template<class Real, unsigned Points>
void test_right_limit_infinite()
{
std::cout << "Testing right limit infinite for Gauss Kronrod in 'A Comparison of Three High Precision Quadrature Schemes' on type " << boost::typeindex::type_id<Real>().pretty_name() << "\n";
Real tol = expected_error<Points>(test_right_limit_infinite_error_id);
Real Q;
Real Q_expected;
Real L1;
Real error;
// Example 11:
auto f1 = [](const Real& t)->Real { return 1/(1+t*t);};
Q = gauss_kronrod<Real, Points>::integrate(f1, 0, boost::math::tools::max_value<Real>(), 0, 0, &error, &L1);
Q_expected = half_pi<Real>();
BOOST_CHECK_CLOSE(Q, Q_expected, 100*tol);
auto f4 = [](const Real& t)->Real { return 1/(1+t*t); };
Q = gauss_kronrod<Real, Points>::integrate(f4, 1, boost::math::tools::max_value<Real>(), 0, 0, &error, &L1);
Q_expected = pi<Real>()/4;
BOOST_CHECK_CLOSE(Q, Q_expected, 100*tol);
}
template<class Real, unsigned Points>
void test_left_limit_infinite()
{
std::cout << "Testing left limit infinite for Gauss Kronrod in 'A Comparison of Three High Precision Quadrature Schemes' on type " << boost::typeindex::type_id<Real>().pretty_name() << "\n";
Real tol = expected_error<Points>(test_left_limit_infinite_error_id);
Real Q;
Real Q_expected;
// Example 11:
auto f1 = [](const Real& t)->Real { return 1/(1+t*t);};
Q = gauss_kronrod<Real, Points>::integrate(f1, -boost::math::tools::max_value<Real>(), Real(0), 0);
Q_expected = half_pi<Real>();
BOOST_CHECK_CLOSE(Q, Q_expected, 100*tol);
}
template<class Complex>
void test_complex_lambert_w()
{
std::cout << "Testing that complex-valued integrands are integrated correctly by Gaussian quadrature on type " << boost::typeindex::type_id<Complex>().pretty_name() << "\n";
typedef typename Complex::value_type Real;
Real tol = 10e-9;
using boost::math::constants::pi;
Complex z{2, 3};
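    // The integrand below appears to implement the real-integral
    // representation of the Lambert W function (cf. Kalugin, Jeffrey & Corless):
    //   W(z) = (z/pi) * int_0^pi [((1 - v*cot(v))^2 + v^2) /
    //                             (z + v*csc(v)*exp(-v*cot(v)))] dv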
auto lw = [&z](Real v)->Complex {
using std::cos;
using std::sin;
using std::exp;
Real sinv = sin(v);
Real cosv = cos(v);
Real cotv = cosv/sinv;
Real cscv = 1/sinv;
Real t = (1-v*cotv)*(1-v*cotv) + v*v;
Real x = v*cscv*exp(-v*cotv);
Complex den = z + x;
Complex num = t*(z/pi<Real>());
Complex res = num/den;
return res;
};
//N[ProductLog[2+3*I], 150]
boost::math::quadrature::gauss_kronrod<Real, 61> integrator;
Complex Q = integrator.integrate(lw, (Real) 0, pi<Real>());
BOOST_CHECK_CLOSE_FRACTION(Q.real(), boost::lexical_cast<Real>("1.09007653448579084630177782678166964987102108635357778056449870727913321296238687023915522935120701763447787503167111962008709116746523970476893277703"), tol);
BOOST_CHECK_CLOSE_FRACTION(Q.imag(), boost::lexical_cast<Real>("0.530139720774838801426860213574121741928705631382703178297940568794784362495390544411799468140433404536019992695815009036975117285537382995180319280835"), tol);
}
BOOST_AUTO_TEST_CASE(gauss_quadrature_test)
{
#ifdef TEST1
std::cout << "Testing 15 point approximation:\n";
test_linear<double, 15>();
test_quadratic<double, 15>();
test_ca<double, 15>();
test_three_quadrature_schemes_examples<double, 15>();
test_integration_over_real_line<double, 15>();
test_right_limit_infinite<double, 15>();
test_left_limit_infinite<double, 15>();
// test one case where we do not have pre-computed constants:
std::cout << "Testing 17 point approximation:\n";
test_linear<double, 17>();
test_quadratic<double, 17>();
test_ca<double, 17>();
test_three_quadrature_schemes_examples<double, 17>();
test_integration_over_real_line<double, 17>();
test_right_limit_infinite<double, 17>();
test_left_limit_infinite<double, 17>();
test_complex_lambert_w<std::complex<double>>();
test_complex_lambert_w<std::complex<long double>>();
#endif
#ifdef TEST1A
std::cout << "Testing 21 point approximation:\n";
test_linear<cpp_bin_float_quad, 21>();
test_quadratic<cpp_bin_float_quad, 21>();
test_ca<cpp_bin_float_quad, 21>();
test_three_quadrature_schemes_examples<cpp_bin_float_quad, 21>();
test_integration_over_real_line<cpp_bin_float_quad, 21>();
test_right_limit_infinite<cpp_bin_float_quad, 21>();
test_left_limit_infinite<cpp_bin_float_quad, 21>();
std::cout << "Testing 31 point approximation:\n";
test_linear<cpp_bin_float_quad, 31>();
test_quadratic<cpp_bin_float_quad, 31>();
test_ca<cpp_bin_float_quad, 31>();
test_three_quadrature_schemes_examples<cpp_bin_float_quad, 31>();
test_integration_over_real_line<cpp_bin_float_quad, 31>();
test_right_limit_infinite<cpp_bin_float_quad, 31>();
test_left_limit_infinite<cpp_bin_float_quad, 31>();
#endif
#ifdef TEST2
std::cout << "Testing 41 point approximation:\n";
test_linear<cpp_bin_float_quad, 41>();
test_quadratic<cpp_bin_float_quad, 41>();
test_ca<cpp_bin_float_quad, 41>();
test_three_quadrature_schemes_examples<cpp_bin_float_quad, 41>();
test_integration_over_real_line<cpp_bin_float_quad, 41>();
test_right_limit_infinite<cpp_bin_float_quad, 41>();
test_left_limit_infinite<cpp_bin_float_quad, 41>();
std::cout << "Testing 51 point approximation:\n";
test_linear<cpp_bin_float_quad, 51>();
test_quadratic<cpp_bin_float_quad, 51>();
test_ca<cpp_bin_float_quad, 51>();
test_three_quadrature_schemes_examples<cpp_bin_float_quad, 51>();
test_integration_over_real_line<cpp_bin_float_quad, 51>();
test_right_limit_infinite<cpp_bin_float_quad, 51>();
test_left_limit_infinite<cpp_bin_float_quad, 51>();
#endif
#ifdef TEST3
// Need at least one set of tests with expression templates turned on:
std::cout << "Testing 61 point approximation:\n";
test_linear<cpp_dec_float_50, 61>();
test_quadratic<cpp_dec_float_50, 61>();
test_ca<cpp_dec_float_50, 61>();
test_three_quadrature_schemes_examples<cpp_dec_float_50, 61>();
test_integration_over_real_line<cpp_dec_float_50, 61>();
test_right_limit_infinite<cpp_dec_float_50, 61>();
test_left_limit_infinite<cpp_dec_float_50, 61>();
#ifdef BOOST_HAS_FLOAT128
test_complex_lambert_w<boost::multiprecision::complex128>();
#endif
#endif
}
#else
int main() { return 0; }
#endif
|
{"hexsha": "c80ffe85405d58d41da7d27567c1104b225e7266", "size": 18332, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/gauss_kronrod_quadrature_test.cpp", "max_stars_repo_name": "oleg-alexandrov/math", "max_stars_repo_head_hexsha": "2137c31eb8e52129d997a76b893f71c1da0ccc5f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 233.0, "max_stars_repo_stars_event_min_datetime": "2015-01-12T19:26:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T09:21:47.000Z", "max_issues_repo_path": "3rdparty/boost_1_73_0/libs/math/test/gauss_kronrod_quadrature_test.cpp", "max_issues_repo_name": "qingkouwei/mediaones", "max_issues_repo_head_hexsha": "cec475e1bfd5807b5351cc7e38d244ac5298ca16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 626.0, "max_issues_repo_issues_event_min_datetime": "2015-02-05T18:12:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T13:19:18.000Z", "max_forks_repo_path": "Libs/boost_1_76_0/libs/math/test/gauss_kronrod_quadrature_test.cpp", "max_forks_repo_name": "Antd23rus/S2DE", "max_forks_repo_head_hexsha": "47cc7151c2934cd8f0399a9856c1e54894571553", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 243.0, "max_forks_repo_forks_event_min_datetime": "2015-01-17T17:46:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T12:56:26.000Z", "avg_line_length": 34.2014925373, "max_line_length": 229, "alphanum_fraction": 0.6986689941, "num_tokens": 5299}
|
# coding: utf-8
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 11:05:23 2017
@author: zhangji
"""
# from matplotlib import pyplot as plt
# plt.rcParams['figure.figsize'] = (18.5, 10.5)
# fontsize = 40
import codeStore.support_fun as spf
# import os
import glob
import numpy as np
# import matplotlib
# import re
# from scanf import scanf
# from scipy import interpolate, integrate
# from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# from mpl_toolkits.mplot3d.art3d import Line3DCollection
# PWD = os.getcwd()
# font = {'size': 20}
# matplotlib.rc('font', **font)
# np.set_printoptions(linewidth=90, precision=5)
def read_array(text_headle, FILE_DATA, array_length=6):
return spf.read_array(text_headle, FILE_DATA, array_length)
def func_line(x, a0, a1):
return spf.func_line(x, a0, a1)
def fit_line(ax, x, y, x0, x1, ifprint=1):
return spf.fit_line(ax, x, y, x0, x1, ifprint)
def get_simulate_data(eq_dir):
import pandas as pd
    absU = []  # absolute velocity
absF = [] # force of head
zf = [] # zoom factor
wm = [] # motor spin
txt_names = glob.glob(eq_dir + '/*.txt')
for txt_name in txt_names:
with open(txt_name, 'r') as myinput:
FILE_DATA = myinput.read()
text_headle = 'absolute ref U \['
absU.append(read_array(text_headle, FILE_DATA, array_length=6))
text_headle = '\] and \['
t1 = read_array(text_headle, FILE_DATA, array_length=6)
if np.all(np.isfinite(t1)):
wm.append(read_array(text_headle, FILE_DATA, array_length=6))
else:
text_headle = 'sphere_0: relative velocity \['
t1 = read_array(text_headle, FILE_DATA, array_length=6)
text_headle = 'helix_0: relative velocity \['
t2 = read_array(text_headle, FILE_DATA, array_length=6)
t3 = t2 - t1
wm.append(t3)
text_headle = 'head resultant is \['
absF.append(read_array(text_headle, FILE_DATA, array_length=6))
text_headle = ' geometry zoom factor is'
temp1 = read_array(text_headle, FILE_DATA, array_length=1)
zf.append(0 if np.isclose(temp1, 1) else temp1)
absU = np.vstack(absU)
wm = np.vstack(wm)
absF = np.vstack(absF)
zf = np.hstack(zf)
tzf = zf.copy()
tzf[np.isclose(zf, 0)] = 1
data = pd.DataFrame({'uz': absU[:, 2] / tzf,
'wh': absU[:, 5],
'wm': wm[:, 5],
'fh': absF[:, 2] / tzf,
'Th': absF[:, 5] / (tzf ** 3) * (1 - 0.1 * zf),
'zf': zf}).dropna(how='all').pivot_table(index='zf')
Th = data.Th
uz = data.uz
# uz[uz < 0] = 0
uz[uz < 0] = np.abs(uz[uz < 0]) * 0.1
wm = data.wm
wh = data.wh
return uz, wm, wh, Th
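# Usage sketch (directory path is hypothetical):
# uz, wm, wh, Th = get_simulate_data('./eq_results')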
|
{"hexsha": "ecbbe19950fa930f1c058be70ba121e211a5b98b", "size": 2849, "ext": "py", "lang": "Python", "max_stars_repo_path": "codeStore/post_support_fun.py", "max_stars_repo_name": "pcmagic/stokes_flow", "max_stars_repo_head_hexsha": "464d512d3739eee77b33d1ebf2f27dae6cfa0423", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-11T05:00:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-11T05:00:53.000Z", "max_issues_repo_path": "codeStore/post_support_fun.py", "max_issues_repo_name": "pcmagic/stokes_flow", "max_issues_repo_head_hexsha": "464d512d3739eee77b33d1ebf2f27dae6cfa0423", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codeStore/post_support_fun.py", "max_forks_repo_name": "pcmagic/stokes_flow", "max_forks_repo_head_hexsha": "464d512d3739eee77b33d1ebf2f27dae6cfa0423", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6344086022, "max_line_length": 77, "alphanum_fraction": 0.5935415935, "include": true, "reason": "import numpy,from scipy", "num_tokens": 872}
|
# THEANO_FLAGS=device=gpu,floatX=float32 python train.py
# bug: training length should be larger than batch size
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.models import load_model
import numpy as np
import random
import sys
import copy
import time
import re
import pysynth
### Define some constants
maxlen = 16 # The length of LSTM
epochs = 50
### Read the file and put the music into a list
def splitMusic(file_name = "dataset/data.txt"):
print("...Reading file %s and count the number of songs..." %(file_name))
with open(file_name, "r") as f:
m = f.readlines()
print ("%d songs found." %(len(m)))
m = [re.split('[\[\],\s\']*', p)[1:-1] for p in m]
return m
### Make the dicts mapping between indices and (duration, note) pairs
def makeDict(melody):
print("...Making the dict from index to length¬e and reverse...")
l = set()
n = set()
for melo in melody:
l = l | set(melo[1 : : 2])
n = n | set(melo[0 : : 2])
print ("%d type of note duration" %(len(l)))
print ("%d type of notes" %(len(n)))
dic_size = len(l) * len(n)
print ("input dict length: %d" %(dic_size))
m_to_i = {}
i_to_m = {}
count = 0
for a in l:
for b in n:
m_to_i[a + '\t' + b] = count
i_to_m[count] = a + '\t' + b
count += 1
return m_to_i, i_to_m, dic_size
### Form the training sets
def makeTrainset(melody, m_to_i):
print("...Making the training set...")
X = []
y = []
    for melo in melody:
ind = [m_to_i[melo[2*i+1] + '\t' + melo[2*i]] for i in range(len(melo) // 2)]
for i in range(len(ind) - maxlen):
X.append(ind[i : i + maxlen])
y.append(ind[i + maxlen])
dic_size = len(m_to_i)
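    # One-hot encode the windows: X_train[i, t, n] marks note-index n at
    # timestep t of window i; y_train[i, n] marks the note following window i.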
X_train = np.zeros((len(X), maxlen, dic_size), dtype=np.bool)
y_train = np.zeros((len(X), dic_size), dtype=np.bool)
for i, m in enumerate(X):
for t, n in enumerate(m):
X_train[i, t, n] = 1
y_train[i, y[i]] = 1
# print ("Training set size: X_train", X_train.shape)
# print ("Training set size: y_train", y_train.shape)
return X_train, y_train
### Make the LSTM-model and training
def trainModel(X_train, y_train, seq_length, dic_size):
print ("...Making the LSTM-model...")
model = Sequential()
model.add(LSTM(512, return_sequences=True, input_shape=(seq_length, dic_size)))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(dic_size))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
print ("...Start training...")
model.fit(X_train, y_train, batch_size=256, nb_epoch=epochs)
return model
### Predict the output notes from the feedin window (the music_out.txt write-out below is commented out)
def predict(model, feedin, gen_len, i_to_m):
predi = [list(f).index(True) for f in feedin]
m = copy.deepcopy(feedin)
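    # Autoregressive generation: predict the next note index, then slide the
    # one-hot window one step to the left and append the new note at the end.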
    for i in range(gen_len):
y = model.predict(np.array([m]))
note = list(y[0]).index(max(y[0]))
predi.append(note)
n = copy.deepcopy(m)
n[0 : maxlen - 1] = m[1 : ]
n[-1] = np.zeros(dic_size, dtype=np.bool)
n[-1][note] = 1
m = n
# localtime = time.asctime( time.localtime(time.time()) )
# print("...Writing file back to %s" %("music_out_" + localtime+ ".txt"))
# with open("music_out_" + localtime+ ".txt", "w") as f:
# for i in predi:
# f.write(i_to_m[i] + '\t')
    return predi
### The main program
melody = splitMusic("dataset/demo.txt")
melo_to_index, index_to_melo, dic_size = makeDict(melody)
X, y = makeTrainset(melody, melo_to_index)
# Use part to train and part as trigger to create music
X_train, y_train, X_test, y_test = X[0:90000], y[0:90000], X[-20000:], y[-20000:]
print ("Training set size: X_train", X_train.shape)
print ("Training set size: y_train", y_train.shape)
model = trainModel(X_train, y_train, maxlen, dic_size)
print ("..Training has finished...")
### Uncomment to save the model, for usage, please refer to Keras FAQ
# model.save('trained_model/demo.h5')
### Compose the music and save
comp = [predict(model, X_test[i], 200, index_to_melo) for i in range(0,20000,1000)]
wave = [[index_to_melo[i] for i in c] for c in comp]
wave = [[[m.split()[1], float(m.split()[0])] for m in w] for w in wave]
for i,w in enumerate(wave):
pysynth.make_wav(w, fn = "composed_melody/demo/demox"+str(i)+".wav")
|
{"hexsha": "9fa4a99d4c29099f8d097cee4d6d0fbce6b8b3de", "size": 4437, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "luseiee/project1", "max_stars_repo_head_hexsha": "5964738b095cee7d18715e057b1b93445d3e1a73", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2016-10-26T02:28:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T07:05:49.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "luseiee/musicComposition", "max_issues_repo_head_hexsha": "5964738b095cee7d18715e057b1b93445d3e1a73", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "luseiee/musicComposition", "max_forks_repo_head_hexsha": "5964738b095cee7d18715e057b1b93445d3e1a73", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3953488372, "max_line_length": 84, "alphanum_fraction": 0.6520171287, "include": true, "reason": "import numpy", "num_tokens": 1302}
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import collections
import itertools
import six
import math
import sys
import warnings
from functools import partial, reduce
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle import framework
from paddle.device import get_device, get_cudnn_version
from paddle.nn import initializer as I
from paddle.nn import Layer, LayerList
from paddle.fluid.layers import utils
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.utils import map_structure, flatten, pack_sequence_as
from paddle.fluid.data_feeder import convert_dtype
from paddle.fluid.param_attr import ParamAttr
from paddle import _C_ops
def resnet_unit(x, filter_x, scale_x, bias_x, mean_x, var_x, z, filter_z,
scale_z, bias_z, mean_z, var_z, stride, stride_z, padding,
dilation, groups, momentum, eps, data_format, fuse_add,
has_shortcut, use_global_stats, is_test, act):
helper = LayerHelper('resnet_unit', **locals())
bn_param_dtype = fluid.core.VarDesc.VarType.FP32
bit_mask_dtype = fluid.core.VarDesc.VarType.INT32
out = helper.create_variable_for_type_inference(x.dtype)
bit_mask = helper.create_variable_for_type_inference(
dtype=bit_mask_dtype, stop_gradient=True)
# intermediate_out for x
conv_x = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True)
saved_mean_x = helper.create_variable_for_type_inference(
dtype=bn_param_dtype, stop_gradient=True)
saved_invstd_x = helper.create_variable_for_type_inference(
dtype=bn_param_dtype, stop_gradient=True)
running_mean_x = mean_x
running_var_x = var_x
# intermediate_out for z
conv_z = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True)
saved_mean_z = helper.create_variable_for_type_inference(
dtype=bn_param_dtype, stop_gradient=True)
saved_invstd_z = helper.create_variable_for_type_inference(
dtype=bn_param_dtype, stop_gradient=True)
running_mean_z = helper.create_variable_for_type_inference(
dtype=bn_param_dtype, stop_gradient=True) if mean_z is None else mean_z
running_var_z = helper.create_variable_for_type_inference(
dtype=bn_param_dtype, stop_gradient=True) if var_z is None else var_z
inputs = {
'X': x,
'FilterX': filter_x,
'ScaleX': scale_x,
'BiasX': bias_x,
'MeanX': mean_x,
'VarX': var_x,
'Z': z,
'FilterZ': filter_z,
'ScaleZ': scale_z,
'BiasZ': bias_z,
'MeanZ': mean_z,
'VarZ': var_z
}
attrs = {
'stride': stride,
'stride_z': stride_z,
'padding': padding,
'dilation': dilation,
'group': groups,
'momentum': momentum,
'epsilon': eps,
'data_format': data_format,
'fuse_add': fuse_add,
'has_shortcut': has_shortcut,
'use_global_stats': use_global_stats,
'is_test': is_test,
'act_type': act
}
outputs = {
'Y': out,
'BitMask': bit_mask,
'ConvX': conv_x,
'SavedMeanX': saved_mean_x,
'SavedInvstdX': saved_invstd_x,
'RunningMeanX': running_mean_x,
'RunningVarX': running_var_x,
'ConvZ': conv_z,
'SavedMeanZ': saved_mean_z,
'SavedInvstdZ': saved_invstd_z,
'RunningMeanZ': running_mean_z,
'RunningVarZ': running_var_z,
}
helper.append_op(
type='resnet_unit', inputs=inputs, outputs=outputs, attrs=attrs)
return out
class ResNetUnit(Layer):
r"""
******Temporary version******.
    ResNetUnit is designed to optimize performance by using the cuDNN v8 API.
"""
def __init__(self,
num_channels_x,
num_filters,
filter_size,
stride=1,
momentum=0.9,
eps=1e-5,
data_format='NHWC',
act='relu',
fuse_add=False,
has_shortcut=False,
use_global_stats=False,
is_test=False,
filter_x_attr=None,
scale_x_attr=None,
bias_x_attr=None,
moving_mean_x_name=None,
moving_var_x_name=None,
num_channels_z=1,
stride_z=1,
filter_z_attr=None,
scale_z_attr=None,
bias_z_attr=None,
moving_mean_z_name=None,
moving_var_z_name=None):
super(ResNetUnit, self).__init__()
self._stride = stride
self._stride_z = stride_z
self._dilation = 1
self._kernel_size = utils.convert_to_list(filter_size, 2, 'kernel_size')
self._padding = (filter_size - 1) // 2
self._groups = 1
self._momentum = momentum
self._eps = eps
self._data_format = data_format
self._act = act
self._fuse_add = fuse_add
self._has_shortcut = has_shortcut
self._use_global_stats = use_global_stats
self._is_test = is_test
# check format
valid_format = {'NHWC'}
if data_format not in valid_format:
raise ValueError(
"conv_format must be one of {}, but got conv_format='{}'".
format(valid_format, data_format))
def _get_default_param_initializer(channels):
filter_elem_num = np.prod(self._kernel_size) * channels
std = (2.0 / filter_elem_num)**0.5
return I.Normal(0.0, std)
# initial filter
bn_param_dtype = fluid.core.VarDesc.VarType.FP32
bn_param_shape = [1, 1, 1, num_filters]
filter_x_shape = [num_filters, filter_size, filter_size, num_channels_x]
filter_z_shape = [num_filters, filter_size, filter_size, num_channels_z]
self.filter_x = self.create_parameter(
shape=filter_x_shape,
attr=filter_x_attr,
default_initializer=_get_default_param_initializer(num_channels_x))
self.scale_x = self.create_parameter(
shape=bn_param_shape,
attr=scale_x_attr,
dtype=bn_param_dtype,
default_initializer=I.Constant(1.0))
self.bias_x = self.create_parameter(
shape=bn_param_shape,
attr=bias_x_attr,
dtype=bn_param_dtype,
is_bias=True)
self.mean_x = self.create_parameter(
attr=ParamAttr(
name=moving_mean_x_name,
initializer=I.Constant(0.0),
trainable=False),
shape=bn_param_shape,
dtype=bn_param_dtype)
self.mean_x.stop_gradient = True
self.var_x = self.create_parameter(
attr=ParamAttr(
name=moving_var_x_name,
initializer=I.Constant(1.0),
trainable=False),
shape=bn_param_shape,
dtype=bn_param_dtype)
self.var_x.stop_gradient = True
if has_shortcut:
self.filter_z = self.create_parameter(
shape=filter_z_shape,
attr=filter_z_attr,
default_initializer=_get_default_param_initializer(
num_channels_z))
self.scale_z = self.create_parameter(
shape=bn_param_shape,
attr=scale_z_attr,
dtype=bn_param_dtype,
default_initializer=I.Constant(1.0))
self.bias_z = self.create_parameter(
shape=bn_param_shape,
attr=bias_z_attr,
dtype=bn_param_dtype,
is_bias=True)
self.mean_z = self.create_parameter(
attr=ParamAttr(
name=moving_mean_z_name,
initializer=I.Constant(0.0),
trainable=False),
shape=bn_param_shape,
dtype=bn_param_dtype)
self.mean_z.stop_gradient = True
self.var_z = self.create_parameter(
attr=ParamAttr(
name=moving_var_z_name,
initializer=I.Constant(1.0),
trainable=False),
shape=bn_param_shape,
dtype=bn_param_dtype)
self.var_z.stop_gradient = True
else:
self.filter_z = None
self.scale_z = None
self.bias_z = None
self.mean_z = None
self.var_z = None
def forward(self, x, z=None):
if self._fuse_add and z is None:
raise ValueError("z can not be None")
out = resnet_unit(
x, self.filter_x, self.scale_x, self.bias_x, self.mean_x,
self.var_x, z, self.filter_z, self.scale_z, self.bias_z,
self.mean_z, self.var_z, self._stride, self._stride_z,
self._padding, self._dilation, self._groups, self._momentum,
self._eps, self._data_format, self._fuse_add, self._has_shortcut,
self._use_global_stats, self._is_test, self._act)
return out
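# Minimal usage sketch (illustrative; assumes a CUDA build of Paddle with
# cuDNN v8 support, and the shapes/dtype below are hypothetical):
# unit = ResNetUnit(num_channels_x=64, num_filters=64, filter_size=3)
# x = paddle.rand([2, 56, 56, 64])   # NHWC layout
# y = unit(x)                        # fuse_add=False, so no z input is needed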
|
{"hexsha": "4ddcfbac8791f8324a875c62e469997330a1e273", "size": 9777, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/incubate/operators/resnet_unit.py", "max_stars_repo_name": "RangeKing/Paddle", "max_stars_repo_head_hexsha": "2d87300809ae75d76f5b0b457d8112cb88dc3e27", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2016-08-15T07:02:27.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-24T09:34:00.000Z", "max_issues_repo_path": "python/paddle/incubate/operators/resnet_unit.py", "max_issues_repo_name": "RangeKing/Paddle", "max_issues_repo_head_hexsha": "2d87300809ae75d76f5b0b457d8112cb88dc3e27", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/paddle/incubate/operators/resnet_unit.py", "max_forks_repo_name": "RangeKing/Paddle", "max_forks_repo_head_hexsha": "2d87300809ae75d76f5b0b457d8112cb88dc3e27", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3457249071, "max_line_length": 80, "alphanum_fraction": 0.614810269, "include": true, "reason": "import numpy", "num_tokens": 2197}
|
import pytest
import numpy as np
from symbolic_pymc.utils import HashableNDArray
from symbolic_pymc.meta import MetaSymbol, MetaOp, metatize
class SomeOp(object):
def __repr__(self):
return "<SomeOp>"
class SomeType(object):
def __init__(self, field1, field2):
self.field1 = field1
self.field2 = field2
def __repr__(self):
return f"SomeType({self.field1}, {self.field2})"
def __str__(self):
return f"SomeType<{self.field1}, {self.field2}>"
class SomeMetaSymbol(MetaSymbol):
__slots__ = ("field1", "field2", "_blah")
base = SomeType
def __init__(self, obj=None):
super().__init__(obj)
self.field1 = 1
self.field2 = 2
self._blah = "a"
class SomeMetaOp(MetaOp):
__slots__ = ()
base = SomeOp
def output_meta_types(self):
return [SomeMetaSymbol]
def __call__(self, *args, **kwargs):
return SomeMetaSymbol(*args, **kwargs)
def test_meta():
"""Make sure hash caching and slot manipulation works."""
some_mt = SomeMetaSymbol()
assert some_mt.__all_slots__ == ("_obj", "_hash", "_rands", "field1", "field2", "_blah")
assert some_mt.__all_props__ == ("field1", "field2")
assert some_mt.__props__ == ("field1", "field2")
assert some_mt.__volatile_slots__ == ("_obj", "_hash", "_rands", "_blah")
assert some_mt.obj is None
assert not hasattr(some_mt, "_hash")
some_hash = hash(some_mt)
assert some_mt._hash == some_hash
assert some_mt.field1 == 1
assert some_mt.field2 == 2
# This assignment shouldn't change the cached values
some_mt._blah = "b"
assert some_mt._hash == some_hash
    # This one should invalidate the cached hash and clear the volatile slots
some_mt.field1 = 10
assert some_mt._hash is None
assert some_mt._blah is None
some_new_hash = hash(some_mt)
assert some_mt._hash == some_new_hash
assert some_new_hash != some_hash
some_op_mt = SomeMetaOp(SomeOp())
with pytest.raises(AttributeError):
some_op_mt.obj = SomeOp()
def test_meta_inheritance():
class SomeOtherType(SomeType):
def __init__(self, field1, field2, field3):
super().__init__(field1, field2)
self.field3 = field3
class SomeOtherMetaSymbol(SomeMetaSymbol):
__slots__ = ("field3", "_bloh")
base = SomeOtherType
def __init__(self, obj=None):
super().__init__(obj)
self.field3 = 3
def __hash__(self):
return hash((super().__hash__(), self.field3))
some_mt = SomeMetaSymbol()
other_mt = SomeOtherMetaSymbol()
assert some_mt != other_mt
assert other_mt.__all_slots__ == (
"_obj",
"_hash",
"_rands",
"field1",
"field2",
"_blah",
"field3",
"_bloh",
)
assert other_mt.__all_props__ == ("field1", "field2", "field3")
assert other_mt.__props__ == ("field3",)
assert other_mt.__volatile_slots__ == ("_obj", "_hash", "_rands", "_blah", "_bloh")
def test_meta_str():
some_mt = SomeMetaSymbol()
assert repr(some_mt) == "SomeMetaSymbol(1, 2)"
assert str(some_mt) == repr(some_mt)
some_mt = SomeMetaSymbol(SomeType(1, 2))
assert repr(some_mt) == "SomeMetaSymbol(1, 2, obj=SomeType(1, 2))"
assert str(some_mt) == "SomeMetaSymbol(1, 2)"
some_op_mt = SomeMetaOp()
assert repr(some_op_mt) == "SomeMetaOp(obj=None)"
some_op_mt = SomeMetaOp(SomeOp())
assert repr(some_op_mt) == "SomeMetaOp(obj=<SomeOp>)"
def test_meta_pretty():
pretty_mod = pytest.importorskip("IPython.lib.pretty")
from symbolic_pymc.meta import meta_repr
some_mt = SomeMetaSymbol()
assert pretty_mod.pretty(some_mt) == "SomeMetaSymbol(field1=1, field2=2)"
meta_repr.print_obj = True
assert pretty_mod.pretty(some_mt) == "SomeMetaSymbol(field1=1, field2=2)"
some_mt = SomeMetaSymbol(SomeType(1, 2))
assert pretty_mod.pretty(some_mt) == "SomeMetaSymbol(field1=1, field2=2, obj=SomeType(1, 2))"
meta_repr.print_obj = False
some_mt = SomeMetaSymbol(SomeType(1, 2))
some_mt.field1 = SomeMetaSymbol(SomeType(3, 4))
some_mt.field1.field2 = SomeMetaSymbol(SomeType(5, 6))
assert (
pretty_mod.pretty(some_mt)
== "SomeMetaSymbol(\n field1=SomeMetaSymbol(field1=1, field2=SomeMetaSymbol(field1=1, field2=2)),\n field2=2)"
)
some_op_mt = SomeMetaOp()
assert pretty_mod.pretty(some_op_mt) == "SomeMetaOp()"
def test_metatize():
x_mt = metatize(np.r_[1, 2, 3])
assert isinstance(x_mt, HashableNDArray)
y_mt = metatize(np.r_[1, 2, 3, 4])
assert isinstance(y_mt, HashableNDArray)
assert x_mt != y_mt
assert x_mt != 1
|
{"hexsha": "632432817bef3ea5f3c4dff10cadf74499e5f238", "size": 4701, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_meta.py", "max_stars_repo_name": "josephwillard/symbolic-pymc", "max_stars_repo_head_hexsha": "7bef08dd572c3ddc32ddc8e8e3c0b1809b4ce654", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2019-02-16T21:07:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T01:01:45.000Z", "max_issues_repo_path": "tests/test_meta.py", "max_issues_repo_name": "josephwillard/symbolic-pymc", "max_issues_repo_head_hexsha": "7bef08dd572c3ddc32ddc8e8e3c0b1809b4ce654", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 56, "max_issues_repo_issues_event_min_datetime": "2019-02-20T09:06:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-08T21:22:23.000Z", "max_forks_repo_path": "tests/test_meta.py", "max_forks_repo_name": "josephwillard/symbolic-pymc", "max_forks_repo_head_hexsha": "7bef08dd572c3ddc32ddc8e8e3c0b1809b4ce654", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-02-22T06:22:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-05T10:05:35.000Z", "avg_line_length": 25.2741935484, "max_line_length": 120, "alphanum_fraction": 0.6458200383, "include": true, "reason": "import numpy", "num_tokens": 1342}
|
function not_disappearing(data)
    idx = Int[]
for s in 1:data.S
flag = true
for t in 1:data.T-1
if data.counts[s,1,t]==0 && data.counts[s,1,t+1]>0
flag = false
break
end
end
if flag
push!(idx,s)
end
end
return idx
end
function filter_counts(data, rounds::Vector{Int}, counts_threshold::Int)
    @assert maximum(rounds) <= data.T
    @assert length(rounds) <= data.T
    data_filtered = deepcopy(data)
    for t in rounds
        # apply the threshold cumulatively: each round filters the result of
        # the previous round instead of restarting from the original data
        data_filtered = subdata(data_filtered, findall(data_filtered.counts[:,1,t] .>= counts_threshold))
    end
    return data_filtered
end
function add_pseudocounts(data, pc)
data_new = deepcopy(data)
data_new.counts .+= pc
return data_new
end
export not_disappearing, filter_counts, add_pseudocounts
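# Usage sketch (illustrative):
#   data2 = filter_counts(data, [1, 2], 5)   # keep series with >= 5 counts in rounds 1 and 2
#   data3 = add_pseudocounts(data2, 1)       # add a pseudocount of 1 everywhere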
|
{"hexsha": "85f7852edb84369b010fc54b3f5e12d070a1306a", "size": 845, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/preprocess_utils.jl", "max_stars_repo_name": "matteodeleonardis/UAF2.jl", "max_stars_repo_head_hexsha": "83af0afd03a6e9bacdc809c7fcbcdd2760bbb98d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/preprocess_utils.jl", "max_issues_repo_name": "matteodeleonardis/UAF2.jl", "max_issues_repo_head_hexsha": "83af0afd03a6e9bacdc809c7fcbcdd2760bbb98d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/preprocess_utils.jl", "max_forks_repo_name": "matteodeleonardis/UAF2.jl", "max_forks_repo_head_hexsha": "83af0afd03a6e9bacdc809c7fcbcdd2760bbb98d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.125, "max_line_length": 87, "alphanum_fraction": 0.601183432, "num_tokens": 228}
|
cxx"""
static cv::UMat image;
static bool backprojMode = false;
static bool selectObject = false;
static int trackObject = 0;
static bool showHist = true;
static cv::Rect selection;
static int vmin = 10, vmax = 256, smin = 30;
//int argc = 0;
//char** argv;
//cv::String keys;
//cv::CommandLineParser parser(int argc, const char** argv, cv::String &keys);
static void onMouse(int event, int x, int y, int, void*)
{
static cv::Point origin;
if (selectObject)
{
selection.x = std::min(x, origin.x);
selection.y = std::min(y, origin.y);
selection.width = std::abs(x - origin.x);
selection.height = std::abs(y - origin.y);
selection &= cv::Rect(0, 0, image.cols, image.rows);
}
switch (event)
{
case cv::EVENT_LBUTTONDOWN:
origin = cv::Point(x, y);
selection = cv::Rect(x, y, 0, 0);
selectObject = true;
break;
case cv::EVENT_LBUTTONUP:
selectObject = false;
if (selection.width > 0 && selection.height > 0)
trackObject = -1;
break;
default:
break;
}
}
static void help()
{
std::cout << "\nThis is a demo that shows mean-shift based tracking using Transparent API\n"
"You select a color objects such as your face and it tracks it.\n"
"This reads from video camera (0 by default, or the camera number the user enters\n"
"Usage: \n"
" ./camshiftdemo [camera number]\n";
std::cout << "\n\nHot keys: \n"
"\tESC - quit the program\n"
"\ts - stop the tracking\n"
"\tb - switch to/from backprojection view\n"
"\th - show/hide object histogram\n"
"\tp - pause video\n"
"\tc - use OpenCL or not\n"
"To initialize tracking, select the object with mouse\n";
}
int ocl_camshift()
{
help();
cv::VideoCapture cap;
cv::Rect trackWindow;
int hsize = 16;
float hranges[2] = { 0, 180 };
//const char * const keys = { "{@camera_number| 0 | camera number}" };
int camNum = 0; //parser.get<int>(0)
cap.open(camNum);
if (!cap.isOpened())
{
help();
std::cout << "***Could not initialize capturing...***\n";
std::cout << "Current parameter's value: \n";
//parser.printMessage();
return EXIT_FAILURE;
}
cv::namedWindow("Histogram", cv::WINDOW_NORMAL);
cv::namedWindow("CamShift Demo", cv::WINDOW_NORMAL);
cv::setMouseCallback("CamShift Demo", onMouse);
cv::createTrackbar("Vmin", "CamShift Demo", &vmin, 256);
cv::createTrackbar("Vmax", "CamShift Demo", &vmax, 256);
cv::createTrackbar("Smin", "CamShift Demo", &smin, 256);
cv::Mat frame, histimg(200, 320, CV_8UC3, cv::Scalar::all(0));
cv::UMat hsv, hist, hue, mask, backproj;
bool paused = false;
for ( ; ; )
{
if (!paused)
{
cap >> frame;
if (frame.empty())
break;
}
frame.copyTo(image);
if (!paused)
{
cv::cvtColor(image, hsv, cv::COLOR_BGR2HSV);
if (trackObject)
{
int _vmin = vmin, _vmax = vmax;
cv::inRange(hsv, cv::Scalar(0, smin, std::min(_vmin, _vmax)),
cv::Scalar(180, 256, std::max(_vmin, _vmax)), mask);
int fromTo[2] = { 0,0 };
hue.create(hsv.size(), hsv.depth());
cv::mixChannels(std::vector<cv::UMat>(1, hsv), std::vector<cv::UMat>(1, hue), fromTo, 1);
if (trackObject < 0)
{
cv::UMat roi(hue, selection), maskroi(mask, selection);
cv::calcHist(std::vector<cv::Mat>(1, roi.getMat(cv::ACCESS_READ)), std::vector<int>(1, 0),
maskroi, hist, std::vector<int>(1, hsize), std::vector<float>(hranges, hranges + 2));
cv::normalize(hist, hist, 0, 255, cv::NORM_MINMAX);
trackWindow = selection;
trackObject = 1;
histimg = cv::Scalar::all(0);
int binW = histimg.cols / hsize;
cv::Mat buf (1, hsize, CV_8UC3);
for (int i = 0; i < hsize; i++)
buf.at<cv::Vec3b>(i) = cv::Vec3b(cv::saturate_cast<uchar>(i*180./hsize), 255, 255);
cv::cvtColor(buf, buf, cv::COLOR_HSV2BGR);
{
cv::Mat _hist = hist.getMat(cv::ACCESS_READ);
for (int i = 0; i < hsize; i++)
{
int val = cv::saturate_cast<int>(_hist.at<float>(i)*histimg.rows/255);
cv::rectangle(histimg, cv::Point(i*binW, histimg.rows),
cv::Point((i+1)*binW, histimg.rows - val),
cv::Scalar(buf.at<cv::Vec3b>(i)), -1, 8);
}
}
}
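// back-project the hue histogram to get a per-pixel likelihood of the tracked object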
cv::calcBackProject(std::vector<cv::UMat>(1, hue), std::vector<int>(1, 0), hist, backproj,
std::vector<float>(hranges, hranges + 2), 1.0);
cv::bitwise_and(backproj, mask, backproj);
cv::RotatedRect trackBox = cv::CamShift(backproj, trackWindow,
cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
if (trackWindow.area() <= 1)
{
int cols = backproj.cols, rows = backproj.rows, r = (std::min(cols, rows) + 5)/6;
trackWindow = cv::Rect(trackWindow.x - r, trackWindow.y - r,
trackWindow.x + r, trackWindow.y + r) &
cv::Rect(0, 0, cols, rows);
}
if (backprojMode)
cv::cvtColor(backproj, image, cv::COLOR_GRAY2BGR);
{
cv::Mat _image = image.getMat(cv::ACCESS_RW);
cv::ellipse(_image, trackBox, cv::Scalar(0, 0, 255), 3, cv::LINE_AA);
}
}
}
else if (trackObject < 0)
paused = false;
if (selectObject && selection.width > 0 && selection.height > 0)
{
cv::UMat roi(image, selection);
cv::bitwise_not(roi, roi);
}
cv::imshow("CamShift Demo", image);
if (showHist)
cv::imshow("Histogram", histimg);
char c = (char)cv::waitKey(10);
if (c == 27)
break;
switch(c)
{
case 'b':
backprojMode = !backprojMode;
break;
case 't':
trackObject = 0;
histimg = cv::Scalar::all(0);
break;
case 'h':
showHist = !showHist;
if (!showHist)
cv::destroyWindow("Histogram");
else
cv::namedWindow("Histogram", cv::WINDOW_AUTOSIZE);
break;
case 'p':
paused = !paused;
break;
case 'c':
cv::ocl::setUseOpenCL(!cv::ocl::useOpenCL());
break;
default:
break;
}
}
return EXIT_SUCCESS;
}
"""
|
{"hexsha": "15241967ef1fe3f61413c0c21500e3e982ddf89c", "size": 7323, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/cxx/ocl_camshift.jl", "max_stars_repo_name": "vernwalrahul/opencv.jl", "max_stars_repo_head_hexsha": "d2851b46b6226a547706aaf256507a1a17cccd1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 100, "max_stars_repo_stars_event_min_datetime": "2015-01-13T09:49:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T15:36:17.000Z", "max_issues_repo_path": "test/cxx/ocl_camshift.jl", "max_issues_repo_name": "vernwalrahul/opencv.jl", "max_issues_repo_head_hexsha": "d2851b46b6226a547706aaf256507a1a17cccd1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2015-01-06T11:46:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-29T19:02:38.000Z", "max_forks_repo_path": "test/cxx/ocl_camshift.jl", "max_forks_repo_name": "vernwalrahul/opencv.jl", "max_forks_repo_head_hexsha": "d2851b46b6226a547706aaf256507a1a17cccd1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2015-01-27T14:27:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-13T15:58:46.000Z", "avg_line_length": 32.8385650224, "max_line_length": 118, "alphanum_fraction": 0.4849105558, "num_tokens": 1873}
|
[STATEMENT]
lemma stc_mult:
"\<lbrakk>x \<in> HFinite; y \<in> HFinite\<rbrakk>
\<Longrightarrow> stc (x * y) = stc(x) * stc(y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<in> HFinite; y \<in> HFinite\<rbrakk> \<Longrightarrow> stc (x * y) = stc x * stc y
[PROOF STEP]
by (simp add: stc_unique stc_SComplex stc_approx_self approx_mult_HFinite)
|
{"llama_tokens": 169, "file": null, "length": 1}
|
#!/usr/bin/env python3
import time
import os
import tempfile
import shutil
import logging
from argparse import ArgumentParser, Namespace
from netCDF4 import Dataset, MFDataset
import geopandas as gpd
import numpy as np
domain_nodes_shp = "gis/ssm domain nodes.shp"
def get_node_ids(shp):
domain_nodes = gpd.read_file(shp)
return domain_nodes['node_id'].sort_values().to_numpy()
def init_output(output_cdf, times_ct, nodes):
do_output = Dataset(output_cdf, "w")
timeDim = do_output.createDimension('time', times_ct)
nodeDim = do_output.createDimension('node', len(nodes))
nodeVar = do_output.createVariable('node', "i4", ('node'))
do_output['node'][:] = nodes
timeVar = do_output.createVariable('time', "f4", ('time'))
return do_output
def append_output(output_cdf):
return Dataset(output_cdf, 'a')
def init_output_var(output, var):
output.createVariable(var, 'f4', ('time','node'))
# Adapted from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i+n]
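# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]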
def main():
script_home = os.path.dirname(os.path.realpath(__file__))
parser = ArgumentParser(description="Extract data from SSM netcdf output files")
parser.add_argument("incdf", nargs="+", help="each input CDF file")
parser.add_argument("outcdf",
help="the output CDF file (created if it doesn't exist)")
parser.add_argument("outvar",
help="the variable to store extracted data in the output CDF")
parser.add_argument("-d", dest="domain_node_shapefile",
help="Specify a domain node shapefile")
parser.add_argument("--invar", dest="input_var",
help="Extract the values of a different output variable")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
help="Print progress messages during the extraction")
parser.add_argument("-c", "--chunk-size", type=int, dest="chunk_size",
help="Process this many CDF files at once")
parser.add_argument("--cache", dest="cache", action="store_true",
help="Use a read/write cache in a temporary directory")
parser.set_defaults(chunk_size=4,
domain_node_shapefile=os.path.join(script_home, domain_nodes_shp),
input_var="DOXG", verbose=False)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING)
if args.cache:
with tempfile.TemporaryDirectory() as tmpdir:
exist_cdfs = []
logging.info("Caching input files...")
for infile in args.incdf:
newpath = os.path.join(tmpdir, os.path.basename(infile))
shutil.copy(infile, newpath)
exist_cdfs.append(newpath)
output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf))
if os.path.exists(args.outcdf):
logging.info("Caching output file...")
shutil.copy(args.outcdf, output_cdf)
do_extract(exist_cdfs, output_cdf, **vars(args))
# Copy the resulting output CDF back
logging.info("Saving output file...")
shutil.copy(output_cdf, args.outcdf)
logging.info("Finished.")
else:
do_extract(args.incdf, args.outcdf, **vars(args))
def do_extract(exist_cdfs, output_cdf, **kwargs):
args = Namespace(**kwargs)
logging.info("Determining scope of work...")
indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0])
node_ids = get_node_ids(args.domain_node_shapefile)
times_ct = len(indata.dimensions['time'])
logging.info("Initializing output file...")
if not os.path.exists(output_cdf):
outdata = init_output(output_cdf, times_ct, node_ids)
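# the model time variable is assumed to be in seconds; store it as days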
outdata['time'][:] = indata['time'][:] / 3600 / 24
else:
outdata = append_output(output_cdf)
init_output_var(outdata, args.outvar)
# Attempts to use the entire MFDataset don't seem to scale well.
# Instead, I'm resorting to a blocking approach where MFDatasets are
# created for only a few netCDF files at a time
indata.close()
i = 0
total = 0
logging.info("Beginning extraction...")
start_time = time.perf_counter()
for cdfchunk in chunks(exist_cdfs, args.chunk_size):
c = MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0])
chunk_times = len(c.dimensions['time'])
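# all time steps, the last vertical layer (assumed to be the bottom), at the domain nodes; node IDs appear to be 1-based, hence the -1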
data = c[args.input_var][:, -1, node_ids - 1]
outdata[args.outvar][i:i+chunk_times,:] = data
i += chunk_times
c.close()
if args.verbose:
elapsed = (time.perf_counter() - start_time)
to_go = elapsed * (times_ct / i - 1)
total += data.size * data.itemsize
logging.info("{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)".format(i,
times_ct, int(elapsed), int(to_go), int(total/elapsed/1000)))
logging.info("Extraction finished.")
outdata.close()
if __name__ == "__main__": main()
|
{"hexsha": "3b1f91e68aad4b98a908395abd289bf83c53d91e", "size": 5161, "ext": "py", "lang": "Python", "max_stars_repo_path": "do_rawcdf_extraction.py", "max_stars_repo_name": "bedaro/ssm-analysis", "max_stars_repo_head_hexsha": "09880dbfa5733d6301b84accc8f42a5ee320d698", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "do_rawcdf_extraction.py", "max_issues_repo_name": "bedaro/ssm-analysis", "max_issues_repo_head_hexsha": "09880dbfa5733d6301b84accc8f42a5ee320d698", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "do_rawcdf_extraction.py", "max_forks_repo_name": "bedaro/ssm-analysis", "max_forks_repo_head_hexsha": "09880dbfa5733d6301b84accc8f42a5ee320d698", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6209677419, "max_line_length": 117, "alphanum_fraction": 0.6552993606, "include": true, "reason": "import numpy", "num_tokens": 1250}
|
#!/usr/bin/env python3
# coding: utf-8
import time
import wiringpi as wi
import numpy as np
# from scipy.optimize import least_squares
class MPU9250(object):
wi.wiringPiSetup()
i2c = wi.I2C()
_address = 0x68 # I2C address of the gyroscope and accelerometer
_addr_AK8963 = 0x0C # I2C address of the magnetometer (self.AK8963)
# sensor constants
_REG_PWR_MGMT_1 = 0x6B
_REG_INT_PIN_CFG = 0x37
_REG_ACCEL_CONFIG1 = 0x1C
_REG_ACCEL_CONFIG2 = 0x1D
_REG_GYRO_CONFIG = 0x1B
_MAG_MODE_POWERDOWN = 0 # magnetometer power-down mode
_MAG_MODE_SERIAL_1 = 1 # magnetometer 8 Hz continuous measurement mode
_MAG_MODE_SERIAL_2 = 2 # magnetometer 100 Hz continuous measurement mode
_MAG_MODE_SINGLE = 3 # magnetometer single measurement mode
_MAG_MODE_EX_TRIGER = 4 # magnetometer external-trigger measurement mode
_MAG_MODE_SELF_TEST = 5 # magnetometer self-test mode
_MAG_ACCESS = False # whether the magnetometer can currently be accessed
_MAG_MODE = 0 # current magnetometer mode
_MAG_BIT = 16 # number of bits the magnetometer outputs
_gyro_range = 1000 # select from 250, 500, 1000, 2000 dps
_accel_range = 8 # select from +-2, +-4, +-8, +-16 g
_mag_range = 4912 # μT
mpu9250 = i2c.setup(_address) # register I2C address 0x68 as mpu9250 (addresses can be listed with $ sudo i2cdetect 1)
AK8963 = i2c.setup(_addr_AK8963)
# set the accelerometer measurement range
_val = 8 # val = 16, 8, 4, 2(default)
def __init__(self):
# offset variables
self._offset_gyro_x = 0
self._offset_gyro_y = 0
self._offset_gyro_z = 0
self._offset_accel_x = 0
self._offset_accel_y = 0
self._offset_accel_z = 0
self._offset_mag_x = -7.497336140
self._offset_mag_y = -57.461323250
self._offset_mag_z = 63.096101850
self._reset_register()
self._power_wakeup()
self._gyro_coefficient = self._gyro_range / float(0x8000) # coefficient : sensed decimal val to dps val.
self._accel_coefficient = self._accel_range / float(0x8000) # coefficient : sensed decimal val to g val
self._mag_coefficient_16 = self._mag_range / 32760.0 # coefficient : sensed decimal val to μT val (16bit)
self._mag_coefficient_14 = self._mag_range / 8190.0 # coefficient : sensed decimal val to μT val (14bit)
self._set_accel_range(val=self._accel_range, _calibration=True)
self._set_gyro_range(val=self._gyro_range, _calibration=True)
# self._set_accel_range(val=self._accel_range, _calibration=False)
# setself._gyro_range(val=self._gyro_range, _calibration=False)
self._set_mag_register('100Hz', '16bit')
# Restore the registers to their default settings.
def _reset_register(self):
if self._MAG_ACCESS is True:
self.i2c.writeReg8(self.AK8963, 0x0B, 0x01)
self.i2c.writeReg8(self.mpu9250, 0x6B, 0x80)
self._MAG_ACCESS = False
time.sleep(0.1)
# Put the device into a state where it can sense.
def _power_wakeup(self):
# clear PWR_MGMT_1
self.i2c.writeReg8(self.mpu9250, self._REG_PWR_MGMT_1, 0x00)
time.sleep(0.1)
# enable I2C access to the magnetometer (self.AK8963) by setting BYPASS_EN=1
self.i2c.writeReg8(self.mpu9250, self._REG_INT_PIN_CFG, 0x02)
self._MAG_ACCESS = True
time.sleep(0.1)
def _set_accel_range(self, val, _calibration=False):
# +-2g (00), +-4g (01), +-8g (10), +-16g (11)
if val == 16:
_accel_range = 16
_data = 0x18
elif val == 8:
_accel_range = 8
_data = 0x10
elif val == 4:
_accel_range = 4
_data = 0x08
else:
_accel_range = 2
_data = 0x00
print("set _accel_range=%d [g]" % _accel_range)
self.i2c.writeReg8(self.mpu9250, self._REG_ACCEL_CONFIG1, _data)
self._accel_coefficient = _accel_range / float(0x8000)
time.sleep(0.1)
# Calibration
if _calibration is True:
self._calib_accel(1000)
return
# Set the gyroscope measurement range.
# val = 2000, 1000, 500, 250 (default)
def _set_gyro_range(self, val, _calibration=False):
if val == 2000:
_gyro_range = 2000
_data = 0x18
elif val == 1000:
_gyro_range = 1000
_data = 0x10
elif val == 500:
_gyro_range = 500
_data = 0x08
else:
_gyro_range = 250
_data = 0x00
print("set _gyro_range=%d [dps]" % _gyro_range)
self.i2c.writeReg8(self.mpu9250, self._REG_GYRO_CONFIG, _data)
self._gyro_coefficient = _gyro_range / float(0x8000)
time.sleep(0.1)
# Calibration
if _calibration is True:
self._calib_gyro(1000)
return
# Configure the magnetometer registers
def _set_mag_register(self, _mode, _bit, _calibration=False):
if self._MAG_ACCESS is False:
# raise if access to the magnetometer has not been enabled
raise Exception('001 Access to a sensor is invalid.')
_writeData = 0x00
# set the measurement mode
if _mode == '8Hz': # Continuous measurement mode 1
_writeData = 0x02
self._MAG_MODE = self._MAG_MODE_SERIAL_1
elif _mode == '100Hz': # Continuous measurement mode 2
_writeData = 0x06
self._MAG_MODE = self._MAG_MODE_SERIAL_2
elif _mode == 'POWER_DOWN': # Power down mode
_writeData = 0x00
self._MAG_MODE = self._MAG_MODE_POWERDOWN
elif _mode == 'EX_TRIGER': # Trigger measurement mode
_writeData = 0x04
self._MAG_MODE = self._MAG_MODE_EX_TRIGER
elif _mode == 'SELF_TEST': # self test mode
_writeData = 0x08
self._MAG_MODE = self._MAG_MODE_SELF_TEST
else: # _mode='SINGLE' # single measurement mode
_writeData = 0x01
self._MAG_MODE = self._MAG_MODE_SINGLE
# number of bits to output
if _bit == '14bit': # output 14bit
_writeData = _writeData | 0x00
self._MAG_BIT = 14
else: # _bit='16bit' # output 16bit
_writeData = _writeData | 0x10
self._MAG_BIT = 16
print("set self._MAG_MODE=%s, %d bit" % (_mode, self._MAG_BIT))
self.i2c.writeReg8(self.AK8963, 0x0A, _writeData)
time.sleep(0.1)
# Calibration
if _calibration is True:
self._calib_mag(3000)
return
# Raw sensor data would otherwise be treated as unsigned, so convert it to signed (16-bit only)
def _u2s(self, unsigneddata):
if unsigneddata & (0x01 << 15):
return -1 * ((unsigneddata ^ 0xffff) + 1)
return unsigneddata
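# e.g. _u2s(0x7fff) == 32767, _u2s(0x8000) == -32768, _u2s(0xffff) == -1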
# read the acceleration values
def get_accel(self):
mpu9250 = self.mpu9250
i2c = self.i2c
ACCEL_XOUT_H = i2c.readReg8(mpu9250, 0x3B)
ACCEL_XOUT_L = i2c.readReg8(mpu9250, 0x3C)
ACCEL_YOUT_H = i2c.readReg8(mpu9250, 0x3D)
ACCEL_YOUT_L = i2c.readReg8(mpu9250, 0x3E)
ACCEL_ZOUT_H = i2c.readReg8(mpu9250, 0x3F)
ACCEL_ZOUT_L = i2c.readReg8(mpu9250, 0x40)
raw_x = self._accel_coefficient * self._u2s(ACCEL_XOUT_H << 8 | ACCEL_XOUT_L) + self._offset_accel_x
raw_y = self._accel_coefficient * self._u2s(ACCEL_YOUT_H << 8 | ACCEL_YOUT_L) + self._offset_accel_y
raw_z = self._accel_coefficient * self._u2s(ACCEL_ZOUT_H << 8 | ACCEL_ZOUT_L) + self._offset_accel_z
return raw_x, raw_y, raw_z
# read the gyroscope values
def get_gyro(self):
mpu9250 = self.mpu9250
i2c = self.i2c
GYRO_XOUT_H = i2c.readReg8(mpu9250, 0x43)
GYRO_XOUT_L = i2c.readReg8(mpu9250, 0x44)
GYRO_YOUT_H = i2c.readReg8(mpu9250, 0x45)
GYRO_YOUT_L = i2c.readReg8(mpu9250, 0x46)
GYRO_ZOUT_H = i2c.readReg8(mpu9250, 0x47)
GYRO_ZOUT_L = i2c.readReg8(mpu9250, 0x48)
raw_x = self._gyro_coefficient * self._u2s(GYRO_XOUT_H << 8 | GYRO_XOUT_L) + self._offset_gyro_x
raw_y = self._gyro_coefficient * self._u2s(GYRO_YOUT_H << 8 | GYRO_YOUT_L) + self._offset_gyro_y
raw_z = self._gyro_coefficient * self._u2s(GYRO_ZOUT_H << 8 | GYRO_ZOUT_L) + self._offset_gyro_z
return raw_x, raw_y, raw_z
# read the magnetometer values
def get_mag(self):
AK8963 = self.AK8963
i2c = self.i2c
if self._MAG_ACCESS is False:
# raise if access to the magnetometer has not been enabled
raise Exception('002 Access to a sensor is invalid.')
# pre-processing
if self._MAG_MODE == self._MAG_MODE_SINGLE:
# single measurement mode powers down as soon as a measurement completes, so set the mode again
if self._MAG_BIT == 14: # output 14bit
_writeData = 0x01
else: # output 16bit
_writeData = 0x11
self.i2c.writeReg8(self.AK8963, 0x0A, _writeData)
time.sleep(0.01)
elif self._MAG_MODE == self._MAG_MODE_SERIAL_1 or self._MAG_MODE == self._MAG_MODE_SERIAL_2:
status = self.i2c.readReg8(self.AK8963, 0x02)
if (status & 0x02) == 0x02:
# if (status[0] & 0x02) == 0x02:
# a data overrun occurred, so sense again
self.i2c.readReg8(self.AK8963, 0x09)
elif self._MAG_MODE == self._MAG_MODE_EX_TRIGER:
# not implemented
return
elif self._MAG_MODE == self._MAG_MODE_POWERDOWN:
raise Exception('003 Mag sensor power down')
# check the ST1 register to confirm that data can be read
status = i2c.readReg8(AK8963, 0x02)
while (status & 0x01) != 0x01:
# while (status[0] & 0x01) != 0x01:
# Wait until data ready state.
# time.sleep(0.01)
status = self.i2c.readReg8(self.AK8963, 0x02)
# read out the data
MAG_XOUT_L = i2c.readReg8(AK8963, 0x03)
MAG_XOUT_H = i2c.readReg8(AK8963, 0x04)
MAG_YOUT_L = i2c.readReg8(AK8963, 0x05)
MAG_YOUT_H = i2c.readReg8(AK8963, 0x06)
MAG_ZOUT_L = i2c.readReg8(AK8963, 0x07)
MAG_ZOUT_H = i2c.readReg8(AK8963, 0x08)
MAG_OF = i2c.readReg8(AK8963, 0x09)
raw_x = self._u2s(MAG_XOUT_H << 8 | MAG_XOUT_L)
raw_y = self._u2s(MAG_YOUT_H << 8 | MAG_YOUT_L)
raw_z = self._u2s(MAG_ZOUT_H << 8 | MAG_ZOUT_L)
st2 = MAG_OF
# data = self.i2c.readReg8(addrAK8963, 0x03 ,7)
# raw_x = u2s(data[1] << 8 | data[0]) # Lower bit is ahead.
# raw_y = u2s(data[3] << 8 | data[2]) # Lower bit is ahead.
# raw_z = u2s(data[5] << 8 | data[4]) # Lower bit is ahead.
# st2 = data[6]
# overflow check
if (st2 & 0x08) == 0x08:
# the value is not valid because the sensor overflowed
raise Exception('004 Mag sensor over flow')
# conversion to μT
if self._MAG_BIT == 16: # output 16bit
raw_x = raw_x * self._mag_coefficient_16
raw_y = raw_y * self._mag_coefficient_16
raw_z = raw_z * self._mag_coefficient_16
else: # output 14bit
raw_x = raw_x * self._mag_coefficient_14
raw_y = raw_y * self._mag_coefficient_14
raw_z = raw_z * self._mag_coefficient_14
# ---- offset by myself ----
# raw_x -= 7.49733614
# raw_y -= 57.46132325
# raw_z -= -63.09610185
# ---- offset by myself ----
raw_x += self._offset_mag_x
raw_y += self._offset_mag_y
raw_z += self._offset_mag_z
return raw_x, raw_y, raw_z
# Calibrate the accelerometer.
# Strictly, latitude, altitude, terrain and so on should also be considered, but keep it simple.
# Assumes gravity acts exactly along the z axis and no acceleration other than gravity is present.
def _calib_accel(self, _count=1000):
print("Accel calibration start")
_sum = [0, 0, 0]
# take data samples
for _ in range(_count):
_data = self.get_accel()
_sum[0] += _data[0]
_sum[1] += _data[1]
_sum[2] += _data[2]
# use the mean values as the offsets
self._offset_accel_x = -1.0 * _sum[0] / _count
self._offset_accel_y = -1.0 * _sum[1] / _count
# self._offset_accel_z = -1.0 * _sum[2] / _count
self._offset_accel_z = -1.0 * ((_sum[2] / _count) - 1.0) # subtract the gravity component
# I would like to store the offset in a hardware register, but its behavior is unclear, so that is on hold.
print("Accel calibration complete")
print(
f'Accel error X: {self._offset_accel_x:.2f} Y: {self._offset_accel_y:.2f} Z: {self._offset_accel_z:.2f}')
# Calibrate the gyroscope.
# Assumes no rotation is occurring about any axis.
def _calib_gyro(self, _count=1000):
print("Gyro calibration start")
_sum = [0, 0, 0]
# take data samples
for _i in range(_count):
_data = self.get_gyro()
_sum[0] += _data[0]
_sum[1] += _data[1]
_sum[2] += _data[2]
# use the mean values as the offsets
self._offset_gyro_x = -1.0 * _sum[0] / _count
self._offset_gyro_y = -1.0 * _sum[1] / _count
self._offset_gyro_z = -1.0 * _sum[2] / _count
# I would like to store the offset in a hardware register, but its behavior is unclear, so that is on hold.
print("Gyro calibration complete")
print(f'Gyro error X: {self._offset_gyro_x:.2f} Y: {self._offset_gyro_y:.2f} Z: {self._offset_gyro_z:.2f}')
# # Calibrate the magnetometer
# # def _calib_mag(self, _count=3000):
#
# def model(param, x):
# return np.array([((xt[0] - param[0]) / param[3]) ** 2 + ((xt[1] - param[1]) / param[4]) ** 2 + (
# (xt[2] - param[2]) / param[5]) ** 2 for xt in x])
#
# # error function
# def residuals(param, x, y):
# return y - model(param, x)
#
# print("Mag calibration start")
# print("Please keep the sensor turning around")
# log = np.array([])
#
# # take data samples
# for _ in range(_count):
# _data = self.get_mag() # read the magnetometer (not the gyro) when calibrating it
# log = np.append(log, [_data]) # np.append returns a new array, so keep the result
#
# x_s = log.T
# y_s = np.array([1.0] * len(x_s))
# predicted_offset = np.array([5.0, 55, -65, 40, 40, 40]) # initial parameter values
# res = least_squares(residuals, predicted_offset, args=(x_s, y_s))
# offset_values = res.x
#
# # use the mean values as the offsets
# self._offset_mag_x = -1.0 * offset_values[0]
# self._offset_mag_y = -1.0 * offset_values[1]
# self._offset_mag_z = -1.0 * offset_values[2]
#
# # I would like to store the offset in a hardware register, but its behavior is unclear, so that is on hold.
# print("Mag calibration complete")
# print(f'Mag error X: {self._offset_mag_x:.2f} Y: {self._offset_mag_y:.2f} Z: {self._offset_mag_z:.2f}')
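# Minimal usage sketch (illustrative only; assumes the sensor is wired to the
# Raspberry Pi I2C bus):
# if __name__ == '__main__':
#     sensor = MPU9250()
#     while True:
#         print(sensor.get_accel(), sensor.get_gyro(), sensor.get_mag())
#         time.sleep(0.1)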
|
{"hexsha": "dec481cfc2c8fff73bad31eb4b2d4fc49b6531bf", "size": 14205, "ext": "py", "lang": "Python", "max_stars_repo_path": "client/attiude-est/mpu9250/mpu9250.py", "max_stars_repo_name": "tetsuzawa/rpi_ahrs", "max_stars_repo_head_hexsha": "5db341073b7c711c33d8854a22535655170d82c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-05T01:23:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-05T01:23:22.000Z", "max_issues_repo_path": "client/attiude-est/mpu9250/mpu9250.py", "max_issues_repo_name": "tetsuzawa/RPi-AHRS-Python", "max_issues_repo_head_hexsha": "5db341073b7c711c33d8854a22535655170d82c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "client/attiude-est/mpu9250/mpu9250.py", "max_forks_repo_name": "tetsuzawa/RPi-AHRS-Python", "max_forks_repo_head_hexsha": "5db341073b7c711c33d8854a22535655170d82c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1858638743, "max_line_length": 119, "alphanum_fraction": 0.5969024991, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5343}
|
"""SK-learn grid-search and test scripts for Logistic Regression models"""
__author__ = "Gabriel Urbain"
__copyright__ = "Copyright 2017, Gabriel Urbain"
__license__ = "MIT"
__version__ = "0.2"
__maintainer__ = "Gabriel Urbain"
__email__ = "gabriel.urbain@ugent.be"
__status__ = "Research"
__date__ = "September 1st, 2017"
import preprocessing
import data
import utils
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from pprint import pprint
import sys
import time
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import make_scorer
from sklearn.model_selection import *
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
DEFAULT_TRAIN_LOCATION = 'Train'
DEFAULT_PRED_LOCATION = "Predictions"
N_PROCESS = 4
def grid_search(clf='lr'):
# Get the data
print "Load data"
dataset = data.load_pickled_data()["train"]
target = preprocessing.create_target(dataset)
# Create the pipe
if clf == "lr":
ft_extractor = preprocessing.create_ft_ct()
classifier = LogisticRegression(verbose=0, penalty='l1')
parameters = {'clf__C': np.logspace(-3, 2, num=15).tolist()}
pipe = Pipeline([('ft', ft_extractor), ('clf', classifier)])
filename = DEFAULT_TRAIN_LOCATION + "/cv_lr_" + utils.timestamp() + ".pkl"
elif clf == "lr_all":
ft_extractor = preprocessing.create_ft_ct_pd_au()
classifier = LogisticRegression(verbose=0, penalty='l1')
parameters = {'clf__C': np.logspace(-3, 2, num=15).tolist()}
pipe = Pipeline([('ft', ft_extractor), ('clf', classifier)])
filename = DEFAULT_TRAIN_LOCATION + "/cv_lr_all_" + utils.timestamp() + ".pkl"
elif clf == "lr_all_svd":
ft_extractor = preprocessing.create_ft_ct()
ft_reductor = TruncatedSVD()
classifier = LogisticRegression(verbose=0, penalty='l1')
parameters = {'clf__C': np.logspace(-5, 0, num=6).tolist(),
'ft_red__n_components': np.logspace(1, 3.3, num=4).astype(int).tolist()}
pipe = Pipeline([('ft', ft_extractor), ('ft_red', ft_reductor), ('clf', classifier)])
filename = DEFAULT_TRAIN_LOCATION + "/cv_lr_all_svd_" + utils.timestamp() + ".pkl"
elif clf == "lr_mixed_svd":
ft_extractor = preprocessing.create_ft_ctsvd_pd_au()
classifier = LogisticRegression(verbose=0, penalty='l1')
parameters = {'clf__C': np.logspace(-2, 2, num=5).tolist(),
'ft__ft_extractor__content__reductor__n_components':
np.logspace(3.7, 1, num=5).astype(int).tolist()}
pipe = Pipeline([('ft', ft_extractor), ('clf', classifier)])
filename = DEFAULT_TRAIN_LOCATION + "/cv_lr_mixed_svd_" + utils.timestamp() + ".pkl"
elif clf == "rf_all":
ft_extractor = preprocessing.create_ft_ctsvd_pd_au()
classifier = RandomForestClassifier()
parameters = {'clf__max_depth': np.logspace(0, 4, num=5).tolist(),
'ft__ft_extractor__content__reductor__n_components':
np.logspace(2.5, 1, num=3).astype(int).tolist()}
pipe = Pipeline([('ft', ft_extractor), ('clf', classifier)])
filename = DEFAULT_TRAIN_LOCATION + "/cv_lr_rf_all_" + utils.timestamp() + ".pkl"
else:
ft_extractor = preprocessing.create_ft_ct()
classifier = LogisticRegression(verbose=0, penalty='l1')
parameters = {'clf__C': np.logspace(-2, 2, num=15).tolist()}
filename = DEFAULT_TRAIN_LOCATION + "/cv_logistic_regression_" + utils.timestamp() + ".pkl"
pipe = Pipeline([('ft', ft_extractor), ('clf', classifier)])
# Create the cross-validation search method
loss = make_scorer(preprocessing.loss_fct, greater_is_better=False)
gs = GridSearchCV(pipe, parameters, n_jobs=N_PROCESS, verbose=2, scoring=loss)
# Run the cross-validation
print "\nPerforming grid search..."
print " Pipeline:", [name for name, _ in pipe.steps]
print " Parameters: ",
pprint(parameters)
print ""
gs.fit(dataset, target)
# Save the results
r = gs.cv_results_
if clf == "lr" or clf == "lr_all":
results = (r['param_clf__C'], -r['mean_train_score'], r['std_train_score'], -r['mean_test_score'],
r['std_test_score'], r['mean_fit_time'], r['std_fit_time'], clf)
elif clf == "lr_all_svd":
results = (r['param_clf__C'], r['param_ft_red__n_components'], -r['mean_train_score'], r['std_train_score'],
-r['mean_test_score'], r['std_test_score'], r['mean_fit_time'], r['std_fit_time'], clf)
elif clf == "lr_mixed_svd":
results = (r['param_clf__C'], r['param_ft__ft_extractor__content__reductor__n_components'],
-r['mean_train_score'], r['std_train_score'], -r['mean_test_score'],
r['std_test_score'], r['mean_fit_time'], r['std_fit_time'], clf)
elif clf == "rf" or clf == "rf_all":
results = (r['param_clf__max_depth'], -r['mean_train_score'], r['std_train_score'], -r['mean_test_score'],
r['std_test_score'], r['mean_fit_time'], r['std_fit_time'], clf)
else:
results = (r['param_clf__C'], -r['mean_train_score'], r['std_train_score'], -r['mean_test_score'],
r['std_test_score'], r['mean_fit_time'], r['std_fit_time'], clf)
utils.dump_pickle(results, filename)
# Print the best individual
print "\nBest score: %0.3f" % -gs.best_score_
print " Best parameters set:"
best_parameters = gs.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
def test():
# 1. Import the features and target
print "1. Import the features and target\n"
feature, target = preprocessing.load_ft(preprocessing.DEFAULT_FT_LOCATION + "/ft_l_std_scaler.pkl")
training_ft, validate_ft, training_target, validate_target = \
train_test_split(feature, target, test_size=preprocessing.VALIDATE_PART, random_state=preprocessing.SEED)
print "Training features size: " + str(training_ft.shape) + \
" and validation features size: " + str(validate_ft.shape)
print "Training target size: " + str(len(training_target)) + \
" and validation target size: " + str(len(validate_target)) + "\n"
# 2. Create the NN
print "2. Create the Linear Classifier Network"
penalty = "l1"
c = 1e-3
clf = LogisticRegression(penalty=penalty, C=c, verbose=1)
classes = np.unique(training_target)
print "\tPenalty: " + str(penalty)
print "\tC: " + str(c)
print "\tClasses: " + str(classes) + "\n"
# 3. Train the Classifier
print "3. Train the Classifier"
t_in = time.time()
clf.fit(training_ft, training_target)
t_training = time.time() - t_in
pred_training = clf.predict(training_ft)
t_in = time.time()
pred_validate = clf.predict(validate_ft)
t_validate = time.time() - t_in
# Compute scores
score_training = preprocessing.loss_fct(training_target, pred_training)
score_validate = preprocessing.loss_fct(validate_target, pred_validate)
print "Score on training set: %0.3f and validation set: %0.3f" % (score_training, score_validate)
print "Time dedicated for training; %0.3fs and for validation: %0.3f" % (t_training, t_validate)
def plot(filename):
results = utils.load_pickle(filename)
method = results[-1]
if method == "lr_all_svd" or method == "lr_mixed_svd" or method == "rf_all":
c = np.unique(np.array(results[0]))
k = np.unique(np.array(results[1]))
sv = np.array(results[4]).reshape(len(c), len(k))
sv_err = np.array(results[5]).reshape(len(c), len(k))
fig, ax1 = plt.subplots()
for ind, i in enumerate(k):
ax1.errorbar(c, sv[:, ind], sv_err[:, ind], label='k: ' + str(i))
ax1.set_ylabel('Score')
ax1.set_xlabel('Regularization parameter C')
ax1.tick_params('y', color=utils.get_style_colors()[0])
plt.xscale('log')
fig.tight_layout()
filename = DEFAULT_TRAIN_LOCATION + "/cv_" + method + ".png"
ax1.legend(loc=0)
plt.savefig(filename, format='png', dpi=300)
plt.close()
else:
x = np.array(results[0])
st = results[1]
st_err = results[2]
sv = results[3]
sv_err = results[4]
tt = results[5]
tt_err = results[6]
fig, ax1 = plt.subplots()
ax1.errorbar(x, st, st_err, color=utils.get_style_colors()[0])
ax1.errorbar(x, sv, sv_err, color=utils.get_style_colors()[1])
if method == "lr" or method == "lr_all":
ax1.set_xlabel('Evolution of regularization parameter C')
ax1.set_ylabel('Score')
ax1.tick_params('y', color=utils.get_style_colors()[0])
ax2 = ax1.twinx()
ax2.errorbar(x, tt, tt_err, color=utils.get_style_colors()[2], linewidth=1.5)
ax2.set_ylabel('Training time (s)', color=utils.get_style_colors()[2])
ax2.tick_params('y', colors=utils.get_style_colors()[2])
ax2.grid(b=False)
plt.xscale('log')
fig.tight_layout()
filename = DEFAULT_TRAIN_LOCATION + "/cv_" + method + ".png"
plt.savefig(filename, format='png', dpi=300)
plt.close()
if __name__ == '__main__':
args = sys.argv
if args[1] == "test":
test()
elif args[1] == "gs":
if len(args) > 2:
grid_search(args[2])
else:
grid_search()
elif args[1] == "plot":
plot(args[2])
else:
print "Option does not exist. Please, check the preprocessing.py file"
|
{"hexsha": "776e1bbd5fcaa0c47efdad01f74ab617d6b14a6b", "size": 9794, "ext": "py", "lang": "Python", "max_stars_repo_path": "linear.py", "max_stars_repo_name": "Gabs48/ML_competition", "max_stars_repo_head_hexsha": "e294b31036f11f2ec65f9971f750855f39167aaf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "linear.py", "max_issues_repo_name": "Gabs48/ML_competition", "max_issues_repo_head_hexsha": "e294b31036f11f2ec65f9971f750855f39167aaf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linear.py", "max_forks_repo_name": "Gabs48/ML_competition", "max_forks_repo_head_hexsha": "e294b31036f11f2ec65f9971f750855f39167aaf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6765957447, "max_line_length": 116, "alphanum_fraction": 0.6402899735, "include": true, "reason": "import numpy", "num_tokens": 2515}
|
[STATEMENT]
lemma loose_bvar1_subst_bvs1'_closeds: "\<not> loose_bvar1 t lev \<Longrightarrow> lev < k \<Longrightarrow> \<forall>x\<in>set us . is_closed x
\<Longrightarrow> \<not> loose_bvar1 (subst_bvs1' t k us) lev"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<not> loose_bvar1 t lev; lev < k; \<forall>x\<in>set us. is_closed x\<rbrakk> \<Longrightarrow> \<not> loose_bvar1 (subst_bvs1' t k us) lev
[PROOF STEP]
by (induction t k us arbitrary: lev rule: subst_bvs1'.induct)
(use is_open_def loose_bvar_iff_exist_loose_bvar1 in \<open>auto simp add: is_open_def\<close>)
|
{"llama_tokens": 235, "file": "Metalogic_ProofChecker_BetaNorm", "length": 1}
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
from setuptools.extension import Extension
from setuptools import dist
dist.Distribution().fetch_build_eggs(["Cython>=0.29", "numpy>=1.18"])
import numpy as np
from Cython.Build import cythonize
with open("README.md") as readme_file:
readme = readme_file.read()
requirements = ["numpy>=1.18", "Cython>=0.29", "matplotlib>=3.3", "antimony>=2.12"]
setup_requirements = ["Cython>=0.29", "numpy>=1.18"]
test_requirements = ["pytest", "pytest-runner", "pytest-benchmark"]
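# Cython extension modules; each one needs the NumPy headers at build time,
# hence np.get_include() in include_dirs.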
ext_modules = [
Extension(
"cayenne.algorithms.direct",
["cayenne/algorithms/direct.pyx"],
include_dirs=[np.get_include()],
),
Extension(
"cayenne.algorithms.tau_leaping",
["cayenne/algorithms/tau_leaping.pyx"],
include_dirs=[np.get_include()],
),
Extension(
"cayenne.algorithms.tau_adaptive",
["cayenne/algorithms/tau_adaptive.pyx"],
include_dirs=[np.get_include()],
),
Extension("cayenne.utils", ["cayenne/utils.pyx"], include_dirs=[np.get_include()]),
]
setup(
author="Dileep Kishore, Srikiran Chandrasekaran",
author_email="k.dileep1994@gmail.com",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
description="Python package for stochastic simulations",
install_requires=requirements + setup_requirements,
license="Apache Software License 2.0",
long_description=readme + "\n\n",
long_description_content_type="text/markdown",
include_package_data=True,
keywords="cayenne stochastic gillepsie simulation",
name="cayenne",
packages=find_packages(exclude=["tests"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/Heuro-labs/cayenne",
version="1.0.3",
zip_safe=False,
ext_modules=cythonize(
ext_modules,
annotate=True,
compiler_directives={"binding": True, "linetrace": False, "profile": False},
),
)
|
{"hexsha": "8a682143f91ba603e0969cf808d7e2be64c77481", "size": 2362, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "Heuro-labs/pyssa", "max_stars_repo_head_hexsha": "d2f368787eeb90e3459d405a0cf769035103433f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-07-18T21:41:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-24T16:34:51.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "Heuro-labs/pyssa", "max_issues_repo_head_hexsha": "d2f368787eeb90e3459d405a0cf769035103433f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 86, "max_issues_repo_issues_event_min_datetime": "2018-12-01T22:45:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-11T21:07:24.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "Heuro-labs/pyssa", "max_forks_repo_head_hexsha": "d2f368787eeb90e3459d405a0cf769035103433f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-15T18:38:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-15T18:38:57.000Z", "avg_line_length": 31.0789473684, "max_line_length": 87, "alphanum_fraction": 0.6689246401, "include": true, "reason": "import numpy", "num_tokens": 573}
|
\documentclass[a4paper, 11 pt, article, accentcolor=tud7b]{tudreport}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{placeins}
\title{CNuVS Exercise 4}
\author{Nils Rollshausen, Daniel Drodt}
\subtitle{Nils Rollshausen, Daniel Drodt}
\begin{document}
\maketitle
\section{Dynamic Source Routing}
\subsection*{a) Packet Types}
Route Request (RREQ) packets are sent by the sender node when a new connection has to be established. They are broadcast (flooded) to the entire network, the expected result being a reply packet containing a route from the sender to the destination node. \medskip \\
Route Reply (RREP) packets are sent from the destination node to the sender as a response to a Route Request packet. In a non-bidirectional network these packets may again be delivered by flooding; otherwise they can be delivered along the established route they contain. \medskip \\
Route Error (RERR) packets are sent from nodes on the route between source and destination to the sender node if there is a problem with the route and a packet cannot be delivered, informing the sender that a new route has to be established.
\subsection*{b) Algorithm}
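In the tables below, RREQ($s$,$d$,$R$) denotes a Route Request from sender $s$ to destination $d$ carrying the accumulated route $R$; RREP($s$,$d$,$R$) is the corresponding Route Reply.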
\begin{table}[h]
\centering
\begin{tabular}{| l | l |}
\hline
Turn 1 & Message \\ \hline
$1 \rightarrow 2$ & RREQ(1,10,\{1,2\}) \\ \hline
\end{tabular}
\caption{Turn 1}
\end{table}
\begin{table}[h]
\centering
\begin{tabular}{| l | l |}
\hline
Turn 2 & Message \\ \hline
$2 \rightarrow 3$ & RREQ(1,10,\{1,2,3\}) \\ \hline
$2 \rightarrow 4$ & RREQ(1,10,\{1,2,4\}) \\ \hline
\end{tabular}
\caption{Turn 2}
\end{table}
\begin{table}[h]
\centering
\begin{tabular}{| l | l |}
\hline
Turn 3 & Message \\ \hline
$3 \rightarrow 7$ & RREQ(1,10,\{1,2,3,7\}) \\ \hline
$4 \rightarrow 5$ & RREQ(1,10,\{1,2,4,5\}) \\ \hline
$4 \rightarrow 6$ & RREQ(1,10,\{1,2,4,6\}) \\ \hline
\end{tabular}
\caption{Turn 3}
\end{table}
\begin{table}[h]
\centering
\begin{tabular}{| l | l |}
\hline
Turn 4 & Message \\ \hline
$7 \rightarrow 8$ & RREQ(1,10,\{1,2,3,7,8\}) \\ \hline
$5 \rightarrow 7$ & RREQ(1,10,\{1,2,4,5,7\}) \\ \hline
$6 \rightarrow 9$ & RREQ(1,10,\{1,2,4,6,9\}) \\ \hline
\end{tabular}
\caption{Turn 4}
\end{table}
\begin{table}[h]
\centering
\begin{tabular}{| l | l |}
\hline
Turn 5 & Message \\ \hline
$8 \rightarrow 9$ & RREQ(1,10,\{1,2,3,7,8,9\}) \\ \hline
$8 \rightarrow 10$ & RREQ(1,10,\{1,2,3,7,8,10\}) \\ \hline
$9 \rightarrow 8$ & RREQ(1,10,\{1,2,4,6,9,8\}) \\ \hline
\end{tabular}
\caption{Turn 5}
\end{table}
\begin{table}[h]
\centering
\begin{tabular}{| l | l |}
\hline
Node & Cache \\ \hline
1 & 1,10,\{1,2,3,7,8,10\}; 1,8,\{1,2,3,7,8\}; 1,7,\{1,2,3,7\}; 1,3,\{1,2,3\}\\ \hline
2 & 2,10,\{2,3,7,8,10\}; 2,8,\{2,3,7,8\}; 2,7,\{2,3,7\}\\ \hline
3 & 3,1,\{3,2,1\}; 3,10,\{3,7,8,10\}; 3,8,\{3,7,8\}\\ \hline
4 & 4,1,\{4,2,1\}\\ \hline
5 & 5,1,\{5,4,2,1\}; 5,2,\{5,4,2\}\\ \hline
6 & 6,1,\{6,4,2,1\}; 6,2,\{6,4,2\}\\ \hline
7 & 7,1,\{7,3,2,1\}; 7,2,\{7,3,2\}; 7,10,\{7,8,10\}\\ \hline
8 & 8,1,\{8,7,3,2,1\}; 8,2,\{8,7,3,2\}; 8,3,\{8,7,3\}\\ \hline
9 & 9,1,\{9,6,4,2,1\}; 9,2,\{9,6,4,2\}; 9,4,\{9,6,4\}\\ \hline
10 & 10,1,\{10,8,7,3,2,1\}; 10,2,\{10,8,7,3,2\}; 10,3,\{10,8,7,3\}; 10,7,\{10,8,7\}\\ \hline
\end{tabular}
\caption{Final Route Cache}
\end{table}
\FloatBarrier
The response sent by node 10 is RREP(10,1,\{1,2,3,7,8,10\}).
\subsection*{c) Resulting Path}
Node 1 would get the path $1 \rightarrow 2 \rightarrow 3 \rightarrow 7 \rightarrow 8 \rightarrow 10$. Other possible paths, such as $1 \rightarrow 2 \rightarrow 4 \rightarrow 5 \rightarrow 7 \rightarrow 8 \rightarrow 10$ or $1 \rightarrow 2 \rightarrow 4 \rightarrow 6 \rightarrow 9 \rightarrow 8 \rightarrow 10$, would be discarded at nodes 7 and 8: those nodes already received a route request for the same route earlier, so the later request cannot be part of an optimal path between origin and destination.
\subsection*{d) Caching}
Node 4 would get a response from Node 2 immediately, as Node 2 already has a route to node 10 in its cache. The resulting route would be $4 \rightarrow 2 \rightarrow 3 \rightarrow 7 \rightarrow 8 \rightarrow 10$.
\subsection*{e) Considerations}
The route obtained from Node 2 is obviously not optimal ($4 \rightarrow 5 \rightarrow 7 \rightarrow 8 \rightarrow 10$ would be shorter). However, the route calculation completed much faster and with less communication overhead, which can make the less optimal route a worthwhile tradeoff.
\section{Overlay Routing}
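For the answers below, the stretch of an overlay path is the ratio of its hop count in the underlay to the hop count of the shortest underlay path:
\[ \mathrm{stretch} = \frac{\text{hops along the overlay path}}{\text{hops along the shortest underlay path}} \]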
\subsection*{a) A to C}
Path: $A \rightarrow C$ \\
Hop Count: 3 \\
Stretch: $3 / 3 = 1$
\subsection*{b) C to E}
Path: $C \rightarrow B \rightarrow E$ \\
Hop Count: 9 \\
Stretch: $9 / 3 = 3$
\subsection*{c) D to E}
Path: $D \rightarrow A \rightarrow C \rightarrow B \rightarrow E$ \\
Hop Count: 16 \\
Stretch: $16 / 2 = 8$
\end{document}
|
{"hexsha": "077f94b9532da6f2cf2f677c133a0a344ea6fba7", "size": 5122, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ex4/ex4.tex", "max_stars_repo_name": "rec0de/CNuVS19", "max_stars_repo_head_hexsha": "52d07fe5c4380af707c63f718aa9533044224a3e", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ex4/ex4.tex", "max_issues_repo_name": "rec0de/CNuVS19", "max_issues_repo_head_hexsha": "52d07fe5c4380af707c63f718aa9533044224a3e", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ex4/ex4.tex", "max_forks_repo_name": "rec0de/CNuVS19", "max_forks_repo_head_hexsha": "52d07fe5c4380af707c63f718aa9533044224a3e", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0992366412, "max_line_length": 507, "alphanum_fraction": 0.6220226474, "num_tokens": 2014}
|
(* Author: Tobias Nipkow *)
theory Hoare_Examples imports Hoare begin
text{* Summing up the first @{text x} natural numbers in variable @{text y}. *}
fun sum :: "int \<Rightarrow> int" where
"sum i = (if i \<le> 0 then 0 else sum (i - 1) + i)"
lemma sum_simps[simp]:
"0 < i \<Longrightarrow> sum i = sum (i - 1) + i"
"i \<le> 0 \<Longrightarrow> sum i = 0"
by(simp_all)
declare sum.simps[simp del]
abbreviation "wsum ==
WHILE Less (N 0) (V ''x'')
DO (''y'' ::= Plus (V ''y'') (V ''x'');;
''x'' ::= Plus (V ''x'') (N (- 1)))"
subsubsection{* Proof by Operational Semantics *}
text{* The behaviour of the loop is proved by induction: *}
lemma while_sum:
"(wsum, s) \<Rightarrow> t \<Longrightarrow> t ''y'' = s ''y'' + sum(s ''x'')"
apply(induction wsum s t rule: big_step_induct)
apply(auto)
done
text{* We were lucky that the proof was automatic, except for the
induction. In general, such proofs will not be so easy. The automation is
partly due to the right inversion rules that we set up as automatic
elimination rules that decompose big-step premises.
Now we prefix the loop with the necessary initialization: *}
lemma sum_via_bigstep:
assumes "(''y'' ::= N 0;; wsum, s) \<Rightarrow> t"
shows "t ''y'' = sum (s ''x'')"
proof -
from assms have "(wsum,s(''y'':=0)) \<Rightarrow> t" by auto
from while_sum[OF this] show ?thesis by simp
qed
subsubsection{* Proof by Hoare Logic *}
text{* Note that we deal with sequences of commands from right to left,
pulling back the postcondition towards the precondition. *}
lemma "\<turnstile> {\<lambda>s. s ''x'' = n} ''y'' ::= N 0;; wsum {\<lambda>s. s ''y'' = sum n}"
apply(rule Seq)
prefer 2
apply(rule While' [where P = "\<lambda>s. (s ''y'' = sum n - sum(s ''x''))"])
apply(rule Seq)
prefer 2
apply(rule Assign)
apply(rule Assign')
apply simp
apply simp
apply(rule Assign')
apply simp
done
text{* The proof is intentionally an apply script because it merely composes
the rules of Hoare logic. Of course, in a few places side conditions have to
be proved. But since those proofs are 1-liners, a structured proof is
overkill. In fact, we shall learn later that the application of the Hoare
rules can be automated completely and all that is left for the user is to
provide the loop invariants and prove the side-conditions. *}
end
|
{"author": "SEL4PROJ", "repo": "jormungand", "sha": "bad97f9817b4034cd705cd295a1f86af880a7631", "save_path": "github-repos/isabelle/SEL4PROJ-jormungand", "path": "github-repos/isabelle/SEL4PROJ-jormungand/jormungand-bad97f9817b4034cd705cd295a1f86af880a7631/case_study/isabelle/src/HOL/IMP/Hoare_Examples.thy"}
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import fxpmath as fxp
from fxpmath.objects import Fxp
from fxpmath import utils
import numpy as np
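# Convention used throughout these tests: Fxp(val, signed, n_word, n_frac)
# builds a fixed-point number, and calling an Fxp object, e.g. x(), returns
# its current value.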
def test_shift_bitwise():
# integer val
x = Fxp(32, True, 8, 0)
# left
assert (x << 1)() == 64
assert (x << 2)() == 128
assert (x << 2).n_word == 9
assert (x << 3)() == 256
assert (x << 10)() == 32*(2**10)
# right
assert (x >> 1)() == 16
assert (x >> 2)() == 8
assert (x >> 3)() == 4
assert (x >> 5)() == 1
assert (x >> 6)() == 0.5
# float val
x = Fxp(24.25, True, 8, 2)
#left
assert (x << 1)() == 48.5
assert (x << 4)() == 388.0
#right
x = Fxp(24.5, True, 8, 2)
assert (x >> 1)() == 12.25
assert (x >> 2)() == 6.125
# negative
x = Fxp(-24.25, True, 8, 2)
#left
assert (x << 1)() == -48.5
assert (x << 4)() == -388.0
#right
x = Fxp(-24.5, True, 8, 2)
assert (x >> 1)() == -12.25
assert (x >> 2)() == -6.125
# trunc shift
# left
x = Fxp(32, True, 8, 0, shifting='trunc')
assert (x << 1)() == 64
assert (x << 2)() == x.upper
# right
assert (x >> 3)() == 4
assert (x >> 5)() == 1
assert (x >> 6)() == 0
# unsigned
x = Fxp(32, False, 8, 0)
# left
assert (x << 1)() == 64
assert (x << 2)() == 128
assert (x << 3)() == 256
assert (x << 3).n_word == 9
assert (x << 10)() == 32*(2**10)
# right
assert (x >> 1)() == 16
assert (x >> 2)() == 8
assert (x >> 3)() == 4
assert (x >> 5)() == 1
assert (x >> 6)() == 0.5
# float val
x = Fxp(24.25, False, 8, 2)
#left
assert (x << 1)() == 48.5
assert (x << 4)() == 388.0
#right
x = Fxp(24.5, False, 8, 2)
assert (x >> 1)() == 12.25
assert (x >> 2)() == 6.125
# trunc left shift
x = Fxp(64, False, 8, 0, shifting='trunc')
assert (x << 1)() == 128
assert (x << 2)() == x.upper
def test_invert():
x = Fxp(None, True, 8, 4)
xu = Fxp(None, False, 8, 4)
x('0b 0010 1100')
y = ~x
assert y.bin() == '11010011'
x('0b0000 0000')
assert (~x).bin() == '11111111'
xu('0b0000 0000')
assert (~xu).bin() == '11111111'
x('0b 1111 1111')
assert (~x).bin() == '00000000'
xu('0b 1111 1111')
assert (~xu).bin() == '00000000'
x('0b 1000 0000')
assert (~x).bin() == '01111111'
xu('0b 1000 0000')
assert (~xu).bin() == '01111111'
x = Fxp(None, True, 32, 0)
xu = Fxp(None, False, 32, 0)
val_str = '10100000111101011100001100110101'
inv_str = '01011111000010100011110011001010'
x('0b'+val_str)
assert (~x).bin() == inv_str
xu('0b'+val_str)
assert (~xu).bin() == inv_str
def test_and():
x = Fxp(None, True, 8, 4)
xu = Fxp(None, False, 8, 4)
y = Fxp(None, True, 8, 4)
yu = Fxp(None, False, 8, 4)
val_str = '00110101'
mks_str = '11110000'
and_str = '00110000'
x('0b'+val_str)
xu('0b'+val_str)
y('0b'+mks_str)
yu('0b'+mks_str)
assert (x & y).bin() == and_str
assert (x & yu).bin() == and_str
assert (xu & y).bin() == and_str
assert (xu & yu).bin() == and_str
assert (x & utils.str2num('0b'+mks_str)).bin() == and_str
assert (xu & utils.str2num('0b'+mks_str)).bin() == and_str
assert (utils.str2num('0b'+mks_str) & x).bin() == and_str
assert (utils.str2num('0b'+mks_str) & xu).bin() == and_str
val_str = '10101100'
mks_str = '11001100'
and_str = '10001100'
x('0b'+val_str)
xu('0b'+val_str)
y('0b'+mks_str)
yu('0b'+mks_str)
assert (x & y).bin() == and_str
assert (x & yu).bin() == and_str
assert (xu & y).bin() == and_str
assert (xu & yu).bin() == and_str
assert (x & utils.str2num('0b'+mks_str)).bin() == and_str
assert (xu & utils.str2num('0b'+mks_str)).bin() == and_str
assert (utils.str2num('0b'+mks_str) & x).bin() == and_str
assert (utils.str2num('0b'+mks_str) & xu).bin() == and_str
def test_or():
x = Fxp(None, True, 8, 4)
xu = Fxp(None, False, 8, 4)
y = Fxp(None, True, 8, 4)
yu = Fxp(None, False, 8, 4)
val_str = '00110101'
mks_str = '11110000'
or_str = '11110101'
x('0b'+val_str)
xu('0b'+val_str)
y('0b'+mks_str)
yu('0b'+mks_str)
assert (x | y).bin() == or_str
assert (x | yu).bin() == or_str
assert (xu | y).bin() == or_str
assert (xu | yu).bin() == or_str
assert (x | utils.str2num('0b'+mks_str)).bin() == or_str
assert (xu | utils.str2num('0b'+mks_str)).bin() == or_str
assert (utils.str2num('0b'+mks_str) | x).bin() == or_str
assert (utils.str2num('0b'+mks_str) | xu).bin() == or_str
val_str = '10101100'
mks_str = '11001100'
or_str = '11101100'
x('0b'+val_str)
xu('0b'+val_str)
y('0b'+mks_str)
yu('0b'+mks_str)
assert (x | y).bin() == or_str
assert (x | yu).bin() == or_str
assert (xu | y).bin() == or_str
assert (xu | yu).bin() == or_str
assert (x | utils.str2num('0b'+mks_str)).bin() == or_str
assert (xu | utils.str2num('0b'+mks_str)).bin() == or_str
assert (utils.str2num('0b'+mks_str) | x).bin() == or_str
assert (utils.str2num('0b'+mks_str) | xu).bin() == or_str
def test_xor():
x = Fxp(None, True, 8, 4)
xu = Fxp(None, False, 8, 4)
y = Fxp(None, True, 8, 4)
yu = Fxp(None, False, 8, 4)
val_str = '00110101'
mks_str = '11110000'
xor_str = '11000101'
x('0b'+val_str)
xu('0b'+val_str)
y('0b'+mks_str)
yu('0b'+mks_str)
assert (x ^ y).bin() == xor_str
assert (x ^ yu).bin() == xor_str
assert (xu ^ y).bin() == xor_str
assert (xu ^ yu).bin() == xor_str
assert (x ^ utils.str2num('0b'+mks_str)).bin() == xor_str
assert (xu ^ utils.str2num('0b'+mks_str)).bin() == xor_str
assert (utils.str2num('0b'+mks_str) ^ x).bin() == xor_str
assert (utils.str2num('0b'+mks_str) ^ xu).bin() == xor_str
val_str = '10101100'
mks_str = '11001100'
xor_str = '01100000'
x('0b'+val_str)
xu('0b'+val_str)
y('0b'+mks_str)
yu('0b'+mks_str)
assert (x ^ y).bin() == xor_str
assert (x ^ yu).bin() == xor_str
assert (xu ^ y).bin() == xor_str
assert (xu ^ yu).bin() == xor_str
assert (x ^ utils.str2num('0b'+mks_str)).bin() == xor_str
assert (xu ^ utils.str2num('0b'+mks_str)).bin() == xor_str
assert (utils.str2num('0b'+mks_str) ^ x).bin() == xor_str
assert (utils.str2num('0b'+mks_str) ^ xu).bin() == xor_str
def test_arrays():
x = Fxp(None, True, 8, 4)
y = Fxp(None, True, 8, 4)
x(['0b00110101', '0b10101100'])
y('0b11110000')
z = x & y
assert z.bin()[0] == '00110000'
assert z.bin()[1] == '10100000'
def test_operations_with_combinations():
v = [-256, -64, -16, -4.75, -3.75, -3.25, -1, -0.75, -0.125, 0.0, 0.125, 0.75, 1, 1.5, 3.75, 4.0, 8.0, 32, 128]
for i in range(len(v)):
for j in range(len(v)):
vx, vy = v[i], v[j]
x = Fxp(vx)
y = Fxp(vy)
assert (vx + vy) == (x + y)()
assert (vy + vx) == (y + x)()
assert (vx - vy) == (x - y)()
assert -(vy - vx) == -(y - x)()
assert (vx * vy) == (x * y)()
assert (vy * vx) == (y * x)()
v = [-256, -64, -16, -4.75, -4.25, -1, -0.75, -0.125, 0.125, 0.75, 1, 1.5, 2.75, 4.0, 8.0, 32, 128]
d = [-256, -64, -16, -1, -0.5, -0.125, 0.125, 0.5, 1, 2, 4.0, 8.0, 32, 128]
for i in range(len(v)):
for j in range(len(d)):
vx, vy = v[i], d[j]
x = Fxp(vx)
y = Fxp(vy)
assert (vx / vy) == (x / y)()
assert (vx // vy) == (x // y)()
assert (vx % vy) == (x % y)()
def test_operations_with_constants_with_combinations():
v = [-256, -64, -16, -4.75, -3.75, -3.25, -1, -0.75, -0.125, 0.0, 0.125, 0.75, 1, 1.5, 3.75, 4.0, 8.0, 32, 128]
for i in range(len(v)):
for j in range(len(v)):
vx, vy = v[i], v[j]
x = Fxp(vx, True, 16, 3)
y = Fxp(vy, True, 16, 3)
assert (x + vy)() == (vx + vy) == (vx + y)() == (x + y)()
assert (vy + x)() == (vy + vx) == (y + vx)() == (y + x)()
assert (x - vy)() == (vx - vy) == (vx - y)() == (x - y)()
assert -(vy - x)() == -(vy - vx) == -(y - vx)() == -(y - x)()
for i in range(len(v)):
for j in range(len(v)):
vx, vy = v[i], v[j]
x = Fxp(vx, True, 24, 6)
y = Fxp(vy, True, 24, 6)
assert (x * vy)() == (vx * vy) == (vx * y)() == (x * y)()
assert (vy * x)() == (vy * vx) == (y * vx)() == (y * x)()
v = [-256, -64, -16, -4.75, -4.25, -1, -0.75, -0.125, 0.125, 0.75, 1, 1.5, 2.75, 4.0, 8.0, 32, 128]
d = [-256, -64, -16, -1, -0.5, -0.125, 0.125, 0.5, 1, 2, 4.0, 8.0, 32, 128]
for i in range(len(v)):
for j in range(len(d)):
vx, vy = v[i], d[j]
x = Fxp(vx, True, 32, 12)
y = Fxp(vy, True, 32, 12)
assert (x / vy)() == (vx / vy) == (vx / y)() == (x / y)()
# assert (vy / x)() == (vy / vx) == (y / vx)() == (y / x)()
assert (x // vy)() == (vx // vy) == (vx // y)() == (x // y)()
# assert (vy // x)() == (vy // vx) == (y // vx)() == (y // x)()
assert (x % vy)() == (vx % vy) == (vx % y)() == (x % y)()
# assert (vy % x)() == (vy % vx) == (y % vx)() == (y % x)()
def test_pow():
x = Fxp(16, True, n_int=14, n_frac=8)
n = Fxp(-1, True, n_int=14, n_frac=8)
assert(x**n)() == 1/16
v = 15
n_vals = [0, 1, 2, 3]
x = Fxp(v, signed=True, n_int=12, n_frac=0)
xu = Fxp(v, signed=False, n_int=12, n_frac=0)
for n in n_vals:
assert (x**n)() == v**n
assert (xu**n)() == v**n
v = -16
x = Fxp(v, signed=True, n_int=12, n_frac=0)
for n in n_vals:
assert (x**n)() == v**n
v = 16.0
n_vals = [-2, -1, 0, 1, 2, 3]
x = Fxp(v, signed=True, n_int=14, n_frac=8)
# xu = Fxp(v, signed=False, n_int=12, n_frac=0)
for n in n_vals:
assert (x**n)() == v**n
# assert (xu**n)() == v**n
v = -16.0
x = Fxp(v, signed=True, n_int=14, n_frac=8)
for n in n_vals:
assert (x**n)() == (v)**n
v = 81
n_vals = [0, 0.25, 0.5]
x = Fxp(v, signed=True, n_int=14, n_frac=8)
xu = Fxp(v, signed=False, n_int=14, n_frac=8)
for n in n_vals:
assert (x**n)() == v**n
assert (xu**n)() == v**n
v = 16.
n = 2
v_vals = [-4, -2, -1, 0, 1, 2, 4]
n_vals = [-2, -1, 0, 1, 2]
x = Fxp(v, signed=True, n_int=12, n_frac=0)
xu = Fxp(v, signed=False, n_int=12, n_frac=0)
p = Fxp(n, signed=True, n_int=8, n_frac=0)
assert ((x**p)() == np.power(v, n)).all()
assert ((xu**p)() == np.power(v, n)).all()
x = Fxp(v, signed=True, n_int=12, n_frac=8)
p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=0)
x.config.op_sizing = 'same'
assert ((x**p_vals)() == np.power(v, n_vals)).all()
p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=0)
assert ((x**p_vals)() == np.power(v, n_vals)).all()
x_vals = Fxp(v_vals, signed=True, n_int=12, n_frac=8)
p = Fxp(n, signed=True, n_int=8, n_frac=0)
x_vals.config.op_sizing = 'same'
assert ((x_vals**p)() == np.power(v_vals, n)).all()
p = Fxp(n, signed=True, n_int=8, n_frac=2)
assert ((x_vals**p)() == np.power(v_vals, n)).all()
v_vals = [-1, 1, 2, 3, 4]
n_vals = [-2, -1, 0, 1, 2]
x_vals = Fxp(v_vals, signed=True, n_int=12, n_frac=8)
p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=0)
x_vals.config.op_sizing = 'same'
assert ((x_vals**p_vals)() == np.array([vi**ni for vi, ni in zip(v_vals, n_vals)])).all()
p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=2)
assert ((x_vals**p_vals)() == np.array([vi**ni for vi, ni in zip(v_vals, n_vals)])).all()
v_vals = [[1, 2],[3, 4]]
n_vals = [[1, 2],[3, 4]]
x_vals = Fxp(v_vals, signed=True, n_int=12, n_frac=8)
p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=0)
x_vals.config.op_sizing = 'same'
assert ((x_vals**p_vals)() == np.power(v_vals, n_vals)).all()
def test_scaled():
x = Fxp(10.5, True, 16, 8, scale=2, bias=1)
assert x() == 10.5
assert x + 2 == 12.5
assert x - 2.5 == 8.0
assert x * 3 == 31.5
assert x / 2 == 5.25
|
{"hexsha": "a0d6f0683b2ef6c0c1059b0213f13a62e8f01e67", "size": 12423, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_operators.py", "max_stars_repo_name": "NJDFan/fxpmath", "max_stars_repo_head_hexsha": "a4d67e421c351c3901d62e22c60a5c81d427811d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 97, "max_stars_repo_stars_event_min_datetime": "2020-06-08T13:09:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T23:15:56.000Z", "max_issues_repo_path": "tests/test_operators.py", "max_issues_repo_name": "NJDFan/fxpmath", "max_issues_repo_head_hexsha": "a4d67e421c351c3901d62e22c60a5c81d427811d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 48, "max_issues_repo_issues_event_min_datetime": "2020-06-08T15:12:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T13:40:29.000Z", "max_forks_repo_path": "tests/test_operators.py", "max_forks_repo_name": "NJDFan/fxpmath", "max_forks_repo_head_hexsha": "a4d67e421c351c3901d62e22c60a5c81d427811d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2020-05-20T15:30:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T23:46:13.000Z", "avg_line_length": 29.3687943262, "max_line_length": 115, "alphanum_fraction": 0.4853900024, "include": true, "reason": "import numpy", "num_tokens": 5164}
|
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2014 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import uuid
import os.path
from numpy.random import RandomState
from osgeo import gdal
from eoxserver.core import Component, implements
from eoxserver.services.ows.wps.exceptions import ExecuteError
from eoxserver.services.ows.wps.interfaces import ProcessInterface
from eoxserver.services.ows.wps.parameters import (
LiteralData, ComplexData, CDFile, CDByteBuffer,
FormatBinaryRaw, FormatBinaryBase64, AllowedRange,
)
try:
# Python 2
xrange
except NameError:
# Python 3, xrange is now named range
xrange = range
class TestProcess03(Component):
""" Test process generating binary complex data output. """
implements(ProcessInterface)
identifier = "TC03:image_generator:complex"
title = "Test Case 03: Complex data binary output with format selection."
description = (
"Test process generating binary complex data output (an image)."
)
metadata = {"test-metadata": "http://www.metadata.com/test-metadata"}
profiles = ["test_profile"]
inputs = [
("method", LiteralData(
'TC03:method', str, optional=True,
title="Complex data output passing method.",
abstract=(
"This option controls the method how the complex data output "
"payload is passed from process code."
),
allowed_values=('in-memory-buffer', 'file'), default='file',
)),
("seed", LiteralData(
'TC03:seed', int, optional=True, title="Random generator seed.",
abstract=(
"Optional random generator seed that can be used to obtain "
"reproducible random-generated result."
),
allowed_values=AllowedRange(0, None, dtype=int),
)),
]
outputs = [
("output", ComplexData(
'TC03:output00', title="Test case #03: Complex output #00",
abstract="Binary complex data output (random-generated image).",
formats=(
FormatBinaryRaw('image/png'),
FormatBinaryBase64('image/png'),
FormatBinaryRaw('image/jpeg'),
FormatBinaryBase64('image/jpeg'),
FormatBinaryRaw('image/tiff'),
FormatBinaryBase64('image/tiff'),
)
)),
]
# NOTE:
# The output complex data format has to be handled by the processes
# itself and the format selection has to be passed to the 'execute'
# subroutine. The output complex data format selection is passed
# to the process as an additional input argument - a simple dictionary
# with the 'mime_type', 'encoding', 'schema', and 'as_reference' keywords.
# In case no format is selected by the user, this format selection
# is set to the default format. The name of the input argument holding
# the format selection is controlled by the process output definition.
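# For illustration only (hypothetical values), such a format selection
# could look like:
#     {'mime_type': 'image/png', 'encoding': None,
#      'schema': None, 'as_reference': False}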
@staticmethod
def execute(method, seed, output):
# size of the output image
size_x, size_y = (768, 512)
# output format selection
if output['mime_type'] == "image/png":
extension = ".png"
driver = gdal.GetDriverByName("PNG")
options = []
elif output['mime_type'] == "image/jpeg":
extension = ".jpg"
driver = gdal.GetDriverByName("JPEG")
options = []
elif output['mime_type'] == "image/tiff":
extension = ".tif"
driver = gdal.GetDriverByName("GTiff")
options = ["TILED=YES", "COMPRESS=DEFLATE", "PHOTOMETRIC=RGB"]
else:
raise ExecuteError("Unexpected output format received! %r" % output)
# generate a random in-memory GDAL dataset
mem_driver = gdal.GetDriverByName("MEM")
mem_ds = mem_driver.Create("", size_x, size_y, 3, gdal.GDT_Byte)
random_state = RandomState(seed)
for i in xrange(3):
mem_ds.GetRasterBand(i+1).WriteArray(
(256.0 * random_state.rand(size_y, size_x)).astype('uint8')
)
# convert in-memory dataset to the desired output
tmp_filename = os.path.join("/tmp", str(uuid.uuid4()) + extension)
output_filename = "test03_binary_complex" + extension
try:
driver.CreateCopy(tmp_filename, mem_ds, 0, options)
del mem_ds
if method == 'file':
# Return object as a temporary Complex Data File.
# Note that the object holds the format attributes!
# The 'filename' parameter sets the raw output
# 'Content-Disposition: filename=' HTTP header.
return CDFile(tmp_filename, filename=output_filename, **output)
elif method == 'in-memory-buffer':
# Return object as an in-memory Complex Data Buffer.
# Note that the object holds the format attributes!
# The 'filename' parameter sets the raw output
# 'Content-Disposition: filename=' HTTP header.
with open(tmp_filename, 'rb') as fid:
_output = CDByteBuffer(
fid.read(), filename=output_filename, **output
)
os.remove(tmp_filename)
return _output
except:
# make sure no temporary file is left in case of an exception
if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
raise
|
{"hexsha": "aebf213d07dfeeb062ead02469991706dd25f64d", "size": 6954, "ext": "py", "lang": "Python", "max_stars_repo_path": "autotest/autotest_services/processes/test03_binary_complex.py", "max_stars_repo_name": "kalxas/eoxserver", "max_stars_repo_head_hexsha": "8073447d926f3833923bde7b7061e8a1658dee06", "max_stars_repo_licenses": ["OML"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2015-08-10T19:34:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-05T08:28:01.000Z", "max_issues_repo_path": "autotest/autotest_services/processes/test03_binary_complex.py", "max_issues_repo_name": "kalxas/eoxserver", "max_issues_repo_head_hexsha": "8073447d926f3833923bde7b7061e8a1658dee06", "max_issues_repo_licenses": ["OML"], "max_issues_count": 153, "max_issues_repo_issues_event_min_datetime": "2015-01-20T08:35:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T11:00:56.000Z", "max_forks_repo_path": "autotest/autotest_services/processes/test03_binary_complex.py", "max_forks_repo_name": "kalxas/eoxserver", "max_forks_repo_head_hexsha": "8073447d926f3833923bde7b7061e8a1658dee06", "max_forks_repo_licenses": ["OML"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-01-23T15:48:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-21T15:41:18.000Z", "avg_line_length": 42.4024390244, "max_line_length": 80, "alphanum_fraction": 0.6075639919, "include": true, "reason": "from numpy", "num_tokens": 1428}
|
MODULE fockd2_I
INTERFACE
!...Generated by Pacific-Sierra Research 77to90 4.4G 12:41:19 03/10/06
SUBROUTINE fockd2 (F, PTOT, P, W, LMW, WJ, WK, NUMAT, NFIRST, NLAST, NW)
USE vast_kind_param,ONLY: DOUBLE
integer, INTENT(IN) :: LMW, NUMAT
real(DOUBLE), DIMENSION(*), INTENT(INOUT) :: F
real(DOUBLE), DIMENSION(*), INTENT(IN) :: PTOT
real(DOUBLE), DIMENSION(*), INTENT(IN) :: P
real(DOUBLE), DIMENSION(LMW,LMW), INTENT(IN) :: W
real(DOUBLE), DIMENSION(*), INTENT(IN) :: WJ
real(DOUBLE), DIMENSION(*), INTENT(IN) :: WK
integer, DIMENSION(NUMAT), INTENT(IN) :: NFIRST
integer, DIMENSION(NUMAT), INTENT(IN) :: NLAST
integer, DIMENSION(NUMAT), INTENT(IN) :: NW
END SUBROUTINE
END INTERFACE
END MODULE
|
{"hexsha": "331c09d621a5d7912fccbd9c609d5956e5bbaaff", "size": 844, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "2006_MOPAC7.1/src_interfaces/fockd2_I.f90", "max_stars_repo_name": "openmopac/MOPAC-archive", "max_stars_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-16T20:53:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T20:54:11.000Z", "max_issues_repo_path": "2006_MOPAC7.1/src_interfaces/fockd2_I.f90", "max_issues_repo_name": "openmopac/MOPAC-archive", "max_issues_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2006_MOPAC7.1/src_interfaces/fockd2_I.f90", "max_forks_repo_name": "openmopac/MOPAC-archive", "max_forks_repo_head_hexsha": "01510e44246de34a991529297a10bcf831336038", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4210526316, "max_line_length": 80, "alphanum_fraction": 0.5817535545, "num_tokens": 277}
|
"""
The code is adapted from the book Deep Reinforcement Learning Hands-On by Max Lapan
link: https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On
"""
import enum
import numpy as np
import globalvars
from utils import utils
np.random.seed(globalvars.GLOBAL_SEED)
class Actions(enum.Enum):
Skip = 0
Increase = 1
D_Increase = 2
Decrease = 3
D_Decrease = 4
class State:
def __init__(self, server, **kwargs):
"""
:param server: environment wrapper exposing a serverpool to monitor and scale
:param kwargs: optional 'bars_count' (history length) and 'FIS' (fuzzy inference system) entries
"""
if 'bars_count' in kwargs:
bars_count = kwargs['bars_count']
else:
bars_count = globalvars.DEFAULT_BAR_COUNTS
if 'FIS' in kwargs:
FIS = kwargs['FIS']
else:
FIS = None
self.server = server
self.bars_count = bars_count
self.workloads = []
self.buffers = []
self.nb_instances = []
self.rates = []
self.rewards = []
print('Initializing State with bar count=',
self.bars_count)
if FIS is not None:
self.fis = FIS
print('Initializing State with Fuzzy')
else:
self.fis = None
def reset(self):
"""
:return: The first state
"""
self.workloads.clear()
self.buffers.clear()
self.nb_instances.clear()
self.server.serverpool.reset()
self.rewards.clear()
self.rates.clear()
nb_instance = self.server.serverpool.n
means = []
for i in range(10 + self.bars_count):
rate = self.server.serverpool.work(nb_instance)
result = self._monitoring(rate)
means.append(rate)
if result is None:
break
reward = np.mean(means[-1:])
reward = self._buffer_reward(reward,
nb_instance,
Actions.Skip)
self.rewards.append(reward)
return self.encode()
def _monitoring(self, rate):
"""
:param rate: the rate to record
:return:
"""
state = self.server.serverpool.monitoring()
if state is None:
return None
w, b, n = state
self.workloads.append(w)
self.buffers.append(b)
self.nb_instances.append(n)
self.rates.append(rate)
return 0
@property
def shape(self):
"""
:return: the shape of the state
"""
if self.fis is not None:
return (self.bars_count*self.fis.rules.get_number_of_rules(), )
else:
return (2 * self.bars_count, )
def encode(self):
"""
Convert current state into numpy array.
"""
res = np.ndarray(shape=(2*self.bars_count,),
dtype=np.float32)
shift = 0
for bar_idx in range(-self.bars_count, 0):
res[shift] = self.workloads[bar_idx]
shift += 1
res[shift] = self.rates[bar_idx]
shift += 1
if self.fis is not None:
vals = self.to_fuzzy(res)
vals = vals.flatten()
return vals
else:
return res
def to_fuzzy(self, res):
"""
:param res: the input variables
:return: fuzzified input variables
"""
vals = []
for i in range(self.bars_count):
state = res[2*i:2*(i+1)]
truth_vals = self.fis.get_truth_values(state)
vals.append(truth_vals)
return np.array(vals)
def step(self, action):
"""
:param action: the action to apply to the environment
:return: the reward and the done flag
"""
assert isinstance(action, Actions)
done = False
nb_instance = self.server.serverpool.n
if action == Actions.Skip:
nb_instance = self.server.serverpool.n
elif action == Actions.Increase:
nb_instance += 1
elif action == Actions.Decrease:
nb_instance -= 1
elif action == Actions.D_Increase:
nb_instance += 2
elif action == Actions.D_Decrease:
nb_instance -= 2
if nb_instance > self.server.serverpool.maximum_instances:
nb_instance = self.server.serverpool.maximum_instances
if nb_instance <= 0:
nb_instance = 1
means = []
for i in range(10 + self.bars_count):
rate = self.server.serverpool.work(nb_instance)
result = self._monitoring(rate)
if result is None:
done = True
break
means.append(rate)
if len(means) > 0:
reward = np.mean(means[-1:])
reward = self._buffer_reward(ht=reward,
nb_instance=nb_instance,
action=action)
else:
reward = 0.
self.rewards.append(reward)
return reward, done
def _buffer_reward(self, ht, nb_instance, action):
"""
Calculating reward
the purpose is to keep buffer in the safe zone
from (lo_threshold to hi_threshold)
reward value is in [-1, 1]
-1 -----> 0 ----> 1 ---> 0 ---> -1
max-----> hi----> mid--->low--->min = 0
results = w1*BU(t) + w2*(VM(t)/VM_max)
:param ht: the current buffer value
:param nb_instance: number of current instance
:param action: action have been taken
:return: value of reward
"""
max_threshold = 100.
hi_threshold = 70.
mid_threshold = 50.
lo_threshold = 30.
safe_zone = (hi_threshold - lo_threshold)/2
w1, w2 = 0.8, 0.2
if ht > hi_threshold:
reward = -(ht-hi_threshold)/(max_threshold-hi_threshold)
elif ht < lo_threshold:
reward = -(lo_threshold-ht)/lo_threshold
reward = w1*reward + w2*(1-nb_instance/self.server.serverpool.maximum_instances)
else:
if ht >= mid_threshold:
reward = (hi_threshold-ht)/safe_zone
else:
reward = (ht-lo_threshold)/safe_zone
reward = w1*reward + w2*(1-nb_instance/self.server.serverpool.maximum_instances)
return reward
class State1D(State):
"""
State with shape suitable for 1D convolution
"""
@property
def shape(self):
if self.fis is not None:
return (self.fis.rules.get_number_of_rules(),
self.bars_count)
else:
return 2, self.bars_count
def encode(self):
"""
Encode the data as a (2, bars_count) array, or as fuzzified data
:return: the state of the environment
"""
res = np.zeros(shape=(2, self.bars_count),
dtype=np.float32)
ofs = self.bars_count
N = 5
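# Take ofs + N - 1 samples so the length-N running mean yields exactly
# ofs values (assuming utils.running_mean returns len(x) - N + 1 points).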
if self.fis is not None:
x = self.workloads[-ofs - N + 1:]
res[0] = utils.running_mean(x, N)
x = self.rates[-ofs - N + 1:]
res[1] = utils.running_mean(x, N)
# res[2] = self.nb_instances[-ofs:]
vals = self.to_fuzzy(res)
return vals.transpose()
else:
res[0] = self.workloads[-ofs:]
res[1] = self.rates[-ofs:]
# res[2] = self.nb_instances[-ofs:]
return res
def to_fuzzy(self, res):
rows, cols = res.shape
vals = []
for i in range(cols):
state = (res[0, i], res[1, i]) # , res[2, i])
truth_vals = self.fis.get_truth_values(state)
vals.append(truth_vals)
return np.array(vals)
|
{"hexsha": "5f3f52b61b1d17f4217865929ba44e7eececfa65", "size": 7750, "ext": "py", "lang": "Python", "max_stars_repo_path": "envs/states.py", "max_stars_repo_name": "doandongnguyen/autoscaling", "max_stars_repo_head_hexsha": "f5e604bbacedbf352bd301038276c49767c18dd6", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-06-04T08:17:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T07:23:37.000Z", "max_issues_repo_path": "envs/states.py", "max_issues_repo_name": "doandongnguyen/autoscaling", "max_issues_repo_head_hexsha": "f5e604bbacedbf352bd301038276c49767c18dd6", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "envs/states.py", "max_forks_repo_name": "doandongnguyen/autoscaling", "max_forks_repo_head_hexsha": "f5e604bbacedbf352bd301038276c49767c18dd6", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-11-21T21:25:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-05T08:00:15.000Z", "avg_line_length": 31.0, "max_line_length": 93, "alphanum_fraction": 0.5294193548, "include": true, "reason": "import numpy", "num_tokens": 1791}
|
%------------------------------------------%
% Cannabis Data Science
% Date: 3/9/2022
%------------------------------------------%
\documentclass[xcolor={dvipsnames}]{beamer}
\hypersetup{pdfpagemode = FullScreen}
\mode<presentation>{
\usetheme{Boadilla}
\usecolortheme{orchid}
\usefonttheme{default}
\setbeamertemplate{navigation symbols}{}
\setbeamertemplate{caption}[numbered]
}
\setbeamersize{
text margin left = 0.5in,
text margin right = 0.5in
}
%------------------------------------------%
% Title
%------------------------------------------%
\title[\textbf{Cannabis Data Science \#56}]{}
\author{Cannabis Data Science}
\institute[]{\Large Cannabis Data Science \#56}
\date{March \nth{9}, 2022}
%------------------------------------------%
% Packages
%------------------------------------------%
\usepackage[english]{babel}
\usepackage[utf8x]{inputenc}
\usepackage{tikz} % For styling.
\usepackage{xparse}
%------------------------------------------%
% Colors
%------------------------------------------%
\definecolor{Green}{RGB}{34, 153, 84}
\definecolor{LightGreen}{RGB}{218, 247, 166}
\definecolor{DarkGreen}{RGB}{2, 48, 32}
\definecolor{Orange}{RGB}{255, 87, 51}
\definecolor{DarkOrange}{RGB}{199, 0, 57}
\definecolor{Yellow}{RGB}{255, 195, 0}
%------------------------------------------%
% Theme
%------------------------------------------%
\setbeamercolor*{palette primary}{bg=LightGreen, fg=DarkGreen}
\setbeamercolor*{palette secondary}{bg=LightGreen, fg=DarkGreen}
\setbeamercolor*{palette tertiary}{bg=LightGreen, fg=DarkGreen}
%------------------------------------------%
% Packages
%------------------------------------------%
\usepackage{amsmath}
\renewcommand*\footnoterule{} % No separating line on footnote.
\usepackage{mathtools} % For annotating equations.
\usepackage{hhline} % for double bars.
\usepackage[super]{nth} % For formatting 1st, 2nd, 3rd, etc.
\usepackage{graphicx, caption, subcaption}
%------------------------------------------%
% Commands
%------------------------------------------%
% Top space.
\newcommand\T{\rule{0pt}{2.5ex}}
% Bottom space.
\newcommand\B{\rule[-1.25ex]{0pt}{0pt}}
% Blocks.
\newenvironment<>{Block}[2][.9\textwidth]
{\setlength{\textwidth}{#1}
\begin{actionenv}#3
\def\insertblocktitle{#2}\par
\usebeamertemplate{block begin}}
{\par\usebeamertemplate{block end}
\end{actionenv}}
% Balls.
\defbeamertemplate{enumerate item}{largeball}
{\begin{pgfpicture}{-1ex}{-0.65ex}{1.5ex}{1.5ex}
\usebeamercolor[fg]{item projected}
{\pgftransformscale{2.5}\pgftext{\Large\pgfuseshading{bigsphere}}}
{\pgftransformshift{\pgfpoint{0pt}{0.5pt}}
\pgftext{\usebeamerfont*{item projected}\small\insertenumlabel}}
\end{pgfpicture}}
% Fancy arrows.
\NewDocumentCommand\UpArrow{O{2.0ex} O{black}}{%
\mathrel{\tikz[baseline] \draw [->, line width=0.5pt, #2] (0,0) -- ++(0,#1);}} % Fancy up-arrow.
\NewDocumentCommand\DownArrow{O{2.0ex} O{black}}{%
\mathrel{\tikz[baseline] \draw [<-, line width=0.5pt, #2] (0,0) -- ++(0,#1);}} % Fancy down-arrow.
% Equations with numbers on the left.
\makeatletter
\newcommand{\LeftEqNo}{\let\veqno\@@leqno}
\makeatother
%------------------------------------------%
% Presentation
%------------------------------------------%
\begin{document}
% Title page.
\begin{frame}{}
\includegraphics[scale=0.33]{images/logo.pdf}
\vspace*{-2\baselineskip}
\titlepage
% TODO: Add flare to title page?
% Background
%\tikz[remember picture, overlay]
%\node[opacity=1.0, inner sep=0pt] at (current page.center){
% \includegraphics[width=\paperwidth, height=\paperheight]{images/cover.pdf}
%};
\end{frame}
%------------------------------------------%
% Introduction
%------------------------------------------%
\section{Introduction}
\begin{frame}{}
% Question of the day
\begin{center}
\begin{minipage}{.9\linewidth}
\begin{Block}{Yield is the name of the game.}
\vspace{.5\baselineskip}
\begin{itemize}
\item Does the amount a producer produces affect the number of periods it has operated or whether it has exited?
\end{itemize}
\vspace{.5\baselineskip}
\end{Block}
\end{minipage}
\end{center}
\end{frame}
%------------------------------------------%
% Yield
%------------------------------------------%
\begin{frame}{}
{\large \textbf{Yield}}\vspace{\baselineskip}\\
\end{frame}
%------------------------------------------%
% Consumption Rates
%------------------------------------------%
\begin{frame}{}
{\large \textbf{Consumption Rates}}\vspace{\baselineskip}\\
\end{frame}
%------------------------------------------%
% Natural Language Processing
%------------------------------------------%
\begin{frame}{}
{\large \textbf{Natural Language Processing}}\vspace{\baselineskip}\\
%Machine learning
%
%Artificial intelligence
\end{frame}
%------------------------------------------%
% Natural Language Processing with SpaCy
%------------------------------------------%
%\begin{frame}{}
%
%{\large \textbf{Natural Language Processing with SpaCy}}\vspace{\baselineskip}\\
%
%\begin{itemize}
%
%\item {\bfseries EntityRecognizer} -- This component is referred to as ner. It is responsible for identifying named entities and assigning labels to them.
%
%\item {\bfseries EntityRuler} -- This component is called *entity_ruler*. It is responsible for assigning named entities based on pattern rules. Revisit Rule Based Matching to know more.
%
%\end{itemize}
%
%\end{frame}
%Tokenizer : It is responsible for segmenting the text into tokens and returning a Doc object. This is the first and compulsory step in a pipeline.
%Tagger : It is responsible for assigning Part-of-speech tags. It takes a Doc as input and creates Doc[i].tag
%DependencyParser : It is known as parser. It is responsible for assigning the dependency tags to each token. It takes a Doc as input and returns the processed Doc
%EntityRecognizer : This component is referred to as ner. It is responsible for identifying named entities and assigning labels to them.
%TextCategorizer : This component is called textcat. It will assign categories to Docs.
%EntityRuler : This component is called *entity_ruler*. It is responsible for assigning named entities based on pattern rules. Revisit Rule Based Matching to know more.
%Sentencizer : This component is called **sentencizer** and can perform rule based sentence segmentation.
%merge_noun_chunks : It is called merge_noun_chunks. This component is responsible for merging all noun chunks into a single token. It has to be added in the pipeline after the tagger and parser.
%merge_entities : It is called merge_entities. This component can merge all entities into a single token. It has to be added after the ner.
%merge_subtokens : It is called merge_subtokens. This component can merge the subtokens into a single token.
%------------------------------------------%
% Takeaway
%------------------------------------------%
\section{Takeaway}
\begin{frame}{}
\begin{center}
\begin{minipage}{3.85in}
% Thank you.
\includegraphics[width=.25in]{images/prayer.png} {\Large \textbf{Thank you for coming.}}\\
\begin{center}
\begin{minipage}{.9\linewidth}
\begin{Block}{Lesson of the Day}
\vspace{0.5\baselineskip}
\begin{itemize}
\item Yield matters.
\vspace{0.5\baselineskip}
\end{itemize}
\end{Block}
\end{minipage}
\end{center}
\vfill
\end{minipage}
\end{center}
\end{frame}
%------------------------------------------%
% Fin.
%------------------------------------------%
\end{document}
|
{"hexsha": "0919d3811adc54397f508b5e6a92c735578773e8", "size": 7458, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "2022-03-09/presentation/presentation.tex", "max_stars_repo_name": "GrahamAnto/cannabis-data-science", "max_stars_repo_head_hexsha": "1d5f3085e7b2858b6791840b90335be4669268b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-10T12:37:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T12:37:02.000Z", "max_issues_repo_path": "2022-03-09/presentation/presentation.tex", "max_issues_repo_name": "GrahamAnto/cannabis-data-science", "max_issues_repo_head_hexsha": "1d5f3085e7b2858b6791840b90335be4669268b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2022-03-09/presentation/presentation.tex", "max_forks_repo_name": "GrahamAnto/cannabis-data-science", "max_forks_repo_head_hexsha": "1d5f3085e7b2858b6791840b90335be4669268b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3622047244, "max_line_length": 187, "alphanum_fraction": 0.6063287745, "num_tokens": 1986}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module to run a model of a parsec application.
It is possible to define the number of threads used to execute a model
in a fast way; the model function representing the application should be
provided by the user in a Python module file. It is also possible to
provide an overhead function to integrate into the model.
usage: parsecpy_runmodel [-h] --config CONFIG
                         [-p PARSECPYDATAFILEPATH] [-f FREQUENCIES]
                         [-n PROBLEMSIZES] [-s SERIALFREQUENCYFIXED]
                         [-o OVERHEAD] [-t THREADS] [-r REPETITIONS]
                         [-c CROSSVALIDATION] [-m MEASURESFRACTION]
                         [-v VERBOSITY]
Script to run modelling algorithm to predict a parsec application output
optional arguments:
-h, --help show this help message and exit
--config CONFIG Filepath from Configuration file configurations
parameters
-p PARSECPYDATAFILEPATH, --parsecpydatafilepath PARSECPYDATAFILEPATH
Path from input data file from Parsec specificated
package.
-f FREQUENCIES, --frequency FREQUENCIES
List of frequencies (KHz). Ex: 2000000, 2100000
-n PROBLEMSIZES, --problemsizes PROBLEMSIZES
List of problem sizes to model used. Ex:
native_01,native_05,native_08
-s SERIALFREQUENCYFIXED, --serialfrequencyfixed SERIALFREQUENCYFIXED
If it considers the serial time at fixed frequency
-o OVERHEAD, --overhead OVERHEAD
If it considers the overhead
-t THREADS, --threads THREADS
Number of Threads
-r REPETITIONS, --repetitions REPETITIONS
Run with repetitions to find the best model
-c CROSSVALIDATION, --crossvalidation CROSSVALIDATION
If run the cross validation of modelling
-m MEASURESFRACTION, --measuresfraction MEASURESFRACTION
Number of measures points used to train the model
-v VERBOSITY, --verbosity VERBOSITY
verbosity level. 0 = No verbose
Example
parsecpy_runmodel --config my_config.json
-p /var/myparsecsim.dat -c True -v 3
"""
import os
import sys
import json
import time
import argparse
from copy import deepcopy
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import ShuffleSplit, KFold
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GridSearchCV
from parsecpy import ParsecData
from parsecpy import Swarm, CoupledAnnealer
from parsecpy import ParsecModel
from parsecpy import argsparselist, argsparseintlist, argsparsefraction
from parsecpy import data_detach, data_attach, measures_split_train_test
def argsparsevalidation():
"""
Validation of script arguments passed via console.
:return: argparse object with validated arguments.
"""
parser = argparse.ArgumentParser(description='Script to run optimizer '
'modelling algorithm to '
'predict a parsec application '
'output')
parser.add_argument('--config', required=True,
help='Filepath from Configuration file '
'configurations parameters')
parser.add_argument('-p', '--parsecpydatafilepath',
help='Path from input data file from Parsec '
'specificated package.')
parser.add_argument('-f', '--frequency', type=argsparseintlist,
help='List of frequencies (KHz). Ex: 2000000, 2100000')
parser.add_argument('-n', '--problemsizes', type=argsparselist,
help='List of problem sizes to model used. '
'Ex: native_01,native_05,native_08')
parser.add_argument('-s', '--serialfrequencyfixed', type=bool,
help='If it considers the serial time at fixed frequency')
parser.add_argument('-o', '--overhead', type=bool,
help='If it considers the overhead')
parser.add_argument('-t', '--threads', type=int,
help='Number of Threads')
group = parser.add_mutually_exclusive_group()
parser.add_argument('-r', '--repetitions', type=int,
help='Number of Repetitions')
group.add_argument('-c', '--crossvalidation', type=bool,
help='If run the cross validation of modelling')
group.add_argument('-m', '--measuresfraction', type=int,
help='Number of points to use on model train')
parser.add_argument('-v', '--verbosity', type=int,
help='verbosity level. 0 = No verbose')
args = parser.parse_args()
return args
def main():
"""
Main function executed from console run.
"""
# adjust list of arguments to avoid negative number values error
for i, arg in enumerate(sys.argv):
if arg.startswith('-') and len(arg) > 1 and arg[1].isdigit():
sys.argv[i] = ' ' + arg
args = argsparsevalidation()
print("\n***** Processing the Model *****")
if args.config:
if not os.path.isfile(args.config):
print('Error: You should provide the correct config file path.')
sys.exit()
with open(args.config, 'r') as fconfig:
config = json.load(fconfig)
for i, v in vars(args).items():
if v is not None:
config[i] = v
else:
config = vars(args)
if 'repetitions' not in config.keys():
config['repetitions'] = 1
best_repetition = 1
if 'serialfrequencyfixed' not in config.keys():
config['serialfrequencyfixed'] = False
if config['algorithm'] in ['pso', 'csa']:
kwargsmodel = {'overhead': config['overhead']}
if not os.path.isfile(config['modelcodefilepath']):
print('Error: You should provide the correct module with the '
'objective function to model')
sys.exit()
if not os.path.isfile(config['parsecpydatafilepath']):
print('Error: You should provide the correct parsecpy measures file')
sys.exit()
parsec_exec = ParsecData(config['parsecpydatafilepath'])
measure = parsec_exec.speedups(serialfrequencyfixed=config['serialfrequencyfixed'])
input_sizes = []
if 'size' in measure.dims:
input_sizes = measure.attrs['input_sizes']
input_ord = []
if 'problemsizes' in config.keys():
for i in config['problemsizes']:
if i not in input_sizes:
print('Error: Measures do not have the specified sizes')
sys.exit()
input_ord.append(input_sizes.index(i)+1)
measure = measure.sel(size=sorted(input_ord))
measure.attrs['input_sizes'] = sorted(config['problemsizes'])
if 'frequency' in measure.dims:
frequencies = measure.coords['frequency']
if 'frequency' in config.keys():
for i in config['frequency']:
if i not in frequencies:
print('Error: Measures do not have the specified frequencies')
sys.exit()
measure = measure.sel(frequency=sorted(config['frequency']))
measure_detach = data_detach(measure)
if 'measuresfraction' in config.keys():
# xy_train_test = train_test_split(measure_detach['x'],
# measure_detach['y'],
# train_size=config['measuresfraction'])
xy_train_test = measures_split_train_test(measure,
train_size=config[
'measuresfraction'])
x_sample_train = xy_train_test[0]
y_sample_train = xy_train_test[2]
x_sample_test = xy_train_test[1]
y_sample_test = xy_train_test[3]
else:
x_sample_train = measure_detach['x']
y_sample_train = measure_detach['y']
x_sample_test = measure_detach['x']
y_sample_test = measure_detach['y']
starttime = time.time()
print('\nAlgorithm Execution...')
if config['algorithm'] in ['svr', 'tree', 'neural']:
measure_ml = measure.copy()
measure_ml.coords['frequency'] = measure_ml.coords['frequency']/1e6
measure_ml_detach = data_detach(measure_ml)
scaler = StandardScaler()
scaler.fit(measure_ml_detach['x'])
feature_standardized = scaler.transform(measure_ml_detach['x'])
measure_ml_standardized = data_attach({'x': feature_standardized,
'y': measure_ml_detach['y']},
measure_ml_detach['dims'])
for j in range(config['repetitions']):
print('Calculating model: Repetition=%d' % (j+1))
if 'measuresfraction' in config.keys():
# xy_train_test = train_test_split(measure_ml_detach['x'],
# measure_ml_detach['y'],
# train_size=config[
# 'measuresfraction'])
xy_train_test = measures_split_train_test(measure_ml,
train_size=config[
'measuresfraction'])
x_sample_train = xy_train_test[0]
y_sample_train = xy_train_test[2]
x_sample_test = xy_train_test[1]
y_sample_test = xy_train_test[3]
else:
x_sample_train = measure_ml_detach['x']
y_sample_train = measure_ml_detach['y']
x_sample_test = measure_ml_detach['x']
y_sample_test = measure_ml_detach['y']
if config['algorithm'] == 'svr':
gs_ml = GridSearchCV(SVR(),
cv=config['crossvalidation-folds'],
param_grid={"C": config['c_grid'],
"gamma": config['gamma_grid']})
gs_ml.fit(x_sample_train, y_sample_train)
best_params = gs_ml.best_params_
for i, v in best_params.items():
config[i] = v
elif config['algorithm'] == 'krr':
# 'alpha' and 'gamma' were undefined here; use assumed log-spaced
# default grids so the grid search can run
alpha = 10.0 ** -np.arange(1, 4)
gamma = 10.0 ** -np.arange(1, 4)
gs_ml = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1),
                     param_grid={"alpha": alpha,
                                 "gamma": gamma})
gs_ml.fit(x_sample_train, y_sample_train)
elif config['algorithm'] == 'tree':
gs_ml = DecisionTreeRegressor(random_state=0, max_depth=4)
gs_ml.fit(x_sample_train, y_sample_train)
elif config['algorithm'] == 'neural':
alpha = 10.0 ** -np.arange(1, 7)
gs_ml = make_pipeline(StandardScaler(),
GridSearchCV(MLPRegressor(solver='lbfgs',
max_iter=2000,
random_state=0),
cv=3, param_grid={"alpha": alpha}))
gs_ml.fit(x_sample_train, y_sample_train)
y_predict = gs_ml.predict(x_sample_test)
error = mean_squared_error(y_sample_test, y_predict)
#errorrel = 100*error/np.mean(y_sample_test)
# MSPE - Mean Square Percentage Error
errorrel = 100 * np.sum(((y_predict - y_sample_test) / y_sample_test) ** 2) / np.size(y_sample_test)
print('\n\n***** %s - Modelling Results! *****\n' % config['algorithm'].upper())
print('Error: %.8f \nPercentage Error (Measured Mean): %.2f %%' %
(error, errorrel))
y_model = data_attach({'x': measure_ml_detach['x'],
'y': gs_ml.predict(measure_ml_detach['x'])},
measure_ml_detach['dims'])
# kf = KFold(n_splits=10, shuffle=True)
# scores = cross_validate(gs_ml, x_sample_train, y_sample_train,
# scoring='neg_mean_squared_error',
# cv=kf, return_train_score=False)
# if config['verbosity'] > 1:
# print(" ** Cross Validate Scores: ")
# print(scores)
y_model.coords['frequency'] = y_model.coords[
'frequency'] * 1e6
measure_ml.coords['frequency'] = measure_ml.coords[
'frequency'] * 1e6
model = ParsecModel(measure=measure_ml, y_model=y_model,
berr=error, berr_rel=errorrel,
modelexecparams=config,
modelresultsfolder=config['resultsfolder'])
if j == 0:
model_best = deepcopy(model)
else:
if model.error < model_best.error:
model_best = deepcopy(model)
best_repetition = j+1
else:
for j in range(config['repetitions']):
print('Calculating model: Repetition=%d' % (j+1))
if config['algorithm'] == 'pso':
optm = Swarm(config['lowervalues'], config['uppervalues'],
parsecpydatafilepath=config['parsecpydatafilepath'],
modelcodefilepath=config['modelcodefilepath'],
size=config['size'], w=config['w'],
c1=config['c1'], c2=config['c2'],
maxiter=config['maxiter'],
threads=config['threads'],
verbosity=config['verbosity'],
x_meas=x_sample_train, y_meas=y_sample_train,
kwargs=kwargsmodel)
elif config['algorithm'] == 'csa':
initial_state = np.array([np.random.uniform(size=config['dimension'])
for _ in range(config['size'])])
optm = CoupledAnnealer(initial_state,
parsecpydatafilepath=config['parsecpydatafilepath'],
modelcodefilepath=config['modelcodefilepath'],
size=config['size'],
steps=config['steps'],
update_interval=config['update_interval'],
tgen_initial=config['tgen_initial'],
tgen_upd_factor=config['tgen_upd_factor'],
tacc_initial=config['tacc_initial'],
alpha=config['alpha'],
desired_variance=config['desired_variance'],
lowervalues=config['lowervalues'],
uppervalues=config['uppervalues'],
threads=config['threads'],
verbosity=config['verbosity'],
x_meas=x_sample_train,
y_meas=y_sample_train,
kwargs=kwargsmodel)
else:
print('Error: You should provide the correct algorithm to use')
sys.exit()
error, solution = optm.run()
model = ParsecModel(bsol=solution,
berr=error,
measure=measure,
modelcodesource=optm.modelcodesource,
modelexecparams=optm.get_parameters(),
modelresultsfolder=config['resultsfolder'])
pred = model.predict(x_sample_test)
model.error = mean_squared_error(y_sample_test, pred['y'])
#model.errorrel = 100 * (model.error / np.mean(y_sample_test))
# MSPE - Mean Square Percentage Error
model.errorrel = 100*np.sum(((pred['y']-y_sample_test)/y_sample_test)**2)/np.size(y_sample_test)
if j == 0:
model_best = deepcopy(model)
else:
if model.error < model_best.error:
model_best = deepcopy(model)
best_repetition = j+1
endtime = time.time()
print('Best Model found on iteration = %d' % (best_repetition))
print('Execution time = %.2f seconds' % (endtime - starttime))
starttime = endtime
print('\n\n***** %s - Modelling Results! *****\n' % config['algorithm'].upper())
print('Error: %.8f \nPercentage Error (Measured Mean): %.2f %%' %
(model_best.error,
model_best.errorrel))
if config['verbosity'] > 0:
print('Best Parameters: \n', model_best.sol)
if config['verbosity'] > 1:
print('\nMeasured Speedup: \n', measure)
print('\nModeled Speedup: \n', model_best.y_model)
print('\n***** Modelling Done! *****\n')
if config['crossvalidation']:
print('\n\n***** Starting cross validation! *****\n')
starttime = time.time()
validation_model = deepcopy(model_best)
scores = validation_model.validate(kfolds=10)
print('\n Cross Validation (K-fold, K=10) Metrics: ')
if config['verbosity'] > 2:
print('\n Times: ')
for key, value in scores['times'].items():
print(' %s: %.8f' % (key, value.mean()))
print(' ', value)
print('\n Scores: ')
for key, value in scores['scores'].items():
if config['verbosity'] > 1:
print(' %s: %.8f' % (value['description'],
value['value'].mean()))
print(' ', value['value'])
else:
print(' %s: %.8f' % (value['description'],
value['value'].mean()))
endtime = time.time()
print(' Execution time = %.2f seconds' % (endtime - starttime))
model_best.validation = scores
print('\n***** Cross Validation Done! *****\n')
if 'measuresfraction' in config.keys():
model_best.measuresfraction = config['measuresfraction']
model_best.measuresfraction_points = x_sample_train
print('\n\n***** ALL DONE! *****\n')
fn = model_best.savedata(parsec_exec.config, ' '.join(sys.argv))
print('Model data saved on filename: %s' % fn)
if __name__ == '__main__':
main()
|
{"hexsha": "190f218eba7ca878f5ed3bf745f5647bbc17c479", "size": 19695, "ext": "py", "lang": "Python", "max_stars_repo_path": "parsecpy/runmodel.py", "max_stars_repo_name": "alexfurtunatoifrn/parsecpy", "max_stars_repo_head_hexsha": "8c77f8dc02c6c4f28bcf2da1dec1b99ff5cd5516", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-03-01T18:42:18.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-11T14:49:08.000Z", "max_issues_repo_path": "parsecpy/runmodel.py", "max_issues_repo_name": "alexfurtunato/parsecpy", "max_issues_repo_head_hexsha": "8c77f8dc02c6c4f28bcf2da1dec1b99ff5cd5516", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "parsecpy/runmodel.py", "max_forks_repo_name": "alexfurtunato/parsecpy", "max_forks_repo_head_hexsha": "8c77f8dc02c6c4f28bcf2da1dec1b99ff5cd5516", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.6296296296, "max_line_length": 112, "alphanum_fraction": 0.52256918, "include": true, "reason": "import numpy", "num_tokens": 3833}
|
import autodiff as ad
import numpy as np
if __name__ == '__main__':
print('Linear regression using SGD and self-made autodiff')
N = 1500
D = 100.0
alpha = -1.45
beta = 2.2
xx = np.arange(N) / float(N) * D
yy = alpha * xx + beta + np.random.normal(loc=0, scale=0.125, size=N)
# Model
eta = 0.05 / N
epochs = 500
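# Fit y = a*x + b by per-sample SGD on the squared error; each update
# re-wraps the parameters in fresh Numtor nodes (which, presumably,
# resets their accumulated gradients before the next backward pass).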
a = ad.Numtor(0.1)
b = ad.Numtor(0.2)
for ii in range(epochs):
for sample_index in np.arange(N):
x = ad.Numtor(xx[sample_index])
y = ad.Numtor(yy[sample_index])
yp = a*x + b
loss = (yp - y) * (yp - y)
loss.backward()
a = ad.Numtor(a.value - eta * a.grad)
b = ad.Numtor(b.value - eta * b.grad)
print(loss.value, a.value, b.value)
|
{"hexsha": "eaa8461d33aebf17672cb0c4fcf1954abfcd5e65", "size": 791, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "lutsker/simple-autodiff", "max_stars_repo_head_hexsha": "3eaecbae7e46566a51f12c923d72af305d39b4ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "lutsker/simple-autodiff", "max_issues_repo_head_hexsha": "3eaecbae7e46566a51f12c923d72af305d39b4ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "lutsker/simple-autodiff", "max_forks_repo_head_hexsha": "3eaecbae7e46566a51f12c923d72af305d39b4ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.275862069, "max_line_length": 73, "alphanum_fraction": 0.5284450063, "include": true, "reason": "import numpy", "num_tokens": 250}
|
# -*- coding: utf-8 -*-
import numpy as np
import time
import sys
import theano
import theano.tensor as T
import theano.printing as P
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
rng = np.random.RandomState(23455)
class LogisticRegression(object):
def __init__(self, input, n_in, n_out):
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(value=np.zeros((n_in, n_out),
dtype=theano.config.floatX),
name='W', borrow=True)
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=np.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
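# T.log(self.p_y_given_x)[T.arange(y.shape[0]), y] selects, for each
# example i in the minibatch, the log-probability the model assigns to
# the true class y[i]; the negated mean over the batch is the NLL loss.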
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type))
if y.dtype.startswith('int'):
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
#def __getstate__(self):
# (self.W.get_value(),self.b.get_value())
#def __setstate__(self,data):
# W,b = data
# (self.W.set_value(W),self.b.set_value(b))
class HiddenLayer(object):
def __init__(self, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
self.input = input
if W is None:
W_values = np.asarray(rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
# def __getstate__(self):
# (self.W.get_value(),self.b.get_value())
# def __setstate__(self,data):
# W,b = data
# (self.W.set_value(W),self.b.set_value(b))
class LeNetConvPoolLayer(object):
def __init__(self, input, filter_shape, image_shape, poolsize=(2, 2)):
assert image_shape[1] == filter_shape[1]
self.input = input
fan_in = np.prod(filter_shape[1:])
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
np.prod(poolsize))
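# "Xavier"/Glorot-style uniform initialization bound: weights are drawn
# from [-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))], with the
# pooling area folded into fan_out above.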
W_bound = np.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(np.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX),
borrow=True)
b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
conv_out = conv.conv2d(input=input, filters=self.W,
filter_shape=filter_shape, image_shape=image_shape)
pooled_out = downsample.max_pool_2d(input=conv_out,
ds=poolsize, ignore_border=True)
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
#def __getstate__(self):
# (self.W.get_value(),self.b.get_value())
#def __setstate__(self,data):
# W,b = data
# (self.W.set_value(W),self.b.set_value(b))
class Model(object):
def __init__(self,ishape=(28,28),nkerns=[20, 50],batch_size=100, outsize=10):
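# LeNet-5-style pipeline for 28x28 inputs: conv 5x5 -> 24x24, maxpool 2x2
# -> 12x12, conv 5x5 -> 8x8, maxpool 2x2 -> 4x4, then a 500-unit hidden
# layer and a softmax output (hence n_in = nkerns[1] * 4 * 4 below).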
self.index = T.lscalar() # index to a [mini]batch
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y')
self.batch_size=batch_size
self.layer0_input = self.x.reshape((self.batch_size, 1, 28, 28))
self.layer0 = LeNetConvPoolLayer(input=self.layer0_input,
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5), poolsize=(2, 2))
self.layer1 = LeNetConvPoolLayer(input=self.layer0.output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5), poolsize=(2, 2))
self.layer2_input = self.layer1.output.flatten(2)
self.layer2 = HiddenLayer(input=self.layer2_input, n_in=nkerns[1] * 4 * 4,
n_out=500, activation=T.tanh)
self.layer3 = LogisticRegression(input=self.layer2.output, n_in=500, n_out=outsize)
self.cost = self.layer3.negative_log_likelihood(self.y)
self.params = self.layer3.params + self.layer2.params + self.layer1.params + self.layer0.params
self.grads = T.grad(self.cost, self.params)
self.apply_model = theano.function([self.x],self.layer3.p_y_given_x)
def loadData(self,dataset):
self.dataset = dataset
def prepareLearning(self,learning_rate=0.05):
self.n_train_batches = self.dataset.train_set[0].get_value(borrow=True).shape[0]
self.n_valid_batches = self.dataset.validation_set[0].get_value(borrow=True).shape[0]
self.n_train_batches /= self.batch_size
self.n_valid_batches /= self.batch_size
train_set_x, train_set_y = self.dataset.train_set
valid_set_x, valid_set_y = self.dataset.validation_set
self.validate_model = theano.function([self.index], self.layer3.errors(self.y),
givens={
self.x: valid_set_x[self.index * self.batch_size: (self.index + 1) * self.batch_size],
self.y: valid_set_y[self.index * self.batch_size: (self.index + 1) * self.batch_size]})
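# Plain (vanilla) SGD updates: param <- param - learning_rate * grad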
updates = []
for param_i, grad_i in zip(self.params, self.grads):
updates.append((param_i, param_i - learning_rate * grad_i))
self.train_model = theano.function([self.index], self.cost, updates=updates,
givens={
self.x: train_set_x[self.index * self.batch_size: (self.index + 1) * self.batch_size],
self.y: train_set_y[self.index * self.batch_size: (self.index + 1) * self.batch_size]})
#self.apply_model = theano.function([self.x],self.layer3.p_y_given_x) # Replace y_pred with p_y_given_x if class probability estimates are needed
def prepareDumping(self):
self.dataset = None
def train(self,n_epochs,callback=None):
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(self.n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_params = None
best_validation_loss = np.inf
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(self.n_train_batches):
iter = (epoch - 1) * self.n_train_batches + minibatch_index
# if iter % 100 == 0:
# print 'training @ iter = ', iter
cost_ij = self.train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [self.validate_model(i) for i
in xrange(self.n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
# print('epoch %i, minibatch %i/%i, validation error %f %%' % \
# (epoch, minibatch_index + 1, self.n_train_batches, \
# this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
#print((' epoch %i, error of best '
# 'model %f %%') %
print (epoch, minibatch_index + 1, self.n_train_batches, best_validation_loss * 100.)
if patience <= iter:
done_looping = True
break
if callable(callback):
done_looping = callback(epoch,best_validation_loss) # Anything can be passed into the callback here, and it can also be used to stop training
end_time = time.clock()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i' %
(best_validation_loss * 100., best_iter + 1))
return self
#print >> sys.stderr, ('The code for file ' +
# os.path.split(__file__)[1] +
# ' ran for %.2fm' % ((end_time - start_time) / 60.))
def apply(self,data, pos):
results = self.apply_model(data.apply_set[pos*100:(pos+1)*100].astype(theano.config.floatX))
#results = results[0:data.apply_size]
return results
def __getstate__(self):
return (self.layer0.W.get_value(),self.layer0.b.get_value(),
self.layer1.W.get_value(),self.layer1.b.get_value(),
self.layer2.W.get_value(),self.layer2.b.get_value(),
self.layer3.W.get_value(),self.layer3.b.get_value(),self.batch_size)
def __setstate__(self,data):
(l0W,l0b,l1W,l1b,l2W,l2b,l3W,l3b,batch_size) = data
self.__init__(batch_size=batch_size)
self.layer0.W.set_value(l0W)
self.layer0.b.set_value(l0b)
self.layer1.W.set_value(l1W)
self.layer1.b.set_value(l1b)
self.layer2.W.set_value(l2W)
self.layer2.b.set_value(l2b)
self.layer3.W.set_value(l3W)
self.layer3.b.set_value(l3b)
self.apply_model = theano.function([self.x],self.layer3.p_y_given_x)
#def chunks(l, n):
# """ Yield successive n-sized chunks from l.
# """
# for i in xrange(0, len(l), n):
# yield l[i:i+n]
|
{"hexsha": "afef3ac427e216360ef9e78654c5104994cd02e5", "size": 12291, "ext": "py", "lang": "Python", "max_stars_repo_path": "lenet5.py", "max_stars_repo_name": "allchemist/mnist-handwritten-task", "max_stars_repo_head_hexsha": "e79fceae95adfaf92349bf01b75b1905142dfbd1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lenet5.py", "max_issues_repo_name": "allchemist/mnist-handwritten-task", "max_issues_repo_head_hexsha": "e79fceae95adfaf92349bf01b75b1905142dfbd1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lenet5.py", "max_forks_repo_name": "allchemist/mnist-handwritten-task", "max_forks_repo_head_hexsha": "e79fceae95adfaf92349bf01b75b1905142dfbd1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.6914498141, "max_line_length": 161, "alphanum_fraction": 0.5555284354, "include": true, "reason": "import numpy,import theano,from theano", "num_tokens": 2828}
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
# Read in an image
image = mpimg.imread('resources/signs_vehicles_xygrad.png')
# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
# Note: calling your function with orient='x', thresh_min=20, thresh_max=100
# should produce output like the example image shown above this quiz.
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the derivative in x or y given orient = 'x' or 'y'
if orient=='x':
sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
else:
sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# 5) Create a mask of 1's where the scaled gradient magnitude
# is > thresh_min and < thresh_max
binary_output = np.zeros_like(scaled_sobel)
thresh_min = thresh[0]
thresh_max = thresh[1]
binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# 6) Return this mask as your binary_output image
return binary_output
# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the magnitude
abs_sobelxy = np.sqrt((np.power(sobelx,2) + np.power(sobely,2)))
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel = np.uint8(255*abs_sobelxy/np.max(abs_sobelxy))
# 5) Create a binary mask where mag thresholds are met
thresh_min = mag_thresh[0]
thresh_max = mag_thresh[1]
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# 6) Return this mask as your binary_output image
return binary_output
# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
dir_grad = np.arctan2(abs_sobely, abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
binary_output = np.zeros_like(dir_grad)
thresh_min = thresh[0]
thresh_max = thresh[1]
binary_output[(dir_grad >= thresh_min) & (dir_grad <= thresh_max)] = 1
# 6) Return this mask as your binary_output image
return binary_output
# Choose a Sobel kernel size
ksize = 3 # Choose a larger odd number to smooth gradient measurements
# Apply each of the thresholding functions
gradx = abs_sobel_thresh(image, orient='x', sobel_kernel=ksize, thresh=(0, 255))
grady = abs_sobel_thresh(image, orient='y', sobel_kernel=ksize, thresh=(0, 255))
mag_binary = mag_thresh(image, sobel_kernel=ksize, mag_thresh=(0, 255))
dir_binary = dir_threshold(image, sobel_kernel=ksize, thresh=(0, np.pi/2))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
# Plot the result
f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, figsize=(24, 9))
f.tight_layout()
ax1.imshow(gradx, cmap='gray')
ax1.set_title('gradx Image', fontsize=50)
ax2.imshow(grady, cmap='gray')
ax2.set_title('grady Image', fontsize=50)
ax3.imshow(mag_binary, cmap='gray')
ax3.set_title('mag_binary Image', fontsize=50)
ax4.imshow(dir_binary, cmap='gray')
ax4.set_title('dir_binary Image', fontsize=50)
ax5.imshow(combined, cmap='gray')
ax5.set_title('Combined Thresholded', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
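# A hedged usage sketch (added; not part of the original quiz code): the note
# above the first function suggests thresh=(20, 100) for the x-gradient, and an
# explicit show() call makes the figure appear outside interactive backends.
gradx_tuned = abs_sobel_thresh(image, orient='x', sobel_kernel=ksize, thresh=(20, 100))
plt.show()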
|
{"hexsha": "c854a4171e14f780b0832694b91d36608c513c68", "size": 4722, "ext": "py", "lang": "Python", "max_stars_repo_path": "lesson_quizzes/gradiant_color_space/combined_thresholds.py", "max_stars_repo_name": "WbHappy/advancedLaneLinesDetection", "max_stars_repo_head_hexsha": "04cce9d2a6fac62ca6322a21c7800eb95c3d4f95", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lesson_quizzes/gradiant_color_space/combined_thresholds.py", "max_issues_repo_name": "WbHappy/advancedLaneLinesDetection", "max_issues_repo_head_hexsha": "04cce9d2a6fac62ca6322a21c7800eb95c3d4f95", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lesson_quizzes/gradiant_color_space/combined_thresholds.py", "max_forks_repo_name": "WbHappy/advancedLaneLinesDetection", "max_forks_repo_head_hexsha": "04cce9d2a6fac62ca6322a21c7800eb95c3d4f95", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5405405405, "max_line_length": 91, "alphanum_fraction": 0.7049978823, "include": true, "reason": "import numpy", "num_tokens": 1413}
|
import numpy as np
import pydicom
import glob
from read_roi import read_roi_file
import matplotlib.pyplot as plt
import cv2
__author__ = ['Giuseppe Filitto']
__email__ = ['giuseppe.filitto@studio.unibo.it']
def rescale(im, max_value, min_value):
'''
Rescale image in range (0,255)
Parameters
----------
im : array like
image to be rescaled
max_value : value
max image value
min_value : value
min image value
Returns
-------
rescaled image: array like
rescaled input image as type uint8
'''
rescaled_image = ((im.astype(float) - min_value) * (1. / (max_value - min_value)) * 255.).astype('uint8')
return rescaled_image
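# A minimal self-check sketch (added; not part of the original module):
# rescaling a synthetic 12-bit-style array into the uint8 range.
def _demo_rescale():
    im = np.array([[0, 2048], [4095, 1024]], dtype=np.int16)
    return rescale(im, im.max(), im.min())  # spans 0..255 as uint8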
def read_slices(filename):
'''
Read dicom file as pixel array
Parameters
----------
filename : str
name of file.dcm
Returns
-------
pix_arr : array
dcm file as array
Raises
------
ValueError
filename must be .dcm format
'''
    ext = filename.split('.')[-1]
    if ext != 'dcm':
        raise ValueError('Input filename must be a DICOM file')
pix_arr = pydicom.dcmread(filename).pixel_array
return pix_arr
def get_slices(dir_path, uint8=True):
'''
Get full stack of slices from single dcm files ordered by "InstanceNumber" as a rescaled array of shape: depth, height, width
Parameters
----------
dir_path : str
directory of dcm slices
uint8 : bool
rescale the image to uint8, by default True
Returns
-------
slices: array
array of shape: depth, height, width , ordered by "InstanceNumber"
'''
files = glob.glob(dir_path + '/*.dcm')
    # ordering by instance number
    z = [float(pydicom.dcmread(f, force=True).get(
        "InstanceNumber", 0)) - 1 for f in files]
order = np.argsort(z)
files = np.asarray(files)[order]
slices = [read_slices(f) for f in files]
if uint8:
Max = max([x.max() for x in slices])
Min = min([x.min() for x in slices])
slices = [rescale(x, Max, Min) for x in slices]
slices = np.asarray(slices)
else:
slices = np.asarray(slices)
return slices
def get_slices_info(slices):
'''
Print depth, height, width of the input slices
Parameters
----------
slices : array-like
'''
depth, height, width = slices.shape
print(f"The image object has the following dimensions: depth:{depth}, height:{height}, width:{width}")
def _dict(dict_list):
'''
Function to get true_dict from a dict of dict like {key : true_dict}
Parameters
----------
dict_list : list
list of dicts
Returns
-------
true_dict : list
list of true_dict
'''
true_dict = []
for i in dict_list:
_dict = list(i.values())
for j in _dict:
keys = j.keys()
vals = j.values()
_dict = {key: val for key, val in zip(keys, vals)}
true_dict.append(_dict)
return true_dict
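# A small illustration (added; not part of the original module) of the _dict
# helper unwrapping the {filename: roi_dict} structure returned by read_roi_file.
def _demo_dict():
    wrapped = [{'roi_a': {'type': 'polygon', 'position': 1}}]
    return _dict(wrapped)  # -> [{'type': 'polygon', 'position': 1}]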
def get_rois(roi_path):
'''
Get ImageJ rois from .roi files stored in roi_path
Parameters
----------
roi_path : str
path of dir containing .roi files
Returns
-------
roi: list
list of roi dicts ordered by position number and without "type":composite
'''
rois_list = glob.glob(roi_path + '/*.roi')
rois = [read_roi_file(roi) for roi in rois_list]
rois = _dict(rois)
# ordering dictionaries by positions and removing rois without x y coords
rois = sorted(rois, key=lambda d: list(d.values())[-1])
rois = list(filter(lambda d: d['type'] != 'composite', rois))
return rois
def make_mask(slices, layer, rois):
'''
Generate mask of a given slice
Parameters
----------
slices : array
array of shape depth, height, width
layer : int
value between (0, slices.shape[0])
rois : list
roi list
Returns
-------
label : array
return mask of a given slice. Pixels outside regions of interest are set to 0 (black), pixels inside regions of interest are set to 255 (white)
Raises
------
ValueError
if there are no regions of interest: "no labels found!"
'''
positions = [rois[i].get('position') - 1 for i in range(len(rois))]
if layer not in positions:
raise ValueError("no labels found!")
else:
background = np.zeros_like(slices[layer, :, :])
roi = list(filter(lambda d: d['position'] == layer + 1, rois))
x = [roi[i].get('x') for i in range(len(roi))]
y = [roi[i].get('y') for i in range(len(roi))]
points = []
for i in range(len(x)):
            pts = np.array([(xc, yc) for (xc, yc) in zip(x[i], y[i])])
points.append(pts)
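        # cv2.fillPoly rasterizes each integer-vertex polygon onto the zero background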
label = cv2.fillPoly(background, points, 255)
return label
def mask_slices(slices, rois):
'''
Make an array of shape depth, height, width, containing for each layer the proper mask.
If no mask is found then masked_slices[layer, :, :] = 0
Parameters
----------
slices : array
array of shape depth, height, width
rois : list
roi list
Returns
-------
masked_slices : array
array of shape: depth, height, width containing for each layer the proper mask
'''
masked_slices = np.zeros_like(slices)
positions = [rois[i].get('position') - 1 for i in range(len(rois))]
for layer in range(slices.shape[0]):
if layer not in positions:
masked_slices[layer, ...] = 0
else:
masked_slices[layer, ...] = make_mask(
slices=slices, layer=layer, rois=rois)
return masked_slices
def explore_roi(slices, layer, rois): # pragma: no cover
'''
Show the regions of interest contours from a given slice
Parameters
----------
slices : array
array of shape depth, height, width
layer : int
value between (0, slices.shape[0])
rois : list
roi list
'''
# -1 to match slice
positions = [rois[i].get('position') - 1 for i in range(len(rois))]
if layer in positions:
plt.figure(figsize=(12, 7), constrained_layout=True)
plt.imshow(slices[layer, ...], cmap='gray')
plt.title(f'Exploring Slice {layer}', fontsize=20)
plt.axis('off')
roi = list(filter(lambda d: d['position'] == layer + 1, rois))
x = [roi[i].get('x') for i, _ in enumerate(roi)]
y = [roi[i].get('y') for i, _ in enumerate(roi)]
for i, _ in enumerate(roi):
plt.fill(x[i], y[i], edgecolor='r', fill=False)
else:
plt.figure(figsize=(12, 7), constrained_layout=True)
plt.imshow(slices[layer, ...], cmap='gray')
plt.title(f'Exploring Slice {layer}', fontsize=20)
plt.axis('off')
plt.show()
def plot_random_layer(slices): # pragma: no cover
'''
Show figure of the random slice between (0, slices.shape[0])
Parameters
----------
slices : array
array of shape depth, height, width
'''
maxval = slices.shape[0]
# Select random layer number
layer = np.random.randint(0, maxval)
# figure
explore_slices(slices=slices, layer=layer)
def explore_slices(slices, layer, **kwargs): # pragma: no cover
'''
Show figure of the given slice
Parameters
----------
slices : array
array of shape depth, height, width
layer : int
value between (0, slice.shape[0])
'''
if kwargs.get('figsize'):
figsize = kwargs.get('figsize')
plt.figure(figsize=figsize, constrained_layout=True)
plt.imshow(slices[layer, ...], cmap='gray')
else:
plt.figure(figsize=(6, 6), constrained_layout=True)
plt.imshow(slices[layer, ...], cmap='gray')
if kwargs.get('fontsize'):
fontsize = kwargs.get('fontsize')
plt.title(f'Exploring Slice {layer}', fontsize=fontsize)
else:
plt.title(f'Exploring Slice {layer}', fontsize=15)
plt.axis('off')
plt.show()
def display_image(img, figsize=(12, 7), **kwargs): # pragma: no cover
'''
Display greyscale image
Parameters
----------
img : image, array_like
image to be displayed
figsize : tuple, optional
figsize arg of matplotlib module, by default (12, 7)
'''
    plt.figure(figsize=figsize, constrained_layout=True)
plt.imshow(img, cmap='gray')
    if kwargs.get('title'):
title = kwargs.get('title')
if kwargs.get('fontsize'):
fontsize = kwargs.get('fontsize')
plt.title(title, fontsize=fontsize)
else:
plt.title(title)
def display_images(display_list, figsize=(12, 8), **kwargs): # pragma: no cover
'''
Display a list of greyscale images
Parameters
----------
display_list : list
list of images to be displayed
figsize : tuple, optional
figsize arg of matplotlib module, by default (12, 8)
'''
plt.figure(figsize=figsize)
for i, _ in enumerate(display_list):
plt.subplot(1, len(display_list), i + 1)
plt.imshow(display_list[i], cmap='gray')
if kwargs.get('titles'):
titles = kwargs.get('titles')
if kwargs.get('fontsize'):
fontsize = kwargs.get('fontsize')
plt.title(titles[i], fontsize=fontsize)
else:
plt.title(titles[i])
plt.show()
|
{"hexsha": "20d501bdd4143fa13c519519c58685a3cf367641", "size": 9550, "ext": "py", "lang": "Python", "max_stars_repo_path": "MRIsegm/utils.py", "max_stars_repo_name": "giuseppefilitto/img-segm", "max_stars_repo_head_hexsha": "27744083b412c8470ad58b484d20acfb4be91271", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-14T11:02:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T18:16:33.000Z", "max_issues_repo_path": "MRIsegm/utils.py", "max_issues_repo_name": "giuseppefilitto/img-utils", "max_issues_repo_head_hexsha": "27744083b412c8470ad58b484d20acfb4be91271", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MRIsegm/utils.py", "max_forks_repo_name": "giuseppefilitto/img-utils", "max_forks_repo_head_hexsha": "27744083b412c8470ad58b484d20acfb4be91271", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-06T14:51:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T14:51:24.000Z", "avg_line_length": 25.0, "max_line_length": 150, "alphanum_fraction": 0.5823036649, "include": true, "reason": "import numpy", "num_tokens": 2381}
|
# CompilerInvocation
CompilerInvocation() = CompilerInvocation(create_compiler_invocation())
"""
create_compiler_invocation() -> CXCompilerInvocation
Return a pointer to a `clang::CompilerInvocation` object.
"""
function create_compiler_invocation()
status = Ref{CXInit_Error}(CXInit_NoError)
invocation = clang_CompilerInvocation_create(status)
@assert status[] == CXInit_NoError
return invocation
end
"""
createFromCommandLine(src::String, args::Vector{String}=String[], diag::DiagnosticsEngine=DiagnosticsEngine()) -> CompilerInvocation
Return a `CompilerInvocation` created from command line arguments.
"""
function createFromCommandLine(src::String, args::Vector{String}=String[],
diag::DiagnosticsEngine=DiagnosticsEngine())
status = Ref{CXInit_Error}(CXInit_NoError)
args_with_src = copy(args)
push!(args_with_src, src)
invocation = clang_CompilerInvocation_createFromCommandLine(args_with_src,
length(args_with_src), diag,
status)
@assert status[] == CXInit_NoError
return CompilerInvocation(invocation)
end
# Options
function getCodeGenOpts(ci::CompilerInvocation)
@check_ptrs ci
return CodeGenOptions(clang_CompilerInvocation_getCodeGenOpts(ci))
end
function getDiagnosticOpts(ci::CompilerInvocation)
@check_ptrs ci
return DiagnosticOptions(clang_CompilerInvocation_getDiagnosticOpts(ci))
end
function getFrontendOpts(ci::CompilerInvocation)
@check_ptrs ci
return FrontendOptions(clang_CompilerInvocation_getFrontendOpts(ci))
end
function getHeaderSearchOpts(ci::CompilerInvocation)
@check_ptrs ci
return HeaderSearchOptions(clang_CompilerInvocation_getHeaderSearchOpts(ci))
end
function getPreprocessorOpts(ci::CompilerInvocation)
@check_ptrs ci
return PreprocessorOptions(clang_CompilerInvocation_getPreprocessorOpts(ci))
end
function getTargetOpts(ci::CompilerInvocation)
@check_ptrs ci
return TargetOptions(clang_CompilerInvocation_getTargetOpts(ci))
end
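"""
    example_invocation()

A minimal usage sketch (added example, not part of the original API): builds a
`CompilerInvocation` from command-line-style arguments and reads back one of its
option structs. The source path and flag are illustrative placeholders.
"""
function example_invocation()
    ci = createFromCommandLine("hello.cpp", ["-std=c++17"])
    return getFrontendOpts(ci)
end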
|
{"hexsha": "2a296d453cb88da3276ef1240bf6a6de21faeb53", "size": 2132, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/clang/api/Frontend/CompilerInvocation.jl", "max_stars_repo_name": "vchuravy/ClangCompiler.jl", "max_stars_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-08-24T04:01:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T00:43:19.000Z", "max_issues_repo_path": "src/clang/api/Frontend/CompilerInvocation.jl", "max_issues_repo_name": "vchuravy/ClangCompiler.jl", "max_issues_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-07-17T12:50:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-22T20:27:02.000Z", "max_forks_repo_path": "src/clang/api/Frontend/CompilerInvocation.jl", "max_forks_repo_name": "vchuravy/ClangCompiler.jl", "max_forks_repo_head_hexsha": "47080072b059465f8176349c6e67bc678fa238d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-03T20:49:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T20:49:33.000Z", "avg_line_length": 34.9508196721, "max_line_length": 136, "alphanum_fraction": 0.743902439, "num_tokens": 432}
|
"""Operations for [N, 2] numpy arrays or torch tensors representing segments.
Example segment operations that are supported:
* length: compute segment lengths
* IOU: pairwise intersection-over-union scores
* intersection: pairwise intersection lengths
TODO (refactor): rename module to segments_ops
"""
import numpy as np
import torch
def intersection(segments1, segments2):
"""Compute pairwise intersection length between segments.
Args:
segments1 (numpy array): shape [N, 2] holding N segments
segments2 (numpy array): shape [M, 2] holding M segments
Returns:
a numpy array with shape [N, M] representing pairwise intersection length
"""
[t_min1, t_max1] = np.split(segments1, 2, axis=1)
[t_min2, t_max2] = np.split(segments2, 2, axis=1)
all_pairs_min_tmax = np.minimum(t_max1, np.transpose(t_max2))
all_pairs_max_tmin = np.maximum(t_min1, np.transpose(t_min2))
intersect_length = np.maximum(np.zeros(all_pairs_max_tmin.shape),
all_pairs_min_tmax - all_pairs_max_tmin)
return intersect_length
def length(segments):
"""Computes length of segments.
Args:
segments (numpy array or torch tensor): shape [N, 2] holding N
segments.
Returns:
a numpy array (or torch tensor) with shape [N] representing segment
length.
Note:
        this works with time; it would be off by one if using frames.
"""
return segments[:, 1] - segments[:, 0]
def iou(segments1, segments2):
"""Computes pairwise intersection-over-union between segment collections.
Args:
segments1 (numpy array): shape [N, 2] holding N segments
        segments2 (numpy array): shape [M, 2] holding M segments.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(segments1, segments2)
length1 = length(segments1)
length2 = length(segments2)
union = np.expand_dims(length1, axis=1) + np.expand_dims(
length2, axis=0) - intersect
return intersect / union
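# A small worked example (added; not part of the original module): segments
# [0, 1] and [0.5, 1.5] intersect over 0.5 and unite over 1.5, so IoU = 1/3.
def _demo_iou():
    a = np.array([[0.0, 1.0]])
    b = np.array([[0.5, 1.5]])
    return iou(a, b)  # -> array([[0.33333333]])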
def non_maxima_suppresion(segments, scores, nms_threshold):
"""non-maxima suppresion over segments
Args:
segments (numpy array): shape [N, 2] holding N segments
scores (numpy array): shape [N] holding score of each segment.
Returns:
a numpy array with shape [M] representing indexes to pick after nms.
"""
t1, t2 = np.split(segments, 2, axis=1)
area = t2 - t1
idx = np.argsort(scores)
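    # idx is sorted by ascending score; each pass picks the highest-scoring
    # remaining segment and suppresses overlapping candidates above the threshold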
ind_pick = []
for i in range(len(idx)):
if len(idx) == 0:
break
p = idx[len(idx) - 1]
ind_pick.append(p)
tt1 = np.maximum(t1[p], t1[idx])
tt2 = np.minimum(t2[p], t2[idx])
wh = np.maximum(0, tt2 - tt1)
o = wh / (area[p] + area[idx] - wh)
ind_rm_i = np.where(o >= nms_threshold)[0]
idx = np.delete(idx, ind_rm_i)
ind_pick = np.array(ind_pick)
return ind_pick
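# A hedged sketch (added; not part of the original module): two near-duplicate
# segments collapse to the higher-scoring one; the disjoint segment survives.
def _demo_nms():
    segments = np.array([[0.0, 1.0], [0.05, 1.05], [2.0, 3.0]])
    scores = np.array([0.9, 0.8, 0.7])
    return non_maxima_suppresion(segments, scores, nms_threshold=0.5)  # -> [0, 2]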
def torch_intersection(segments1, segments2):
"""Compute pairwise intersection length between segments.
Args:
segments1 (torch tensor): shape [N, 2] holding N segments
segments2 (torch tensor): shape [M, 2] holding M segments
Returns:
a torch tensor with shape [N, M] representing pairwise intersection length
"""
[t_min1, t_max1] = torch.chunk(segments1, 2, dim=1)
[t_min2, t_max2] = torch.chunk(segments2, 2, dim=1)
    # t_max1 and t_max2 are not contiguous; does it matter?
    # t_*_[1-2] seem to share memory with segments[1-2], so the transpose is
    # deliberately not done in-place
all_pairs_min_tmax = torch.min(t_max1, t_max2.transpose(0, 1))
all_pairs_max_tmin = torch.max(t_min1, t_min2.transpose(0, 1))
intersect_length = torch.max(torch.zeros_like(all_pairs_max_tmin),
all_pairs_min_tmax - all_pairs_max_tmin)
return intersect_length
def torch_iou(segments1, segments2):
"""Computes pairwise intersection-over-union between segment collections.
Args:
segments1 (torch tensor): shape [N, 2] holding N segments
        segments2 (torch tensor): shape [M, 2] holding M segments.
Returns:
a torch tensor with shape [N, M] representing pairwise iou scores.
"""
intersect = torch_intersection(segments1, segments2)
length1 = length(segments1)
length2 = length(segments2)
union = length1.unsqueeze_(1) + length2.unsqueeze_(0) - intersect
return intersect / union
if __name__ == '__main__':
# kinda unit-test
def random_segments(n):
x_ = np.random.rand(n, 2).astype(np.float32)
x = np.empty_like(x_)
x[:, 0] = np.min(x_, axis=1)
x[:, 1] = np.max(x_, axis=1)
return x
N, M = 1113, 2367
a = random_segments(N)
b = random_segments(M)
a_torch = torch.from_numpy(a)
b_torch = torch.from_numpy(b)
length(a)
length(a_torch)
MAYBE_NUMERIC_FNS = {torch_iou}
for functions_i in [(intersection, torch_intersection), (iou, torch_iou)]:
numpy_fn, torch_fn = functions_i
gt = numpy_fn(a, b)
# torch
for cuda in [False, True]:
if cuda:
a_torch = a_torch.cuda()
b_torch = b_torch.cuda()
else:
a_torch = a_torch.cpu()
b_torch = b_torch.cpu()
rst = torch_fn(a_torch, b_torch)
if cuda:
rst = rst.cpu()
testing_fn = np.testing.assert_array_equal
if torch_fn in MAYBE_NUMERIC_FNS:
testing_fn = np.testing.assert_array_almost_equal
testing_fn(rst.numpy(), gt)
scores = np.random.rand(N)
nms_threshold = 0.75
non_maxima_suppresion(a, scores, nms_threshold)
|
{"hexsha": "9c4bf93813d605bfe17e7a74e765a820ec974d82", "size": 5843, "ext": "py", "lang": "Python", "max_stars_repo_path": "np_segments_ops.py", "max_stars_repo_name": "escorciav/moments-retrieval-page", "max_stars_repo_head_hexsha": "84c31150246797e2db1a63159cceded30998e2be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2019-10-23T03:37:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T23:40:16.000Z", "max_issues_repo_path": "np_segments_ops.py", "max_issues_repo_name": "escorciav/moments-retrieval-page", "max_issues_repo_head_hexsha": "84c31150246797e2db1a63159cceded30998e2be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-01T19:04:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-20T11:23:47.000Z", "max_forks_repo_path": "np_segments_ops.py", "max_forks_repo_name": "escorciav/moments-retrieval-page", "max_forks_repo_head_hexsha": "84c31150246797e2db1a63159cceded30998e2be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-01T12:54:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-27T09:28:41.000Z", "avg_line_length": 33.5804597701, "max_line_length": 82, "alphanum_fraction": 0.6375149752, "include": true, "reason": "import numpy", "num_tokens": 1527}
|
# -*- coding: utf-8 -*-
"""
Description: A module to define resilience models and simulations.
- :class:`Common`: Class defining common methods accessible by Function/Flow/Component Classes
- :class:`FxnBlock`: Class defining Model Functions and their attributes
- :class:`Flow`: Class defining Model Flows and their attributes
- :class:`Component`: Class defining Function Components and their attributes
- :class:`SampleApproach`: Class defining fault sampling approaches
- :class:`NominalApproach`: Class defining parameter sampling approaches
"""
#File name: modeldef.py
#Author: Daniel Hulse
#Created: October 2019
import numpy as np
import itertools
import dill
import pickle
import networkx as nx
import copy
import warnings
from ordered_set import OrderedSet
from operator import itemgetter
from collections.abc import Iterable
# MAJOR CLASSES
class Common(object):
def set_atts(self, **kwargs):
"""Sets the given arguments to a given value. Mainly useful for
reducing length/adding clarity to assignment statements in __init__ methods
        (self.put is recommended otherwise so that the iteration is on function/flow *states*)
e.g., self.set_attr(maxpower=1, maxvoltage=1) is the same as saying
self.maxpower=1; self.maxvoltage=1
"""
for name, value in kwargs.items():
setattr(self, name, value)
def put(self,**kwargs):
"""Sets the given arguments to a given value. Mainly useful for
reducing length/adding clarity to assignment statements.
e.g., self.EE.put(v=1, a=1) is the same as saying
self.EE.v=1; self.EE.a=1
"""
for name, value in kwargs.items():
if name not in self._states: raise Exception(name+" not a property of "+self.name)
setattr(self, name, value)
def assign(self,obj,*states):
""" Sets the same-named values of the current flow/function object to those of a given flow.
Further arguments specify which values.
e.g. self.EE1.assign(EE2, 'v', 'a') is the same as saying
self.EE1.a = self.EE2.a; self.EE1.v = self.EE2.v
"""
if len(states)==0: states= obj._states
for state in states:
if state not in self._states: raise Exception(state+" not a property of "+self.name)
setattr(self, state, getattr(obj,state))
def get(self, *attnames, **kwargs):
"""Returns the given attribute names (strings). Mainly useful for reducing length
of lines/adding clarity to assignment statements.
e.g., x,y = self.Pos.get('x','y') is the same as
x,y = self.Pos.x, self.Pos.y, or
z = self.Pos.get('x','y') is the same as
z = np.array([self.Pos.x, self.Pos.y])
"""
if len(attnames)==1: states = getattr(self,attnames[0])
else: states = [getattr(self,name) for name in attnames]
if not is_iter(states): return states
elif len(states)==1: return states[0]
elif kwargs.get('as_array', True): return np.array(states)
else: return states
def values(self):
return self.gett(*self._states)
def gett(self, *attnames):
"""Alternative to self.get that returns the given constructs as a tuple instead
of as an array. Useful when a numpy array would translate the underlying data types
poorly (e.g., np.array([1,'b'] would make 1 a string--using a tuple instead preserves
the data type)"""
states = self.get(*attnames,as_array=False)
if not is_iter(states): return states
elif len(states)==1: return states[0]
else: return tuple(states)
def inc(self,**kwargs):
"""Increments the given arguments by a given value. Mainly useful for
reducing length/adding clarity to increment statements.
e.g., self.Pos.inc(x=1,y=1) is the same as
self.Pos.x+=1; self.Pos.y+=1, or
self.Pos.x = self.Pos.x + 1; self.Pos.y = self.Pos.y +1
Can additionally be provided with a second value denoting a limit on the increments
e.g. self.Pos.inc(x=(1,10)) will increment x by 1 until it reaches 10
"""
for name, value in kwargs.items():
if name not in self._states: raise Exception(name+" not a property of "+self.name)
if type(value)==tuple:
current = getattr(self,name)
sign = np.sign(value[0])
newval = current + value[0]
if sign*newval <= sign*value[1]: setattr(self, name, newval)
else: setattr(self,name,value[1])
else: setattr(self, name, getattr(self,name)+ value)
def limit(self,**kwargs):
"""Enforces limits on the value of a given property. Mainly useful for
reducing length/adding clarity to increment statements.
e.g., self.EE.limit(a=(0,100), v=(0,12)) is the same as
self.EE.a = min(100, max(0,self.EE.a));
self.EE.v = min(12, max(0,self.EE.v))
"""
for name, value in kwargs.items():
if name not in self._states: raise Exception(name+" not a property of "+self.name)
setattr(self, name, min(value[1], max(value[0], getattr(self,name))))
def mul(self,*states):
"""Returns the multiplication of given attributes of the model construct.
e.g., a = self.mul('x','y','z') is the same as
a = self.x*self.y*self.z
"""
a= self.get(states[0])
for state in states[1:]:
a = a * self.get(state)
return a
def div(self,*states):
"""Returns the division of given attributes of the model construct
e.g., a = self.div('x','y','z') is the same as
a = (self.x/self.y)/self.z
"""
a= self.get(states[0])
for state in states[1:]:
a = a / self.get(state)
return a
def add(self,*states):
"""Returns the addition of given attributes of the model construct
e.g., a = self.add('x','y','z') is the same as
a = self.x+self.y+self.z
"""
a= self.get(states[0])
for state in states[1:]:
a += self.get(state)
return a
def sub(self,*states):
"""Returns the addition of given attributes of the model construct
e.g., a = self.div('x','y','z') is the same as
a = (self.x-self.y)-self.z
"""
a= self.get(states[0])
for state in states[1:]:
a -= self.get(state)
return a
def same(self,values, *states):
"""Tests whether a given iterable values has the same value as each
give state in the model construct.
e.g., self.same([1,2],'a','b') is the same as
all([1,2]==[self.a, self.b])"""
test = values==self.get(*states)
if is_iter(test): return all(test)
else: return test
def different(self,values, *states):
"""Tests whether a given iterable values has any different value the
given states in the model construct.
        e.g., self.different([1,2],'a','b') is the same as
any([1,2]!=[self.a, self.b])"""
test = values!=self.get(*states)
if is_iter(test): return any(test)
else: return test
def make_flowdict(self,flownames,flows):
"""
Puts a list of flows with a list of flow names in a dictionary.
Parameters
----------
flownames : list or dict or empty
names of flows corresponding to flows
using {externalname: internalname}
flows : list
flows
Returns
-------
flowdict : dict
dict of flows indexed by flownames
"""
flowdict = {}
if not(flownames) or type(flownames)==dict:
flowdict = {f.name:f for f in flows}
if flownames:
for externalname, internalname in flownames.items():
flowdict[internalname] = flowdict.pop(externalname)
elif type(flownames)==list:
if len(flownames)==len(flows):
for ind, flowname in enumerate(flownames):
flowdict[flowname]=flows[ind]
else: raise Exception("flownames "+str(flownames)+"\n don't match flows "+str(flows)+"\n in: "+self.name)
else: raise Exception("Invalid flownames option in "+self.name)
return flowdict
def warn(self, *messages, stacklevel=2):
"""
Prints warning message(s) when called.
Parameters
----------
*messages : str
Strings to make up the message (will be joined by spaces)
stacklevel : int
Where the warning points to. The default is 2 (points to the place in the model)
"""
warnings.warn(' '.join(messages), stacklevel=stacklevel)
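# A minimal sketch (added; not part of the original module) of the Common state
# helpers on a hypothetical two-state construct standing in for a flow/function.
class _DemoState(Common):
    def __init__(self):
        self.name = 'demo'
        self._states = ['x', 'y']
        self.x, self.y = 1.0, 2.0
def _demo_common():
    d = _DemoState()
    d.put(x=3.0, y=4.0)      # set states by name
    d.inc(x=(1.0, 5.0))      # increment x by 1, capped at 5.0
    d.limit(y=(0.0, 3.5))    # clamp y into [0.0, 3.5]
    return d.get('x', 'y')   # -> array([4. , 3.5])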
class Block(Common):
"""
Superclass for FxnBlock and Component subclasses. Has functions for model setup, querying state, reseting the model
Attributes
----------
failrate : float
Failure rate for the block
time : float
internal time of the function
faults : set
faults currently present in the block. If the function is nominal, set is {'nom'}
faultmodes : dict
faults possible to inject in the block and their properties. Has structure:
- faultname :
            - dist : (float of % failures due to this fault)
            - oppvect : (list of relative probabilities of the fault occurring in each phase)
- rcost : cost of repairing the fault
opermodes : list
possible modes for the block to enter
rngs : dict
dictionary of random number generators for random states
seed : int
seed sequence for internal random number generator
mode : string
current mode of block operation
"""
def __init__(self, states={}):
"""
        Instantiates the block superclass. Called by FxnBlock and Component classes.
Parameters
----------
states : dict, optional
Internal states (variables, essentially) of the block. The default is {}.
"""
self._states=list(states.keys())
self._initstates=states.copy()
self.failrate = getattr(self, 'failrate', 1.0)
self.localname=''
for state in states.keys():
setattr(self, state,states[state])
self.faults=set(['nom'])
self.opermodes= getattr(self, 'opermodes', [])
self.faultmodes= getattr(self, 'faultmodes', {})
self.rngs=getattr(self, 'rngs', {})
self._rng_params=getattr(self, '_rng_params', {})
if not getattr(self, 'seed', []):
            self.seed=np.random.SeedSequence().generate_state(1)[0]
self.rng=np.random.default_rng(self.seed)
self.time=0.0
def __repr__(self):
if hasattr(self,'name'):
return getattr(self, 'name', '')+' '+self.__class__.__name__+' '+getattr(self,'type', '')+': '+str(self.return_states())
else: return 'New uninitialized '+self.__class__.__name__
def add_params(self, *params):
"""Adds given dictionary(s) of parameters to the function/block.
e.g. self.add_params({'x':1,'y':1}) results in a block where:
self.x = 1, self.y = 1
"""
for param in params:
for attr, val in param.items():
setattr(self, attr, val)
def assoc_rand_states(self, *states):
"""
Associates multiple random states with the model
Parameters
----------
*states : tuple
can give any number of tuples for each of the states.
The tuple is of the form (name, default), where:
name : str
name for the parameter to use in the model behavior.
default : int/float/str/etc
Default value for the parameter
"""
if type(states[0])==tuple:
for state in states:
self.assoc_rand_state(*state)
else: self.assoc_rand_state(*states)
def assoc_rand_state(self,name,default, seed=None, auto_update=[]):
"""
Associate a stochastic state with the Block. Enables the simulation of stochastic behavior over time.
Parameters
----------
name : str
name for the parameter to use in the model behavior.
default : int/float/str/etc
            Default value for the parameter
seed : int
seed for the random state generator to use. Defaults to None.
auto_update : list, optional
If given, updates the state with the given numpy method at each time-step.
List is made up of two arguments:
- generator_method : str
Name of the numpy random method to use.
see: https://numpy.org/doc/stable/reference/random/generator.html
- generator_params : tuple
Parameter inputs for the numpy generator function
"""
if not auto_update: generator_method, generator_params= None,None
else: generator_method, generator_params = auto_update
if not hasattr(self,'_states'): raise Exception("Call __init__ method for function first")
self._states.append(name)
self._initstates[name]=default
setattr(self, name,default)
if not seed: seed = self.rng.integers(np.iinfo(np.int32).max)
if not hasattr(self,'rngs'): self.rngs={name:np.random.default_rng(seed)}
else: self.rngs[name]=np.random.default_rng(seed)
if not hasattr(self,'_rng_params'): self._rng_params={name:(default, generator_method, generator_params,seed)}
else: self._rng_params[name]=(default, generator_method, generator_params,seed)
def assoc_faultstates(self, fstates, mode_app='single-state', probtype='prob', units='hr'):
"""
Adds health state attributes to the model (and a mode approach if desired).
Parameters
----------
fstates : Dict
Health states to incorporate in the model and their respective values.
e.g., {'state':[1,{0,2,-1}]}, {'state':{0,2,-1}}
mode_app : str
type of modes to elaborate from the given health states.
"""
if not hasattr(self,'_states'): raise Exception("Call __init__ method for function first")
franges = dict.fromkeys(fstates.keys())
nom_fstates = {}
for state in fstates:
self._states.append(state)
if type(fstates[state]) in [set, np.ndarray]:
nom_fstates[state] = 1.0
franges[state]=set(fstates[state])
elif type(fstates[state])==list:
nom_fstates[state] = fstates[state][0]
franges[state]=set(fstates[state][1])
elif type(fstates[state]) in [float, int]:
nom_fstates[state] = fstates[state]
franges[state]={}
else: raise Exception("Invalid input option for health state")
setattr(self, state, nom_fstates[state])
self._initstates.update(nom_fstates)
self.assoc_faultstate_modes(franges=franges, mode_app=mode_app, probtype=probtype, units=units)
def assoc_faultstate_modes(self, franges = {}, mode_app = 'none', manual_modes={}, probtype='prob', units='hr', key_phases_by='global'):
"""
Associates modes with given faultstates.
Parameters
----------
franges : dict, optional
Dictionary of form {'state':{val1, val2...}) of ranges for each health state (if used to generate modes). The default is {}.
mode_app : str
type of modes to elaborate from the given health states.
manual_modes : dict, optional
Dictionary/Set of faultmodes with structure, which has the form:
- dict {'fault1': [atts], 'fault2': atts}, where atts may be of form:
- states: {state1: val1, state2, val2}
- [states, faultattributes], where faultattributes is the same as in assoc_modes
probtype : str, optional
Type of probability in the probability model, a per-time 'rate' or per-run 'prob'.
The default is 'rate'
units : str, optional
Type of units ('sec'/'min'/'hr'/'day') used for the rates. Default is 'hr'
"""
if not getattr(self, 'is_copy', False):
if not getattr(self, 'faultmodes', []): self.faultmodes = dict()
if not getattr(self, 'mode_state_dict', False): self.mode_state_dict = {}
nom_fstates = {state: self._initstates[state] for state in franges}
        if mode_app=='none': pass    # no fault modes elaborated from health states
elif mode_app=='single-state':
for state in franges:
modes = {state+'_'+str(value):'synth' for value in franges[state]}
modestates = {state+'_'+str(value): {state:value} for value in franges[state]}
self.faultmodes.update(modes)
self.mode_state_dict.update(modestates)
elif mode_app =='all' or type(mode_app)==int:
for state in franges: franges[state].add(nom_fstates[state])
nomvals = tuple([*nom_fstates.values()])
statecombos = [i for i in itertools.product(*franges.values()) if i!=nomvals]
if type(mode_app)==int and len(statecombos)>0:
sample = self.rng.choice([i for i,_ in enumerate(statecombos)], size=mode_app, replace=False)
statecombos = [statecombos[i] for i in sample]
self.faultmodes.update({'hmode_'+str(i):'synth' for i in range(len(statecombos))})
self.mode_state_dict.update({'hmode_'+str(i): {list(franges)[j]:state for j, state in enumerate(statecombos[i])} for i in range(len(statecombos))})
else: raise Exception("Invalid mode elaboration approach")
num_synth_modes = len(self.mode_state_dict)
for mode,atts in manual_modes.items():
if type(atts)==list:
self.mode_state_dict.update({mode:atts[0]})
if not getattr(self, 'exclusive_faultmodes', False): print("Changing fault mode exclusivity to True")
                self.assoc_modes(faultmodes={mode:atts[1]}, initmode=getattr(self,'mode', 'nom'), probtype=probtype, exclusive=True, key_phases_by=key_phases_by)
elif type(atts)==dict:
self.mode_state_dict.update({mode:atts})
self.faultmodes.update({mode:'synth'})
num_synth_modes+=1
if not hasattr(self,'key_phases_by'): self.key_phases_by=key_phases_by
elif getattr(self, 'key_phases_by', '')!=key_phases_by:
print("Changing key_phases_by to "+key_phases_by)
self.key_phases_by=key_phases_by
def add_he_rate(self,gtp,EPCs={'na':[1,0]}):
"""
Calculates self.failrate based on a human error probability model.
Parameters
----------
gtp : float
Generic Task Probability. (from HEART)
EPCs : Dict or list
Error producing conditions (and respective factors) for a given task (from HEART). Used in format:
            Dict {'name':[EPC factor, Effect proportion]} or list [[EPC factor, Effect proportion],[EPC factor, Effect proportion]]
"""
if type(EPCs)==dict: EPC_f = np.prod([((epc-1)*x+1) for _, [epc,x] in EPCs.items()])
elif type(EPCs)==list: EPC_f = np.prod([((epc-1)*x+1) for [epc,x] in EPCs])
else: raise Exception("Invalid type for EPCs: "+str(type(EPCs)))
self.failrate = gtp*EPC_f
def assoc_modes(self, faultmodes={}, opermodes=[],initmode='nom', name='', probtype='rate', units='hr', exclusive=False, key_phases_by='global', longnames={}):
"""
Associates fault and operational modes with the block when called in the function or component.
Parameters
----------
faultmodes : dict, optional
Dictionary/Set of faultmodes with structure, which can have the forms:
- set {'fault1', 'fault2', 'fault3'} (just the respective faults)
- dict {'fault1': faultattributes, 'fault2': faultattributes}, where faultattributes is:
- float: rate for the mode
- [float, float]: rate and repair cost for the mode
                - [float, oppvect, float]: rate, opportunity vector, and repair cost for the mode
opportunity vector can be specified as:
[float1, float2,...], a vector of relative likelihoods for each phase, or
{opermode:float1, opermode:float1}, a dict of relative likelihoods for each phase/mode
the phases/modes to key by are defined in "key_phases_by"
opermodes : list, optional
List of operational modes
initmode : str, optional
Initial operational mode. Default is 'nom'
name : str, optional
(for components/actions only) Name of the component. The default is ''.
probtype : str, optional
Type of probability in the probability model, a per-time 'rate' or per-run 'prob'.
The default is 'rate'
units : str, optional
Type of units ('sec'/'min'/'hr'/'day') used for the rates. Default is 'hr'
exclusive : True/False
Whether fault modes are exclusive of each other or not. Default is False (i.e. more than one can be present).
key_phases_by : 'self'/'none'/'global'/'fxnname'
Phases to key the faultmodes by (using local, global, or an external function's modes'). Default is 'global'
longnames : dict
Longer names for the faults (if desired). {faultname: longname}
"""
if opermodes:
self.opermodes = opermodes
if initmode in self.opermodes:
self._states.append('mode')
self._initstates['mode'] = initmode
self.mode = initmode
else: raise Exception("Initial mode "+initmode+" not in defined modes for "+self.name)
else:
self._states.append('mode')
self._initstates['mode'] = initmode
self.mode = initmode
self.exclusive_faultmodes = exclusive
self.localname = name
if not getattr(self, 'is_copy', False): #saves time by using the same fault mode dictionary from previous
if not getattr(self, 'faultmodes', []):
if name: self.faultmodes=dict()
else: self.faultmodes=dict.fromkeys(faultmodes)
for mode in faultmodes:
self.faultmodes[mode]=dict.fromkeys(('dist', 'oppvect', 'rcost', 'probtype', 'units'))
self.faultmodes[mode]['probtype'] = probtype
self.faultmodes[mode]['units'] = units
if type(faultmodes) == set: # minimum information - here the faultmodes are only a set of labels
self.faultmodes[mode]['dist'] = 1.0/len(faultmodes)
self.faultmodes[mode]['oppvect'] = [1.0]
self.faultmodes[mode]['rcost'] = 0.0
elif type(faultmodes[mode]) == float: # dict of modes: dist, where dist is the distribution (or individual rate/probability)
self.faultmodes[mode]['dist'] = faultmodes[mode]
self.faultmodes[mode]['oppvect'] = [1.0]
self.faultmodes[mode]['rcost'] = 0.0
elif len(faultmodes[mode]) == 3: # three-arg mode definition: dist, oppvect, repair costs
self.faultmodes[mode]['dist'] = faultmodes[mode][0]
self.faultmodes[mode]['oppvect'] = faultmodes[mode][1]
self.faultmodes[mode]['rcost'] = faultmodes[mode][2]
if key_phases_by =='none': raise Exception("How should the opportunity vector be keyed? Provide 'key_phases_by' option.")
elif len(faultmodes[mode]) == 2: # dist, repair costs
self.faultmodes[mode]['dist'] = faultmodes[mode][0]
self.faultmodes[mode]['oppvect'] = [1.0]
self.faultmodes[mode]['rcost'] = faultmodes[mode][1]
elif len(faultmodes[mode]) == 1: # dist only
self.faultmodes[mode]['dist'] = faultmodes[mode][0]
self.faultmodes[mode]['oppvect'] = [1.0]
self.faultmodes[mode]['rcost'] = 0.0
else:
raise Exception("Invalid mode definition")
self.faultmodes[mode]['longname'] = longnames.get(mode,mode)
if key_phases_by=='self': self.key_phases_by = self.name
else: self.key_phases_by = key_phases_by
def choose_rand_fault(self, faults, default='first', combinations=1):
"""
Randomly chooses a fault or combination of faults to insert in the function.
Parameters
----------
faults : list
list of fault modes to choose from
default : str/list, optional
Default fault to inject when model is run deterministically.
The default is 'first', which chooses the first in the list.
Can provide a mode as a str or a list of modes
combinations : int, optional
Number of combinations of faults to elaborate and select from.
The default is 1, which just chooses single fault modes.
"""
if getattr(self, 'run_stochastic', True):
faults = [list(x) for x in itertools.combinations(faults, combinations)]
self.add_fault(*self.rng.choice(faults))
elif default=='first': self.add_fault(faults[0])
elif type(default)==str: self.add_fault(default)
else: self.add_fault(*default)
def set_rand(self,statename,methodname, *args):
"""
Update the given random state with a given method and arguments
Parameters
----------
statename : str
name of the random state defined in assoc_rand_state(s)
methodname :
str name of the numpy method to call in the rng
*args : args
arguments for the numpy method
"""
if getattr(self, 'run_stochastic', True):
gen_method = getattr(self.rngs[statename], methodname)
setattr(self, statename, gen_method(*args))
def to_default(self,*statenames):
""" Resets (given or all by default) random states to their default values
Parameters
----------
*statenames : str, str, str...
names of the random state defined in assoc_rand_state(s)
"""
if not statenames: statenames=list(self._rng_params.keys())
for statename in statenames: setattr(self, statename, self._rng_params[statename][0])
def set_mode(self, mode):
"""Sets a mode in the block
Parameters
----------
mode : str
name of the mode to enter.
"""
if self.exclusive_faultmodes:
if self.any_faults(): raise Exception("Cannot set mode from fault state without removing faults.")
elif mode in self.faultmodes: self.to_fault(mode)
else: self.mode=mode
else: self.mode = mode
def in_mode(self,*modes):
"""Checks if the system is in a given operational mode
Parameters
----------
*modes : strs
names of the mode to check
"""
return self.mode in modes
def has_fault(self,*faults):
"""Check if the block has fault (a str)
Parameters
----------
*faults : strs
names of the fault to check.
"""
return any(self.faults.intersection(set(faults)))
def no_fault(self,fault):
"""Check if the block does not have fault (a str)
Parameters
----------
fault : str
name of the fault to check.
"""
return not(any(self.faults.intersection(set([fault]))))
def any_faults(self):
"""check if the block has any fault modes"""
return any(self.faults.difference({'nom'}))
def to_fault(self,fault):
"""Moves from the current fault mode to a new fault mode
Parameters
----------
fault : str
name of the fault mode to switch to
"""
self.faults.clear()
self.faults.add(fault)
if self.exclusive_faultmodes: self.mode = fault
def add_fault(self,*faults):
"""Adds fault (a str) to the block
Parameters
----------
*fault : str(s)
            name(s) of the fault to add to the block
"""
self.faults.update(faults)
if self.exclusive_faultmodes:
if len(faults)>1: raise Exception("Multiple fault modes added to function with exclusive fault representation")
elif len(faults)==1 and list(faults)[0]!='nom': self.mode =faults[0]
def replace_fault(self, fault_to_replace,fault_to_add):
"""Replaces fault_to_replace with fault_to_add in the set of faults
Parameters
----------
fault_to_replace : str
name of the fault to replace
fault_to_add : str
name of the fault to add in its place
"""
self.faults.add(fault_to_add)
self.faults.remove(fault_to_replace)
if self.exclusive_faultmodes: self.mode = fault_to_add
def remove_fault(self, fault_to_remove, opermode=False, warnmessage=False):
"""Removes fault in the set of faults and returns to given operational mode
Parameters
----------
fault_to_replace : str
name of the fault to remove
opermode : str (optional)
operational mode to return to when the fault mode is removed
warnmessage : str/False
Warning to give when performing operation. Default is False (no warning)
"""
self.faults.discard(fault_to_remove)
if len(self.faults) == 0: self.faults.add('nom')
if opermode: self.mode = opermode
if self.exclusive_faultmodes and not(opermode):
raise Exception("Unclear which operational mode to enter with fault removed")
if warnmessage: self.warn(warnmessage,"Fault mode `"+fault_to_remove+"' removed.", stacklevel=3)
def remove_any_faults(self, opermode=False, warnmessage=False):
"""Resets fault mode to nominal and returns to the given operational mode
Parameters
----------
opermode : str (optional)
operational mode to return to when the fault mode is removed
warnmessage : str/False
Warning to give when performing operation. Default is False (no warning)
"""
self.faults.clear()
self.faults.add('nom')
if opermode: self.mode = opermode
if self.exclusive_faultmodes and not(opermode):
raise Exception("Unclear which operational mode to enter with fault removed")
if warnmessage: self.warn(warnmessage, "All faults removed.")
def get_flowtypes(self):
"""Returns the names of the flow types in the model"""
return {obj.type for name, obj in self.flows.items()}
def reset(self): #reset requires flows to be cleared first
""" Resets the block to the initial state with no faults. Used by default in
derived objects when resetting the model. Requires associated flows to be cleared first."""
self.faults.clear()
self.faults.add('nom')
for state in self._initstates.keys():
setattr(self, state,self._initstates[state])
for generator in self.rngs:
self.rngs[generator]=np.random.default_rng(self._rng_params[generator][-1])
self.rng = np.random.default_rng(self.seed)
if hasattr(self, 'time'): self.time=0.0
if hasattr(self, 'tstep'): self.tstep=self.tstep
if hasattr(self, 'internal_flows'):
for flowname, flow in self.internal_flows.items():
flow.reset()
if self.type=='function':
for name, component in self.components.items():
component.reset()
for timername in self.timers:
getattr(self, timername).reset()
self.updatefxn('reset', faults=['nom'], time=0)
def return_states(self):
"""
Returns states of the block at the current state. Used (iteratively) to record states over time.
Returns
-------
states : dict
States (variables) of the block
faults : set
Faults present in the block
"""
states={}
for state in self._states:
states[state]=getattr(self,state)
return states, self.faults.copy()
def check_update_nominal_faults(self):
if self.faults.difference({'nom'}): self.faults.difference_update({'nom'})
elif len(self.faults)==0: self.faults.update(['nom'])
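# A hedged usage sketch (added; not part of the original module): a bare Block
# with illustrative modes, a HEART-style failure rate, and a random state.
def _demo_block():
    b = Block(states={'voltage': 1.0})
    b.assoc_modes(faultmodes={'short': 0.2, 'open': 0.8})
    b.add_he_rate(0.02, EPCs={'stress': [4, 0.5]})  # failrate = 0.02*((4-1)*0.5+1) = 0.05
    b.assoc_rand_state('noise', 0.0)
    b.set_rand('noise', 'normal', 0.0, 1.0)         # draw noise ~ N(0, 1)
    b.add_fault('short')
    return b.has_fault('short'), b.failrate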
#Function superclass
class FxnBlock(Block):
"""
Superclass for functions.
Attributes (specific to FxnBlock--see Block glass for more)
----------
type : str
labels the function as a function (may not be necessary) Default is 'function'
flows : dict
flows associated with the function. structured {flow:{value:XX}}
components : dict
component instantiations of the function (if any)
timers : set
names of timers to be used in the function (if any)
tstep : float
timestep of the model in the function (added/overridden by model definition)
"""
def __init__(self,name, flows, flownames=[], states={}, components={},timers={}, tstep=1.0, seed=None):
"""
Instantiates the function superclass with the relevant parameters.
Parameters
----------
flows :list
        Flow objects (in order corresponding to flownames) to associate with the function
flownames : list/dict, optional
Names of flows to use in the function, if private flow names are needed (e.g. functions with in/out relationships).
Either provided as a list (in the same order as the flows) of all flow names corresponding to those flows
Or as a dict of form {External Flowname: Internal Flowname}
states : dict, optional
Internal states to associate with the function. The default is {}.
components : dict, optional
Component objects to associate with the function. The default is {}.
timers : set, optional
Set of names of timers to use in the function. The default is {}.
"""
self.type = 'function'
self.name = name
self.internal_flows=dict()
self.flows=self.make_flowdict(flownames,flows)
for flow in self.flows.keys():
setattr(self, flow, self.flows[flow])
self.components=components
if not getattr(self, 'faultmodes', []): self.faultmodes={}
self.compfaultmodes= dict()
self.exclusive_faultmodes = False
for cname in components:
self.faultmodes.update({components[cname].localname+f:vals for f, vals in components[cname].faultmodes.items()})
self.compfaultmodes.update({components[cname].localname+modename:cname for modename in components[cname].faultmodes})
setattr(self, cname, components[cname])
self.timers = timers
for timername in timers:
setattr(self, timername, Timer(timername))
self.tstep=tstep
self.actions={}; self.conditions={}; self.condition_edges={}; self.actfaultmodes = {}
self.action_graph = nx.DiGraph(); self.flow_graph = nx.Graph()
super().__init__(states)
def __repr__(self):
blockret = super().__repr__()
if getattr(self, 'actions'): return blockret+', active: '+str(self.active_actions)
else: return blockret
def add_act(self, name, action, *flows, duration=0.0, **params):
"""
Associate an Action with the Function Block for use in the Action Sequence Graph
Parameters
----------
name : str
Internal Name for the Action
action : Action
Action class to instantiate
*flows : flow
Flows (optional) which connect the actions
**params : any
parameters to instantiate the Action with.
"""
self.actions[name] = action(name,flows, **params)
self.actions[name].duration=duration
setattr(self, name, self.actions[name])
self.action_graph.add_node(name)
self.flow_graph.add_node(name, bipartite=0)
for flow in flows:
self.flow_graph.add_node(flow.name, bipartite=1)
self.flow_graph.add_edge(name,flow.name)
def cond_pass(self):
return True
def add_cond(self, start_action, end_action, name='auto',condition='pass'):
"""
Associates a Condition with the Function Block for use in the Action Sequence Graph
Parameters
----------
start_action : str
Action where the condition is checked
end_action : str
Action that the condition leads to.
name : str
Name for the condition. Defaults to numbered conditions if none are provided.
        condition : method
            Method in the class to use as a condition. Defaults to self.cond_pass if none is provided
"""
if name=='auto': name = str(len(self.conditions)+1)
if condition=='pass': condition = self.cond_pass
self.conditions[name] = condition
self.condition_edges[name] = (start_action, end_action)
self.action_graph.add_edge(start_action, end_action, **{'name':name, name:'name', 'arrow':True})
def build_ASG(self, initial_action="auto",state_rep="finite-state", max_action_prop="until_false", mode_rep="replace", asg_proptype='dynamic', per_timestep=False, asg_pos={}):
"""
Constructs the Action Sequence Graph with the given parameters.
Parameters
----------
initial_action : str/list
Initial action to set as active. Default is 'auto'
- 'auto' finds the starting node of the graph and uses it
- 'ActionName' sets the given action as the first active action
- providing a list of actions will set them all to active (if multi-state rep is used)
state_rep : 'finite-state'/'multi-state'
How the states of the system are represented. Default is 'finite-state'
- 'finite-state' means only one action in the system can be active at once (i.e., a finite state machine)
- 'multi-state' means multiple actions can be performed at once
max_action_prop : 'until_false'/'manual'/int
How actions progress. Default is 'until_false'
- 'until_false' means actions are simulated until all outgoing conditions are false
- providing an integer places a limit on the number of actions that can be performed per timestep
mode_rep : 'replace'/'independent'
How actions are used to represent modes. Default is 'replace.'
- 'replace' uses the actions to represent the operational modes of the system (only compatible with 'exclusive' representation)
            - 'independent' keeps the actions and function-level mode separate
asg_proptype : 'static'/'dynamic'/'manual'
Which propagation step to execute the Action Sequence Graph in. Default is 'dynamic'
- 'manual' means that the propagation is performed manually (defined in a behavior method)
per_timestep : bool
Defines whether the action sequence graph is reset to the initial state each time-step (True) or stays in the current action (False). Default is False
asg_pos : dict, optional
Positions of the nodes of the action/flow graph {node: [x,y]}. Default is {}
"""
if initial_action=='auto': initial_action = [act for act, in_degree in self.action_graph.in_degree if in_degree==0]
elif type(initial_action)==str: initial_action=[initial_action]
self.set_active_actions(initial_action)
self.set_atts(state_rep=state_rep, max_action_prop=max_action_prop, mode_rep=mode_rep, asg_proptype=asg_proptype,initial_action=initial_action, per_timestep=per_timestep)
if self.state_rep=='finite-state' and len(initial_action)>1: raise Exception("Cannot have more than one initial action with finite-state representation")
if self.mode_rep=='replace':
if not self.exclusive_faultmodes: raise Exception("Cannot use mode_rep='replace' option without an exclusive_faultmodes representation (set in assoc_modes)")
elif not self.state_rep=='finite-state': raise Exception("Cannot use mode_rep='replace' option without using state_rep=`finite-state`")
elif self.opermodes: raise Exception("Cannot use mode_rep='replace' option simultaneously with defined operational modes in assoc_modes()")
if len(self.faultmodes)>0: raise Exception("Cannot use mode_rep='replace option while having Function-level fault modes (define at Action level)")
else:
self.opermodes = [*self.actions.keys()]
self.mode=initial_action[0]
elif self.mode_rep=='independent':
if self.exclusive_faultmodes: raise Exception("Cannot use mode_rep='independent option' without a non-exclusive fault mode representation (set in assoc_modes)")
for aname, action in self.actions.items():
modes_to_add = {action.localname+f:val for f,val in action.faultmodes.items()}
self.faultmodes.update(modes_to_add)
fmode_intersect = set(modes_to_add).intersection(self.actfaultmodes)
if any(fmode_intersect):
raise Exception("Action "+aname+" overwrites existing fault modes: "+str(fmode_intersect)+". Rename the faults (or use name option in assoc_modes)")
self.actfaultmodes.update({action.localname+modename:aname for modename in action.faultmodes})
self.asg_pos=asg_pos
def set_active_actions(self, actions):
"""Helper method for setting given action(s) as active"""
if type(actions)==str:
if actions in self.actions: actions = [actions]
else: raise Exception("initial_action="+actions+" not in self.actions: "+str(self.actions))
if type(actions)==list:
self.active_actions = set(actions)
if any(self.active_actions.difference(self.actions)): raise Exception("Initial actions not associated with model: "+str(self.active_actions.difference(self.actions)))
else: raise Exception("Invalid option for initial_action")
def show_ASG(self, gtype='combined', with_cond_labels=True, pos=[]):
"""
Shows a visual representation of the internal Action Sequence Graph of the Function Block
Parameters
----------
gtype : 'combined'/'flows'/'actions'
Gives a graphical representation of the ASG. Default is 'combined'
- 'actions' (for function input): plots the sequence of actions in the function's Action Sequence Graph
- 'flows' (for function input): plots the action/flow connections in the function's Action Sequence Graph
            - 'combined' (for function input): plots both the sequence of actions in the function's ASG and action/flow connections
with_cond_labels: Bool
Whether or not to label the conditions
pos : dict
Dictionary of node positions for actions/flows
"""
import matplotlib.pyplot as plt
if gtype=='combined': graph = nx.compose(self.flow_graph, self.action_graph)
elif gtype=='flows': graph = self.flow_graph
elif gtype=='actions': graph = self.action_graph
if not pos:
if not self.asg_pos: pos=nx.planar_layout(graph)
else: pos=self.asg_pos
nx.draw(graph, pos=pos, with_labels=True, node_color='grey')
nx.draw_networkx_nodes(self.action_graph, pos=pos, node_shape='s', node_color='skyblue')
nx.draw_networkx_nodes(self.action_graph, nodelist=self.active_actions, pos=pos, node_shape='s', node_color='green')
edge_labels = {(in_node, out_node): label for in_node, out_node, label in graph.edges(data='name') if label}
if with_cond_labels: nx.draw_networkx_edge_labels(graph, pos, edge_labels)
if gtype=='combined' or gtype=='actions':
nx.draw_networkx_edges(self.action_graph, pos,arrows=True, arrowsize=30, arrowstyle='->', node_shape='s', node_size=100)
return plt.gcf()
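# e.g., a hypothetical usage sketch (assuming fxn is a FxnBlock with an associated ASG):
# fig = fxn.show_ASG(gtype='actions')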
def add_flow(self,flowname, flowdict={}, flowtype=''):
"""
Adds a flow with given attributes to the Function Block
Parameters
----------
flowname : str
Unique flow name to give the flow in the function
flowdict : dict, Flow, set or empty dict
Dictionary of flow attributes e.g. {'value':XX}, or the Flow object.
If a set of attribute names is provided, each will be given a value of 1
If left empty, the flow will be represented with {flowname: 1}
"""
if not getattr(self, 'is_copy', False):
if not flowtype: flowtype = flowname
if not flowdict: self.internal_flows[flowname]=Flow({flowname:1}, flowname, flowtype)
elif type(flowdict) == set: self.internal_flows[flowname]=Flow({f:1 for f in flowdict}, flowname, flowtype)
elif type(flowdict) == dict: self.internal_flows[flowname]=Flow(flowdict, flowname,flowtype)
elif isinstance(flowdict, Flow):self.internal_flows[flowname] = flowdict
else: raise Exception('Invalid flow. Must be a dict, set, or Flow')
setattr(self, flowname, self.internal_flows[flowname])
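# e.g., a usage sketch of the flowdict options above (hypothetical flow names):
# self.add_flow('Water', {'rate':1.0, 'pressure':1.0}, flowtype='Liquid') # dict of states
# self.add_flow('Signal', {'on'}) # set: each named attribute initialized to 1
# self.add_flow('EE') # empty: represented as {'EE': 1}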
def copy(self, newflows, *attr):
"""
Creates a copy of the function object with newflows and arbitrary parameters associated with the copy. Used when copying the model.
Parameters
----------
newflows : list
list of new flow objects to be associated with the copy of the function
*attr : any
arbitrary parameters to add (if the function takes in more than flows, e.g. design variables)
Returns
-------
copy : FxnBlock
Copy of the given function with new flows
"""
copy = self.__new__(self.__class__) # Is this adequate? Wouldn't this give it new components?
copy.is_copy=True
copy.__init__(self.name, newflows, *attr)
copy.faults = self.faults.copy()
if hasattr(self, 'faultmodes'): copy.faultmodes = self.faultmodes
if hasattr(self, 'mode_state_dict'): copy.mode_state_dict = self.mode_state_dict
for flowname, flow in self.internal_flows.items():
copy.internal_flows[flowname] = flow.copy()
setattr(copy, flowname, copy.internal_flows[flowname])
for action in self.actions:
for state in copy.actions[action]._initstates.keys():
setattr(copy.actions[action], state, getattr(self.actions[action], state))
copy.actions[action].faults=self.actions[action].faults.copy()
setattr(copy, 'active_actions', getattr(self, 'active_actions', {}))
for timername in self.timers:
timer = getattr(self, timername)
copytimer = getattr(copy, timername)
copytimer.set_timer(timer.time, tstep=timer.tstep)
copytimer.mode=timer.mode
for state in self._initstates.keys():
setattr(copy, state, getattr(self, state))
for generator in self.rngs:
copy.rngs[generator]=np.random.default_rng(self._rng_params[generator][-1])
copy.rngs[generator].__setstate__(self.rngs[generator].__getstate__())
if hasattr(self, 'time'): copy.time=self.time
if hasattr(self, 'tstep'): copy.tstep=self.tstep
return copy
def update_modestates(self):
"""Updates states of the model associated with a specific fault mode (see assoc_modes)."""
num_update = 0
for fault in self.faults:
if fault in self.mode_state_dict:
for state, value in self.mode_state_dict[fault].items():
setattr(self, state, value)
num_update+=1
if num_update > 1: raise Exception("Exclusive fault mode scenarios present at the same time")
def update_stochastic_states(self):
"""Updates the defined stochastic states defined to auto-update (see assoc_randstates)."""
for statename, generator in self.rngs.items():
if self._rng_params[statename][1]:
gen_method = getattr(generator, self._rng_params[statename][1])
setattr(self, statename, gen_method(*self._rng_params[statename][2]))
def prop_internal(self, faults, time, run_stochastic, proptype):
"""
Propagates behaviors through the internal Action Sequence Graph
Parameters
----------
faults : list, optional
Faults to inject in the function. The default is ['nom'].
time : float, optional
Model time. The default is 0.
run_stochastic : bool
Whether to run the simulation using stochastic or deterministic behavior
proptype : str
Type of propagation step to update ('static', 'dynamic', or 'reset')
"""
active_actions = self.active_actions
num_prop = 0
while active_actions:
new_active_actions=set(active_actions)
for action in active_actions:
self.actions[action].updateact(time, run_stochastic, proptype=proptype, tstep=self.tstep)
action_cond_edges = self.action_graph.out_edges(action, data=True)
for act_in, act_out, atts in action_cond_edges:
if self.conditions[atts['name']]() and getattr(self.actions[action], 'duration',0.0)+self.tstep<=self.actions[action].t_loc:
self.actions[action].t_loc=0.0
new_active_actions.add(act_out)
new_active_actions.discard(act_in)
if len(new_active_actions)>1 and self.state_rep=='finite-state':
raise Exception("Multiple active actions in a finite-state representation: "+str(new_active_actions))
num_prop +=1
if type(self.asg_proptype)==int and num_prop>=self.asg_proptype:
break
if new_active_actions==set(active_actions):
break
else: active_actions=new_active_actions
if num_prop>10000: raise Exception("Undesired looping in Function ASG for: "+self.name)
if self.mode_rep=='replace': self.mode=[*active_actions][0]
self.active_actions = active_actions
def updatefxn(self,proptype, faults=[], time=0, run_stochastic=False):
"""
Updates the state of the function at a given time and injects faults.
Parameters
----------
proptype : str
Type of propagation step to update ('static', 'dynamic', or 'reset')
faults : list, optional
Faults to inject in the function. The default is ['nom'].
time : float, optional
Model time. The default is 0.
run_stochastic : bool
Whether to run the simulation using stochastic or deterministic behavior
"""
self.run_stochastic=run_stochastic
self.add_fault(*faults) #if there is a fault, it is instantiated in the function
if hasattr(self, 'condfaults'): self.condfaults(time) #conditional faults and behavior are then run
if hasattr(self, 'mode_state_dict') and any(faults): self.update_modestates()
if time>self.time and run_stochastic: self.update_stochastic_states()
comp_actions = {**self.components, **self.actions}
if getattr(self, 'per_timestep', False):
self.set_active_actions(self.initial_action)
for action in self.active_actions: self.actions[action].t_loc=0.0
if comp_actions: # propagate faults from function level to component level
for fault in self.faults:
if fault in self.compfaultmodes:
component = self.components[self.compfaultmodes[fault]]
component.add_fault(fault[len(component.localname):])
if fault in self.actfaultmodes:
action = self.actions[self.actfaultmodes[fault]]
action.add_fault(fault[len(action.localname):])
if any(self.actions) and self.asg_proptype==proptype: self.prop_internal(faults, time, run_stochastic, proptype)
if proptype=='static' and hasattr(self,'behavior'): self.behavior(time) #generic behavioral methods are run at all steps
if proptype=='static' and hasattr(self,'static_behavior'): self.static_behavior(time)
elif proptype=='dynamic' and hasattr(self,'dynamic_behavior') and time > self.time: self.dynamic_behavior(time)
elif proptype=='reset':
if hasattr(self,'static_behavior'): self.static_behavior(time)
if hasattr(self,'dynamic_behavior'): self.dynamic_behavior(time)
if comp_actions: # propagate faults from component level to function level
self.faults.difference_update(self.compfaultmodes)
self.faults.difference_update(self.actfaultmodes)
for compname, comp in comp_actions.items():
self.faults.update({comp.localname+f for f in comp.faults if f!='nom'})
self.time=time
self.check_update_nominal_faults()
if self.exclusive_faultmodes==True and len(self.faults)>1:
raise Exception("More than one fault present in "+self.name+"\n at t= "+str(time)+"\n faults: "+str(self.faults)+"\n Is the mode representation nonexclusive?")
return
class GenericFxn(FxnBlock):
"""Generic function block. For use when the user has not yet defined a class for the
given (to be implemented) function block. Acts as a placeholder that enables simulation."""
def __init__(self, name, flows):
super().__init__(name, flows)
class Component(Block):
"""
Superclass for components (most attributes and methods inherited from Block superclass)
"""
def __init__(self,name, states={}):
"""
Instantiates the component (inherits from the Block superclass)
Parameters
----------
name : str
Unique name ID for the component
states : dict, optional
States to use in the component. The default is {}.
"""
self.type = 'component'
self.name = name
super().__init__(states)
def behavior(self,time):
""" Placeholder for component behavior methods. Enables one to include components
without yet having a defined behavior for them."""
return 0
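# e.g., a minimal Component subclass sketch (hypothetical 'Valve' with a 'stuck' fault mode):
# class Valve(Component):
#     def __init__(self, name):
#         super().__init__(name, {'open':1.0})
#     def behavior(self, time):
#         if 'stuck' in self.faults: self.open=0.0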
class Action(Block):
"""
Superclass for actions (most attributes and methods inherited from Block superclass)
"""
def __init__(self,name, flows, flownames=[], states={}):
"""
Instantiates the action (inherits from the Block superclass)
Parameters
----------
name : str
Unique name ID for the action
flows : list
Flow objects to associate with the action
flownames : list, optional
Local names to give the associated flows (see make_flowdict). The default is [].
states : dict, optional
States to use in the action. The default is {}.
"""
self.type = 'action'
self.name = name
self.flows=self.make_flowdict(flownames,flows)
for flow in self.flows.keys():
setattr(self, flow, self.flows[flow])
super().__init__({**states, 't_loc':0.0})
def updateact(self, time=0, run_stochastic=False, proptype='dynamic', tstep=1.0):
"""
Updates the behaviors, faults, times, etc of the action
Parameters
----------
time : float, optional
Model time. The default is 0.
run_stochastic : bool
Whether to run the simulation using stochastic or deterministic behavior
"""
self.run_stochastic=run_stochastic
if time>self.time and run_stochastic: self.update_stochastic_states()
if proptype=='dynamic':
if self.time<time: self.behavior(time); self.t_loc+=tstep
else: self.behavior(time); self.t_loc+=tstep
self.time=time
self.check_update_nominal_faults()
def behavior(self, time):
"""Placeholder behavior method for actions"""
a=0
class Flow(Common):
"""
Superclass for flows. Instanced by Model.add_flow but can also be used as a flow superclass if flow attributes are not easily definable as a dict.
"""
def __init__(self, states, name, ftype='generic'):
"""
Instances the flow with given states.
Parameters
----------
states : dict
states and their values to be associated with the flow
name : str
name of the flow
ftype : str, optional
type label to give the flow. The default is 'generic'.
"""
self.type=ftype
self.name=name
self._initstates=states.copy()
self._states=list(states.keys())
for state in self._states:
setattr(self, state, states[state])
def __repr__(self):
if hasattr(self,'name'):
return getattr(self, 'name')+' '+getattr(self, 'type')+' flow: '+str(self.status())
else: return "Uninitialized Flow"
def reset(self):
""" Resets the flow to the initial state"""
for state in self._initstates:
setattr(self, state, self._initstates[state])
def status(self):
"""
Returns a dict with the current states of the flow.
"""
states={}
for state in self._states:
states[state]=getattr(self,state)
return states
def copy(self):
"""
Returns a copy of the flow object (used when copying the model)
"""
states={}
for state in self._states:
states[state]=getattr(self,state)
if self.__class__==Flow:
copy = self.__class__(states, self.name, self.type)
else:
copy = self.__class__()
for state in self._states:
setattr(copy, state, getattr(self,state))
return copy
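# e.g., a usage sketch of the Flow interface above (hypothetical states):
# water = Flow({'rate':1.0, 'pressure':1.0}, 'Water', ftype='Liquid')
# water.rate = 0.5 # states are set as attributes
# water.status() # -> {'rate':0.5, 'pressure':1.0}
# water.reset() # -> back to {'rate':1.0, 'pressure':1.0}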
#Model superclass
class Model(object):
"""
Model superclass used to construct the model, return representations of the model, and copy and reset the model when run.
Attributes
----------
type : str
labels the model as a model (may not be necessary)
flows : dict
dictionary of flows objects in the model indexed by name
fxns : dict
dictionary of functions in the model indexed by name
params,modelparams,valparams : dict
dictionaries of (optional) parameters for a given instantiation of a model
modelparams : dict
dictionary of parameters for running a simulation. defines these parameters in the model:
phases : dict
phases {'name':[start, end]} that the simulation progresses through
times : array
array of times to sample (if desired) [starttime, sampletime1, sampletime2,... endtime]
tstep : float
timestep used in the simulation. default is 1.0
units : str
time-units. default is hours
use_end_condition : bool
whether to use an end-condition method (defined by user-defined end_condition method)
or defined end time to end the simulation
seed : int
seed used for the internal random number generator
valparams :
dictionary of parameters for defining what simulation constructs to record for find_classification
bipartite : networkx graph
bipartite graph view of the functions and flows
graph : networkx graph
multigraph view of functions and flows
"""
def __init__(self, params={},modelparams={}, valparams='all'):
"""
Instantiates internal model attributes with predetermined:
Parameters
----------
params : dict
design variables of the model
modelparams : dict
dictionary of:
- global phases {'phase': [starttime, endtime]}
- times [starttime, ..., endtime] (middle time used for sampling),
- timestep (float) to run the model with)
- seed (int) - if present, sets a seed to run the random number generators from
- use_end_condition (bool) - if True (default), uses end_condition() in the model to determine when the simulation ends.
valparams : dict or str ('all'/'flows'/'fxns')
parameters to keep a history of for find_classification. Default is 'all'.
The dict option has the form of mdlhist: {'fxns':{fxn1:{param1}}, 'flows':{flow1:{param1}}}
"""
self.type='model'
self.flows={}
self.fxns={}
self.params=params
self.valparams = valparams
self.modelparams=modelparams
# model defaults to static representation if no timerange
self.phases=modelparams.get('phases',{'na':[1]})
self.times=modelparams.get('times',[1])
self.tstep = modelparams.get('tstep', 1.0)
self.units = modelparams.get('units', 'hr')
self.use_end_condition = modelparams.get('use_end_condition', True)
if modelparams.get('seed', False): self.seed = modelparams['seed']
else:
self.seed=np.random.SeedSequence.generate_state(np.random.SeedSequence(),1)[0]
modelparams['seed']=self.seed
self._rng = np.random.default_rng(self.seed)
self.functionorder=OrderedSet() #set is ordered and executed in the order specified in the model
self._fxnflows=[]
self._fxninput={}
def __repr__(self):
fxnstr = ''.join(['- '+fxnname+':'+str(fxn.return_states())+' '+str(getattr(fxn,'active_actions',''))+'\n' for fxnname,fxn in self.fxns.items()])
flowstr = ''.join(['- '+flowname+':'+str(flow.status())+'\n' for flowname,flow in self.flows.items()])
return self.__class__.__name__+' model at '+hex(id(self))+' \n'+'functions: \n'+fxnstr+'flows: \n'+flowstr
def add_flows(self, flownames, flowdict={}, flowtype='generic'):
"""
Adds a set of flows with the same type and initial parameters
Parameters
----------
flownames : list
Unique flow names to give the flows in the model
flowdict : dict, Flow, set or empty dict
Dictionary of flow attributes e.g. {'value':XX}, or the Flow object.
If a set of attribute names is provided, each will be given a value of 1
If left empty, each flow will be represented with {flowname: 1}
"""
for flowname in flownames: self.add_flow(flowname, flowdict, flowtype)
def add_flow(self,flowname, flowdict={}, flowtype=''):
"""
Adds a flow with given attributes to the model.
Parameters
----------
flowname : str
Unique flow name to give the flow in the model
flowdict : dict, Flow, set or empty dict
Dictionary of flow attributes e.g. {'value':XX}, or the Flow object.
If a set of attribute names is provided, each will be given a value of 1
If left empty, the flow will be represented with {flowname: 1}
"""
if not getattr(self, 'is_copy', False):
if not flowtype: flowtype = flowname
if not flowdict: self.flows[flowname]=Flow({flowname:1}, flowname, flowtype)
elif type(flowdict) == set: self.flows[flowname]=Flow({f:1 for f in flowdict}, flowname, flowtype)
elif type(flowdict) == dict: self.flows[flowname]=Flow(flowdict, flowname,flowtype)
elif isinstance(flowdict, Flow):self.flows[flowname] = flowdict
else: raise Exception('Invalid flow. Must be a dict, set, or Flow')
def add_fxn(self,name, flownames, fclass=GenericFxn, fparams='None'):
"""
Instantiates a given function in the model.
Parameters
----------
name : str
Name to give the function.
flownames : list
List of flows to associate with the function.
fclass : Class
Class to instantiate the function as.
fparams : arbitrary float, dict, list, etc.
Other parameters to send to the __init__ method of the function class
"""
if not getattr(self, 'is_copy', False):
self.fxns[name]=fclass.__new__(fclass)
self.fxns[name].seed=self._rng.integers(np.iinfo(np.int32).max)
flows=self.get_flows(flownames)
if fparams=='None':
self.fxns[name].__init__(name, flows)
self._fxninput[name]={'name':name,'flows': flownames, 'fparams': 'None'}
else:
self.fxns[name].__init__(name, flows,fparams)
self._fxninput[name]={'name':name,'flows': flownames, 'fparams': fparams}
for flowname in flownames:
self._fxnflows.append((name, flowname))
self.functionorder.update([name])
self.fxns[name].tstep=self.tstep
def set_functionorder(self,functionorder):
"""Manually sets the order of functions to be executed (otherwise it will be executed based on the sequence of add_fxn calls)"""
if not self.functionorder.difference(functionorder): self.functionorder=OrderedSet(functionorder)
else: raise Exception("Invalid list: "+str(functionorder)+" should have elements: "+str(self.functionorder))
def get_flows(self,flownames):
""" Returns a list of the model flow objects """
return [self.flows[flowname] for flowname in flownames]
def fxns_of_class(self, ftype):
"""Returns dict of functionname:functionobjects corresponding to the given class name ftype"""
return {fxn:obj for fxn, obj in self.fxns.items() if obj.__class__.__name__==ftype}
def fxnclasses(self):
"""Returns the set of class names used in the model"""
return {obj.__class__.__name__ for fxn, obj in self.fxns.items()}
def flowtypes(self):
"""Returns the set of flow types used in the model"""
return {obj.type for fxn, obj in self.flows.items()}
def flows_of_type(self, ftype):
"""Returns the set of flows for each flow type"""
return {flow for flow, obj in self.flows.items() if obj.type==ftype}
def flowtypes_for_fxnclasses(self):
"""Returns the flows required by each function class in the model (as a dict)"""
class_relationship = dict()
for fxn, obj in self.fxns.items():
if class_relationship.get(obj.__class__.__name__,False):
class_relationship[obj.__class__.__name__].update(obj.get_flowtypes())
else: class_relationship[obj.__class__.__name__] = set(obj.get_flowtypes())
return class_relationship
def build_model(self, functionorder=[], graph_pos={}, bipartite_pos={}, require_connections=True):
"""
Builds the model graph after the functions have been added.
Parameters
----------
functionorder : list, optional
The order for the functions to be executed in. The default is [].
graph_pos : dict, optional
position of graph nodes. The default is {}.
bipartite_pos : dict, optional
position of bipartite graph nodes. The default is {}.
"""
if not getattr(self, 'is_copy', False):
if functionorder: self.set_functionorder(functionorder)
self.staticfxns = OrderedSet([fxnname for fxnname, fxn in self.fxns.items() if getattr(fxn, 'behavior', False) or getattr(fxn, 'static_behavior', False) or getattr(fxn, 'asg_proptype','na')=='static'])
self.dynamicfxns = OrderedSet([fxnname for fxnname, fxn in self.fxns.items() if getattr(fxn, 'dynamic_behavior', False) or getattr(fxn, 'asg_proptype','na')=='dynamic'])
self.construct_graph(graph_pos, bipartite_pos, require_connections=require_connections)
self.staticflows = [flow for flow in self.flows if any([ n in self.staticfxns for n in self.bipartite.neighbors(flow)])]
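# e.g., a minimal model-construction sketch (hypothetical flows/functions, using GenericFxn as a placeholder):
# class Tank(Model):
#     def __init__(self, params={}, modelparams={'times':[0,10], 'tstep':1.0}, valparams='all'):
#         super().__init__(params, modelparams, valparams)
#         self.add_flow('Water', {'level':1.0})
#         self.add_fxn('Import_Water', ['Water'], fclass=GenericFxn)
#         self.add_fxn('Store_Water', ['Water'], fclass=GenericFxn)
#         self.build_model()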
def construct_graph(self, graph_pos={}, bipartite_pos={}, require_connections=True):
"""
Creates and returns a graph representation of the model
Returns
-------
graph : networkx graph
multigraph representation of the model functions and flows
"""
self.bipartite=nx.Graph()
self.bipartite.add_nodes_from(self.fxns, bipartite=0)
self.bipartite.add_nodes_from(self.flows, bipartite=1)
self.bipartite.add_edges_from(self._fxnflows)
dangling_nodes = [e for e in nx.isolates(self.bipartite)] # check to see that all functions/flows are connected
if dangling_nodes and require_connections: raise Exception("Fxns/flows disconnected from model: "+str(dangling_nodes))
self.multgraph = nx.projected_graph(self.bipartite, self.fxns,multigraph=True)
self.graph = nx.projected_graph(self.bipartite, self.fxns)
attrs={}
#do we still need to do this for the objects? maybe not--I don't think we use the info anymore
for edge in self.graph.edges:
midedges=list(self.multgraph.subgraph(edge).edges)
flows= [midedge[2] for midedge in midedges]
flowdict={}
for flow in flows:
flowdict[flow]=self.flows[flow]
attrs[edge]=flowdict
nx.set_edge_attributes(self.graph, attrs)
nx.set_node_attributes(self.graph, self.fxns, 'obj')
self.graph_pos=graph_pos
self.bipartite_pos=bipartite_pos
return self.graph
def return_typegraph(self, withflows = True):
"""
Returns a graph with the type containment relationships of the different model constructs.
Parameters
----------
withflows : bool, optional
Whether to include flows. The default is True.
Returns
-------
g : nx.DiGraph
networkx directed graph of the type relationships
"""
g = nx.DiGraph()
modelname = type(self).__name__
g.add_node(modelname, level=1)
g.add_nodes_from(self.fxnclasses(), level=2)
function_connections = [(modelname, fname) for fname in self.fxnclasses()]
g.add_edges_from(function_connections)
if withflows:
g.add_nodes_from(self.flowtypes(), level=3)
fxnclass_flowtype = self.flowtypes_for_fxnclasses()
flow_edges = [(fxn, flow) for fxn, flows in fxnclass_flowtype.items() for flow in flows]
g.add_edges_from(flow_edges)
return g
def return_paramgraph(self):
""" Returns a graph representation of the flows in the model, where flows are nodes and edges are
associations in functions """
return nx.projected_graph(self.bipartite, self.flows)
def return_componentgraph(self, fxnname):
"""
Returns a graph representation of the components associated with a given function
Parameters
----------
fxnname : str
Name of the function (e.g. in mdl.fxns)
Returns
-------
g : networkx graph
Bipartite graph representation of the function with components.
"""
g = nx.Graph()
g.add_nodes_from([fxnname], bipartite=0)
g.add_nodes_from(self.fxns[fxnname].components, bipartite=1)
g.add_edges_from([(fxnname, component) for component in self.fxns[fxnname].components])
return g
def return_stategraph(self, gtype='bipartite'):
"""
Returns a graph representation of the current state of the model.
Parameters
----------
gtype : str, optional
Type of graph to return (normal, bipartite, component, or typegraph). The default is 'bipartite'.
Returns
-------
graph : networkx graph
Graph representation of the system with the modes and states added as attributes.
"""
if gtype=='normal':
graph=nx.projected_graph(self.bipartite, self.fxns)
elif gtype=='bipartite':
graph=self.bipartite.copy()
elif gtype=='component':
graph=self.bipartite.copy()
for fxnname, fxn in self.fxns.items():
if {**fxn.components, **fxn.actions}:
graph.add_nodes_from({**fxn.components, **fxn.actions}, bipartite=1)
graph.add_edges_from([(fxnname, comp) for comp in {**fxn.components, **fxn.actions}])
elif gtype=='typegraph':
graph=self.return_typegraph()
edgevals, fxnmodes, fxnstates, flowstates, compmodes, compstates, comptypes ={}, {}, {}, {}, {}, {}, {}
if gtype=='normal': #set edge values for normal graph
for edge in graph.edges:
midedges=list(self.multgraph.subgraph(edge).edges)
flows= [midedge[2] for midedge in midedges]
flowdict={}
for flow in flows:
flowdict[flow]=self.flows[flow].status()
edgevals[edge]=flowdict
nx.set_edge_attributes(graph, edgevals)
elif gtype=='bipartite' or gtype=='component': #set flow node values for bipartite graph
for flowname, flow in self.flows.items():
flowstates[flowname]=flow.status()
nx.set_node_attributes(graph, flowstates, 'states')
elif gtype=='typegraph':
for flowtype in self.flowtypes():
flowstates[flowtype] = {flow:self.flows[flow].status() for flow in self.flows_of_type(flowtype)}
nx.set_node_attributes(graph, flowstates, 'states')
#set node values for functions
if gtype=='typegraph':
for fxnclass in self.fxnclasses():
fxnstates[fxnclass] = {fxn:self.fxns[fxn].return_states()[0] for fxn in self.fxns_of_class(fxnclass)}
fxnmodes[fxnclass] = {fxn:self.fxns[fxn].return_states()[1] for fxn in self.fxns_of_class(fxnclass)}
else:
for fxnname, fxn in self.fxns.items():
fxnstates[fxnname], fxnmodes[fxnname] = fxn.return_states()
if gtype=='normal': del graph.nodes[fxnname]['bipartite']
if gtype=='component':
for mode in fxnmodes[fxnname].copy():
for compname, comp in {**fxn.actions, **fxn.components}.items():
compstates[compname]={}
comptypes[compname]=True
if mode in comp.faultmodes:
compmodes[compname]=compmodes.get(compname, set())
compmodes[compname].update([mode])
fxnmodes[fxnname].remove(mode)
fxnmodes[fxnname].update(['Comp_Fault'])
nx.set_node_attributes(graph, fxnstates, 'states')
nx.set_node_attributes(graph, fxnmodes, 'modes')
if gtype=='component':
nx.set_node_attributes(graph,compstates, 'states')
nx.set_node_attributes(graph, compmodes, 'modes')
nx.set_node_attributes(graph, comptypes, 'iscomponent')
return graph
def calc_repaircost(self, additional_cost=0, default_cost=0, max_cost=np.inf):
"""
Calculates the repair cost of the fault modes in the model based on given
mode cost information for each function mode (in fxn.assoc_faultmodes).
Parameters
----------
additional_cost : int/float
Additional cost to add if there are faults in the model. Default is 0.
default_cost : int/float
Cost to use for each fault mode if no fault cost information is given
in assoc_faultmodes. Default is 0.
max_cost : int/float
Maximum cost of repair (e.g. cost of replacement). Default is np.inf
Returns
-------
repair_cost : float
Cost of repairing the fault modes in the given model
"""
repmodes, modeprops = self.return_faultmodes()
modecost = sum([ c['rcost'] if c['rcost']>0.0 else default_cost for m in modeprops.values() for c in m.values()])
repair_cost = np.min([modecost, max_cost])
return repair_cost
def return_faultmodes(self):
"""
Returns faultmodes present in the model
Returns
-------
modes : dict
Fault modes present in the model indexed by function name
modeprops : dict
Fault mode properties (defined in the function definition) with structure {fxn:mode:properties}
"""
modes, modeprops = {}, {}
for fxnname, fxn in self.fxns.items():
ms = [m for m in fxn.faults.copy() if m!='nom']
if ms:
modeprops[fxnname] = {}
modes[fxnname] = ms
for mode in ms:
if mode!='nom':
modeprops[fxnname][mode] = fxn.faultmodes.get(mode)
if mode not in fxn.faultmodes: warnings.warn("Mode "+mode+" not in faultmodes for fxn "+fxnname+" and may not be tracked.")
return modes, modeprops
def copy(self):
"""
Copies the model at the current state.
Returns
-------
copy : Model
Copy of the current model.
"""
copy = self.__new__(self.__class__) # Is this adequate? Wouldn't this give it new components?
copy.is_copy=True
copy.__init__(params=getattr(self, 'params', {}),modelparams=getattr(self, 'modelparams', {}),valparams=getattr(self, 'valparams', {}))
for flowname, flow in self.flows.items():
copy.flows[flowname]=flow.copy()
for fxnname, fxn in self.fxns.items():
flownames=self._fxninput[fxnname]['flows']
fparams=self._fxninput[fxnname]['fparams']
flows = copy.get_flows(flownames)
if fparams=='None': copy.fxns[fxnname]=fxn.copy(flows)
else: copy.fxns[fxnname]=fxn.copy(flows, fparams)
copy.fxns[fxnname].tstep=self.tstep
copy._fxninput=self._fxninput
copy._fxnflows=self._fxnflows
copy.is_copy=False
copy.build_model(functionorder = self.functionorder, graph_pos=self.graph_pos, bipartite_pos=self.bipartite_pos)
copy.is_copy=True
return copy
def reset(self):
"""Resets the model to the initial state (with no faults, etc)"""
for flowname, flow in self.flows.items():
flow.reset()
for fxnname, fxn in self.fxns.items():
fxn.reset()
self._rng=np.random.default_rng(self.seed)
def find_classification(self, scen, mdlhists):
"""Placeholder for model find_classification methods (for running nominal models)"""
return {'rate':scen['properties'].get('rate', 0), 'cost': 1, 'expected cost': scen['properties'].get('rate',0)}
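# e.g., a minimal find_classification override sketch for a Model subclass (hypothetical cost logic):
# def find_classification(self, scen, mdlhists):
#     rate = scen['properties'].get('rate', 0)
#     cost = 100*len(self.return_faultmodes()[0]) # hypothetical: 100 per function with faults
#     return {'rate':rate, 'cost':cost, 'expected cost':rate*cost}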
class Timer():
"""class for model timers used in functions (e.g. for conditional faults)
Attributes
----------
name : str
timer name
time : float
internal timer clock time
tstep : float
time to increment at each time-step
mode : str (standby/set/ticking/complete)
the internal state of the timer
"""
def __init__(self, name):
"""
Initializes the Timer
Parameters
----------
name : str
Name for the timer
"""
self.name=str(name)
self.time=0.0
self.tstep=-1.0
self.mode='standby'
def __repr__(self):
return 'Timer '+self.name+': mode= '+self.mode+', time= '+str(self.time)
def t(self):
""" Returns the time elapsed """
return self.time
def inc(self, tstep=[]):
""" Increments the time elapsed by tstep"""
if self.time>=0.0:
if tstep: self.time+=tstep
else: self.time+=self.tstep
self.mode='ticking'
if self.time<=0: self.time=0.0; self.mode='complete'
def reset(self):
""" Resets the time to zero"""
self.time=0.0
self.mode='standby'
def set_timer(self,time, tstep=-1.0, overwrite='always'):
""" Sets timer to a given time
Parameters
----------
time : float
set time to count down in the timer
tstep : float (default -1.0)
time to increment the timer at each time-step
overwrite : str
whether/how to overwrite the previous time
'always' (default) sets the time to the given time
'if_more' only overwrites the old time if the new time is greater
'if_less' only overwrites the old time if the new time is less
'never' doesn't overwrite an existing timer unless it has reached 0.0
'increment' increments the previous time by the new time
"""
if overwrite =='always': self.time=time
elif overwrite=='if_more' and self.time<time: self.time=time
elif overwrite=='if_less' and self.time>time: self.time=time
elif overwrite=='never' and self.time==0.0: self.time=time
elif overwrite=='increment': self.time+=time
self.tstep=tstep
self.mode='set'
def in_standby(self):
"""Whether the timer is in standby (time has not been set)"""
return self.mode=='standby'
def is_ticking(self):
"""Whether the timer is ticking (time is incrementing)"""
return self.mode=='ticking'
def is_complete(self):
"""Whether the timer is complete (after time is done incrementing)"""
return self.mode=='complete'
def is_set(self):
"""Whether the timer is set (before time increments)"""
return self.mode=='set'
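# e.g., a countdown usage sketch of the Timer interface above:
# t = Timer('restore'); t.set_timer(5.0, tstep=-1.0) # mode='set', time=5.0
# t.inc() # mode='ticking', time=4.0
# ...after five increments, time reaches 0.0 and t.is_complete() is True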
class NominalApproach():
"""
Class for defining sets of nominal simulations. For example, a given system
may have a number of input situations (missions, terrain, etc) which the
user may want to simulate to ensure the system operates as desired. This
class (in conjunction with propagate.nominal_approach()) can be used to
perform these simulations.
Attributes
----------
scenarios : dict
scenarios to inject based on the approach
num_scenarios : int
number of scenarios in the approach
ranges : dict
dict of the parameters defined in each method for the approach
"""
def __init__(self):
"""Instantiates NominalApproach (simulation params are defined using methods)"""
self.scenarios = {}
self.num_scenarios = 0
self.ranges = {}
def add_seed_replicates(self, rangeid, seeds):
"""
Generates an approach with different seeds to use for the model's internal stochastic behaviors
Parameters
----------
rangeid : str
Name for the set of replicates
seeds : int/list
Number of seeds (if an int) or a list of seeds to use.
"""
if type(seeds)==int: seeds = np.random.SeedSequence.generate_state(np.random.SeedSequence(),seeds)
self.ranges[rangeid] = {'seeds':seeds, 'scenarios':[]}
for i in range(len(seeds)):
self.num_scenarios+=1
scenname = rangeid+'_'+str(self.num_scenarios)
self.scenarios[scenname]={'faults':{},'properties':{'type':'nominal','time':0.0, 'name':scenname, 'rangeid':rangeid,\
'modelparams':{'seed':seeds[i]}, 'prob':1/len(seeds)}}
self.ranges[rangeid]['scenarios'].append(scenname)
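# e.g.: app = NominalApproach(); app.add_seed_replicates('stoch_reps', 10)
# creates 10 nominal scenarios, each with a different model seed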
def add_param_replicates(self,paramfunc, rangeid, replicates, *args, ind_seeds=True, **kwargs):
"""
Adds a set of repeated scenarios to the approach. For use in (external) random scenario generation.
Parameters
----------
paramfunc : method
Python method which generates a set of model parameters given the input arguments.
method should have form: method(fixedarg, fixedarg..., inputarg=X, inputarg=X)
rangeid : str
Name for the set of replicates
replicates : int
Number of replicates to use
*args : any
arguments to send to paramfunc
ind_seeds : Bool/list
Whether the models should be run with different seeds (rather than the same seed). Default is True.
When a list is provided, those seeds are used. Must be of length replicates.
**kwargs : any
keyword arguments to send to paramfunc
"""
if ind_seeds==True: seeds = np.random.SeedSequence.generate_state(np.random.SeedSequence(),replicates)
elif type(ind_seeds)==list:
if len(ind_seeds)!=replicates: raise Exception("list ind_seeds must be of length replicates")
else: seeds=ind_seeds
else: seeds = [None for i in range(replicates)]
self.ranges[rangeid] = {'fixedargs':args, 'inputranges':kwargs, 'scenarios':[], 'num_pts' : replicates}
for i in range(replicates):
self.num_scenarios+=1
params = paramfunc(*args, **kwargs)
scenname = rangeid+'_'+str(self.num_scenarios)
self.scenarios[scenname]={'faults':{},\
'properties':{'type':'nominal','time':0.0, 'name':scenname, 'rangeid':rangeid,\
'params':params,'inputparams':kwargs,'modelparams':{'seed':seeds[i]},\
'paramfunc':paramfunc, 'fixedargs':args, 'prob':1/replicates}}
self.ranges[rangeid]['scenarios'].append(scenname)
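# e.g., a usage sketch with a hypothetical parameter generator:
# def gen_params(mode, scale=1.0): return {'mode':mode, 'scale':scale}
# app.add_param_replicates(gen_params, 'reps', 5, 'fast', scale=2.0)
# -> 5 scenarios, each with params gen_params('fast', scale=2.0) and (by default) its own model seed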
def get_param_scens(self, rangeid, *level_params):
"""
Returns the scenarios of a range associated with given parameter ranges
Parameters
----------
rangeid : str
Range id to check
level_params : str (multiple)
Level parameters to iterate over
Returns
-------
param_scens : dict
The scenarios associated with each level of parameter (or joint parameters)
"""
inputranges = {param:self.ranges[rangeid]['inputranges'][param] for param in level_params}
if len(inputranges)>1:
ranges = (np.arange(*arg) for k,arg in inputranges.items())
partialspace = [x for x in itertools.product(*ranges)]
else:
partialspace = [x for x in np.arange(*[*inputranges.values()][0])]
param_scens = {p:set() for p in partialspace}
full_indices = list(self.ranges[rangeid]['inputranges'].keys())
inds = [full_indices.index(param) for param in level_params]
for xvals, scenarios in self.ranges[rangeid]['levels'].items():
new_index = itemgetter(*inds)(xvals)
if type(scenarios)==str: scenarios = [scenarios]
param_scens[new_index].update(scenarios)
return param_scens
def add_param_ranges(self,paramfunc, rangeid, *args, replicates=1, seeds='shared', **kwargs):
"""
Adds a set of scenarios to the approach.
Parameters
----------
paramfunc : method
Python method which generates a set of model parameters given the input arguments.
method should have form: method(fixedarg, fixedarg..., inputarg=X, inputarg=X)
rangeid : str
Name for the range being used
*args: specifies values for positional args of paramfunc.
May be given as a fixed float/int/dict/str defining a set value for positional arguments
replicates : int
Number of points to take over each range (for random parameters). Default is 1.
seeds : str/list
Options for seeding models/replicates: (Default is 'shared')
- 'shared' creates random seeds and shares them between parameters and models
- 'independent' creates separate random seeds for models and parameter generation
- 'keep_model' uses the seed provided in the model for all replicates
When a list is provided, those seeds are used (and shared). Must be of length replicates.
**kwargs : specifies range for keyword args of paramfunc
May be given as a fixed float/int/dict/str (k=value) defining a set value for the range (if not the default) or
as a tuple k=(start, end, step)
"""
fixedkwargs = {k:v for k,v in kwargs.items() if type(v)!=tuple}
inputranges = {k:v for k,v in kwargs.items() if type(v)==tuple} # ranges come from keyword args; positional args are fixed values
ranges = (np.arange(*arg) for k,arg in inputranges.items())
fullspace = [x for x in itertools.product(*ranges)]
inputnames = list(inputranges.keys())
if type(seeds)==list:
if len(seeds)!=replicates: raise Exception("list seeds must be of length replicates")
seedstr='shared' # provided seeds are used and shared between parameters and models
else: seedstr=seeds; seeds=np.random.SeedSequence.generate_state(np.random.SeedSequence(),replicates)
if seedstr=='shared': mdlseeds=seeds
elif seedstr=='independent': mdlseeds=np.random.SeedSequence.generate_state(np.random.SeedSequence(),replicates)
elif seedstr=='keep_model': mdlseeds= [None for i in range(replicates)]
self.ranges[rangeid] = {'fixedargs':args, 'fixedkwargs':fixedkwargs, 'inputranges':inputranges, 'scenarios':[], 'num_pts' : len(fullspace), 'levels':{}, 'replicates':replicates}
for xvals in fullspace:
inputparams = {**{name:xvals[i] for i,name in enumerate(inputnames)}, **fixedkwargs}
if replicates>1: self.ranges[rangeid]['levels'][xvals]=[]
for i in range(replicates):
np.random.seed(seeds[i])
self.num_scenarios+=1
params = paramfunc(*args, **inputparams)
scenname = rangeid+'_'+str(self.num_scenarios)
self.scenarios[scenname]={'faults':{},\
'properties':{'type':'nominal','time':0.0, 'name':scenname, 'rangeid':rangeid,\
'params':params,'inputparams':inputparams,'modelparams':{'seed':mdlseeds[i]},\
'paramfunc':paramfunc, 'fixedargs':args, 'fixedkwargs':fixedkwargs, 'prob':1/(len(fullspace)*replicates)}}
self.ranges[rangeid]['scenarios'].append(scenname)
if replicates>1: self.ranges[rangeid]['levels'][xvals].append(scenname)
else: self.ranges[rangeid]['levels'][xvals]=scenname
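# e.g., a grid-sweep sketch with a hypothetical parameter generator:
# def gen_params(style, x=0.0, y=0.0): return {'style':style, 'x':x, 'y':y}
# app.add_param_ranges(gen_params, 'sweep', 'default', x=(0,10,2), y=(0,4,1))
# -> scenarios over the grid x in {0,2,4,6,8} and y in {0,1,2,3}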
def change_params(self, rangeid='all', **kwargs):
"""
Changes a given parameter across all scenarios. Modifies 'params' (rather than regenerating params from the paramfunc).
Parameters
----------
rangeid : str
Name of the range to modify. Optional. Defaults to "all"
**kwargs : any
Parameters to change stated as paramname=value or
as a dict paramname={'sub_param':value}, where 'sub_param' is the parameter of the dictionary with name paramname to update
"""
for r in self.ranges:
if rangeid=='all' or rangeid==r:
if not self.ranges[r].get('changes', False): self.ranges[r]['changes'] = kwargs
else: self.ranges[r]['changes'].update(kwargs)
for scenname, scen in self.scenarios.items():
if rangeid=='all' or rangeid==scen['properties']['rangeid']:
if not scen['properties'].get('changes', False): scen['properties']['changes']=kwargs
else: scen['properties']['changes'].update(kwargs)
for kwarg, kw_value in kwargs.items(): #updates
if type(kw_value)==dict: scen['properties']['params'][kwarg].update(kw_value)
else: scen['properties']['params'][kwarg]=kw_value
def assoc_probs(self, rangeid, prob_weight=1.0, **inputpdfs):
"""
Associates a probability model (assuming variable independence) with a
given previously-defined range of scenarios using given pdfs
Parameters
----------
rangeid : str
Name of the range to apply the probability model to.
prob_weight : float, optional
Overall probability for the set of scenarios (to use if adding more ranges
or if the range does not cover the space of probability). The default is 1.0.
**inputpdfs : key=(pdf, params)
pdf to associate with the different variables of the model.
where the pdf has the form pdf(x, **params), with x being the location (for example, scipy.stats.norm.pdf)
and params being a dictionary of keyword parameters (e.g., {'loc':1,'scale':1})
to pass to the pdf
"""
for scenname in self.ranges[rangeid]['scenarios']:
inputparams = self.scenarios[scenname]['properties']['inputparams']
inputprobs = [inpdf[0](inputparams[name], **inpdf[1]) for name, inpdf in inputpdfs.items()]
self.scenarios[scenname]['properties']['prob'] = np.prod(inputprobs)
totprobs = sum([self.scenarios[scenname]['properties']['prob'] for scenname in self.ranges[rangeid]['scenarios']])
for scenname in self.ranges[rangeid]['scenarios']:
self.scenarios[scenname]['properties']['prob'] = self.scenarios[scenname]['properties']['prob']*prob_weight/totprobs
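# e.g., weighting the hypothetical 'sweep' range above with normal pdfs (requires scipy):
# from scipy import stats
# app.assoc_probs('sweep', x=(stats.norm.pdf, {'loc':5,'scale':2}), y=(stats.norm.pdf, {'loc':2,'scale':1}))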
def add_rand_params(self, paramfunc, rangeid, *fixedargs, prob_weight=1.0, replicates=1000, seeds='shared', **randvars):
"""
Adds a set of random scenarios to the approach.
Parameters
----------
paramfunc : method
Python method which generates a set of model parameters given the input arguments.
method should have form: method(fixedarg, fixedarg..., inputarg=X, inputarg=X)
rangeid : str
Name for the range being used. Default is 'nominal'
prob_weight : float (0-1)
Overall probability for the set of scenarios (to use if adding more ranges). Default is 1.0
*fixedargs : any
Fixed positional arguments in the parameter generator function.
Useful for discrete modes with different parameters.
seeds : str/list
Options for seeding models/replicates: (Default is 'shared')
- 'shared' creates random seeds and shares them between parameters and models
- 'independent' creates separate random seeds for models and parameter generation
- 'keep_model' uses the seed provided in the model for all replicates
When a list is provided, those seeds are used (and shared). Must be of length replicates.
**randvars : key=tuple
Specification for each random input parameter, specified as
input = (randfunc, param1, param2...)
where randfunc is the method producing random outputs (e.g. numpy.random.rand)
and the successive parameters param1, param2, etc are inputs to the method
"""
if type(seeds)==list:
if len(seeds)!=replicates: raise Exception("list seeds must be of length replicates")
seedstr='shared' # provided seeds are used and shared between parameters and models
else: seedstr=seeds; seeds=np.random.SeedSequence.generate_state(np.random.SeedSequence(),replicates)
if seedstr=='shared': mdlseeds=seeds
elif seedstr=='independent': mdlseeds=np.random.SeedSequence.generate_state(np.random.SeedSequence(),replicates)
elif seedstr=='keep_model': mdlseeds= [None for i in range(replicates)]
self.ranges[rangeid] = {'fixedargs':fixedargs, 'randvars':randvars, 'scenarios':[], 'num_pts':replicates}
for i in range(replicates):
self.num_scenarios+=1
np.random.seed(seeds[i])
inputparams = {name: (ins() if callable(ins) else ins[0](*ins[1:])) for name, ins in randvars.items()}
params = paramfunc(*fixedargs, **inputparams)
scenname = rangeid+'_'+str(self.num_scenarios)
self.scenarios[scenname]={'faults':{},\
'properties':{'type':'nominal','time':0.0, 'name':scenname, 'rangeid':rangeid,\
'params':params,'inputparams':inputparams,'modelparams':{'seed':mdlseeds[i]},\
'paramfunc':paramfunc, 'fixedargs':fixedargs, 'prob':prob_weight/replicates}}
self.ranges[rangeid]['scenarios'].append(scenname)
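# e.g., a random-sampling sketch with a hypothetical parameter generator:
# def gen_params(mode, x=0.0): return {'mode':mode, 'x':x}
# app.add_rand_params(gen_params, 'rand', 'fast', replicates=100, x=(np.random.uniform, 0, 10))
# -> 100 scenarios with x drawn uniformly on [0,10)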
def copy(self):
"""Copies the given sampleapproach. Used in nested scenario sampling."""
newapp = NominalApproach()
newapp.scenarios = copy.deepcopy(self.scenarios)
newapp.ranges = copy.deepcopy(self.ranges)
newapp.num_scenarios = self.num_scenarios
return newapp
class SampleApproach():
"""
Class for defining the sample approach to be used for a set of faults.
Attributes
----------
phases : dict
phases given to sample the fault modes in
globalphases : dict
phases defined in the model
modephases : dict
Dictionary of modes associated with each phase
tstep : float
timestep defined in the model
fxnrates : dict
overall failure rates for each function
comprates : dict
overall failure rates for each component
jointmodes : list
(if any) joint fault modes to be injected in the approach
rates/rates_timeless : dict
rates of each mode (fxn, mode) in each model phase, structured {fxnmode: {phaseid:rate}}
sampletimes : dict
faults to inject at each time in each phase, structured {phaseid:time:fnxmode}
weights : dict
weight to put on each time each fault was injected, structured {fxnmode:phaseid:time:weight}
sampparams : dict
parameters used to sample each mode
scenlist : list
list of fault scenarios (dicts of faults and properties) that fault propagation iterates through
scenids : dict
a list of scenario ids associated with a given fault in a given phase, structured {(fxnmode,phaseid):listofnames}
mode_phase_map : dict
a dict of modes and their respective phases to inject with structure {fxnmode:{mode_phase_map:[starttime, endtime]}}
units : str
time-units to use in the approach probability model
unit_factors : dict
multiplication factors for converting some time units to others.
"""
def __init__(self, mdl, faults='all', phases='global', modephases={}, jointfaults={'faults':'None'}, sampparams={}, defaultsamp={'samp':'evenspacing','numpts':1}):
"""
Initializes the sample approach for a given model
Parameters
----------
mdl : Model
Model to sample.
faults : str/list/tuple, optional
- The default is 'all', which gets all fault modes from the model.
- 'single-component' uses faults from a single component to represent faults from all components
- 'single-function' uses faults from a single function to represent faults from all functions of the same class
- passing a function name only includes modes from that function
- List of faults of form [(fxn, mode)] to inject in the model.
- Tuple arguments:
- ('mode type', 'mode','notmode'), gets all modes with 'mode' as a string (e.g. "mech", "comms", "loss" faults). 'notmode' (if given) specifies strings to remove
- ('mode types', ('mode1', 'mode2')), gets all modes with the listed strings (e.g. "mech", "comms", "loss" faults)
- ('mode name', 'mode'), gets all modes with the exact name 'mode'
- ('mode names', ('mode1', 'mode2')), gets all modes with the exact names defined in the tuple
- ('function class', 'Classname'), which gets all modes from a function with class 'Classname'
- ('function classes', ('Classname1', 'Classname2')), which gets all modes from a function with the names in the tuple
phases: dict or 'global' or list
Local phases in the model to sample.
Dict has structure: {'Function':{'phase':[starttime, endtime]}}
List has structure: ['phase1', 'phase2'] where phases are phases in mdl.phases
Defaults to 'global', where only the phases defined in mdl.phases are used.
Phases and modephases can be gotten from process.modephases(mdlhist)
modephases: dict
Dictionary of modes associated with each phase.
For use when the opportunity vector is keyed to modes and each mode is
entered multiple times in a simulation, resulting in
multiple phases associated with that mode. Has structure:
{'Function':{'mode':{'phase','phase1', 'phase2'...}}}
Phases and modephases can be gotten from process.modephases(mdlhist)
jointfaults : dict, optional
Defines how the approach considers joint faults. The default is {'faults':'None'}. Has structure:
- faults : int/str/list
# of joint faults to inject. 'all' specifies all faults at the same time; a list specifies the joint modes directly
- jointfuncs : bool
determines whether more than one mode can be injected in a single function
- pcond (optional) : float in range (0,1)
conditional probabilities for joint faults. If not given, independence is assumed.
- inclusive (optional) : bool
specifies whether the fault set includes all joint faults up to the given level, or only the given level
(e.g., True with 'all' means SampleApproach includes every combination of joint fault modes while
False with 'all' means SampleApproach only includes the joint fault mode with all faults)
sampparams : dict, optional
Defines how specific modes in the model will be sampled over time. The default is {}.
Has structure: {(fxnmode,phase): sampparam}, where sampparam has structure:
- 'samp' : str ('quad', 'fullint', 'evenspacing','randtimes','symrandtimes')
sample strategy to use (quadrature, full integral, even spacing, random times, likeliest, or symmetric random times)
- 'numpts' : float
number of points to use (for evenspacing, randtimes, and symrandtimes only)
- 'quad' : dict
dict with structure {'nodes'[nodelist], 'weights':weightlist}
where the nodes in the nodelist range between -1 and 1
and the weights in the weightlist sum to 2.
defaultsamp : dict, optional
Defines how the model will be sampled over time by default. The default is {'samp':'evenspacing','numpts':1}. Has structure:
- 'samp' : str ('quad', 'fullint', 'evenspacing','randtimes','symrandtimes')
sample strategy to use (quadrature, full integral, even spacing, random times,likeliest, or symmetric random times)
- 'numpts' : float
number of points to use (for evenspacing, randtimes, and symrandtimes only)
- 'quad' : dict
dict with structure {'nodes'[nodelist], 'weights':weightlist}
where the nodes in the nodelist range between -1 and 1
and the weights in the weightlist sum to 2.
"""
self.unit_factors = {'sec':1, 'min':60,'hr':3600,'day':86400,'wk':604800,'month':2592000,'year':31556952} # seconds per unit
if phases=='global': self.globalphases = mdl.phases; self.phases = {}; self.modephases = modephases
elif type(phases) in [list, set]: self.globalphases = {ph:mdl.phases[ph] for ph in phases}; self.phases={}; self.modephases = modephases
elif type(phases)==dict:
if type(tuple(phases.values())[0])==dict: self.globalphases = mdl.phases; self.phases = phases; self.modephases = modephases
elif type(tuple(phases.values())[0][0]) in [int, float]: self.globalphases = phases; self.phases ={}; self.modephases = modephases
else: self.globalphases = mdl.phases; self.phases = phases; self.modephases = modephases
#elif type(phases)==set: self.globalphases=mdl.phases; self.phases = {ph:mdl.phases[ph] for ph in phases}
self.tstep = mdl.tstep
self.units = mdl.units
self.init_modelist(mdl,faults, jointfaults)
self.init_rates(mdl, jointfaults=jointfaults, modephases=modephases)
self.create_sampletimes(mdl, sampparams, defaultsamp)
self.create_scenarios()
def init_modelist(self,mdl, faults, jointfaults={'faults':'None'}):
"""Initializes comprates, jointmodes internal list of modes"""
self.comprates={}
self._fxnmodes={}
if faults=='all':
self.fxnrates=dict.fromkeys(mdl.fxns)
for fxnname, fxn in mdl.fxns.items():
for mode, params in fxn.faultmodes.items():
if params=='synth': self._fxnmodes[fxnname, mode] = {'dist':1/len(fxn.faultmodes),'oppvect':[1], 'rcost':0,'probtype':'prob','units':'hr'}
else: self._fxnmodes[fxnname, mode] = params
self.fxnrates[fxnname]=fxn.failrate
self.comprates[fxnname] = {compname:comp.failrate for compname, comp in fxn.components.items()}
elif faults=='single-component':
self.fxnrates=dict.fromkeys(mdl.fxns)
for fxnname, fxn in mdl.fxns.items():
if getattr(fxn, 'components', {}):
firstcomp = list(fxn.components)[0]
for mode, params in fxn.faultmodes.items():
comp = fxn.compfaultmodes.get(mode, 'fxn')
if comp==firstcomp or comp=='fxn':
if params=='synth': self._fxnmodes[fxnname, mode] = {'dist':1/len(fxn.faultmodes),'oppvect':[1], 'rcost':0,'probtype':'prob','units':'hr'}
else: self._fxnmodes[fxnname, mode] = params
self.fxnrates[fxnname]=fxn.failrate
self.comprates[fxnname] = {firstcomp: sum([comp.failrate for compname, comp in fxn.components.items()])}
else:
for mode, params in fxn.faultmodes.items():
if params=='synth': self._fxnmodes[fxnname, mode] = {'dist':1/len(fxn.faultmodes),'oppvect':[1], 'rcost':0,'probtype':'prob','units':'hr'}
else: self._fxnmodes[fxnname, mode] = params
self.fxnrates[fxnname]=fxn.failrate
self.comprates[fxnname] = {}
elif faults=='single-function':
fxnclasses = mdl.fxnclasses();
fxns_for_class = {f:mdl.fxns_of_class(f) for f in fxnclasses}
fxns_to_use = {list(fxns)[0]: len(fxns) for f, fxns in fxns_for_class.items()}
self.fxnrates=dict.fromkeys(fxns_to_use)
for fxnname in fxns_to_use:
fxn = mdl.fxns[fxnname]
for mode, params in fxn.faultmodes.items():
if params=='synth': self._fxnmodes[fxnname, mode] = {'dist':1/len(fxn.faultmodes),'oppvect':[1], 'rcost':0,'probtype':'prob','units':'hr'}
else: self._fxnmodes[fxnname, mode] = params
self.fxnrates[fxnname]=fxn.failrate * fxns_to_use[fxnname]
self.comprates[fxnname] = {compname:comp.failrate for compname, comp in fxn.components.items()}
else:
if type(faults)==str: faults = [(faults, mode) for mode in mdl.fxns[faults].faultmodes] #single-function modes
elif type(faults)==tuple:
if faults[0]=='mode name': faults = [(fxnname, mode) for fxnname,fxn in mdl.fxns.items() for mode in fxn.faultmodes if mode==faults[1]]
elif faults[0]=='mode names': faults = [(fxnname, mode) for f in faults[1] for fxnname,fxn in mdl.fxns.items() for mode in fxn.faultmodes if mode==f]
elif faults[0]=='mode type': faults = [(fxnname, mode) for fxnname,fxn in mdl.fxns.items() for mode in fxn.faultmodes if faults[1] in mode if (len(faults)<3 or not faults[2] in mode)]
elif faults[0]=='mode types': faults = [(fxnname, mode) for fxnname,fxn in mdl.fxns.items() for mode in fxn.faultmodes if any([f in mode for f in faults[1]])]
elif faults[0]=='function class': faults = [(fxnname, mode) for fxnname,fxn in mdl.fxns_of_class(faults[1]).items() for mode in fxn.faultmodes]
elif faults[0]=='function classes': faults = [(fxnname, mode) for f in faults[1] for fxnname,fxn in mdl.fxns_of_class(f).items() for mode in fxn.faultmodes]
else: raise Exception("Invalid option in tuple argument: "+str(faults[0]))
elif type(faults)==list:
if type(faults[0])!=tuple: raise Exception("Invalid list option: "+str(faults)+" , provide list of tuples")
faults=faults
else: raise Exception("Invalid option for faults: "+str(faults))
self.fxnrates=dict.fromkeys([fxnname for (fxnname, mode) in faults])
for fxnname, mode in faults:
params = mdl.fxns[fxnname].faultmodes[mode]
if params=='synth': self._fxnmodes[fxnname, mode] = {'dist':1/len(faults),'oppvect':[1], 'rcost':0,'probtype':'prob','units':'hr'}
else: self._fxnmodes[fxnname, mode] = params
self.fxnrates[fxnname]=mdl.fxns[fxnname].failrate
self.comprates[fxnname] = {compname:comp.failrate for compname, comp in mdl.fxns[fxnname].components.items()}
if type(jointfaults['faults'])==int or jointfaults['faults']=='all':
if jointfaults['faults']=='all': num_joint= len(self._fxnmodes)
else: num_joint=jointfaults['faults']
self.jointmodes=[]
inclusive = jointfaults.get('inclusive', True)
if inclusive:
for numjoint in range(2, num_joint+1):
jointmodes = list(itertools.combinations(self._fxnmodes, numjoint))
if not jointfaults.get('jointfuncs', False):
jointmodes = [jm for jm in jointmodes if not any([jm[i-1][0] ==j[0] for i in range(1, len(jm)) for j in jm[i:]])]
self.jointmodes = self.jointmodes + jointmodes
elif not inclusive:
jointmodes = list(itertools.combinations(self._fxnmodes, num_joint))
if not jointfaults.get('jointfuncs', False):
jointmodes = [jm for jm in jointmodes if not any([jm[i-1][0] ==j[0] for i in range(1, len(jm)) for j in jm[i:]])]
self.jointmodes=jointmodes
else: raise Exception("Invalid option for jointfault['inclusive']")
elif type(jointfaults['faults'])==list: self.jointmodes = jointfaults['faults']
elif jointfaults['faults']!='None': raise Exception("Invalid jointfaults argument type: "+str(type(jointfaults['faults'])))
def init_rates(self,mdl, jointfaults={'faults':'None'}, modephases={}):
""" Initializes rates, rates_timeless"""
self.rates=dict.fromkeys(self._fxnmodes)
self.rates_timeless=dict.fromkeys(self._fxnmodes)
self.mode_phase_map=dict.fromkeys(self._fxnmodes)
for (fxnname, mode) in self._fxnmodes:
key_phases = mdl.fxns[fxnname].key_phases_by
if key_phases=='global': fxnphases = self.globalphases
elif key_phases=='none': fxnphases = {'operating':[mdl.times[0], mdl.times[-1]]}
else: fxnphases = self.phases.get(key_phases, self.globalphases)
fxnphases = dict(sorted(fxnphases.items(), key = lambda item: item[1][0]))
self.rates[fxnname, mode]=dict(); self.rates_timeless[fxnname, mode]=dict(); self.mode_phase_map[fxnname, mode] = dict()
overallrate = self.fxnrates[fxnname]
if self.comprates[fxnname]:
for compname, component in mdl.fxns[fxnname].components.items():
if mode in component.faultmodes:
overallrate=self.comprates[fxnname][compname]
if modephases and (key_phases not in ['global', 'none']):
modevect = self._fxnmodes[fxnname, mode]['oppvect']
oppvect = {phase:0 for phase in fxnphases}
oppvect.update({phase:modevect.get(mode, 0)/len(phases) for mode,phases in modephases[key_phases].items() for phase in phases})
else:
if type(self._fxnmodes[fxnname, mode]['oppvect'])==dict:
oppvect = {phase:0 for phase in fxnphases}
oppvect.update(self._fxnmodes[fxnname, mode]['oppvect'])
else:
oppvect = self._fxnmodes[fxnname, mode]['oppvect']
if type(oppvect)==int or len(oppvect)==1: oppvect = {phase:1 for phase in fxnphases}
elif self.globalphases!=mdl.phases: oppvect = {phase:oppvect[i] for (i, phase) in enumerate(mdl.phases)}
elif len(oppvect)!=len(fxnphases): raise Exception("Invalid Opportunity vector: "+fxnname+". Invalid length.")
else: oppvect = {phase:oppvect[i] for (i, phase) in enumerate(fxnphases)}
for phase, times in fxnphases.items():
opp = oppvect[phase]/(sum(oppvect.values())+1e-100)
dist = self._fxnmodes[fxnname, mode]['dist']
if self._fxnmodes[fxnname, mode]['probtype']=='rate' and len(times)>1:
dt = float(times[1]-times[0])
unitfactor = self.unit_factors[self.units]/self.unit_factors[self._fxnmodes[fxnname, mode]['units']]
elif self._fxnmodes[fxnname, mode]['probtype']=='prob' or len(times)>=1:
dt = 1
unitfactor = 1
self.rates[fxnname, mode][key_phases, phase] = overallrate*opp*dist*dt*unitfactor #TODO: update with units
self.rates_timeless[fxnname, mode][key_phases, phase] = overallrate*opp*dist
self.mode_phase_map[fxnname, mode][key_phases, phase] = times
if getattr(self, 'jointmodes',False):
for (j_ind, jointmode) in enumerate(self.jointmodes):
self.rates.update({jointmode:dict()})
self.rates_timeless.update({jointmode:dict()})
self.mode_phase_map.update({jointmode:dict()})
jointphase_list = [self.mode_phase_map[mode] for mode in jointmode]
jointphase_dict = {k:v for mode in jointmode for k,v in self.mode_phase_map[mode].items()}
for phase_combo in itertools.product(*jointphase_list):
intervals = [jointphase_dict[phase] for phase in phase_combo]
overlap = find_overlap_n(intervals)
if overlap:
phaseid = tuple(set(phase_combo))
if len(phaseid) == 1:
phaseid = phaseid[0]
rates=[self.rates[fmode][phaseid] for fmode in jointmode]
else:
rates = [self.rates[fmode][phase_combo[i]]* np.subtract(*overlap)/np.subtract(*self.mode_phase_map[fmode][phase_combo[i]]) for i,fmode in enumerate(jointmode)]
if not jointfaults.get('pcond', False): # if no input, assume independence
prob = np.prod(1-np.exp(-np.array(rates)))
self.rates[jointmode][phaseid] = -np.log(1.0-prob)
elif type(jointfaults['pcond'])==float:
self.rates[jointmode][phaseid] = jointfaults['pcond']*max(rates)
elif type(jointfaults['pcond'])==list:
self.rates[jointmode][phaseid] = jointfaults['pcond'][j_ind]*max(rates)
if len(overlap)>1: self.rates_timeless[jointmode][phaseid] = self.rates[jointmode][phaseid]/(overlap[1]-overlap[0])
else: self.rates_timeless[jointmode][phaseid] = self.rates[jointmode][phaseid]
self.mode_phase_map[jointmode][phaseid] = overlap
if not jointfaults.get('inclusive', True):
for (fxnname, mode) in self._fxnmodes:
self.rates.pop((fxnname,mode))
self.rates_timeless.pop((fxnname,mode))
self.mode_phase_map.pop((fxnname,mode))
def create_sampletimes(self,mdl, params={}, default={'samp':'evenspacing','numpts':1}):
""" Initializes weights and sampletimes """
self.sampletimes={}
self.weights={fxnmode:dict.fromkeys(rate) for fxnmode,rate in self.rates.items()}
self.sampparams={}
for fxnmode, ratedict in self.rates.items():
for phaseid, rate in ratedict.items():
if rate > 0.0:
times = self.mode_phase_map[fxnmode][phaseid]
param = params.get((fxnmode,phaseid), default)
self.sampparams[fxnmode, phaseid] = param
if len(times)<=1: self.add_phasetimes(fxnmode, phaseid, times)
else:
possible_phasetimes = list(np.arange(times[0], times[1], self.tstep))
if param['samp']=='likeliest':
weights=[]
if self.rates[fxnmode][phaseid] == max(list(self.rates[fxnmode].values())):
phasetimes = [round(np.quantile(possible_phasetimes, 0.5)/self.tstep)*self.tstep]
else: phasetimes = []
else:
pts, weights = self.select_points(param, [pt for pt, t in enumerate(possible_phasetimes)])
phasetimes = [possible_phasetimes[pt] for pt in pts]
self.add_phasetimes(fxnmode, phaseid, phasetimes, weights=weights)
def select_points(self, param, possible_pts):
"""
Selects points in the list possible_pts according to the given sample strategy.
Parameters
----------
param : dict
Sample parameter. Has structure:
- 'samp' : str ('quadrature', 'fullint', 'evenspacing','randtimes','symrandtimes')
sample strategy to use (quadrature, full integral, even spacing, random times, or symmetric random times)
- 'numpts' : float
number of points to use (for evenspacing, randtimes, and symrandtimes only)
- 'quad' : dict
dict with structure {'nodes':nodelist, 'weights':weightlist}
where the nodes in the nodelist range between -1 and 1
and the weights in the weightlist sum to 2.
possible_pts : list
list of possible points in time.
Returns
-------
pts : list
selected points
weights : list
weights for each point
"""
weights=[]
if param['samp']=='fullint': pts = possible_pts
elif param['samp']=='evenspacing':
if param['numpts']+2 > len(possible_pts): pts = possible_pts
else: pts= [int(round(np.quantile(possible_pts, p/(param['numpts']+1)))) for p in range(param['numpts']+2)][1:-1]
elif param['samp']=='quadrature':
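# map the quadrature nodes from [-1, 1] onto quantiles in [0, 1]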
quantiles = param['quad']['nodes']/2 +0.5
if len(quantiles) > len(possible_pts): pts = possible_pts
else:
pts= [int(round(np.quantile(possible_pts, q))) for q in quantiles]
weights=param['quad']['weights']/sum(param['quad']['weights'])
elif param['samp']=='randtimes':
if param['numpts']>=len(possible_pts): pts = possible_pts
else: pts= [possible_pts.pop(np.random.randint(len(possible_pts))) for i in range(min(param['numpts'], len(possible_pts)))]
elif param['samp']=='symrandtimes':
if param['numpts']>=len(possible_pts): pts = possible_pts
else:
if len(possible_pts) %2 >0: pts = [possible_pts.pop(int(np.floor(len(possible_pts)/2)))]
else: pts = []
possible_pts_halved = np.reshape(possible_pts, (2,int(len(possible_pts)/2)))
possible_pts_halved[1] = np.flip(possible_pts_halved[1])
possible_inds = [i for i in range(int(len(possible_pts)/2))]
inds = [possible_inds.pop(np.random.randint(len(possible_inds))) for i in range(min(int(np.floor(param['numpts']/2)), len(possible_inds)))]
pts= pts+ [possible_pts_halved[half][ind] for half in range(2) for ind in inds ]
pts.sort()
else: print("invalid option: ", param)
if not any(weights): weights = [1/len(pts) for t in pts]
if len(pts)!=len(set(pts)):
raise Exception("Too many pts for quadrature at this discretization")
return pts, weights
def add_phasetimes(self, fxnmode, phaseid, phasetimes, weights=[]):
""" Adds a set of times for a given mode to sampletimes"""
if phasetimes:
if not self.weights[fxnmode].get(phaseid): self.weights[fxnmode][phaseid] = {t: 1/len(phasetimes) for t in phasetimes}
for (ind, time) in enumerate(phasetimes):
if not self.sampletimes.get(phaseid):
self.sampletimes[phaseid] = {time:[]}
if self.sampletimes[phaseid].get(time): self.sampletimes[phaseid][time] = self.sampletimes[phaseid][time] + [(fxnmode)]
else: self.sampletimes[phaseid][time] = [(fxnmode)]
if any(weights): self.weights[fxnmode][phaseid][time] = weights[ind]
else: self.weights[fxnmode][phaseid][time] = 1/len(phasetimes)
def create_nomscen(self, mdl):
""" Creates a nominal scenario """
nomscen={'faults':{},'properties':{}}
for fxnname in mdl.fxns:
nomscen['faults'][fxnname]='nom'
nomscen['properties']['time']=0.0
nomscen['properties']['type']='nominal'
nomscen['properties']['name']='nominal'
nomscen['properties']['weight']=1.0
return nomscen
def create_scenarios(self):
""" Creates list of scenarios to be iterated over in fault injection. Added as scenlist and scenids """
self.scenlist=[]
self.times = []
self.scenids = {}
for phaseid, samples in self.sampletimes.items():
if samples:
for time, faultlist in samples.items():
self.times+=[time]
for fxnmode in faultlist:
if self.sampparams[fxnmode, phaseid]['samp']=='maxlike':
rate = sum(self.rates[fxnmode].values())
else:
rate = self.rates[fxnmode][phaseid] * self.weights[fxnmode][phaseid][time]
if type(fxnmode[0])==str:
name = fxnmode[0]+' '+fxnmode[1]+', t='+str(time)
scen={'faults':{fxnmode[0]:fxnmode[1]}, 'properties':{'type': 'single-fault', 'function': fxnmode[0],\
'fault': fxnmode[1], 'rate': rate, 'time': time, 'name': name}}
else:
name = ' '.join([fm[0]+': '+fm[1]+',' for fm in fxnmode])+' t='+str(time)
faults = dict.fromkeys([fm[0] for fm in fxnmode])
for fault in faults:
faults[fault] = [fm[1] for fm in fxnmode if fm[0]==fault]
scen = {'faults':faults, 'properties':{'type': str(len(fxnmode))+'-joint-faults', 'functions':{fm[0] for fm in fxnmode}, \
'modes':{fm[1] for fm in fxnmode}, 'rate': rate, 'time': time, 'name': name}}
self.scenlist=self.scenlist+[scen]
if self.scenids.get((fxnmode, phaseid)): self.scenids[fxnmode, phaseid] = self.scenids[fxnmode, phaseid] + [name]
else: self.scenids[fxnmode, phaseid] = [name]
self.times.sort()
def prune_scenarios(self,endclasses,samptype='piecewise', threshold=0.1, sampparam={'samp':'evenspacing','numpts':1}):
"""
Finds the best sample approach to approximate the full integral (given the approach was the full integral).
Parameters
----------
endclasses : dict
dict of results (cost, rate, expected cost) for the model run indexed by scenid
samptype : str ('piecewise' or 'bestpt'), optional
Method to use.
If 'bestpt', finds the point in the interval that gives the average cost.
If 'piecewise', attempts to split the interval into sub-intervals of continuity
The default is 'piecewise'.
threshold : float, optional
If 'piecewise,' the threshold for detecting a discontinuity based on deviation from linearity. The default is 0.1.
sampparam : dict, optional
If 'piecewise', the sample parameter to prune each sub-interval to. The default is {'samp':'evenspacing','numpts':1}, which uses a single point per sub-interval (optimal for linear behavior).
"""
newscenids = dict.fromkeys(self.scenids.keys())
newsampletimes = {key:{} for key in self.sampletimes.keys()}
newweights = {fault:dict.fromkeys(phasetimes) for fault, phasetimes in self.weights.items()}
for modeinphase in self.scenids:
costs= np.array([endclasses[scen]['cost'] for scen in self.scenids[modeinphase]])
if samptype=='bestpt':
errs = abs(np.mean(costs) - costs)
mins = np.where(errs == errs.min())[0]
pts=[mins[int(len(mins)/2)]]
weights=[1]
elif samptype=='piecewise':
if not self.phases or modeinphase[1][0]=='global':
beginning, end = self.globalphases[modeinphase[1][1]]
else:
beginning, end = self.phases[modeinphase[1][0]][modeinphase[1][1]]
partlocs=[0, len(list(np.arange(beginning,end, self.tstep)))]
reset=False
for ind, cost in enumerate(costs[1:-1]): # find where fxn is no longer linear
if reset==True:
reset=False
continue
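# flag a discontinuity where the change in slope between adjacent cost
# segments deviates from linearity by more than the given threshold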
if abs(((cost-costs[ind]) - (costs[ind+2]-cost))/(costs[ind+2]-cost + 0.0001)) > threshold:
partlocs = partlocs + [ind+2]
reset=True
partlocs.sort()
pts=[]
weights=[]
for (ind_part, partloc) in enumerate(partlocs[1:]): # add points in each section
partition = [i for i in range(partlocs[ind_part], partloc)]
part_pts, part_weights = self.select_points(sampparam, partition)
pts = pts + part_pts
overall_part_weight = (partloc-partlocs[ind_part])/(partlocs[-1]-partlocs[0])
weights = weights + list(np.array(part_weights)*overall_part_weight)
pts.sort()
newscenids[modeinphase] = [self.scenids[modeinphase][pt] for pt in pts]
newscens = [scen for scen in self.scenlist if scen['properties']['name'] in newscenids[modeinphase]]
newweights[modeinphase[0]][modeinphase[1]] = {scen['properties']['time']:weights[ind] for (ind, scen) in enumerate(newscens)}
for newscen in newscens:
if not newsampletimes[modeinphase[1]].get(newscen['properties']['time']):
newsampletimes[modeinphase[1]][newscen['properties']['time']] = [modeinphase[0]]
else:
newsampletimes[modeinphase[1]][newscen['properties']['time']] = newsampletimes[modeinphase[1]][newscen['properties']['time']] + [modeinphase[0]]
self.scenids = newscenids
self.weights = newweights
self.sampletimes = newsampletimes
self.create_scenarios()
self.sampparams={key:{'samp':'pruned '+samptype} for key in self.sampparams}
def list_modes(self, joint=False):
""" Returns a list of modes in the approach """
if joint:
return [(fxn, mode) for fxn, mode in self._fxnmodes.keys()] + self.jointmodes
else:
return [(fxn, mode) for fxn, mode in self._fxnmodes.keys()]
def list_moderates(self):
""" Returns the rates for each mode """
return {(fxn, mode): sum(self.rates[fxn,mode].values()) for (fxn, mode) in self.rates.keys()}
def find_overlap_n(intervals):
"""Finds the overlap between given intervals.
Used to sample joint fault modes with different (potentially overlapping) phases """
try:
upper_limits = [interval[1] for interval in intervals]
lower_limits = [interval[0] for interval in intervals]
if any(u < l for u in upper_limits for l in lower_limits): return []
if not upper_limits and not lower_limits: return []
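# with all 2n interval endpoints sorted, the common overlap (when it exists)
# spans the n-th to (n+1)-th smallest values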
orderedintervals = np.sort(upper_limits+lower_limits)
return [orderedintervals[len(intervals)-1],orderedintervals[len(intervals)]]
except IndexError:
if all(intervals[0]==i for i in intervals): return intervals[0]
else: return 0
def phases(times, names=[]):
""" Creates named phases from a set of times defining the edges of the intervals """
if not names: names = range(len(times)-1)
return {names[i]:[times[i], times[i+1]] for (i, _) in enumerate(times) if i < len(times)-1}
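# A quick illustration with hypothetical values: phases([0, 5, 10],
# names=['ascent', 'cruise']) returns {'ascent': [0, 5], 'cruise': [5, 10]}.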
def m2to1(x):
"""
Multiplies a list of numbers which may take on the values infinity or zero. When deciding whether the result is infinity or zero, earlier values in the list take precedence
Parameters
----------
x : list
numbers to multiply
Returns
-------
y : float
result of multiplication
"""
if np.size(x)>2: x=[x[0], m2to1(x[1:])]
if x[0]==np.inf: y=np.inf
elif x[1]==np.inf:
if x[0]==0.0: y=0.0
else: y=np.inf
else: y=x[0]*x[1]
return y
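# For example, m2to1([np.inf, 0.0]) gives inf (the earlier inf takes
# precedence), while m2to1([0.0, np.inf]) gives 0.0.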
def trunc(x, n=2.0, truncif='greater'):
"""truncates a value to a given number (useful if behavior unchanged by increases)
Parameters
----------
x : float/int
number to truncate
n : float/int (optional)
threshold to truncate to
truncif: 'greater'/'less'
whether to truncate if greater or less than the given number
"""
if truncif=='greater' and x>n: y=n
elif truncif=='greater' and x<n: y=n
else: y=x
return y
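# For example, trunc(5.0, n=2.0) gives 2.0, while trunc(1.0, n=2.0,
# truncif='less') raises the value to the threshold 2.0.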
def union(probs):
""" Calculates the union of a list of probabilities [p_1, p_2, ... p_n] p = p_1 U p_2 U ... U p_n """
while len(probs)>1:
if len(probs) % 2:
p, probs = probs[0], probs[1:]
probs[0]=probs[0]+p -probs[0]*p
probs = [probs[i-1]+probs[i]-probs[i-1]*probs[i] for i in range(1, len(probs), 2)]
return probs[0]
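# For example, union([0.5, 0.5]) gives 0.75, i.e. p1 + p2 - p1*p2 for two
# independent events.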
def reseting_accumulate(vec):
""" Accummulates vector for all positive output (e.g. if input =[1,1,1, 0, 1,1], output = [1,2,3,0,1,2])"""
newvec = list(vec) # copy so the input is not modified in place
val=0
for ind, i in enumerate(vec):
if i > 0: val = i + val
else: val = 0
newvec[ind] = val
return newvec
def accumulate(vec):
""" Accummulates vector (e.g. if input =[1,1,1, 0, 1,1], output = [1,2,3,3,4,5])"""
return [sum(vec[:i+1]) for i in range(len(vec)) ]
def is_iter(data):
""" Checks whether a data type should be interpreted as an iterable or not and returned
as a single value or tuple/array"""
if isinstance(data, Iterable) and type(data)!=str: return True
else: return False
"""Model checking"""
def check_pickleability(obj, verbose=True):
""" Checks to see which attributes of an object will pickle (and thus parallelize)"""
unpickleable = []
for name, attribute in vars(obj).items():
if not dill.pickles(attribute):
unpickleable = unpickleable + [name]
if verbose:
if unpickleable: print("The following attributes will not pickle: "+str(unpickleable))
else: print("The object is pickleable")
return unpickleable
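# Hypothetical usage: check_pickleability(my_model_instance) returns the
# attribute names that dill cannot pickle (and which thus block parallelism).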
def check_model_pickleability(model):
""" Checks to see which attributes of a model object will pickle, providing more detail about functions/flows"""
unpickleable = check_pickleability(model)
if 'flows' in unpickleable:
print('FLOWS ')
for flowname, flow in model.flows.items():
print(flowname)
check_pickleability(flow)
if 'fxns' in unpickleable:
print('FUNCTIONS ')
for fxnname, fxn in model.fxns.items():
print(fxnname)
check_pickleability(fxn)
|
{"hexsha": "f97f9e0b00ebadb7288dde168e1e1c5a9595ffcf", "size": 136642, "ext": "py", "lang": "Python", "max_stars_repo_path": "fmdtools/modeldef.py", "max_stars_repo_name": "nasa/fmdtools", "max_stars_repo_head_hexsha": "7415068776998ff05eb199c78531ee7f9c2422e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-03-17T22:10:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T03:11:21.000Z", "max_issues_repo_path": "fmdtools/modeldef.py", "max_issues_repo_name": "nasa/fmdtools", "max_issues_repo_head_hexsha": "7415068776998ff05eb199c78531ee7f9c2422e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fmdtools/modeldef.py", "max_forks_repo_name": "nasa/fmdtools", "max_forks_repo_head_hexsha": "7415068776998ff05eb199c78531ee7f9c2422e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.9620155039, "max_line_length": 213, "alphanum_fraction": 0.5973200041, "include": true, "reason": "import numpy,import networkx", "num_tokens": 31378}
|
import torch
import numpy as np
from torch import nn
from util_layers import *
class NaiveNet(nn.Module):
"""
CNN only
"""
def __init__(self,input_size=None,num_task=None):
self.num_task = num_task
super(NaiveNet, self).__init__()
self.NaiveCNN = nn.Sequential(
nn.Conv1d(in_channels=4,out_channels=8,kernel_size=7,stride=2,padding=0), #[bs, 8, 72]
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Conv1d(in_channels=8,out_channels=32,kernel_size=3,stride=1,padding=1),#[bs 32 72]
nn.ReLU(),
nn.MaxPool1d(kernel_size=2,padding=0), #[bs 32 36]
nn.Dropout(p=0.2),
nn.Conv1d(in_channels=32,out_channels=128,kernel_size=3,stride=1,padding=1),#[bs 128 36]
nn.ReLU(),
nn.MaxPool1d(kernel_size=2,padding=0) #[bs 128 18]
)
# self.NaiveBiLSTM = nn.LSTM(input_size=128,hidden_size=128,batch_first=True,bidirectional=True)
in_features_1 = (input_size - 7) // 2 + 1
in_features_2 = (in_features_1 - 2) // 2 + 1
in_features_3 = (in_features_2 - 2) // 2 + 1
self.Flatten = nn.Flatten()
self.SharedFC = nn.Sequential(nn.Linear(in_features=128*in_features_3,out_features=1024),
nn.ReLU(),
nn.Dropout()
)
for i in range(num_task):
setattr(self, "NaiveFC%d" %i, nn.Sequential(
nn.Linear(in_features=1024,out_features=256),
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_features=256,out_features=64),
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_features=64,out_features=1),
nn.Sigmoid()
))
def forward(self,x):
x = self.NaiveCNN(x)
output = self.Flatten(x) # flatten output
shared_layer = self.SharedFC(output)
outs = []
for i in range(self.num_task):
FClayer = getattr(self, "NaiveFC%d" %i)
y = FClayer(shared_layer)
y = torch.squeeze(y, dim=-1)
outs.append(y)
return outs
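# A minimal smoke test with hypothetical shapes: one-hot sequences of length
# 150 over 4 channels and num_task=12 prediction heads.
# model = NaiveNet(input_size=150, num_task=12)
# outs = model(torch.randn(8, 4, 150)) # list of 12 tensors, each of shape [8]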
class NaiveNet_v1(nn.Module):
"""
CNN + LSTM + Attention
"""
def __init__(self,input_size=None,num_task=None):
self.num_task = num_task
super(NaiveNet_v1, self).__init__()
self.NaiveCNN = nn.Sequential(
nn.Conv1d(in_channels=4,out_channels=8,kernel_size=7,stride=2,padding=0),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Conv1d(in_channels=8,out_channels=32,kernel_size=3,stride=1,padding=1),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2,padding=1),
nn.Dropout(p=0.2),
nn.Conv1d(in_channels=32,out_channels=128,kernel_size=3,stride=1,padding=1),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2,padding=1)
)
self.NaiveBiLSTM = nn.LSTM(input_size=128,hidden_size=128,batch_first=True,bidirectional=True)
self.Attention = BahdanauAttention(in_features=256,hidden_units=10,num_task=num_task)
for i in range(num_task):
setattr(self, "NaiveFC%d" %i, nn.Sequential(
nn.Linear(in_features=256,out_features=64),
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_features=64,out_features=1),
nn.Sigmoid()
))
def forward(self,x):
x = self.NaiveCNN(x)
batch_size, features, seq_len = x.size()
x = x.transpose(1, 2) # prepare input for LSTM: transpose (not view) so timesteps are not scrambled
output, (h_n, c_n) = self.NaiveBiLSTM(x)
h_n = h_n.transpose(0, 1).reshape(batch_size, -1) # concatenate forward/backward final states for Attention
context_vector,attention_weights = self.Attention(h_n,output) # Attention (batch_size, num_task, unit)
outs = []
for i in range(self.num_task):
FClayer = getattr(self, "NaiveFC%d" %i)
y = FClayer(context_vector[:,i,:])
y = torch.squeeze(y, dim=-1)
outs.append(y)
return outs
class NaiveNet_v2(nn.Module):
"""
CNN + LSTM
"""
def __init__(self,input_size=None,num_task=None):
self.num_task = num_task
super(NaiveNet_v2, self).__init__()
self.NaiveCNN = nn.Sequential(
nn.Conv1d(in_channels=4,out_channels=8,kernel_size=7,stride=2,padding=0),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Conv1d(in_channels=8,out_channels=32,kernel_size=3,stride=1,padding=1),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2,padding=0),
nn.Dropout(p=0.2),
nn.Conv1d(in_channels=32,out_channels=128,kernel_size=3,stride=1,padding=1),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2,padding=0)
)
in_features_1 = (input_size - 7) // 2 + 1
in_features_2 = (in_features_1 - 2) // 2 + 1
in_features_3 = (in_features_2 - 2) // 2 + 1
self.NaiveBiLSTM = nn.LSTM(input_size=128,hidden_size=128,batch_first=True,bidirectional=True)
self.Flatten = nn.Flatten()
self.SharedFC = nn.Sequential(nn.Linear(in_features=in_features_3*256,out_features=1024),
nn.ReLU(),
nn.Dropout()
)
for i in range(num_task):
setattr(self, "NaiveFC%d" %i, nn.Sequential(
nn.Linear(in_features=1024,out_features=256),
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_features=256,out_features=64),
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_features=64,out_features=1),
nn.Sigmoid()
))
def forward(self,x):
x = self.NaiveCNN(x)
batch_size, features, seq_len = x.size()
x = x.transpose(1, 2) # prepare input for LSTM: transpose (not view) so timesteps are not scrambled
output, (h_n, c_n) = self.NaiveBiLSTM(x)
output = self.Flatten(output) # flatten output
shared_layer = self.SharedFC(output)
outs = []
for i in range(self.num_task):
FClayer = getattr(self, "NaiveFC%d" %i)
y = FClayer(shared_layer)
y = torch.squeeze(y, dim=-1)
outs.append(y)
return outs
class model_v3(nn.Module):
def __init__(self,num_task,use_embedding):
super(model_v3,self).__init__()
self.num_task = num_task
self.use_embedding = use_embedding
if self.use_embedding:
self.embed = EmbeddingSeq('../Embeddings/embeddings_12RM.pkl') # Word2Vec
# self.embed = EmbeddingHmm(t=3,out_dims=300) # hmm
self.NaiveBiLSTM = nn.LSTM(input_size=300,hidden_size=256,batch_first=True,bidirectional=True)
else:
self.NaiveBiLSTM = nn.LSTM(input_size=4,hidden_size=256,batch_first=True,bidirectional=True)
self.Attention = BahdanauAttention(in_features=512,hidden_units=100,num_task=num_task)
for i in range(num_task):
setattr(self, "NaiveFC%d" %i, nn.Sequential(
nn.Linear(in_features=512,out_features=128),
nn.ReLU(),
nn.Dropout(),
nn.Linear(in_features=128,out_features=1),
nn.Sigmoid()
))
def forward(self,x):
if self.use_embedding:
x = self.embed(x)
else:
x = torch.transpose(x,1,2)
batch_size = x.size()[0]
# x = torch.transpose(x,1,2)
output,(h_n,c_n) = self.NaiveBiLSTM(x)
h_n = h_n.transpose(0, 1).reshape(batch_size, -1) # concatenate forward/backward final states
context_vector,attention_weights = self.Attention(h_n,output)
# print(attention_weights.shape)
outs = []
for i in range(self.num_task):
FClayer = getattr(self, "NaiveFC%d" %i)
y = FClayer(context_vector[:,i,:])
y = torch.squeeze(y, dim=-1)
outs.append(y)
return outs
|
{"hexsha": "6b2361ada0c28f54c5856499134030e6b2c51d30", "size": 9274, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/models.py", "max_stars_repo_name": "Tsedao/MultiRM", "max_stars_repo_head_hexsha": "3947d0d74b8c6d6c7deeb534ef51fc1c85c27309", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-03-10T02:06:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T05:44:34.000Z", "max_issues_repo_path": "Scripts/models.py", "max_issues_repo_name": "Tsedao/MultiRM", "max_issues_repo_head_hexsha": "3947d0d74b8c6d6c7deeb534ef51fc1c85c27309", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-30T13:12:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T00:47:46.000Z", "max_forks_repo_path": "Scripts/models.py", "max_forks_repo_name": "Tsedao/MultiRM", "max_forks_repo_head_hexsha": "3947d0d74b8c6d6c7deeb534ef51fc1c85c27309", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-03-10T02:06:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T00:31:48.000Z", "avg_line_length": 45.0194174757, "max_line_length": 112, "alphanum_fraction": 0.4891093379, "include": true, "reason": "import numpy", "num_tokens": 2059}
|
/// @file
/// @copyright The code is licensed under the BSD License
/// <http://opensource.org/licenses/BSD-2-Clause>,
/// Copyright (c) 2013-2015 Alexandre Hamez.
/// @author Alexandre Hamez
#pragma once
#include <boost/filesystem.hpp>
namespace pnmc { namespace util {
/*------------------------------------------------------------------------------------------------*/
boost::filesystem::path
file(std::string);
/*------------------------------------------------------------------------------------------------*/
boost::filesystem::path
in_file(const std::string&);
/*------------------------------------------------------------------------------------------------*/
boost::filesystem::path
directory(const std::string&);
/*------------------------------------------------------------------------------------------------*/
}} // namespace pnmc::util
|
{"hexsha": "ed67c1332e5d791d5f6ae6c4625209faff79d917", "size": 886, "ext": "hh", "lang": "C++", "max_stars_repo_path": "support/util/paths.hh", "max_stars_repo_name": "ahamez/pnmc", "max_stars_repo_head_hexsha": "cee5f2e01edc2130278ebfc13f0f859230d65680", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2015-02-05T20:56:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T01:20:24.000Z", "max_issues_repo_path": "support/util/paths.hh", "max_issues_repo_name": "ahamez/pnmc", "max_issues_repo_head_hexsha": "cee5f2e01edc2130278ebfc13f0f859230d65680", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "support/util/paths.hh", "max_forks_repo_name": "ahamez/pnmc", "max_forks_repo_head_hexsha": "cee5f2e01edc2130278ebfc13f0f859230d65680", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5806451613, "max_line_length": 100, "alphanum_fraction": 0.3690744921, "num_tokens": 128}
|
import os
import sys
import numpy as np
import tensorflow as tf
class Model(object):
def __init__(self,
images,
labels,
cutout_size=None,
batch_size=32,
eval_batch_size=100,
clip_mode=None,
grad_bound=None,
l2_reg=1e-4,
lr_init=0.1,
lr_dec_start=0,
lr_dec_every=100,
lr_dec_rate=0.1,
keep_prob=1.0,
optim_algo=None,
sync_replicas=False,
num_aggregate=None,
num_replicas=None,
data_format="NHWC",
name="generic_model",
seed=None,
):
"""
Args:
lr_dec_every: number of epochs to decay
"""
print("-" * 80)
print("Build model {}".format(name))
self.cutout_size = cutout_size
self.batch_size = batch_size
self.eval_batch_size = eval_batch_size
self.clip_mode = clip_mode
self.grad_bound = grad_bound
self.l2_reg = l2_reg
self.lr_init = lr_init
self.lr_dec_start = lr_dec_start
self.lr_dec_rate = lr_dec_rate
self.keep_prob = keep_prob
self.optim_algo = optim_algo
self.sync_replicas = sync_replicas
self.num_aggregate = num_aggregate
self.num_replicas = num_replicas
self.data_format = data_format
self.name = name
self.seed = seed
self.global_step = None
self.valid_acc = None
self.test_acc = None
print("Build data ops")
with tf.device("/cpu:0"):
# training data
self.num_train_examples = np.shape(images["train"])[0]
self.num_train_batches = (
self.num_train_examples + self.batch_size - 1) // self.batch_size
x_train, y_train = tf.train.shuffle_batch(
[images["train"], labels["train"]],
batch_size=self.batch_size,
capacity=50000,
enqueue_many=True,
min_after_dequeue=0,
num_threads=16,
seed=self.seed,
allow_smaller_final_batch=True,
)
self.lr_dec_every = lr_dec_every * self.num_train_batches
def _pre_process(x):
x = tf.pad(x, [[4, 4], [4, 4], [0, 0]])
x = tf.random_crop(x, [32, 32, 3], seed=self.seed)
x = tf.image.random_flip_left_right(x, seed=self.seed)
if self.cutout_size is not None:
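# Cutout: pad a square mask of ones to a random offset, crop it back
# to 32x32, then zero the image wherever the mask is set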
mask = tf.ones(
[self.cutout_size, self.cutout_size], dtype=tf.int32)
start = tf.random_uniform(
[2], minval=0, maxval=32, dtype=tf.int32)
mask = tf.pad(mask, [[self.cutout_size + start[0], 32 - start[0]],
[self.cutout_size + start[1], 32 - start[1]]])
mask = mask[self.cutout_size: self.cutout_size + 32,
self.cutout_size: self.cutout_size + 32]
mask = tf.reshape(mask, [32, 32, 1])
mask = tf.tile(mask, [1, 1, 3])
x = tf.where(tf.equal(mask, 0), x=x, y=tf.zeros_like(x))
if self.data_format == "NCHW":
x = tf.transpose(x, [2, 0, 1])
return x
self.x_train = tf.map_fn(_pre_process, x_train, back_prop=False)
self.y_train = y_train
# valid data
self.x_valid, self.y_valid = None, None
if images["valid"] is not None:
images["valid_original"] = np.copy(images["valid"])
labels["valid_original"] = np.copy(labels["valid"])
if self.data_format == "NCHW":
images["valid"] = tf.transpose(
images["valid"], [0, 3, 1, 2])
self.num_valid_examples = np.shape(images["valid"])[0]
self.num_valid_batches = (
(self.num_valid_examples + self.eval_batch_size - 1)
// self.eval_batch_size)
self.x_valid, self.y_valid = tf.train.batch(
[images["valid"], labels["valid"]],
batch_size=self.eval_batch_size,
capacity=5000,
enqueue_many=True,
num_threads=1,
allow_smaller_final_batch=True,
)
# test data
if self.data_format == "NCHW":
images["test"] = tf.transpose(images["test"], [0, 3, 1, 2])
self.num_test_examples = np.shape(images["test"])[0]
self.num_test_batches = (
(self.num_test_examples + self.eval_batch_size - 1)
// self.eval_batch_size)
self.x_test, self.y_test = tf.train.batch(
[images["test"], labels["test"]],
batch_size=self.eval_batch_size,
capacity=10000,
enqueue_many=True,
num_threads=1,
allow_smaller_final_batch=True,
)
# cache images and labels
self.images = images
self.labels = labels
def eval_once(self, sess, eval_set, child_model, verbose=False):
"""Expects self.acc and self.global_step to be defined.
Args:
sess: tf.Session() or one of its wrappers.
eval_set: "valid" or "test"
"""
assert self.global_step is not None
global_step = sess.run(self.global_step)
print("Eval at {}".format(global_step))
if eval_set == "valid":
assert self.x_valid is not None
assert self.valid_acc is not None
num_examples = self.num_valid_examples
num_batches = self.num_valid_batches
acc_op = self.valid_acc
elif eval_set == "test":
assert self.test_acc is not None
num_examples = self.num_test_examples
num_batches = self.num_test_batches
acc_op = self.test_acc
else:
raise NotImplementedError("Unknown eval_set '{}'".format(eval_set))
total_acc = 0
total_exp = 0
for batch_id in range(num_batches):
acc = sess.run(acc_op)
total_acc += acc
total_exp += self.eval_batch_size
if verbose:
sys.stdout.write(
"\r{:<5d}/{:>5d}".format(total_acc, total_exp))
if verbose:
print("")
print("{}_accuracy: {:<6.4f}".format(
eval_set, float(total_acc) / total_exp))
return float(total_acc) / total_exp
def _model(self, images, is_training, reuse=None):
raise NotImplementedError("Abstract method")
def _build_train(self):
raise NotImplementedError("Abstract method")
def _build_valid(self):
raise NotImplementedError("Abstract method")
def _build_test(self):
raise NotImplementedError("Abstract method")
|
{"hexsha": "089fe846a6e1c0bce909ee7fdd1737130f6206e1", "size": 7331, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/trials/nas_cifar10/src/cifar10/models.py", "max_stars_repo_name": "runauto/nni", "max_stars_repo_head_hexsha": "30152b04c4739f5b4f95087dee5f1e66ee893078", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-03T09:00:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-03T09:00:49.000Z", "max_issues_repo_path": "examples/trials/nas_cifar10/src/cifar10/models.py", "max_issues_repo_name": "runauto/nni", "max_issues_repo_head_hexsha": "30152b04c4739f5b4f95087dee5f1e66ee893078", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/trials/nas_cifar10/src/cifar10/models.py", "max_forks_repo_name": "runauto/nni", "max_forks_repo_head_hexsha": "30152b04c4739f5b4f95087dee5f1e66ee893078", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-11T13:19:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-11T13:19:26.000Z", "avg_line_length": 37.2131979695, "max_line_length": 87, "alphanum_fraction": 0.5182103397, "include": true, "reason": "import numpy", "num_tokens": 1565}
|
# -*- coding:utf-8 -*-
# author: Huang Zilong
# Shuffle the wav files randomly and attach integer labels
import numpy as np
from DCASE2018_1 import read_file
def file_path_shuffle(feature, label):
train_f, train_l = np.array(feature), np.array(label)
np.random.seed(1)
shuffle_indices = np.random.permutation(np.arange(len(train_f)))
train_f = train_f[shuffle_indices]
train_l = train_l[shuffle_indices]
train_file, train_label = list(train_f), list(train_l)
label_map = {'airport\n': 0, 'shopping_mall\n': 1, 'metro_station\n': 2,
'street_pedestrian\n': 3, 'public_square\n': 4, 'street_traffic\n': 5,
'tram\n': 6, 'bus\n': 7, 'metro\n': 8, 'park\n': 9}
train_label = [label_map.get(label, label) for label in train_label]
return train_file, train_label
# if __name__ == '__main__':
# a= 'D:\\007DataSet\\TUT\\DCASE2018\\fold1_train.txt'
# b= 'D:\\007DataSet\\TUT\\DCASE2018\\fold1_evaluate.txt'
# train_x, train_y = read_file.read_file(a)
# train_x, train_y = file_path_shuffle(train_x,train_y)
# val_x, val_y = read_file.read_file(b)
# val_x, val_y = file_path_shuffle(val_x,val_y)
# print(len(train_x))
# print(len(train_y))
# print(train_x[0:5])
# print(train_y[0:5])
# print(len(val_x))
# print(len(val_y))
# print(val_x[0:5])
# print(val_y[0:5])
|
{"hexsha": "b3570e64af7a4735f178700b13a7d405778846f6", "size": 1823, "ext": "py", "lang": "Python", "max_stars_repo_path": "file_shuffle.py", "max_stars_repo_name": "zw76859420/Voiceprint_Recognition", "max_stars_repo_head_hexsha": "b883297fe8c61858a2a5ad83064ed0da0456c7b7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-08T08:48:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-08T08:48:59.000Z", "max_issues_repo_path": "file_shuffle.py", "max_issues_repo_name": "zw76859420/Voiceprint_Recognition", "max_issues_repo_head_hexsha": "b883297fe8c61858a2a5ad83064ed0da0456c7b7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "file_shuffle.py", "max_forks_repo_name": "zw76859420/Voiceprint_Recognition", "max_forks_repo_head_hexsha": "b883297fe8c61858a2a5ad83064ed0da0456c7b7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7450980392, "max_line_length": 68, "alphanum_fraction": 0.6023038947, "include": true, "reason": "import numpy", "num_tokens": 536}
|
#!/usr/bin/env python
'''
Script for predicting PHOCs for a number of images residing in a folder on
disk.
'''
import argparse
import logging
import os
import caffe
import numpy as np
import cv2
from phocnet.evaluation.cnn import net_output_for_word_image_list
def load_siamese_model(siamese_model, siamese_proto, phoc_proto):
siamese_model = caffe.Net(siamese_proto, siamese_model, caffe.TEST)
phoc_model = caffe.Net(phoc_proto, caffe.TEST)
# ignore last layer
layers = [f for f in phoc_model.params.keys() if f != 'fc8']
for layer in layers:
# for the Siamese network, load only one branch as PHOCNet base
pre_layer = '{}l'.format(layer)
phoc_model.params[layer][0].data[...] = (siamese_model
.params[pre_layer][0].data)
phoc_model.params[layer][1].data[...] = (siamese_model
.params[pre_layer][1].data)
return phoc_model
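# Hypothetical usage (paths are placeholders):
# phoc_model = load_siamese_model('siamese.caffemodel',
# 'siamese_deploy.prototxt', 'phocnet_deploy.prototxt')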
def load_images(img_dir):
# check input structure: plain vs. folder structure
img_names = [f for f in os.listdir(img_dir) if f.endswith('.png')]
if len(img_names) == 0:
classes = [f for f in os.listdir(img_dir)
if os.path.isdir(os.path.join(img_dir, f))]
img_paths = [os.path.join(img_dir, c, f)
for c in classes
for f in os.listdir(os.path.join(img_dir, c))
if f.endswith('.png')]
img_ids = ['{}_{}'.format(os.path.basename(os.path.dirname(f)),
os.path.basename(f).split('.')[0])
for f in img_paths]
else:
img_ids = [f.split('.')[0] for f in img_names]
img_paths = [os.path.join(img_dir, f) for f in img_names]
imgs = [cv2.imread(f, cv2.IMREAD_GRAYSCALE) for f in img_paths]
return imgs, img_ids
def main(img_dir, output_dir, pretrained_phocnet, deploy_proto,
output_layer, min_image_width_height, gpu_id):
logging_format = '[%(asctime)-19s, %(name)s, %(levelname)s] %(message)s'
logging.basicConfig(level=logging.INFO,
format=logging_format)
logger = logging.getLogger('Predict PHOCs')
if gpu_id is None:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
logger.info('Loading PHOCNet...')
phocnet = caffe.Net(deploy_proto, caffe.TEST,
weights=pretrained_phocnet)
# find all images in the supplied dir
word_img_list, img_ids = load_images(img_dir)
logger.info('Found %d word images to process', len(img_ids))
# push images through the PHOCNet
logger.info('Predicting PHOCs...')
predicted_phocs = net_output_for_word_image_list(
phocnet=phocnet,
word_img_list=word_img_list,
min_img_width_height=min_image_width_height,
output_layer=output_layer)
# save everything
logger.info('Saving...')
np.savez(os.path.join(output_dir,
'predicted_output_{}.npz'.format(output_layer)),
img_ids=img_ids,
output=predicted_phocs)
logger.info('Finished')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Predict PHOCs from a '
'pretrained PHOCNet. The PHOCs are saved '
' as Numpy Array to disk.')
parser.add_argument('--min_image_width_height', '-miwh', action='store',
type=int, default=26, help='The minimum image width '
'or height to be passed through the PHOCNet. '
'Default: 26')
parser.add_argument('--output_dir', '-od', action='store', type=str,
default='.', help='The directory where to store the '
'PHOC Numpy Array. Default: .')
parser.add_argument('--img_dir', '-id', action='store', type=str,
required=True, help='All images in this folder are '
'processed in ASCII order of their respective names.'
' A PHOC is predicted for each.')
parser.add_argument('--pretrained_phocnet', '-pp', action='store',
type=str, required=True, help='Path to a pretrained '
'PHOCNet binaryproto file.')
parser.add_argument('--deploy_proto', '-dp', action='store', type=str,
required=True, help='Path to PHOCNet deploy prototxt '
'file.')
parser.add_argument('--output_layer', help='Store output of provided '
'layer. Default: sigmoid', default='sigmoid')
parser.add_argument('--gpu_id', '-gpu', action='store', type=int,
help='The ID of the GPU to use. If not specified, '
'training is run in CPU mode.')
args = vars(parser.parse_args())
main(**args)
|
{"hexsha": "0d0798500476770b7e38666acf461e14cde25346", "size": 5075, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/predict_phocs.py", "max_stars_repo_name": "FlorianWestphal/phocnet", "max_stars_repo_head_hexsha": "737b7bdd58441fc0a1fa35013db885bfa2cfdfe0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/predict_phocs.py", "max_issues_repo_name": "FlorianWestphal/phocnet", "max_issues_repo_head_hexsha": "737b7bdd58441fc0a1fa35013db885bfa2cfdfe0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/predict_phocs.py", "max_forks_repo_name": "FlorianWestphal/phocnet", "max_forks_repo_head_hexsha": "737b7bdd58441fc0a1fa35013db885bfa2cfdfe0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9421487603, "max_line_length": 79, "alphanum_fraction": 0.5769458128, "include": true, "reason": "import numpy", "num_tokens": 1115}
|
#!/usr/bin/env python3
from visualization import Scenes3DVisualizer
from visualization import set_pointcloud_obj
from visualization import set_camera_view
from odom import SiftOdom, OrbOdom
from camera import Camera
import open3d as o3d
import numpy as np
import argparse
import pykitti
import cv2
parser = argparse.ArgumentParser("demo_trajectory")
parser.add_argument('--data', default='kitti', dest='data')
parser.add_argument('--sequence', type=int, default=7, dest='vid_seq')
parser.add_argument('--model', default='orb', dest='model')
parser.add_argument('--nbr-features', type=int, default=500, dest='nbr_features')
parser.add_argument('--camera', default='mono', dest='camera')
parser.add_argument('--method', default='direct', dest='matcher')
parser.add_argument('--save', default=False, dest='save')
args = parser.parse_args()
cv_win_title = f"{args.data}-{args.camera}-{args.model}"
kitti_src = "/home/loay/Documents/datasets/kitti/raw/odometry_gray/dataset"
kitti_odom = pykitti.odometry(kitti_src, "%02d"%args.vid_seq)
cam0 = Camera(*(960, 290), kitti_odom.calib.P_rect_00)
odom_params = (cam0.K, args.nbr_features)
model = OrbOdom(*odom_params) if args.model == 'orb' else SiftOdom(*odom_params)
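# flip the Y and Z axes to map camera/image coordinates into the point-cloud
# display frame used by the visualizer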
img_to_pcd_tf = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
gt_path = np.array([pose[:3, 3] for pose in kitti_odom.poses])
pcd_gt_path = o3d.t.geometry.PointCloud()
pcd_pred_path = o3d.t.geometry.PointCloud()
cam_view_mesh = o3d.t.geometry.LineSet()
set_pointcloud_obj(pcd_gt_path, np.array(gt_path), color=[0, 0, 1], tf=img_to_pcd_tf)
vis3d = Scenes3DVisualizer("Odom")
vis3d.add_geometries([{"name":"gt_path", "geometry": pcd_gt_path}])
cv2.namedWindow(cv_win_title)
def main(args):
try:
for frame_id , img in enumerate(kitti_odom.cam0):
frame = np.asarray(img)
try:
model.track_motion(frame)
frame = cv2.resize(frame, (960, 290))
set_pointcloud_obj(pcd_pred_path, model.trajectory, color=[1, 0, 0], tf=img_to_pcd_tf)
cam0.setProjectionMtx(model.cam_tf[-1][:3]) # last TF -> P(3x4)
set_camera_view(cam_view_mesh, cam0.view(20), tf=img_to_pcd_tf)
vis3d.add_geometries([
{"name":"pred_path", "geometry": pcd_pred_path},
{"name":"cam_view", "geometry": cam_view_mesh},
])
vis3d.show(interactive=False, line_width=3, point_size=3)
except Exception as e: print(e)
cv2.imshow(cv_win_title, frame)
cv2.moveWindow(cv_win_title, 0, 0)
if cv2.waitKey(1) == ord('q'): break
except KeyboardInterrupt: pass
if args.save:
np.savez(
f"./output/{args.data}-{args.model}-{args.nbr_features}-{args.camera}-visual_odom.npz",
trajectory=model.trajectory,
motion=model.cam_tf
)
cv2.destroyAllWindows()
if __name__ == '__main__':
print("*"*50)
print(f"data: {args.data},\t model: {args.model},\t camera-type: {args.camera}")
print("*"*50)
main(args)
|
{"hexsha": "d31b5d3761dd4cf3c71e1abeee359f25246d94b9", "size": 3112, "ext": "py", "lang": "Python", "max_stars_repo_path": "kitti_test.py", "max_stars_repo_name": "loaywael/VisualOdom", "max_stars_repo_head_hexsha": "c090a78d7166ce12e0b526df0219015949c3de79", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kitti_test.py", "max_issues_repo_name": "loaywael/VisualOdom", "max_issues_repo_head_hexsha": "c090a78d7166ce12e0b526df0219015949c3de79", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kitti_test.py", "max_forks_repo_name": "loaywael/VisualOdom", "max_forks_repo_head_hexsha": "c090a78d7166ce12e0b526df0219015949c3de79", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3924050633, "max_line_length": 102, "alphanum_fraction": 0.6603470437, "include": true, "reason": "import numpy", "num_tokens": 856}
|