from typing import Any, Dict, Optional, List
import os
import numpy as np
from ramachandran.geometry import protein_backbone_dihedral_angle_phi, protein_backbone_dihedral_angle_psi
from ramachandran.torsion import ResidueTorsionCollection, ResidueTorsion
from tqdm import tqdm
from multiprocessing import Pool
from functools import partial
def read_pdb(pdb_file_path: str) -> Dict[Any, Any]:
"""Read the basic information of a protein from a PDB file.
The function does minimal format checking for the PDB format.
Please make sure the PDB format is correct before calling the function.
Args:
pdb_file_path (str): file path to the PDB file.
Returns:
Dict[Any, Any]: A dictionary containing the basic protein information.
"""
sequence = {}
with open(pdb_file_path, "r") as fp:
for line in fp:
# Check if the line is SEQRES
if line[0:6] == "SEQRES":
items = line.split()
chain_identifier = items[2]
amino_acids = items[4:]
if chain_identifier not in sequence:
sequence[chain_identifier] = []
sequence[chain_identifier] += amino_acids
protein = {}
with open(pdb_file_path, "r") as fp:
for line in fp:
# Check if the line is ATOM
# 1-4 Record Type
if line[0:4] == "ATOM":
# Splitting on whitespace is not best practice for the fixed-column
# PDB format, but it is adequate for the ATOM records handled here.
items = line.split()
try:
atom_serial_number = int(items[1])
except ValueError:
raise RuntimeError(
"Atom serial number has to be an integer. Got {}".format(
items[1]))
atom_name = items[2]
residue_name = items[3]
chain_identifier = items[4]
try:
residue_sequence_number = int(items[5])
except ValueError:
raise RuntimeError(
"Residue sequence number has to be an integer.")
try:
x = float(items[6])
y = float(items[7])
z = float(items[8])
except ValueError:
raise RuntimeError(
"Atom coordinate has to be a real value.")
try:
b_factor = float(items[10])
except ValueError:
raise RuntimeError("B factor has to be a real value.")
atom_coordinates = ((x, y, z), b_factor)
if chain_identifier not in protein:
protein[chain_identifier] = {}
if residue_sequence_number not in protein[chain_identifier]:
protein[chain_identifier][residue_sequence_number] = {}
if "residue" not in protein[chain_identifier][
residue_sequence_number]:
protein[chain_identifier][residue_sequence_number][
"residue"] = residue_name
if "is_pre_proline" not in protein[chain_identifier][
residue_sequence_number]:
chain_sequence = sequence[chain_identifier]
num_residues = len(chain_sequence)
if residue_sequence_number == num_residues:
protein[chain_identifier][residue_sequence_number][
"is_pre_proline"] = False
else:
protein[chain_identifier][residue_sequence_number][
"is_pre_proline"] = (
chain_sequence[residue_sequence_number] ==
"PRO")
protein[chain_identifier][residue_sequence_number][
atom_name] = atom_coordinates
return protein
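# A minimal usage sketch (the file path, chain, and residue number are
# hypothetical). The nested layout is protein[chain][residue_number][atom_name]:
#
#     protein = read_pdb(pdb_file_path="example.pdb")
#     (x, y, z), b_factor = protein["A"][42]["CA"]
#     residue_name = protein["A"][42]["residue"]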
def read_pdbx(pdbx_file_path: str) -> Dict[Any, Any]:
"""Read the basic information of a protein from a PDBx file.
The function does minimal format checking for the PDBx format.
Please make sure the PDBx format is correct before calling the function.
Args:
pdbx_file_path (str): file path to the PDBx file.
Returns:
Dict[Any, Any]: A dictionary containing the basic protein information.
"""
sequence = {}
with open(pdbx_file_path, "r") as fp:
is_sequence = False
for line in fp:
if line.strip() == "#":
is_sequence = False
# Check if the line is SEQRES
if is_sequence:
items = line.split()
assert len(items) == 4, line
chain_identifier = items[0]
amino_acid = items[2]
if chain_identifier not in sequence:
sequence[chain_identifier] = []
sequence[chain_identifier].append(amino_acid)
if line.strip() == "_entity_poly_seq.hetero":
is_sequence = True
protein = {}
# http://ww1.iucr.org/iucr-top/cif/mmcif/workshop/mmCIF-tutorials/intro/atom.htm
with open(pdbx_file_path, "r") as fp:
is_atom = False
for line in fp:
if line.strip() == "#":
is_atom = False
# Check if the line is ATOM
# 1-4 Record Type
if is_atom and line[0:4] == "ATOM":
# Splitting on whitespace is not best practice for the PDBx/mmCIF
# format, but it is adequate for the ATOM records handled here.
items = line.split()
try:
atom_serial_number = int(items[1])
except ValueError:
raise RuntimeError(
"Atom serial number has to be an integer. Got {}".format(
items[1]))
atom_name = items[3]
residue_name = items[5]
chain_identifier = items[6]
chain_sequence_identifier = items[7]
try:
residue_sequence_number = int(items[8])
except ValueError:
raise RuntimeError(
"Residue sequence number has to be an integer.")
try:
x = float(items[10])
y = float(items[11])
z = float(items[12])
except ValueError:
raise RuntimeError(
"Atom coordinate has to be a real value.")
try:
b_factor = float(items[14])
except ValueError:
raise RuntimeError("B factor has to be a real value.")
atom_coordinates = ((x, y, z), b_factor)
if chain_identifier not in protein:
protein[chain_identifier] = {}
if residue_sequence_number not in protein[chain_identifier]:
protein[chain_identifier][residue_sequence_number] = {}
if "residue" not in protein[chain_identifier][
residue_sequence_number]:
protein[chain_identifier][residue_sequence_number][
"residue"] = residue_name
if "is_pre_proline" not in protein[chain_identifier][
residue_sequence_number]:
chain_sequence = sequence[chain_sequence_identifier]
num_residues = len(chain_sequence)
if residue_sequence_number == num_residues:
protein[chain_identifier][residue_sequence_number][
"is_pre_proline"] = False
else:
protein[chain_identifier][residue_sequence_number][
"is_pre_proline"] = (
chain_sequence[residue_sequence_number] ==
"PRO")
protein[chain_identifier][residue_sequence_number][
atom_name] = atom_coordinates
if line.strip() == "_atom_site.pdbx_PDB_model_num":
is_atom = True
return protein
def read_residue_torsion_collection_from_protein(
protein: Dict[Any, Any],
b_factor_threshold: Optional[float] = None
) -> ResidueTorsionCollection:
"""Read torsion collection from a protein dictionary.
Args:
protein (Dict[Any, Any]): A protein dictionary.
b_factor_threshold (Optional[float], optional): B-factor indicating the uncertainty of the atom coordinates. Defaults to None.
Returns:
ResidueTorsionCollection: A residue torsion collection for the protein given.
"""
residue_torsion_collection = ResidueTorsionCollection()
for chain_identifier in protein:
chain = protein[chain_identifier]
for residue_sequence_number in chain:
residue = chain[residue_sequence_number]
residue_name = residue["residue"]
# Skip the first, the last, and problematic residues
if residue_sequence_number - 1 not in chain or residue_sequence_number + 1 not in chain:
continue
last_residue = chain[residue_sequence_number - 1]
next_residue = chain[residue_sequence_number + 1]
next_residue_name = next_residue["residue"]
# Skip residues that have missing information needed to compute the dihedral angles
if "N" not in residue or "CA" not in residue or "C" not in residue or "C" not in last_residue or "N" not in next_residue:
continue
n, b_factor_n = residue["N"]
c_alpha, b_factor_c_alpha = residue["CA"]
c, b_factor_c = residue["C"]
c_minus, b_factor_c_minus = last_residue["C"]
n_plus, b_factor_n_plus = next_residue["N"]
if b_factor_threshold is not None:
if b_factor_n > b_factor_threshold or b_factor_c_alpha > b_factor_threshold or b_factor_c > b_factor_threshold or b_factor_c_minus > b_factor_threshold or b_factor_n_plus > b_factor_threshold:
continue
phi = protein_backbone_dihedral_angle_phi(c_minus=c_minus,
n=n,
c_alpha=c_alpha,
c=c)
psi = protein_backbone_dihedral_angle_psi(n=n,
c_alpha=c_alpha,
c=c,
n_plus=n_plus)
phi = np.rad2deg(phi)
psi = np.rad2deg(psi)
is_pre_proline = (next_residue_name == "PRO")
residue_torsion = ResidueTorsion(phi=phi,
psi=psi,
residue_type=residue_name,
is_pre_proline=is_pre_proline)
residue_torsion_collection.append(residue_torsion=residue_torsion)
return residue_torsion_collection
def read_residue_torsion_collection_from_file(
file_path: str,
b_factor_threshold: Optional[float] = None
) -> ResidueTorsionCollection:
"""Read torsion collection from a PDB or PDBx file.
Args:
file_path (str): file path to a PDB or PDBx file.
b_factor_threshold (Optional[float], optional): B-factor indicating the uncertainty of the atom coordinates. Defaults to None.
Raises:
RuntimeError: When non-PDB or non-PDBx files were provided, raise RuntimeError.
Returns:
ResidueTorsionCollection: A residue torsion collection for the PDB or PDBx file given.
"""
_, file_extension = os.path.splitext(file_path)
if file_extension == ".pdb":
protein = read_pdb(pdb_file_path=file_path)
elif file_extension == ".cif":
protein = read_pdbx(pdbx_file_path=file_path)
else:
raise RuntimeError(
"Only files with extensions of pdb and cif are supported.")
residue_torsion_collection = read_residue_torsion_collection_from_protein(
protein=protein, b_factor_threshold=b_factor_threshold)
return residue_torsion_collection
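# A minimal usage sketch (file path and threshold are hypothetical). Dispatch
# is driven purely by the file extension: ".pdb" -> read_pdb, ".cif" ->
# read_pdbx, and anything else raises RuntimeError:
#
#     collection = read_residue_torsion_collection_from_file(
#         file_path="example.cif", b_factor_threshold=30)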
def read_residue_torsion_collection_from_file_resolved(
file_path: str,
b_factor_threshold: Optional[float] = None
) -> ResidueTorsionCollection:
try:
residue_torsion_collection = read_residue_torsion_collection_from_file(
file_path=file_path, b_factor_threshold=b_factor_threshold)
except:
residue_torsion_collection = ResidueTorsionCollection()
return residue_torsion_collection
def read_residue_torsion_collection_from_files(
file_paths: List[str],
num_processes: int = 4,
b_factor_threshold: Optional[float] = None
) -> ResidueTorsionCollection:
pool = Pool(processes=num_processes)
residue_torsion_collection_all = ResidueTorsionCollection()
partial_func = partial(read_residue_torsion_collection_from_file_resolved,
b_factor_threshold=b_factor_threshold)
jobs = [
pool.apply_async(func=partial_func, args=(file_path, ))
for file_path in file_paths
]
pool.close()
for job in tqdm(jobs):
residue_torsion_collection_all += job.get()
return residue_torsion_collection_all
def read_residue_torsion_collection_from_directory(
dir_path: str,
b_factor_threshold: float = 30,
num_processes: int = 4) -> ResidueTorsionCollection:
valid_file_paths = []
for f in os.listdir(dir_path):
_, file_extension = os.path.splitext(f)
file_path = os.path.join(dir_path, f)
if os.path.isfile(file_path) and (file_extension == ".pdb"
or file_extension == ".cif"):
valid_file_paths.append(file_path)
residue_torsion_collection = read_residue_torsion_collection_from_files(
file_paths=valid_file_paths,
num_processes=num_processes,
b_factor_threshold=b_factor_threshold)
return residue_torsion_collection
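# A minimal usage sketch (the directory path is hypothetical). Files are
# parsed in a process pool, and a file that fails to parse contributes an
# empty collection instead of aborting the whole run:
#
#     collection = read_residue_torsion_collection_from_directory(
#         dir_path="./structures", b_factor_threshold=30, num_processes=4)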
{"hexsha": "37de42a4127df8a737c4a120f734794fc513b5c5", "size": 14349, "ext": "py", "lang": "Python", "max_stars_repo_path": "ramachandran/io.py", "max_stars_repo_name": "leimao/Ramachandran", "max_stars_repo_head_hexsha": "8080697cced0b33792493de8d784467734433ca5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-15T07:11:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-23T09:25:54.000Z", "max_issues_repo_path": "ramachandran/io.py", "max_issues_repo_name": "leimao/Ramachandran", "max_issues_repo_head_hexsha": "8080697cced0b33792493de8d784467734433ca5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ramachandran/io.py", "max_forks_repo_name": "leimao/Ramachandran", "max_forks_repo_head_hexsha": "8080697cced0b33792493de8d784467734433ca5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1435768262, "max_line_length": 208, "alphanum_fraction": 0.5681929054, "include": true, "reason": "import numpy", "num_tokens": 2857}
#! /usr/bin/env python
__author__ = 'frankhe'
import lasagne
import numpy as np
import theano
import theano.tensor as T
from updates import deepmind_rmsprop
class DeepQLearner:
def __init__(self, input_width, input_height, num_actions,
num_frames, discount, learning_rate, rho,
rms_epsilon, momentum, clip_delta, freeze_interval,
batch_size, network_type, update_rule,
batch_accumulator, rng, input_scale=255.0,
double=False, transition_length=4):
if double:
print 'USING DOUBLE DQN'
self.input_width = input_width
self.input_height = input_height
self.num_actions = num_actions
self.num_frames = num_frames
self.batch_size = batch_size
self.discount = discount
self.rho = rho
self.lr = learning_rate
self.rms_epsilon = rms_epsilon
self.momentum = momentum
self.clip_delta = clip_delta
self.freeze_interval = freeze_interval
self.rng = rng
lasagne.random.set_rng(self.rng)
self.update_counter = 0
self.l_out = self.build_network(network_type, input_width, input_height,
num_actions, num_frames, batch_size)
if self.freeze_interval > 0:
self.next_l_out = self.build_network(network_type, input_width,
input_height, num_actions,
num_frames, batch_size)
self.reset_q_hat()
states = T.tensor4('states_t')
actions = T.icol('actions_t')
target = T.col('evaluation_t')
self.states_shared = theano.shared(
np.zeros((batch_size, num_frames, input_height, input_width),
dtype=theano.config.floatX))
self.actions_shared = theano.shared(
np.zeros((batch_size, 1), dtype='int32'),
broadcastable=(False, True))
self.target_shared = theano.shared(
np.zeros((batch_size, 1), dtype=theano.config.floatX),
broadcastable=(False, True))
self.states_transition_shared = theano.shared(
np.zeros((batch_size, transition_length * 2, num_frames, input_height, input_width),
dtype=theano.config.floatX))
self.states_one_shared = theano.shared(
np.zeros((num_frames, input_height, input_width),
dtype=theano.config.floatX))
q_vals = lasagne.layers.get_output(self.l_out, states / input_scale)
"""get Q(s) batch_size = 1 """
q1_givens = {
states: self.states_one_shared.reshape((1,
self.num_frames,
self.input_height,
self.input_width))
}
self._q1_vals = theano.function([], q_vals[0], givens=q1_givens)
"""get Q(s) batch_size = batch size """
q_batch_givens = {
states: self.states_shared.reshape((self.batch_size,
self.num_frames,
self.input_height,
self.input_width))
}
self._q_batch_vals = theano.function([], q_vals, givens=q_batch_givens)
action_mask = T.eq(T.arange(num_actions).reshape((1, -1)),
actions.reshape((-1, 1))).astype(theano.config.floatX)
q_s_a = (q_vals * action_mask).sum(axis=1).reshape((-1, 1))
""" get Q(s,a) batch_size = batch size """
q_s_a_givens = {
states: self.states_shared.reshape((self.batch_size,
self.num_frames,
self.input_height,
self.input_width)),
actions: self.actions_shared
}
self._q_s_a_vals = theano.function([], q_s_a, givens=q_s_a_givens)
if self.freeze_interval > 0:
q_target_vals = lasagne.layers.get_output(self.next_l_out,
states / input_scale)
else:
q_target_vals = lasagne.layers.get_output(self.l_out,
states / input_scale)
q_target_vals = theano.gradient.disconnected_grad(q_target_vals)
if not double:
q_target = T.max(q_target_vals, axis=1)
else:
greedy_actions = T.argmax(q_vals, axis=1)
q_target_mask = T.eq(T.arange(num_actions).reshape((1, -1)),
greedy_actions.reshape((-1, 1))).astype(theano.config.floatX)
q_target = (q_target_vals * q_target_mask).sum(axis=1).reshape((-1, 1))
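# Double DQN decouples action selection from action evaluation: the
# online network (q_vals) picks the greedy action, while the frozen
# target network (q_target_vals, when freeze_interval > 0) supplies
# its value. This reduces the overestimation bias of max-based targets.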
"""get Q target Q'(s,a') for a batch of transitions batch size = batch_size * transition length"""
q_target_transition_givens = {
states: self.states_transition_shared.reshape(
(batch_size * transition_length * 2, self.num_frames, self.input_height, self.input_width))
}
self._q_target = theano.function([], q_target.reshape((batch_size, transition_length * 2)),
givens=q_target_transition_givens)
"""get Q target_vals Q'(s) for a batch of transitions batch size = batch_size * transition length"""
self._q_target_vals = theano.function([], q_target_vals.reshape(
(batch_size, transition_length * 2, num_actions)), givens=q_target_transition_givens)
diff = q_s_a - target
if self.clip_delta > 0:
# If we simply take the squared clipped diff as our loss,
# then the gradient will be zero whenever the diff exceeds
# the clip bounds. To avoid this, we extend the loss
# linearly past the clip point to keep the gradient constant
# in that regime.
#
# This is equivalent to declaring d loss/d q_vals to be
# equal to the clipped diff, then backpropagating from
# there, which is what the DeepMind implementation does.
quadratic_part = T.minimum(abs(diff), self.clip_delta)
linear_part = abs(diff) - quadratic_part
loss = 0.5 * quadratic_part ** 2 + self.clip_delta * linear_part
else:
loss = 0.5 * diff ** 2
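# For reference, the clipped branch above is the Huber loss:
#   loss(d) = 0.5 * d**2                             for |d| <= clip_delta
#   loss(d) = clip_delta * (|d| - 0.5 * clip_delta)  for |d| >  clip_delta
# so the gradient equals d inside the clip region and +/-clip_delta outside.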
if batch_accumulator == 'sum':
loss = T.sum(loss)
elif batch_accumulator == 'mean':
loss = T.mean(loss)
else:
raise ValueError("Bad accumulator: {}".format(batch_accumulator))
params = lasagne.layers.helper.get_all_params(self.l_out)
if update_rule == 'deepmind_rmsprop':
updates = deepmind_rmsprop(loss, params, self.lr, self.rho,
self.rms_epsilon)
elif update_rule == 'rmsprop':
updates = lasagne.updates.rmsprop(loss, params, self.lr, self.rho,
self.rms_epsilon)
elif update_rule == 'sgd':
updates = lasagne.updates.sgd(loss, params, self.lr)
else:
raise ValueError("Unrecognized update: {}".format(update_rule))
if self.momentum > 0:
updates = lasagne.updates.apply_momentum(updates, None,
self.momentum)
"""Q(s,a) target train()"""
train_givens = {
states: self.states_shared,
actions: self.actions_shared,
target: self.target_shared
}
self._train = theano.function([], [loss], updates=updates, givens=train_givens, on_unused_input='warn')
self._train2 = theano.function([], [loss], updates=updates, givens=train_givens, on_unused_input='warn')
def q_vals(self, single_state):
self.states_one_shared.set_value(single_state)
return self._q1_vals()
def q_batch_vals(self, states):
self.states_shared.set_value(states)
return self._q_batch_vals()
def q_s_a_batch_vals(self, states, actions):
self.states_shared.set_value(states)
self.actions_shared.set_value(actions)
return self._q_s_a_vals()
def q_target(self, batch_transition_states):
self.states_transition_shared.set_value(batch_transition_states)
return self._q_target()
def q_target_vals(self, batch_transition_states):
self.states_transition_shared.set_value(batch_transition_states)
return self._q_target_vals()
def train(self, states, actions, target):
self.states_shared.set_value(states)
self.actions_shared.set_value(actions)
self.target_shared.set_value(target)
if self.freeze_interval > 0 and self.update_counter % self.freeze_interval == 0:
self.reset_q_hat()
loss = self._train()
self.update_counter += 1
return np.sqrt(loss)
def train2(self, states, actions, target):
self.states_shared.set_value(states)
self.actions_shared.set_value(actions)
self.target_shared.set_value(target)
if self.freeze_interval > 0 and self.update_counter % self.freeze_interval == 0:
self.reset_q_hat()
loss = self._train2()
return np.sqrt(loss)
def build_network(self, network_type, input_width, input_height,
output_dim, num_frames, batch_size):
if network_type == "nature_cuda":
return self.build_nature_network(input_width, input_height,
output_dim, num_frames, batch_size)
if network_type == "nature_dnn":
return self.build_nature_network_dnn(input_width, input_height,
output_dim, num_frames,
batch_size)
elif network_type == "linear":
return self.build_linear_network(input_width, input_height,
output_dim, num_frames, batch_size)
else:
raise ValueError("Unrecognized network: {}".format(network_type))
def choose_action(self, state, epsilon):
if self.rng.rand() < epsilon:
return self.rng.randint(0, self.num_actions)
q_vals = self.q_vals(state)
return np.argmax(q_vals)
def reset_q_hat(self):
all_params = lasagne.layers.helper.get_all_param_values(self.l_out)
lasagne.layers.helper.set_all_param_values(self.next_l_out, all_params)
def build_nature_network(self, input_width, input_height, output_dim,
num_frames, batch_size):
"""
Build a large network consistent with the DeepMind Nature paper.
"""
from lasagne.layers import cuda_convnet
l_in = lasagne.layers.InputLayer(
shape=(None, num_frames, input_width, input_height)
)
l_conv1 = cuda_convnet.Conv2DCCLayer(
l_in,
num_filters=32,
filter_size=(8, 8),
stride=(4, 4),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(), # override Lasagne's GlorotUniform default
b=lasagne.init.Constant(.1),
dimshuffle=True
)
l_conv2 = cuda_convnet.Conv2DCCLayer(
l_conv1,
num_filters=64,
filter_size=(4, 4),
stride=(2, 2),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1),
dimshuffle=True
)
l_conv3 = cuda_convnet.Conv2DCCLayer(
l_conv2,
num_filters=64,
filter_size=(3, 3),
stride=(1, 1),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1),
dimshuffle=True
)
l_hidden1 = lasagne.layers.DenseLayer(
l_conv3,
num_units=512,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1)
)
l_out = lasagne.layers.DenseLayer(
l_hidden1,
num_units=output_dim,
nonlinearity=None,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1)
)
return l_out
def build_nature_network_dnn(self, input_width, input_height, output_dim,
num_frames, batch_size):
"""
Build a large network consistent with the DeepMind Nature paper.
"""
from lasagne.layers import dnn
l_in = lasagne.layers.InputLayer(
shape=(None, num_frames, input_width, input_height)
)
l_conv1 = dnn.Conv2DDNNLayer(
l_in,
num_filters=32,
filter_size=(8, 8),
stride=(4, 4),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1)
)
l_conv2 = dnn.Conv2DDNNLayer(
l_conv1,
num_filters=64,
filter_size=(4, 4),
stride=(2, 2),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1)
)
l_conv3 = dnn.Conv2DDNNLayer(
l_conv2,
num_filters=64,
filter_size=(3, 3),
stride=(1, 1),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1)
)
l_hidden1 = lasagne.layers.DenseLayer(
l_conv3,
num_units=512,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1)
)
l_out = lasagne.layers.DenseLayer(
l_hidden1,
num_units=output_dim,
nonlinearity=None,
W=lasagne.init.HeUniform(),
b=lasagne.init.Constant(.1)
)
return l_out
def build_linear_network(self, input_width, input_height, output_dim,
num_frames, batch_size):
"""
Build a simple linear learner. Useful for creating
tests that sanity-check the weight update code.
"""
l_in = lasagne.layers.InputLayer(
shape=(None, num_frames, input_width, input_height)
)
l_out = lasagne.layers.DenseLayer(
l_in,
num_units=output_dim,
nonlinearity=None,
W=lasagne.init.Constant(0.0),
b=None
)
return l_out
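# A minimal construction sketch (hyperparameter values are illustrative
# placeholders, not tuned settings):
#
#     net = DeepQLearner(input_width=84, input_height=84, num_actions=4,
#                        num_frames=4, discount=0.99, learning_rate=0.00025,
#                        rho=0.95, rms_epsilon=0.01, momentum=0, clip_delta=1.0,
#                        freeze_interval=10000, batch_size=32,
#                        network_type="nature_dnn", update_rule="deepmind_rmsprop",
#                        batch_accumulator="sum", rng=np.random.RandomState(123))
#     action = net.choose_action(state, epsilon=0.05)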
{"hexsha": "1c676f1621c44203de1361684ec0d14e658af2fc", "size": 15005, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/q_network.py", "max_stars_repo_name": "ShibiHe/Learning-to-play-in-a-day-Optimality-Tightening", "max_stars_repo_head_hexsha": "1cce8cf94e3fe026a8d18f7e2f2ed8e709392f08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2017-04-25T06:37:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-23T08:59:30.000Z", "max_issues_repo_path": "code/q_network.py", "max_issues_repo_name": "ShibiHe/Learning-to-play-in-a-day-Optimality-Tightening", "max_issues_repo_head_hexsha": "1cce8cf94e3fe026a8d18f7e2f2ed8e709392f08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-06-16T07:56:53.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-15T08:30:44.000Z", "max_forks_repo_path": "code/q_network.py", "max_forks_repo_name": "ShibiHe/Learning-to-play-in-a-day-Optimality-Tightening", "max_forks_repo_head_hexsha": "1cce8cf94e3fe026a8d18f7e2f2ed8e709392f08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-04-26T15:46:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-03T01:46:18.000Z", "avg_line_length": 39.0755208333, "max_line_length": 112, "alphanum_fraction": 0.5666777741, "include": true, "reason": "import numpy,import theano", "num_tokens": 3148}
# Cutoff strategies for long-range interactions
export
NoCutoff,
DistanceCutoff,
ShiftedPotentialCutoff,
ShiftedForceCutoff,
CubicSplineCutoff
"""
NoCutoff()
Placeholder cutoff that does not alter forces or potentials.
"""
struct NoCutoff end
cutoff_points(::Type{NoCutoff}) = 0
force_divr_cutoff(::NoCutoff, r2, inter, params) = force_divr_nocutoff(inter, r2, inv(r2), params)
potential_cutoff(::NoCutoff, r2, inter, params) = potential(inter, r2, inv(r2), params)
"""
DistanceCutoff(dist_cutoff)
Cutoff that sets the potential and force to be zero past a specified cutoff point.
"""
struct DistanceCutoff{D, S, I}
dist_cutoff::D
sqdist_cutoff::S
inv_sqdist_cutoff::I
end
function DistanceCutoff(dist_cutoff)
return DistanceCutoff(dist_cutoff, dist_cutoff ^ 2, inv(dist_cutoff ^ 2))
end
cutoff_points(::Type{DistanceCutoff{D, S, I}}) where {D, S, I} = 1
force_divr_cutoff(::DistanceCutoff, r2, inter, params) = force_divr_nocutoff(inter, r2, inv(r2), params)
potential_cutoff(::DistanceCutoff, r2, inter, params) = potential(inter, r2, inv(r2), params)
"""
ShiftedPotentialCutoff(dist_cutoff)
Cutoff that shifts the potential to be continuous at a specified cutoff point.
"""
struct ShiftedPotentialCutoff{D, S, I}
dist_cutoff::D
sqdist_cutoff::S
inv_sqdist_cutoff::I
end
function ShiftedPotentialCutoff(dist_cutoff)
return ShiftedPotentialCutoff(dist_cutoff, dist_cutoff ^ 2, inv(dist_cutoff ^ 2))
end
cutoff_points(::Type{ShiftedPotentialCutoff{D, S, I}}) where {D, S, I} = 1
force_divr_cutoff(::ShiftedPotentialCutoff, r2, inter, params) = force_divr_nocutoff(inter, r2, inv(r2), params)
function potential_cutoff(cutoff::ShiftedPotentialCutoff, r2, inter, params)
potential(inter, r2, inv(r2), params) - potential(inter, cutoff.sqdist_cutoff, cutoff.inv_sqdist_cutoff, params)
end
"""
ShiftedForceCutoff(dist_cutoff)
Cutoff that shifts the force to be continuous at a specified cutoff point.
"""
struct ShiftedForceCutoff{D, S, I}
dist_cutoff::D
sqdist_cutoff::S
inv_sqdist_cutoff::I
end
function ShiftedForceCutoff(dist_cutoff)
return ShiftedForceCutoff(dist_cutoff, dist_cutoff ^ 2, inv(dist_cutoff ^ 2))
end
cutoff_points(::Type{ShiftedForceCutoff{D, S, I}}) where {D, S, I} = 1
function force_divr_cutoff(cutoff::ShiftedForceCutoff, r2, inter, params)
return force_divr_nocutoff(inter, r2, inv(r2), params) - force_divr_nocutoff(
inter, cutoff.sqdist_cutoff, cutoff.inv_sqdist_cutoff, params)
end
@fastmath function potential_cutoff(cutoff::ShiftedForceCutoff, r2, inter, params)
invr2 = inv(r2)
r = √r2
rc = cutoff.dist_cutoff
fc = force_divr_nocutoff(inter, cutoff.sqdist_cutoff, cutoff.inv_sqdist_cutoff, params) * r
potential(inter, r2, invr2, params) + (r - rc) * fc -
potential(inter, cutoff.sqdist_cutoff, cutoff.inv_sqdist_cutoff, params)
end
"""
CubicSplineCutoff(dist_activation, dist_cutoff)
Cutoff that interpolates the true potential and zero between an activation point
and a cutoff point, using a cubic Hermite spline.
"""
struct CubicSplineCutoff{D, S, I}
dist_cutoff::D
sqdist_cutoff::S
inv_sqdist_cutoff::I
dist_activation::D
sqdist_activation::S
inv_sqdist_activation::I
end
function CubicSplineCutoff(dist_activation, dist_cutoff)
if dist_cutoff <= dist_activation
error("The cutoff radius must be strictly larger than the activation radius")
end
D, S, I = typeof.([dist_cutoff, dist_cutoff^2, inv(dist_cutoff^2)])
return CubicSplineCutoff{D, S, I}(dist_cutoff, dist_cutoff^2, inv(dist_cutoff^2), dist_activation,
dist_activation^2, inv(dist_activation^2))
end
cutoff_points(::Type{CubicSplineCutoff{D, S, I}}) where {D, S, I} = 2
@fastmath function force_divr_cutoff(cutoff::CubicSplineCutoff, r2, inter, params)
r = √r2
t = (r - cutoff.dist_activation) / (cutoff.dist_cutoff-cutoff.dist_activation)
Va = potential(inter, cutoff.sqdist_activation, cutoff.inv_sqdist_activation, params)
dVa = -force_divr_nocutoff(inter, cutoff.sqdist_activation, cutoff.inv_sqdist_activation, params) * cutoff.dist_activation
return -((6t^2 - 6t) * Va / (cutoff.dist_cutoff-cutoff.dist_activation) + (3t^2 - 4t + 1) * dVa)/r
end
@fastmath function potential_cutoff(cutoff::CubicSplineCutoff, r2, inter, params)
r = √r2
t = (r - cutoff.dist_activation) / (cutoff.dist_cutoff-cutoff.dist_activation)
Va = potential(inter, cutoff.sqdist_activation, cutoff.inv_sqdist_activation, params)
dVa = -force_divr_nocutoff(inter, cutoff.sqdist_activation, cutoff.inv_sqdist_activation, params) * cutoff.dist_activation
return (2t^3 - 3t^2 + 1) * Va + (t^3 - 2t^2 + t) * (cutoff.dist_cutoff-cutoff.dist_activation) * dVa
end
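# A minimal usage sketch (`inter` and `params` are hypothetical; any pairwise
# interaction that defines `potential` and `force_divr_nocutoff` would do):
#
#     cutoff = ShiftedPotentialCutoff(1.2)
#     r2 = 1.0 ^ 2
#     E = potential_cutoff(cutoff, r2, inter, params)
#     Fdivr = force_divr_cutoff(cutoff, r2, inter, params)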
{"hexsha": "39840f83036c53a30cae19f4c6a39aece8dbd097", "size": 4853, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/cutoffs.jl", "max_stars_repo_name": "chemicalfiend/Molly.jl", "max_stars_repo_head_hexsha": "561b42f30b18699902da8ee0036184929d0436cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2018-05-17T00:35:57.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-21T09:36:02.000Z", "max_issues_repo_path": "src/cutoffs.jl", "max_issues_repo_name": "chemicalfiend/Molly.jl", "max_issues_repo_head_hexsha": "561b42f30b18699902da8ee0036184929d0436cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2018-10-29T19:18:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-16T16:43:25.000Z", "max_forks_repo_path": "src/cutoffs.jl", "max_forks_repo_name": "chemicalfiend/Molly.jl", "max_forks_repo_head_hexsha": "561b42f30b18699902da8ee0036184929d0436cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-10-29T19:03:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-03T20:34:58.000Z", "avg_line_length": 33.4689655172, "max_line_length": 126, "alphanum_fraction": 0.7306820523, "num_tokens": 1395}
using PhotoOrganizer
dry_run = false
rm_src = false
dst_root="/home/hertz/mnt/media/Pictures"
#src_dirs = String[]
#push!(src_dirs, "/run/user/1000/gvfs/mtp:host=%5Busb%3A002%2C009%5D/Samsung SD card/CameraZOOM")
#push!(src_dirs, "/run/user/1000/gvfs/mtp:host=%5Busb%3A002%2C009%5D/Samsung SD card/DCIM/Camera")
#push!(src_dirs, "/home/hertz/Documents/Glen/backups/Google/Takeout/Google Photos")
#push!(src_dirs, "/home/hertz/Documents/Erin/backups/Google/Takeout/Google Photos")
#src_dirs = readlines(`/home/hertz/bin/ls_phone_backup_dirs.jl`)
#src_dirs = ["/home/hertz/Documents.local/Pictures"]
#src_root = "/home/hertz/Documents/backups/S7"
#src_root = "/run/user/1000/gvfs/mtp:host=%5Busb%3A003%2C004%5D"
#src_dirs = [
# "$src_root/Card/DCIM/Camera",
# "$src_root/Phone/DCIM/Camera",
# "$src_root/Phone/DCIM/PhotoScan",
# "$src_root/Phone/DCIM/Screenshots",
# "$src_root/Phone/DCIM/Video Editor",
# "$src_root/Phone/Movies/Instagram",
# "$src_root/Phone/Pictures",
# "$src_root/Phone/Snapchat",
# "$src_root/Phone/Snapseed",
# "$src_root/Phone/Studio",
# "$src_root/Phone/Telegram",
# "$src_root/Phone/WhatsApp/Media"]
#src_dirs = ["/media/hertz/NIKON D5500/DCIM"]
src_root = "/home/hertz/mnt/syncthing/Pixel4a"
src_dirs = [
"$src_root/Glen_DCIM"
"$src_root/Phone/DCIM/Camera",
"$src_root/Phone/DCIM/PhotoScan",
"$src_root/Phone/DCIM/Screenshots",
"$src_root/Phone/DCIM/Video Editor",
"$src_root/Phone/Movies/Instagram",
"$src_root/Phone/Pictures",
"$src_root/Phone/Snapchat",
"$src_root/Phone/Snapseed",
"$src_root/Phone/Studio",
"$src_root/Phone/Telegram",
"$src_root/Phone/WhatsApp/Media"]
if length(src_dirs) > 0
organize_photos(src_dirs, dst_root, rm_src, dry_run)
else
warn("No directories found to backup")
end
{"hexsha": "fcef2c68ba8bf081bc29292bd53c6f77ee5082d7", "size": 1953, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/pixel.jl", "max_stars_repo_name": "GlenHertz/PhotoOrganizer.jl", "max_stars_repo_head_hexsha": "cb081e3190a3b578cf479305948b76ca68ec3cf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-08-20T00:51:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T13:46:49.000Z", "max_issues_repo_path": "test/pixel.jl", "max_issues_repo_name": "GlenHertz/PhotoOrganizer.jl", "max_issues_repo_head_hexsha": "cb081e3190a3b578cf479305948b76ca68ec3cf6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/pixel.jl", "max_forks_repo_name": "GlenHertz/PhotoOrganizer.jl", "max_forks_repo_head_hexsha": "cb081e3190a3b578cf479305948b76ca68ec3cf6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-26T09:36:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-26T09:36:27.000Z", "avg_line_length": 38.2941176471, "max_line_length": 98, "alphanum_fraction": 0.6584741423, "num_tokens": 600}
(**************************************************************)
(* Copyright Dominique Larchey-Wendling [*] *)
(* *)
(* [*] Affiliation LORIA -- CNRS *)
(**************************************************************)
(* This file is distributed under the terms of the *)
(* CeCILL v2 FREE SOFTWARE LICENSE AGREEMENT *)
(**************************************************************)
Require Import List Arith Lia Max.
From Undecidability.Shared.Libs.DLW.Utils
Require Import utils_tac utils_list utils_nat finite.
From Undecidability.Shared.Libs.DLW.Vec
Require Import pos vec.
From Undecidability.FOL.TRAKHTENBROT
Require Import notations utils decidable
fol_ops fo_sig fo_terms fo_logic fo_sat.
Import fol_notations.
Set Implicit Arguments.
(* * Removal of function symbols from propositional signatures *)
Section Σ_Σ0.
Variable (Σ : fo_signature)
(HΣ : forall r, ar_rels Σ r = 0).
Definition Σ0 : fo_signature.
Proof using Σ.
exists Empty_set (rels Σ); exact (fun _ => 0).
Defined.
Fixpoint Σ_Σ0 (A : fol_form Σ) :=
match A with
| ⊥ => ⊥
| fol_atom r _ => @fol_atom Σ0 r vec_nil
| fol_bin b A B => fol_bin b (Σ_Σ0 A) (Σ_Σ0 B)
| fol_quant q A => fol_quant q (Σ_Σ0 A)
end.
Section soundness.
Variable (X : Type) (M : fo_model Σ X).
Let M' : fo_model Σ0 unit.
Proof.
split.
+ intros [].
+ intros r _; apply (fom_rels M r).
rewrite HΣ; exact vec_nil.
Defined.
Local Fact Σ_Σ0_sound A φ ψ : fol_sem M φ A <-> fol_sem M' ψ (Σ_Σ0 A).
Proof.
revert φ ψ; induction A as [ | r v | b A HA B HB | [] A HA ]; intros φ ψ.
+ simpl; tauto.
+ simpl; fol equiv.
revert v; rewrite (HΣ r); unfold eq_rect_r; simpl.
intros v; vec nil v; auto.
+ apply fol_bin_sem_ext; auto.
+ simpl; split.
* intros (? & H); exists tt; revert H; apply HA.
* intros (? & H); exists (φ 0); revert H; apply HA.
+ simpl; split.
* intros H x; generalize (H (φ 0)); apply HA.
* intros H x; generalize (H tt); apply HA.
Qed.
Hypothesis (Mdec : fo_model_dec M)
(phi : nat -> X)
(A : fol_form Σ)
(HA : fol_sem M phi A).
Local Lemma Σ_Σ0_soundness : fo_form_fin_dec_SAT (Σ_Σ0 A).
Proof using HΣ Mdec HA.
exists unit, M', finite_t_unit.
exists. { intros r v; simpl; apply Mdec. }
exists (fun _ => tt).
revert HA; apply Σ_Σ0_sound.
Qed.
End soundness.
Section completeness.
Variable (X : Type) (M' : fo_model Σ0 X).
Let M : fo_model Σ unit.
Proof.
split.
+ intros; exact tt.
+ intros r _; apply (fom_rels M' r vec_nil).
Defined.
Local Fact Σ_Σ0_complete A φ ψ : fol_sem M φ A <-> fol_sem M' ψ (Σ_Σ0 A).
Proof.
revert φ ψ; induction A as [ | r v | b A HA B HB | [] A HA ]; intros φ ψ.
+ simpl; tauto.
+ simpl; tauto.
+ apply fol_bin_sem_ext; auto.
+ simpl; split.
* intros (? & H); exists (ψ 0); revert H; apply HA.
* intros (? & H); exists (φ 0); revert H; apply HA.
+ simpl; split.
* intros H x; generalize (H (φ 0)); apply HA.
* intros H x; generalize (H (ψ 0)); apply HA.
Qed.
Hypothesis (M'dec : fo_model_dec M')
(psi : nat -> X)
(A : fol_form Σ)
(HA : fol_sem M' psi (Σ_Σ0 A)).
Local Lemma Σ_Σ0_completeness : fo_form_fin_dec_SAT A.
Proof using M'dec HA.
exists unit, M, finite_t_unit.
exists. { intros r v; simpl; apply M'dec. }
exists (fun _ => tt).
revert HA; apply Σ_Σ0_complete.
Qed.
End completeness.
Theorem Σ_Σ0_correct A : fo_form_fin_dec_SAT A <-> fo_form_fin_dec_SAT (Σ_Σ0 A).
Proof using HΣ.
split.
+ intros (X & M & _ & G2 & phi & G3).
apply Σ_Σ0_soundness with X M phi; auto.
+ intros (X & M & _ & G2 & phi & G3).
apply Σ_Σ0_completeness with X M phi; auto.
Qed.
End Σ_Σ0.
Section Σ0_Σ.
Variable (Σ : fo_signature)
(HΣ : forall r, ar_rels Σ r = 0).
Fixpoint Σ0_Σ (A : fol_form (Σ0 Σ)) :=
match A with
| ⊥ => ⊥
| fol_atom r _ => @fol_atom Σ r (cast vec_nil (eq_sym (HΣ r)))
| fol_bin b A B => fol_bin b (Σ0_Σ A) (Σ0_Σ B)
| fol_quant q A => fol_quant q (Σ0_Σ A)
end.
Section soundness.
Variable (X : Type) (M : fo_model (Σ0 Σ) X).
Let M' : fo_model Σ unit.
Proof.
split.
+ intros; exact tt.
+ intros r _; apply (fom_rels M r vec_nil).
Defined.
Local Fact Σ0_Σ_sound A φ ψ : fol_sem M' ψ (Σ0_Σ A) <-> fol_sem M φ A.
Proof.
revert φ ψ; induction A as [ | r v | b A HA B HB | [] A HA ]; intros φ ψ.
+ simpl; tauto.
+ simpl in *; vec nil v; simpl; tauto.
+ apply fol_bin_sem_ext; auto.
+ simpl; split.
* intros (? & H); exists (φ 0); revert H; apply HA.
* intros (? & H); exists (ψ 0); revert H; apply HA.
+ simpl; split.
* intros H x; generalize (H (ψ 0)); apply HA.
* intros H x; generalize (H (φ 0)); apply HA.
Qed.
Hypothesis (Mdec : fo_model_dec M)
(φ : nat -> X)
(A : fol_form (Σ0 Σ))
(HA : fol_sem M φ A).
Local Lemma Σ0_Σ_soundness : fo_form_fin_dec_SAT (Σ0_Σ A).
Proof using Mdec HA.
exists unit, M', finite_t_unit.
exists. { intros r v; simpl; apply Mdec. }
exists (fun _ => tt).
revert HA; apply Σ0_Σ_sound.
Qed.
End soundness.
Section completeness.
Variable (X : Type) (M : fo_model Σ X).
Let M' : fo_model (Σ0 Σ) unit.
Proof.
split.
+ intros [].
+ intros r _; apply (fom_rels M r).
rewrite HΣ; exact vec_nil.
Defined.
Local Fact Σ0_Σ_complete A φ ψ : fol_sem M ψ (Σ0_Σ A) <-> fol_sem M' φ A.
Proof.
revert φ ψ; induction A as [ | r v | b A HA B HB | [] A HA ]; intros φ ψ.
+ simpl; tauto.
+ simpl; fol equiv.
revert v; rewrite (HΣ r); unfold eq_rect_r; simpl; auto.
+ apply fol_bin_sem_ext; auto.
+ simpl; split.
* intros (? & H); exists tt; revert H; apply HA.
* intros (? & H); exists (ψ 0); revert H; apply HA.
+ simpl; split.
* intros H x; generalize (H (ψ 0)); apply HA.
* intros H x; generalize (H tt); apply HA.
Qed.
Hypothesis (Mdec : fo_model_dec M)
(phi : nat -> X)
(A : fol_form (Σ0 Σ))
(HA : fol_sem M phi (Σ0_Σ A)).
Local Lemma Σ0_Σ_completeness : fo_form_fin_dec_SAT A.
Proof using Mdec HA.
exists unit, M', finite_t_unit.
exists. { intros r v; simpl; apply Mdec. }
exists (fun _ => tt).
revert HA; apply Σ0_Σ_complete.
Qed.
End completeness.
Theorem Σ0_Σ_correct A : fo_form_fin_dec_SAT A <-> fo_form_fin_dec_SAT (Σ0_Σ A).
Proof.
split.
+ intros (X & M & _ & G2 & phi & G3).
apply Σ0_Σ_soundness with X M phi; auto.
+ intros (X & M & _ & G2 & phi & G3).
apply Σ0_Σ_completeness with X M phi; auto.
Qed.
End Σ0_Σ.
{"author": "uds-psl", "repo": "coq-library-undecidability", "sha": "4547d325e8ce7a6d841fbfe5df4429ee9cb6f214", "save_path": "github-repos/coq/uds-psl-coq-library-undecidability", "path": "github-repos/coq/uds-psl-coq-library-undecidability/coq-library-undecidability-4547d325e8ce7a6d841fbfe5df4429ee9cb6f214/theories/FOL/TRAKHTENBROT/Sig0.v"}
using FormulationLattice
using Base.Test
let
@Literals(A, B, C, D)
cl = A ∨ ((B ∨ C) ∧ D)
formtrack = FormulaState[]
cl2 = dnf(cl, formtrack)
@test cl2 == A ∨ (B ∧ D) ∨ (C ∧ D)
end
let
@Literals(A, B, C, D)
cl = (A ∨ B) ∧ (C ∨ D)
formtrack = FormulaState[]
cl2 = dnf(cl, formtrack)
@test cl2 == (A ∧ C) ∨ (A ∧ D) ∨ (B ∧ C) ∨ (B ∧ D)
end
let
@Literals(A, B, C, D)
cl = (A ∨ B) ∧ C ∧ D
formtrack = FormulaState[]
cl2 = dnf(cl, formtrack)
@test cl2 == (A ∧ C ∧ D) ∨ (B ∧ C ∧ D)
end
let
@Literals(A, B, C, D, E, F)
cl = (A ∨ B) ∧ (C ∨ D) ∧ (E ∨ F)
formtrack = FormulaState[]
cl2 = dnf(cl, formtrack)
@test cl2 == (A ∧ C ∧ E) ∨ (A ∧ C ∧ F) ∨ (A ∧ D ∧ E) ∨ (A ∧ D ∧ F) ∨
(B ∧ C ∧ E) ∨ (B ∧ C ∧ F) ∨ (B ∧ D ∧ E) ∨ (B ∧ D ∧ F)
end
let
@Literals(A12, A13)
@Literals(B12, B13)
@Literals(C12, C13)
cl = (A12 ∨ B12 ∨ C12) ∧ (A13 ∨ B13 ∨ C13)
formtrack = FormulaState[]
cl2 = dnf(cl, formtrack)
@test cl2 == (A12 ∧ A13) ∨ (A12 ∧ B13) ∨ (A12 ∧ C13) ∨
(B12 ∧ A13) ∨ (B12 ∧ B13) ∨ (B12 ∧ C13) ∨
(C12 ∧ A13) ∨ (C12 ∧ B13) ∨ (C12 ∧ C13)
end
let
@Literals(A12, A13, A23)
@Literals(B12, B13, B23)
@Literals(C12, C13, C23)
cl = (A12 ∨ B12 ∨ C12) ∧ (A13 ∨ B13 ∨ C13) ∧ (A23 ∨ B23 ∨ C23)
formtrack = FormulaState[]
cl2 = dnf(cl, formtrack)
@test cl2 == (A12 ∧ A13 ∧ A23) ∨ (A12 ∧ A13 ∧ B23) ∨ (A12 ∧ A13 ∧ C23) ∨ (A12 ∧ B13 ∧ A23) ∨ (A12 ∧ B13 ∧ B23) ∨ (A12 ∧ B13 ∧ C23) ∨ (A12 ∧ C13 ∧ A23) ∨ (A12 ∧ C13 ∧ B23) ∨ (A12 ∧ C13 ∧ C23) ∨
(B12 ∧ A13 ∧ A23) ∨ (B12 ∧ A13 ∧ B23) ∨ (B12 ∧ A13 ∧ C23) ∨ (B12 ∧ B13 ∧ A23) ∨ (B12 ∧ B13 ∧ B23) ∨ (B12 ∧ B13 ∧ C23) ∨ (B12 ∧ C13 ∧ A23) ∨ (B12 ∧ C13 ∧ B23) ∨ (B12 ∧ C13 ∧ C23) ∨
(C12 ∧ A13 ∧ A23) ∨ (C12 ∧ A13 ∧ B23) ∨ (C12 ∧ A13 ∧ C23) ∨ (C12 ∧ B13 ∧ A23) ∨ (C12 ∧ B13 ∧ B23) ∨ (C12 ∧ B13 ∧ C23) ∨ (C12 ∧ C13 ∧ A23) ∨ (C12 ∧ C13 ∧ B23) ∨ (C12 ∧ C13 ∧ C23)
end
let
@Literals(A12, A13, A14, A23, A24, A34)
@Literals(B12, B13, B14, B23, B24, B34)
@Literals(C12, C13, C14, C23, C24, C34)
@Literals(D12, D13, D14, D23, D24, D34)
cl = (A12 ∨ B12 ∨ C12 ∨ D12) ∧ (A13 ∨ B13 ∨ C13 ∨ D13) ∧
(A14 ∨ B14 ∨ C14 ∨ D14) ∧ (A23 ∨ B23 ∨ C23 ∨ D23) ∧
(A24 ∨ B24 ∨ C24 ∨ D24) ∧ (A34 ∨ B34 ∨ C34 ∨ D34)
formtrack = FormulaState[]
cl2 = dnf(cl, formtrack)
@test length(cl2.clauses) == 4096
@test all(cl2.clauses) do x
length(x.clauses) == 6 &&
isa(x,And) &&
all(y -> isa(y,Literal), x.clauses)
end
end
{"hexsha": "d156f1b784fd49371c75aa35f55a28fd69890dd5", "size": 2722, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "joehuchette/FormulationLattice.jl", "max_stars_repo_head_hexsha": "c708a36ab9d8263addd223ec7378bd99a0d0c0e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "joehuchette/FormulationLattice.jl", "max_issues_repo_head_hexsha": "c708a36ab9d8263addd223ec7378bd99a0d0c0e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "joehuchette/FormulationLattice.jl", "max_forks_repo_head_hexsha": "c708a36ab9d8263addd223ec7378bd99a0d0c0e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1951219512, "max_line_length": 196, "alphanum_fraction": 0.4687729611, "num_tokens": 1313}
from __future__ import absolute_import, division, print_function
from dynd import nd
import numpy as np
from pandas import DataFrame
import bcolz
from blaze.expr import TableSymbol, by, TableExpr
from blaze.api.into import into
from blaze.api.table import Table
from blaze.compute import compute
import blaze.compute.numpy
import blaze.compute.pandas
import blaze.compute.bcolz
from blaze.expr.functions import *
from blaze.sql import SQL
t = TableSymbol('t', '{amount: int64, id: int64, name: string}')
L = [[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[400, 4, 'Dan'],
[500, 5, 'Edith']]
df = DataFrame(L, columns=['amount', 'id', 'name'])
x = into(np.ndarray, df)
bc = into(bcolz.ctable, df)
sql = SQL('sqlite:///:memory:', 'accounts', schema=t.schema)
sql.extend(L)
sources = [df, x, bc, sql]
try:
import pymongo
except ImportError:
pymongo = mongo = None
if pymongo:
from blaze.mongo import *
try:
db = pymongo.MongoClient().db
db._test_comprehensive.drop()
mongo = into(db._test_comprehensive, df)
sources.append(mongo)
except pymongo.errors.ConnectionFailure:
mongo = None
# {expr: [list-of-exclusions]}
expressions = {
t: [],
t['id']: [],
t.id.max(): [],
t.amount.sum(): [],
t.amount + 1: [mongo],
sin(t.amount): [sql, mongo], # sqlite doesn't support trig
exp(t.amount): [sql, mongo],
t.amount > 50: [mongo],
t[t.amount > 50]: [],
t.sort('name'): [bc],
t.sort('name', ascending=False): [bc],
t.head(3): [],
t.name.distinct(): [],
t[t.amount > 50]['name']: [], # odd ordering issue
t.id.map(lambda x: x + 1, '{id: int}'): [sql, mongo],
t[t.amount > 50]['name']: [],
by(t, t.name, t.amount.sum()): [],
by(t, t.id, t.id.count()): [],
by(t, t[['id', 'amount']], t.id.count()): [],
by(t, t[['id', 'amount']], (t.amount + 1).sum()): [mongo],
by(t, t[['id', 'amount']], t.name.nunique()): [mongo],
by(t, t.id, t.amount.count()): [],
by(t, t.id, t.id.nunique()): [mongo],
# by(t, t, t.count()): [],
# by(t, t.id, t.count()): [df],
t[['amount', 'id']]: [x], # https://github.com/numpy/numpy/issues/3256
t[['id', 'amount']]: [x, bc], # bcolz sorting
}
base = df
def df_eq(a, b):
return (list(a.columns) == list(b.columns)
and list(a.dtypes) == list(b.dtypes)
and into(set, into(list, a)) == into(set, into(list, b)))
def test_base():
for expr, exclusions in expressions.items():
model = compute(expr.subs({t: Table(base, t.schema)}))
for source in sources:
if id(source) in map(id, exclusions):
continue
T = Table(source)
result = into(model, expr.subs({t: T}))
if isinstance(expr, TableExpr):
if expr.iscolumn:
assert set(into([], result)) == set(into([], model))
else:
assert df_eq(result, model)
else:
assert result == model
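# A sketch of checking a single expression by hand, mirroring the loop above
# (df is the pandas reference backend, x the numpy backend):
#
#     expr = t.amount.sum()
#     model = compute(expr.subs({t: Table(df, t.schema)}))
#     result = into(model, expr.subs({t: Table(x)}))
#     assert result == model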
{"hexsha": "6f60eda5d381566e66b278665d2ebe6466341599", "size": 3196, "ext": "py", "lang": "Python", "max_stars_repo_path": "blaze/compute/tests/test_comprehensive.py", "max_stars_repo_name": "chdoig/blaze", "max_stars_repo_head_hexsha": "caa5a497e1ca1ceb1cf585483312ff4cd74d0bda", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-01-18T23:59:57.000Z", "max_stars_repo_stars_event_max_datetime": "2015-01-18T23:59:57.000Z", "max_issues_repo_path": "blaze/compute/tests/test_comprehensive.py", "max_issues_repo_name": "chdoig/blaze", "max_issues_repo_head_hexsha": "caa5a497e1ca1ceb1cf585483312ff4cd74d0bda", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "blaze/compute/tests/test_comprehensive.py", "max_forks_repo_name": "chdoig/blaze", "max_forks_repo_head_hexsha": "caa5a497e1ca1ceb1cf585483312ff4cd74d0bda", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3211009174, "max_line_length": 78, "alphanum_fraction": 0.5438047559, "include": true, "reason": "import numpy", "num_tokens": 854}
import cPickle as pickle
import sgd as optimizer
from rnn import RNN
from rntn import RNTN
from rnn2deep_dropout import RNN2Drop
from rnn2deep import RNN2
from rnn2deep_dropout_maxout import RNN2DropMaxout
import tree as tr
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plot_cost_acc(a, b, figname, epochs):
annotate_size = 0.15
if 'Cost' in figname:
annotate_size *= 30
plt.figure(figsize=(6,4))
plt.title(figname)
plt.xlabel("SGD Iterations");plt.ylabel(r"Accuracy or Cost")
plt.ylim(ymin=min(min(a),min(b))*0.8,ymax=max(max(a),max(b))*1.2)
plt.plot(np.arange(epochs),a,'bo-')
plt.annotate("train_curve", xy=(1,a[1]),
xytext=(1,a[1]+annotate_size),
arrowprops=dict(facecolor='green'),
horizontalalignment='left',verticalalignment='top')
plt.plot(np.arange(epochs),b,'ro-')
plt.annotate("dev_curve",xy=(50,b[50]),
xytext=(50,b[50]+annotate_size),
arrowprops=dict(facecolor='red'),
horizontalalignment='left',verticalalignment='top')
plt.savefig("%s_per_epochs.png"%figname)
plt.close()
def test(nn, trees):
cost, correct, guess, total = nn.costAndGrad(trees,test=True)
sum1 = 0
for i in range(len(correct)):
sum1 += (guess[i] == correct[i])
return cost, sum1*1.0/(float(total))
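# For reference, the accuracy loop above is equivalent to the vectorized form
#     np.mean(np.array(guess) == np.array(correct))
# assuming total == len(correct).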
def run():
print "Loading data..."
model = "RNN"
trees = tr.loadTrees('train')
dev_trees = tr.loadTrees('dev')
wvecDimList = [5, 15, 25, 35, 45]
#wvecDimList = [10,20,40]
accuracy_per_wvecDim = []
epochs = 100
outFileText = "./param/%s/%s_cost_and_acc" % (model,model)
f = open(outFileText,'w')
for wvecDim in wvecDimList:
nn = RNN(wvecDim,5,len(tr.loadWordMap()),30)
nn.initParams()
sgd = optimizer.SGD(nn, alpha=0.01, minibatch=30, optimizer="adagrad")
outFile = "./param/%s/%s_wvecDim_%d_epochs_%d_step_001.bin" % (model,model,wvecDim,epochs)
train_cost = []
train_acc =[]
dev_cost = []
dev_acc = []
cost = 0
accuracy = 0
for e in range(epochs):
start = time.time()
sgd.run(trees)
end = time.time()
print "Time per epoch : %f" % (end-start)
with open(outFile,'w') as fid:
hyperparam = {}
hyperparam['alpha'] = 0.01
hyperparam['minibatch'] = 30
hyperparam['wvecDim'] = wvecDim
pickle.dump(hyperparam,fid)
nn.toFile(fid)
cost, accuracy = test(nn, trees)
train_cost.append(cost)
train_acc.append(accuracy)
cost, accuracy = test(nn, dev_trees)
dev_cost.append(cost)
dev_acc.append(accuracy)
for tree in trees:
tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
for tree in dev_trees:
tr.leftTraverse(tree.root,nodeFn=tr.clearFprop)
print "fprop in trees cleared"
plot_cost_acc(train_cost, dev_cost, "./figures/%s/%s_Cost_Figure_%d"%(model,model,wvecDim), epochs)
plot_cost_acc(train_acc, dev_acc, "./figures/%s/%s_Accuracy_Figure_%d"%(model,model,wvecDim),epochs)
answer = "Cost = %f, Acc = %f" % (cost, accuracy)
f.write(answer)
accuracy_per_wvecDim.append(accuracy)
f.close()
plt.figure(figsize=(6,4))
plt.title(r"Accuracies and vector Dimension")
plt.xlabel("vector Dimension");plt.ylabel(r"Accuracy")
plt.ylim(ymin=min(accuracy_per_wvecDim)*0.8,ymax=max(accuracy_per_wvecDim)*1.2)
plt.plot(wvecDimList,accuracy_per_wvecDim,color='b', marker='o', linestyle='-')
plt.savefig("./figures/%s/%s_Accuracy_and_vectorDimsension.png" % (model,model))
plt.close()
if __name__ == '__main__':
run()
{"hexsha": "dc2afa7cd92a75c289f6b77249f1ed5fe035576c", "size": 4002, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignment3/runNNet_dev_wvecDim.py", "max_stars_repo_name": "PDFangeltop1/cs224d", "max_stars_repo_head_hexsha": "d8450895994215280482bf6f48892f3928446c50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2016-04-18T07:34:11.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-13T11:07:02.000Z", "max_issues_repo_path": "assignment3/runNNet_dev_wvecDim.py", "max_issues_repo_name": "SitaoLuan/cs224d", "max_issues_repo_head_hexsha": "d8450895994215280482bf6f48892f3928446c50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-06-12T03:43:58.000Z", "max_issues_repo_issues_event_max_datetime": "2017-06-12T03:43:58.000Z", "max_forks_repo_path": "assignment3/runNNet_dev_wvecDim.py", "max_forks_repo_name": "SitaoLuan/cs224d", "max_forks_repo_head_hexsha": "d8450895994215280482bf6f48892f3928446c50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2016-05-13T13:53:49.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-17T04:30:16.000Z", "avg_line_length": 34.2051282051, "max_line_length": 108, "alphanum_fraction": 0.5992003998, "include": true, "reason": "import numpy", "num_tokens": 1073}
[STATEMENT]
lemma from_bool_to_bool_iff:
"w = from_bool b \<longleftrightarrow> to_bool w = b \<and> (w = 0 \<or> w = 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (w = from_bool b) = (to_bool w = b \<and> (w = 0 \<or> w = 1))
[PROOF STEP]
by (cases b) (auto simp: from_bool_def to_bool_def)
{"llama_tokens": 137, "file": "Word_Lib_More_Word_Operations", "length": 1}
import numpy as np
from collections import Counter
import warnings
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
dataset = {'k': [[1, 2], [3, 3], [2, 5]], 'r': [[5, 6], [5, 8], [7, 7]]}
test_data = [6, 5]
def k_nearest_neighbors(data, test, k=3):
if len(data) >= k:
warnings.warn('k is set to a value less than the total number of voting groups!')
distance = []
for group in data:
for features in data[group]:
euclidean_distance = np.linalg.norm(np.array(features) - np.array(test))
distance.append([euclidean_distance, group])
vote = [i[1] for i in sorted(distance)[:k]]
vote_result = Counter(vote).most_common(1)[0][0]
print(Counter(vote).most_common(1))
return vote_result
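# On the toy data above, the three nearest neighbours of [6, 5] are
# [5, 6] (d ~ 1.41), [7, 7] (d ~ 2.24) and [5, 8] (d ~ 3.16), all in
# group 'r', so the call below prints 'r'.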
result = k_nearest_neighbors(dataset, test_data, k=3)
print(result)
# The one-line comprehension below is the compact form of this loop:
# for i in dataset:
#     for ii in dataset[i]:
#         plt.scatter(ii[0], ii[1], s=100, color=i)
[[plt.scatter(ii[0], ii[1], s=100, color=i) for ii in dataset[i]] for i in dataset]
plt.scatter(test_data[0], test_data[1], s=100, color='g')
plt.show()
{"hexsha": "8a0241a21c9af507c0fdd624e66ae23b872df89e", "size": 1196, "ext": "py", "lang": "Python", "max_stars_repo_path": "self_created_k_nearest_model.py", "max_stars_repo_name": "sahilsngh/hackthon.py", "max_stars_repo_head_hexsha": "12c75d6fe30527e7f15aab3ca4bb50d5abe9ff1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-11-02T07:58:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-13T19:40:15.000Z", "max_issues_repo_path": "self_created_k_nearest_model.py", "max_issues_repo_name": "sahilsngh/hackthon.py", "max_issues_repo_head_hexsha": "12c75d6fe30527e7f15aab3ca4bb50d5abe9ff1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "self_created_k_nearest_model.py", "max_forks_repo_name": "sahilsngh/hackthon.py", "max_forks_repo_head_hexsha": "12c75d6fe30527e7f15aab3ca4bb50d5abe9ff1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-02T07:58:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-12T08:26:03.000Z", "avg_line_length": 32.3243243243, "max_line_length": 85, "alphanum_fraction": 0.6295986622, "include": true, "reason": "import numpy", "num_tokens": 337}
# This file roughly corresponds to functions documented in the
# Assignments API: https://canvas.instructure.com/doc/api/assignments
"""
Canvas.delete_assignment(c::Course, a::Assignment; kwargs...) -> Assignment
Delete the given assignment and return the former details. Return an [`Assignment`](@ref).
**Canvas API documentation**:
[Delete an assignment (`DELETE /api/v1/courses/:course_id/assignments/:id`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignments.destroy)
"""
function delete_assignment(c::Course, a::Assignment; kwargs...)
json = request("DELETE", "/api/v1$(Internals.pid(c))$(Internals.pid(a))"; kwargs...)
return Assignment(json)
end
"""
Canvas.assignments(c::Course; kwargs...) -> Vector{Assignment}, page_data
Return the paginated list of assignments for the course.
Return a vector of [`Assignment`](@ref)s and a dictionary with page data.
**Canvas API documentation**:
[List assignments (`GET /api/v1/courses/:course_id/assignments`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignments_api.index)
"""
function assignments(c::Course; kwargs...)
json, page_data = paged_request("GET", "/api/v1$(Internals.pid(c))/assignments"; kwargs...)
return Assignment.(json), page_data
end
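# A minimal usage sketch (the course id, and constructing a Course directly
# from an id, are assumptions for illustration):
#
#     c = Course(1234)
#     as_, page_data = Canvas.assignments(c)
#     @info "fetched $(length(as_)) assignments on this page"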
"""
Canvas.assignments(c::Course, a::AssignmentGroup; kwargs...) -> Vector{Assignment}, page_data
Return the paginated list of assignments for the course and assignment group.
Return a vector of [`Assignment`](@ref)s and a dictionary with page data.
**Canvas API documentation**:
[List assignments (`GET /api/v1/courses/:course_id/assignment_groups/:assignment_group_id/assignments`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignments_api.index)
"""
function assignments(c::Course, a::AssignmentGroup; kwargs...)
json, page_data = paged_request("GET", "/api/v1$(Internals.pid(c))$(Internals.pid(a))/assignments"; kwargs...)
return Assignment.(json), page_data
end
"""
Canvas.assignments(u::User, c::Course; kwargs...) -> Vector{Assignment}, page_data
Return the paginated list of assignments for the specified user and course.
Return a vector of [`Assignment`](@ref)s and a dictionary with page data.
**Canvas API documentation**:
[List assignments for user(`GET /api/v1/users/:user_id/courses/:course_id/assignments`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignments_api.user_index)
"""
function assignments(u::User, c::Course; kwargs...)
json, page_data = paged_request("GET", "/api/v1$(Internals.pid(u))$(Internals.pid(c))/assignments"; kwargs...)
return Assignment.(json), page_data
end
"""
Canvas.assignment(c::Course, a::Assignment; kwargs...) -> Assignment
Return the assignment given by id. Return an [`Assignment`](@ref).
**Canvas API documentation**:
[Get a single assignment (`GET /api/v1/courses/:course_id/assignments/:id`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignments_api.show)
"""
function assignment(c::Course, a::Assignment; kwargs...)
json = request("GET", "/api/v1$(Internals.pid(c))$(Internals.pid(a))"; kwargs...)
return Assignment(json)
end
"""
Canvas.create_assignment(c::Course; kwargs...) -> Assignment
Create a new assignment for the course and return the details. Return an [`Assignment`](@ref).
**Canvas API documentation**:
[Create an assignment (`POST /api/v1/courses/:course_id/assignments`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignments_api.create)
"""
function create_assignment(c::Course; kwargs...)
json = request("POST", "/api/v1$(Internals.pid(c))/assignments"; kwargs...)
return Assignment(json)
end
"""
Canvas.update_assignment(c::Course, a::Assignment; kwargs...) -> Assignment
Modify an existing assignment and return the new details. Return an [`Assignment`](@ref).
**Canvas API documentation**:
[Edit an assignment (`PUT /api/v1/courses/:course_id/assignments/:id`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignments_api.update)
"""
function update_assignment(c::Course, a::Assignment; kwargs...)
json = request("PUT", "/api/v1$(Internals.pid(c))$(Internals.pid(a))"; kwargs...)
return Assignment(json)
end
"""
Canvas.assignment_overrides(c::Course, a::Assignment; kwargs...) -> Vector{AssignmentOverride}, page_data
Return the paginated list of overrides for this course and assignment.
Return a vector of [`AssignmentOverride`](@ref)s and a dictionary with page data.
**Canvas API documentation**:
[List assignment overrides (`GET /api/v1/courses/:course_id/assignments/:assignment_id/overrides`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignment_overrides.index)
"""
function assignment_overrides(c::Course, a::Assignment; kwargs...)
json, page_data = paged_request("GET", "/api/v1$(Internals.pid(c))$(Internals.pid(a))/overrides"; kwargs...)
return AssignmentOverride.(json), page_data
end
"""
Canvas.assignment_override(c::Course, a::Assignment, o::AssignmentOverride; kwargs...) -> AssignmentOverride
Return the details of override specified by id. Return an [`AssignmentOverride`](@ref).
**Canvas API documentation**:
[Get a single assignment override (`GET /api/v1/courses/:course_id/assignments/:assignment_id/overrides/:id`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignment_overrides.show)
"""
function assignment_override(c::Course, a::Assignment, o::AssignmentOverride; kwargs...)
json = request("GET", "/api/v1$(Internals.pid(c))$(Internals.pid(a))$(Internals.pid(o))"; kwargs...)
return AssignmentOverride(json)
end
"""
Canvas.assignment_override(co::Union{Group,Section}, a::Assignment; kwargs...) -> AssignmentOverride
Return the override for the given group or section associated with the assignment (404 otherwise).
Return an [`AssignmentOverride`](@ref).
**Canvas API documentation**:
- [Redirect to the assignment override for a group (`GET /api/v1/groups/:group_id/assignments/:assignment_id/override`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignment_overrides.group_alias)
- [Redirect to the assignment override for a section (`GET /api/v1/sections/:course_section_id/assignments/:assignment_id/override`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignment_overrides.section_alias)
"""
function assignment_override(co::Union{Group,Section}, a::Assignment; kwargs...)
json = request("GET", "/api/v1$(Internals.pid(co))$(Internals.pid(a))/override"; kwargs...)
return AssignmentOverride(json)
end
"""
Canvas.create_assignment_override(c::Course, a::Assignment; kwargs...) -> AssignmentOverride
Create an assignment override. One of `student_ids`, `group_id` or `course_section_id` must be present,
see API specification for details. Return the [`AssignmentOverride`](@ref).
**Canvas API documentation**:
[Create an assignment override (`POST /api/v1/courses/:course_id/assignments/:assignment_id/overrides`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignment_overrides.create)
"""
function create_assignment_override(c::Course, a::Assignment; kwargs...)
json = request("POST", "/api/v1$(Internals.pid(c))$(Internals.pid(a))/overrides"; kwargs...)
return AssignmentOverride(json)
end
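# Usage sketch (hedged): the section id below is hypothetical, and it is assumed
# that keyword arguments are forwarded as request parameters; see the Canvas API
# documentation for the exact payload shape:
#
#     Canvas.create_assignment_override(course, assignment;
#         params = Dict("assignment_override" => Dict("course_section_id" => 42)))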
"""
Canvas.update_assignment_override(c::Course, a::Assignment, o::AssignmentOverride; kwargs...) -> AssignmentOverride
Update the assignment override. All currently overridden values must be supplied if they should
be retained, see API specification for details. Return the [`AssignmentOverride`](@ref).
**Canvas API documentation**:
[Update an assignment override (`PUT /api/v1/courses/:course_id/assignments/:assignment_id/overrides/:id`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignment_overrides.update)
"""
function update_assignment_override(c::Course, a::Assignment, o::AssignmentOverride; kwargs...)
json = request("PUT", "/api/v1$(Internals.pid(c))$(Internals.pid(a))$(Internals.pid(o))"; kwargs...)
return AssignmentOverride(json)
end
"""
Canvas.delete_assignment_override(c::Course, a::Assignment, o::AssignmentOverride; kwargs...) -> AssignmentOverride
Delete an override and return the former details. Return an [`AssignmentOverride`](@ref).
**Canvas API documentation**:
[Delete an assignment override (`DELETE /api/v1/courses/:course_id/assignments/:assignment_id/overrides/:id`)]
(https://canvas.instructure.com/doc/api/assignments#method.assignment_overrides.update)
"""
function delete_assignment_override(c::Course, a::Assignment, o::AssignmentOverride; kwargs...)
json = request("DELETE", "/api/v1$(Internals.pid(c))$(Internals.pid(a))$(Internals.pid(o))"; kwargs...)
return AssignmentOverride(json)
end
# TODO:
# - Batch retrieve overrides in a course
# - Batch create overrides in a course
# - Batch update overrides in a course
|
{"hexsha": "8baaf6ffc67dd693f1776144f273acd95e60d256", "size": 8900, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/endpoints/assignments.jl", "max_stars_repo_name": "fredrikekre/Canvas.jl", "max_stars_repo_head_hexsha": "65fd255735e01fb52e7ead4f44d430d6f7f9f14e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-11-04T15:08:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-30T11:21:46.000Z", "max_issues_repo_path": "src/endpoints/assignments.jl", "max_issues_repo_name": "fredrikekre/Canvas.jl", "max_issues_repo_head_hexsha": "65fd255735e01fb52e7ead4f44d430d6f7f9f14e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/endpoints/assignments.jl", "max_forks_repo_name": "fredrikekre/Canvas.jl", "max_forks_repo_head_hexsha": "65fd255735e01fb52e7ead4f44d430d6f7f9f14e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5, "max_line_length": 134, "alphanum_fraction": 0.7426966292, "num_tokens": 2015}
|
import numpy as np
class TimeIntegrationScheme(object):
def __init__(self, dt, comp_model, initial_conditions):
# time step
self.dt = dt
# mass, damping and spring stiffness
self.M = comp_model[0]
self.B = comp_model[1]
self.K = comp_model[2]
# initial displacement, velocity and acceleration
self.u0 = initial_conditions[0]
self.v0 = initial_conditions[1]
self.a0 = initial_conditions[2]
# initial previous step displacement, velocity and acceleration
self.un1 = self.u0
self.vn1 = self.v0
self.an1 = self.a0
# initial current step displacement, velocity and acceleration
self.u1 = self.u0
self.v1 = self.v0
self.a1 = self.a0
# force from a previous time step (initial force)
self.f0 = None
self.f1 = None
def _print_time_integration_setup(self):
pass
def predict_displacement(self):
return 2.0 * self.u1 - self.u0
def predict_velocity(self, u1):
pass
def predict_acceleration(self, v1):
pass
def solve_single_step(self, f1):
pass
def update(self):
pass
def update_displacement(self, u_new):
self.u1 = u_new
self.v1 = self.predict_velocity(self.u1)
self.a1 = self.predict_acceleration(self.v1)
def update_comp_model(self, new_comp_model):
self.M = new_comp_model[0]
self.B = new_comp_model[1]
self.K = new_comp_model[2]
def print_values_at_current_step(self, n):
print("Printing values at step no: ", n, " (+1)")
print("u0: ", self.u1)
print("v0: ", self.v1)
print("a0: ", self.a1)
print("f0: ", self.f1)
print(" ")
def get_displacement(self):
return self.u1
def get_velocity(self):
return self.v1
def get_acceleration(self):
return self.a1
def get_old_displacement(self):
return self.un1
def get_old_velocity(self):
return self.vn1
def get_old_acceleration(self):
return self.an1
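# Illustrative sketch (not part of the original scheme set): a concrete scheme
# overrides the prediction hooks above. A first-order backward-difference
# variant, assuming scalar or numpy-array states, could look like this:
class BackwardDifferenceScheme(TimeIntegrationScheme):
    def predict_velocity(self, u1):
        # v ~ (u_new - u_previous) / dt
        return (u1 - self.un1) / self.dt
    def predict_acceleration(self, v1):
        # a ~ (v_new - v_previous) / dt
        return (v1 - self.vn1) / self.dt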
|
{"hexsha": "3ec74b7775b5e395c82cf52c1a90a22019776329", "size": 2129, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/solving_strategies/schemes/time_integration_scheme.py", "max_stars_repo_name": "JoZimmer/ParOptBeam", "max_stars_repo_head_hexsha": "50d15d8d822a2718f2932807e06c4a7e02f866a3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-09T14:08:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-09T14:08:20.000Z", "max_issues_repo_path": "source/solving_strategies/schemes/time_integration_scheme.py", "max_issues_repo_name": "JoZimmer/ParOptBeam", "max_issues_repo_head_hexsha": "50d15d8d822a2718f2932807e06c4a7e02f866a3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-28T15:05:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-10T15:12:56.000Z", "max_forks_repo_path": "source/solving_strategies/schemes/time_integration_scheme.py", "max_forks_repo_name": "JoZimmer/ParOptBeam", "max_forks_repo_head_hexsha": "50d15d8d822a2718f2932807e06c4a7e02f866a3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-01T08:49:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-10T02:07:36.000Z", "avg_line_length": 24.4712643678, "max_line_length": 71, "alphanum_fraction": 0.6035697511, "include": true, "reason": "import numpy", "num_tokens": 532}
|
//
// Copyright 2022 DMetaSoul
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <arrow/api.h>
#include <arrow/compute/api.h>
#include <arrow/compute/exec/exec_plan.h>
#include <arrow/compute/exec/options.h>
#include <arrow/util/async_generator.h>
#include <absl/status/status.h>
#include <boost/asio/experimental/concurrent_channel.hpp>
#include <fmt/format.h>
#include <range/v3/range/conversion.hpp>
#include <range/v3/view/transform.hpp>
#include <spdlog/spdlog.h>
#include <serving/arrow_helpers.h>
#include <serving/threadpool.h>
#include <serving/utils.h>
using namespace metaspore::serving;
using namespace std::string_literals;
using channel_type = boost::asio::experimental::concurrent_channel<void(boost::system::error_code,
arrow::compute::ExecBatch)>;
class FeatureComputeContext {
public:
struct InputSource {
arrow::PushGenerator<arrow::util::optional<arrow::compute::ExecBatch>> input_queue;
arrow::compute::ExecNode *node;
};
std::shared_ptr<arrow::compute::ExecPlan> plan_;
std::unordered_map<std::string, InputSource> name_source_map_;
arrow::compute::ExecNode *root_node_{nullptr};
arrow::compute::ExecNode *join_node_{nullptr};
channel_type channel_{Threadpools::get_compute_threadpool(), 10};
arrow::Future<> sink_future_{arrow::Future<>::Make()};
};
absl::Status add_source(std::unique_ptr<FeatureComputeContext> &context_, const std::string &name,
std::shared_ptr<arrow::Schema> schema) {
auto pair = context_->name_source_map_.emplace(name, FeatureComputeContext::InputSource());
if (!pair.second) {
return absl::AlreadyExistsError(fmt::format("Input source {} already exists", name));
}
auto node_result = arrow::compute::MakeExecNode(
"source", context_->plan_.get(), /* inputs = */ {},
arrow::compute::SourceNodeOptions{schema, pair.first->second.input_queue});
if (!node_result.ok()) {
return absl::InternalError(node_result.status().message());
}
pair.first->second.node = *node_result;
context_->root_node_ = *node_result;
return absl::OkStatus();
}
absl::Status feed_input(std::unique_ptr<FeatureComputeContext> &context_,
const std::string &source_name, std::shared_ptr<arrow::RecordBatch> batch) {
auto source = context_->name_source_map_.find(source_name);
if (source == context_->name_source_map_.end()) {
return absl::NotFoundError(
fmt::format("FeatureComputeExec feed_input with non-exist name {}", source_name));
}
arrow::compute::ExecBatch exec_batch(*batch);
source->second.input_queue.producer().Push(
arrow::util::make_optional<arrow::compute::ExecBatch>(std::move(exec_batch)));
return absl::OkStatus();
}
absl::Status add_join_plan(std::unique_ptr<FeatureComputeContext> &context_,
const std::string &left_source_name,
const std::string &right_source_name, arrow::compute::JoinType join_type,
const std::vector<std::string> &left_key_names,
const std::vector<std::string> &right_key_names) {
auto left_source = context_->name_source_map_.find(left_source_name);
if (left_source == context_->name_source_map_.end()) {
return absl::NotFoundError(
fmt::format("FeatureComputeExec feed_input with non-exist name {}", left_source_name));
}
auto right_source = context_->name_source_map_.find(right_source_name);
if (right_source == context_->name_source_map_.end()) {
return absl::NotFoundError(
fmt::format("FeatureComputeExec feed_input with non-exist name {}", right_source_name));
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
arrow::compute::HashJoinNodeOptions join_opts{
join_type, left_key_names | ranges::views::transform([](const std::string &name) {
return arrow::FieldRef(name);
}) | ranges::to<std::vector>(),
right_key_names | ranges::views::transform([](const std::string &name) {
return arrow::FieldRef(name);
}) | ranges::to<std::vector>()};
#pragma GCC diagnostic pop
auto hashjoin = arrow::compute::MakeExecNode(
"hashjoin", context_->plan_.get(), {left_source->second.node, right_source->second.node},
join_opts);
if (!hashjoin.ok()) {
return absl::InternalError(hashjoin.status().message());
}
context_->root_node_ = *hashjoin;
context_->join_node_ = *hashjoin;
return absl::OkStatus();
}
struct SinkConsumer : public arrow::compute::SinkNodeConsumer {
SinkConsumer(std::shared_ptr<arrow::Schema> schema, channel_type &ch, arrow::Future<> fut)
: output_schema_(schema), channel_(ch), fut_(std::move(fut)) {}
arrow::Status Consume(arrow::compute::ExecBatch batch) override {
fmt::print("consuming batch\n");
bool s = channel_.try_send(boost::system::error_code(), std::move(batch));
if (!s) {
fmt::print("send batch failed\n");
return arrow::Status::CapacityError("cannot consume batch and send to channel");
} else {
fmt::print("send batch succeeded\n");
return arrow::Status::OK();
}
}
arrow::Future<> Finish() override {
fmt::print("consumer finished\n");
return fut_;
}
std::shared_ptr<arrow::Schema> output_schema_;
channel_type &channel_;
arrow::Future<> fut_;
};
absl::Status finish_plan(std::unique_ptr<FeatureComputeContext> &context_) {
// create sink reader
auto sink_result = arrow::compute::MakeExecNode(
"consuming_sink", context_->plan_.get(), {context_->root_node_},
arrow::compute::ConsumingSinkNodeOptions{std::make_shared<SinkConsumer>(
context_->root_node_->output_schema(), context_->channel_, context_->sink_future_)});
if (!sink_result.ok()) {
return absl::InternalError(sink_result.status().message());
}
// validate the ExecPlan
auto validate = context_->plan_->Validate();
if (!validate.ok()) {
return absl::InternalError(validate.message());
}
spdlog::info("FeatureComputeExec created plan {}", context_->plan_->ToString());
// start the ExecPlan
auto start = context_->plan_->StartProducing();
if (!start.ok()) {
return absl::InternalError(start.message());
}
return absl::OkStatus();
}
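// get_output blocks the calling thread on a condition variable until the sink
// consumer pushes one batch into the channel, then converts the ExecBatch back
// to a RecordBatch using the root node's output schema.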
absl::StatusOr<std::shared_ptr<arrow::RecordBatch>>
get_output(std::unique_ptr<FeatureComputeContext> &context_) {
arrow::compute::ExecBatch exec_batch;
std::mutex m;
std::condition_variable cv;
bool ready = false;
context_->channel_.async_receive(
[&](boost::system::error_code ec, arrow::compute::ExecBatch b) {
fmt::print("exec batch received\n");
exec_batch = std::move(b);
{
std::lock_guard<std::mutex> lk(m);
ready = true;
}
cv.notify_one();
});
std::unique_lock<std::mutex> lk(m);
cv.wait(lk, [&] { return ready; });
auto rb_result = exec_batch.ToRecordBatch(context_->root_node_->output_schema());
if (!rb_result.ok()) {
return absl::InternalError(rb_result.status().message());
}
return *rb_result;
}
absl::Status stop(std::unique_ptr<FeatureComputeContext> &context_) {
for (auto &[name, queue] : context_->name_source_map_) {
queue.input_queue.producer().Close();
}
context_->sink_future_.MarkFinished();
const auto &s = context_->plan_->finished().status();
if (!s.ok()) {
return absl::InternalError(fmt::format("Finish plan failed {}", s.message()));
}
return absl::OkStatus();
}
static std::pair<std::shared_ptr<arrow::RecordBatch>, std::shared_ptr<arrow::Schema>>
make_user_record_batch() {
auto idarray = ArrowHelpers::GetArrayDataSample<arrow::UInt64Type>({123UL}).ValueOrDie();
auto user_feature = ArrowHelpers::GetArrayDataSample<arrow::UInt64Type>({456UL}).ValueOrDie();
auto schema = std::make_shared<arrow::Schema>(arrow::FieldVector{
arrow::field("user_id", arrow::uint64()), arrow::field("user_feature", arrow::uint64())});
auto batch =
ArrowHelpers::GetSampleRecordBatch({idarray, user_feature}, schema->fields()).ValueOrDie();
return std::make_pair(batch, schema);
}
static std::pair<std::shared_ptr<arrow::RecordBatch>, std::shared_ptr<arrow::Schema>>
make_item_record_batch() {
auto user_idarray =
ArrowHelpers::GetArrayDataSample<arrow::UInt64Type>({123UL, 123UL, 0UL}).ValueOrDie();
auto campaign_id =
ArrowHelpers::GetArrayDataSample<arrow::UInt64Type>({0UL, 1UL, 2UL}).ValueOrDie();
auto campaign_feature =
ArrowHelpers::GetArrayDataSample<arrow::UInt64Type>({789UL, 1024UL, 42UL}).ValueOrDie();
auto schema = std::make_shared<arrow::Schema>(arrow::FieldVector{
arrow::field("user_id", arrow::uint64()), arrow::field("campaign_id", arrow::uint64()),
arrow::field("campaign_feature", arrow::uint64())});
auto batch = ArrowHelpers::GetSampleRecordBatch({user_idarray, campaign_id, campaign_feature},
schema->fields())
.ValueOrDie();
return std::make_pair(batch, schema);
}
int main(int argc, char **argv) {
auto result = arrow::compute::ExecPlan::Make();
if (!result.ok()) {
fmt::print(stderr, "{}\n", result.status());
return 1;
}
std::unique_ptr<FeatureComputeContext> context_ = std::make_unique<FeatureComputeContext>();
context_->plan_ = *result;
std::string user_table("user_table");
std::string item_table("item_table");
auto [user_batch, user_schema] = make_user_record_batch();
auto status = add_source(context_, user_table, user_schema);
if (!status.ok()) {
fmt::print(stderr, "add source failed {}", status);
return 1;
}
auto [item_batch, item_schema] = make_item_record_batch();
status = add_source(context_, item_table, item_schema);
if (!status.ok()) {
fmt::print(stderr, "add source failed {}", status);
return 1;
}
status = add_join_plan(context_, item_table, user_table, arrow::compute::JoinType::LEFT_OUTER,
std::vector({"user_id"s}), std::vector({"user_id"s}));
if (!status.ok()) {
fmt::print(stderr, "add join plan failed {}", status);
return 1;
}
status = finish_plan(context_);
if (!status.ok()) {
fmt::print(stderr, "finish plan failed {}", status);
return 1;
}
{
// feed left table, item
status = feed_input(context_, item_table, item_batch);
if (!status.ok()) {
fmt::print(stderr, "feed left failed {}", status);
return 1;
}
// feed right table, user
status = feed_input(context_, user_table, user_batch);
if (!status.ok()) {
fmt::print(stderr, "feed right failed {}", status);
return 1;
}
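        // Tell the hash-join that each of its inputs has delivered its single
        // batch for this round, so the join can flush results to the sink.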
for (auto node : context_->join_node_->inputs()) {
context_->join_node_->InputFinished(node, 1);
}
// get output record batch
auto output_result = get_output(context_);
if (!output_result.ok()) {
fmt::print(stderr, "get output failed {}", output_result.status());
return 1;
}
auto record_batch = *output_result;
fmt::print("output batch 1: {}*{}\n", record_batch->num_rows(),
record_batch->num_columns());
}
{
// feed left table, item
status = feed_input(context_, item_table, item_batch);
if (!status.ok()) {
fmt::print(stderr, "feed left failed {}", status);
return 1;
}
// feed right table, user
status = feed_input(context_, user_table, user_batch);
if (!status.ok()) {
fmt::print(stderr, "feed right failed {}", status);
return 1;
}
for (auto node : context_->join_node_->inputs()) {
context_->join_node_->InputFinished(node, 1);
}
// get output record batch
auto output_result = get_output(context_);
if (!output_result.ok()) {
fmt::print(stderr, "get output failed {}", output_result.status());
return 1;
}
auto record_batch = *output_result;
fmt::print("output batch 2: {}*{}\n", record_batch->num_rows(),
record_batch->num_columns());
}
status = stop(context_);
if (!status.ok()) {
fmt::print(stderr, "stop plan failed {}", status);
return 1;
}
return 0;
}
|
{"hexsha": "ceba8741887a4b2308eefcb5fa8cacb4f47a004c", "size": 13398, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cpp/tests/serving/arrow_plan_test.cpp", "max_stars_repo_name": "meta-soul/MetaSpore", "max_stars_repo_head_hexsha": "e6fbc12c6a3139df76c87215b16f9dba65962ec7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2022-03-30T10:24:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:19:15.000Z", "max_issues_repo_path": "cpp/tests/serving/arrow_plan_test.cpp", "max_issues_repo_name": "meta-soul/MetaSpore", "max_issues_repo_head_hexsha": "e6fbc12c6a3139df76c87215b16f9dba65962ec7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cpp/tests/serving/arrow_plan_test.cpp", "max_forks_repo_name": "meta-soul/MetaSpore", "max_forks_repo_head_hexsha": "e6fbc12c6a3139df76c87215b16f9dba65962ec7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2022-03-30T10:28:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:37:39.000Z", "avg_line_length": 39.7566765579, "max_line_length": 100, "alphanum_fraction": 0.6340498582, "num_tokens": 3087}
|
r=0.26
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d70c7j/media/images/d70c7j-003/svc:tesseract/full/full/0.26/default.jpg Accept:application/hocr+xml
|
{"hexsha": "f7c06ef38c3c6f24f2a9e8b41adbc62f58359675", "size": 195, "ext": "r", "lang": "R", "max_stars_repo_path": "tesseract/rotate/d70c7j-003.r", "max_stars_repo_name": "ucd-library/wine-price-extraction", "max_stars_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-11-16T19:55:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T18:04:46.000Z", "max_issues_repo_path": "ark_87287/d70c7j/d70c7j-003/rotated.r", "max_issues_repo_name": "ucd-library/wine-price-extraction", "max_issues_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2018-09-18T17:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-02T19:19:33.000Z", "max_forks_repo_path": "tesseract/rotate/d70c7j-003.r", "max_forks_repo_name": "ucd-library/wine-price-extraction", "max_forks_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 65.0, "max_line_length": 187, "alphanum_fraction": 0.8256410256, "num_tokens": 62}
|
from itertools import chain
import json
import argparse
import os, sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable as V
from datasets import *
from decoder import *
from encoder_v2 import *
from train_encoder_v2 import *
def make_parser():
parser = argparse.ArgumentParser(description='Test Sequence Generation')
parser.add_argument('--lang', type=str, default='en',
help='Can be en or fr')
parser.add_argument('--model_path', type=str, default='../model/saved_models/en/model_en_pretrained_epoch_1.pt',
help='change model name to get different checkpoints')
parser.add_argument('--dataset_dir', type=str, default='../generate-data/data_final/train',
help='Dataset directory')
parser.add_argument('--embeds_path', type=str, default='../tokenizer/data_final/indexed_data_words.json',
help='Embeddings path')
    parser.add_argument('--vocab_path', type=str, default='../tokenizer/data_final/vocab_words.json',
                        help='Vocabulary path')
parser.add_argument('--use_pretrained', action='store_true')
parser.add_argument('--cuda', action='store_false',
help='[DONT] use CUDA')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
help='batch size')
return parser
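# Example invocation of this script (paths are illustrative):
#   python test_seq_generation.py --lang en --batch_size 64 \
#       --model_path ../model/saved_models/en/model_en_pretrained_epoch_1.pt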
def main():
args = make_parser().parse_args()
print("[Model hyperparams]: {}".format(str(args)))
cuda = torch.cuda.is_available() and args.cuda
print(f'Cuda available? {torch.cuda.is_available()}')
device = torch.device("cpu") if not cuda else torch.device("cuda:0")
seed_everything(seed=1337, cuda=cuda)
# get ix_to_word map
ix_to_word = create_ix_to_vocab_map(args.vocab_path)
dataset_path = os.path.join(args.dataset_dir, f'{args.lang}.csv')
# Load dataset iterators
iters = load_data(dataset_path, args.embeds_path, args.lang, args.batch_size, device)
print('Finished loading data')
# Some datasets just have the train & test sets, so we just pretend test is valid
if len(iters) == 4:
X_str, train_iter, val_iter, test_iter = iters
else:
X_str, train_iter, test_iter = iters
val_iter = test_iter
# get length of a sentence
target_length = len(train_iter[0][0][0]) # TODO - double check this is the right length
print(f'target length: {target_length}')
# get size of vocab
vocab = load_json(args.vocab_path)
output_dims = len(vocab)
print("[Corpus]: train: {}, test: {}".format(
len(train_iter[0]) * len(train_iter[0][0]), len(test_iter[0]) * len(test_iter[0][0])))
# load model
model = torch.load(args.model_path, map_location=device)
# emb = list(model.children())[:-1][0]
# print(emb(torch.tensor(5, dtype=torch.long, device=device)))
# Define and compute loss
criterion = nn.CrossEntropyLoss(ignore_index=0)
# loss = evaluate_encoder(model, test_iter, criterion, device, args, type='Test')
# print("Test loss: ", loss)
# make predictions with model
ix = np.random.randint(len(test_iter)) # get random batch to predict
y = test_iter[0][ix] # actual data
x = test_iter[2][ix].float()
pred = model(x)
# Print some sample evaluations
translated_batch = translate_batch(pred, ix_to_word)
translated_y = translate_batch(y, ix_to_word)
for ix, v in enumerate(x[:10]):
print(f'\nOriginal instructions: \n{v}')
print(f'Predicted:\n{translated_batch[ix]}')
print(f'Actual:\n{translated_y[ix]}')
if __name__ == '__main__':
main()
|
{"hexsha": "4af792bb636da74ecebf317c17d8409ee69b627a", "size": 3707, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/test_seq_generation.py", "max_stars_repo_name": "chengemily/pidgin-rl", "max_stars_repo_head_hexsha": "a90b4182b37b2e88cbfd1eb7e6f102b92afc968b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/test_seq_generation.py", "max_issues_repo_name": "chengemily/pidgin-rl", "max_issues_repo_head_hexsha": "a90b4182b37b2e88cbfd1eb7e6f102b92afc968b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/test_seq_generation.py", "max_forks_repo_name": "chengemily/pidgin-rl", "max_forks_repo_head_hexsha": "a90b4182b37b2e88cbfd1eb7e6f102b92afc968b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7, "max_line_length": 116, "alphanum_fraction": 0.6601025088, "include": true, "reason": "import numpy", "num_tokens": 880}
|
# -*- coding: utf-8 -*-
# Library for ship ABM
# Author: KPN
#------------------------------------------------------------------------------#
# Serve as repository for classes and modules
# Library will depend upon following modules:
'''
Please make sure your module libraries are up to date; this module depends upon:
numpy, pandas, scipy, shapely, networkx and GDAL (osgeo)
'''
import pandas as pd
import numpy as np
import scipy.constants
from shapely.geometry import Point
from shapely.geometry import Polygon
from shapely.geometry import LineString
from shapely.geometry import LinearRing
import networkx as nx
from scipy.integrate import odeint
from scipy import interpolate
import sqlite3
import os
from osgeo import ogr
import copy
G = scipy.constants.G
def dBase(outputWS,dbName):
'''function creates an event log database for later analysis'''
path = os.path.join(outputWS,dbName + '.db')
# Create and connect to results database
conn = sqlite3.connect(path)
c = conn.cursor()
c.execute('''DROP TABLE IF EXISTS route''')
c.execute('''DROP TABLE IF EXISTS agent''')
c.execute('''DROP TABLE IF EXISTS interaction''')
c.execute('''DROP TABLE IF EXISTS timeStep''')
c.execute('''DROP TABLE IF EXISTS surge''')
c.execute('''DROP TABLE IF EXISTS windFarms''')
c.execute('''CREATE TABLE route(agent_ID INTEGER, route TEXT)''')
c.execute('''CREATE TABLE agent(agent_ID INTEGER, m REAL, dest TEXT, start TEXT, type TEXT, L REAL, B REAL, T REAL, Tprime REAL, Kprime REAL, desVel REAL)''')
c.execute('''CREATE TABLE interaction(timeStamp INTEGER, own INTEGER, target INTEGER, ownPsi REAL, targetPsi REAL, qDir REAL, repLogic INTEGER, inertialDisp REAL, agentDist REAL, collisionDist Real, RPS_scen INTEGER, rep TEXT, voyage INTEGER, crash TEXT)''')
c.execute('''CREATE TABLE timeStep(agent_ID INTEGER, timeStamp INTEGER, att TEXT, rep TEXT, obs TEXT, totDir TEXT, delta_c REAL, RPS REAL, u REAL, prev TEXT, curr TEXT, voyage INTEGER)''')
c.execute('''CREATE TABLE surge(agent_ID INTEGER, u REAL, maxAgnScen INTEGER, maxObsScen INTEGER)''')
c.execute('''CREATE TABLE windFarms(farmID INTEGER, centroid TEXT, perimeter REAL, area REAL)''')
return c, conn
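# Usage sketch (hypothetical paths): create the event-log database once per run
# and hand the cursor/connection pair to every agent:
#   c, conn = dBase(r'C:/abm/output', 'run_001')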
def rotMatrix(attitude):
    ''' rotation matrix function makes use of the ship's attitude matrix describing
rotation that takes {n} into {b}
Perez, T., & Fossen, T. I. (2007). Kinematic models for manoeuvring and seakeeping of marine vessels. Modeling, Identification and Control, 28(1), 19-30.
    the input, an attitude vector, can be accessed with self.attitude
'''
return np.array([[np.cos(attitude[2])*np.cos(attitude[0]),-np.sin(attitude[2])*np.cos(attitude[1])+np.cos(attitude[2])*np.sin(attitude[0])*np.sin(attitude[1]),np.sin(attitude[2])*np.sin(attitude[1])+np.cos(attitude[2])*np.sin(attitude[1])+np.cos(attitude[2])*np.cos(attitude[1])*np.sin(attitude[0])],
[np.sin(attitude[2])*np.cos(attitude[0]),np.cos(attitude[2])*np.cos(attitude[1])+np.sin(attitude[1])*np.sin(attitude[0])*np.sin(attitude[2]),-np.cos(attitude[2])*np.sin(attitude[1])+np.sin(attitude[2])*np.cos(attitude[1])*np.sin(attitude[0])],
[-np.sin(attitude[0]),np.cos(attitude[0])*np.sin(attitude[1]),np.cos(attitude[0])*np.cos(attitude[1])]])
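# e.g. a pure yaw attitude (roll = pitch = 0) reduces to a plane rotation about z:
#   rotMatrix(np.array([0.0, 0.0, np.pi/2])).dot([1.0, 0.0, 0.0])  # ~ [0, 1, 0]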
def obstacles(obsWS):
    '''Function creates a dataframe of vertices for all obstacle shapefiles found within a
workspace.
In order for the ABM to properly function, the shapefiles must be preprocessed.
The obstacle polygon must have a convex boundary and contain at least 3 fields
as written: 'direction', 'buffer' and 'type'.
For navigational channels, the 'direction' field describes the direction of traffic.
If an agent is an 'incoming' vessel, the 'outgoing' marked channels will act as
an obstacle, while 'outgoing' vessels will not view the channel as an obstacle and
can travel through it. When direction is 'both', the agent will always notice
the obstacle and avoid it.
The buffer field is required for route. The MCA recommends vessels stay at least
800 m from ocean renewable energy infrastructure. Safety buffers are also
applied to 'land' features. Route uses the buffer field to plan around obstructions
while maintaining minimum safety distances.
    The type field indicates the type of obstruction, and can be 'land',
    'WEA' or 'channel'.
The output of the function is a pandas dataframe, which will serve as an input for an agent class,
therefore each agent knows where all obstacles are regardless of their direction.'''
# list files in input directory
files = os.listdir(obsWS)
obstacles = []
shapes = []
for f in files: # we need to find the shapefiles first...
if f.endswith('.shp'):
shapes.append(f)
for s in shapes: # loop through shapefiles in directory and add to list
fileName = os.path.join(obsWS,s)
shp = ogr.Open(fileName,0)
obstacles.append(shp)
columns = ['shape','direction','buff','type']
obstacleDF = pd.DataFrame(columns = columns)
for obs in obstacles:
lyr = obs.GetLayer(0)
for feat in lyr:
            pts = feat.GetGeometryRef() # get the polygon geometry
            ring = pts.GetGeometryRef(0) # polygons store their vertices on an exterior ring
            points = ring.GetPointCount() # number of vertices on the ring
            arr = []
            for i in range(points): # loop over the ring's vertices
                p = ring.GetPoint(i)
                lon = p[0]
                lat = p[1]
                arr.append([lon,lat]) # collect the coordinates
poly = Polygon(arr) # shapely polygon
direction = feat.GetField('Direction') # get fields
buff = feat.GetField('Buffer')
typ = feat.GetField('Type')
row = pd.DataFrame([[poly,direction,buff,typ]], columns = columns) # create row
obstacleDF = obstacleDF.append(row) # append to result
obstacleDF.fillna(0,inplace = True) # convert any nan to 0
return obstacleDF
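# Usage sketch (hypothetical workspace): build the obstacle dataframe once and
# share it across agents:
#   obstacleDF = obstacles(r'C:/abm/input/obstacles')
#   print(obstacleDF[['direction', 'buff', 'type']].head())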
def initialStates (n,obstacles,origins,destinations,ships,travel_network,frames,outputWS = None):
'''Function creates the initial state of each agent for a given model run.
By implementing this function, the modeler is assured that the initial states
of each agent are random.
    If the modeler requires sequential model runs with the same initial
    states, the dataframe produced by this function can be saved by supplying
    an output workspace directory through the optional 'outputWS' argument.
Required Arguments:
n = number of agents
obstacles = pandas dataframe of obstacles, output of obstacles function
origins = pandas dataframe of origins
destinations = pandas dataframe of destinations
    ships = python dictionary with ship types (key) and their relative
    proportions within the modeled system (value)
    travel_network = networkx graph linking each origin to its possible destinations
    frames = number of time steps in the model run
'''
# create profiles ID's, start at zero to make iterating easy
profiles = np.arange(0,n,1)
# create array of ship types, can either be cargo or tanker
shipTypes = np.random.choice(list(ships.keys()), size = n)#, p = ships.values())
# create arrays for K-T indices, L,B,T and DWT - based on whether or not ship was a cargo or tanker
Tprime = np.zeros(len(profiles),np.dtype(float))
Kprime = np.zeros(len(profiles),np.dtype(float))
L = np.zeros(len(profiles),np.dtype(float))
B = np.zeros(len(profiles),np.dtype(float))
T = np.zeros(len(profiles),np.dtype(float))
change = np.zeros(len(profiles),np.dtype(float))
DWT = np.zeros(len(profiles),np.dtype(float))
V_des = np.random.uniform(17,20,n)
for i in profiles:
change[i] = np.random.choice([-1,1])
if shipTypes[i] == 'Cargo':
Tprime[i] = np.random.uniform(1.2,1.5,size = 1) #1.5, 2.5
Kprime[i] = np.random.uniform(2.2,3.0,size = 1) #1.5, 2.0
L[i] = np.random.uniform(226,330)
            '''placeholder ranges; a comprehensive database of ship parameters is needed for proper calibration'''
if L[i] > 253:
B[i] = np.random.uniform(44,60)
T[i] = np.random.uniform(13,20)
DWT[i] = np.random.uniform(800000,2000000)
else:
B[i] = np.random.uniform(24,40)
T[i] = np.random.uniform(7,12)
DWT[i] = np.random.uniform(200000,800000)
else:
Tprime[i] = np.random.uniform(1.5,1.7,size = 1) # 3.0, 6.0
Kprime[i] = np.random.uniform(2.7,3.2,size = 1) # 1.7, 3.0
L[i] = np.random.uniform(226,330)
if L[i] > 253:
B[i] = np.random.uniform(44,60)
T[i] = np.random.uniform(13,20)
DWT[i] = np.random.uniform(1600000,2400000)
else:
B[i] = np.random.uniform(24,40)
T[i] = np.random.uniform(7,12)
DWT[i] = np.random.uniform(200000,1600000)
# give 'em all random starting velocities
v0 = np.random.uniform(2,4,n)
# alter start times - make sure agents don't get bunched up
t_start = np.round(np.linspace(0,frames-200,n),0)
    origin = np.random.choice(origins.OBJECTID.values,n) # create an origin for each agent, it gets a random choice of origin
destination = []
for i in origin: # however, it's destination is limited to one of the possible destinations we give it in the graph G
if len(destinations) > 1:
dests = list(nx.neighbors(travel_network,i))
destination.append(np.random.choice(dests))
else:
destination.append(destinations.OBJECTID.values[0])
# build data frame
dataframe = {'profileNo':profiles,'shipTypes':shipTypes,'Tprime':Tprime,'Kprime':Kprime,'L':L,'B':B,'T':T,'DWT':DWT,'V_des':V_des,'v0':v0,'change':change,'t-start':t_start,'origin':origin,'destination':destination}
df = pd.DataFrame.from_dict(dataframe,orient = 'columns')
# join dataframe to XY coordinates of the origin and destination:
df = df.merge(origins,how = 'left', left_on = 'origin', right_on = 'OBJECTID')
df.drop(columns = ['Channel','OBJECTID','Direction'], inplace = True)
df.rename(columns = {'X':'X0','Y':'Y0'}, inplace = True)
df = df.merge(destinations,how = 'left', left_on = 'destination', right_on = 'OBJECTID')
df.drop(columns = ['Channel','OBJECTID','Direction'], inplace = True)
df.rename(columns = {'X':'XD1','Y':'YD1'}, inplace = True)
df['X0'] = df.X0 + np.random.normal(200,100,len(df))
df['Y0'] = df.Y0 + np.random.normal(200,100,len(df))
df['XD1'] = df.XD1 + np.random.normal(200,100,len(df))
df['YD1'] = df.YD1 + np.random.normal(200,100,len(df))
    print(df.head())
if outputWS != None:
df.to_csv(os.path.join(outputWS,'initialStates.csv'),index = False)
return df
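# Usage sketch (hypothetical inputs): draw random initial states for 50 agents
# and persist them so the run can be repeated later:
#   states = initialStates(50, obstacleDF, origins, destinations,
#                          {'Cargo': 0.7, 'Tanker': 0.3}, travel_network,
#                          frames = 10000, outputWS = r'C:/abm/output')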
class Ship(): # create a class object to describe a ship agent
'''A class object for a ship agent
The class object describes and holds all of the attributes of a ship agent.
During a time step, the simulation will update affected attributes, while some
remain stable throughout the simulation.
The class object also contains all of the functions an agent needs to explore
its world and interact with other agents. The functions also include write
methods to log files.
'''
def __init__(self, profileNumber,profileData,dBase,route_obstacles = None, nav_obstacles = None):
'''when class is intialized, feed length, weight, velocity (m/s),
heading (radians), current position (pos1), and the agent's destination (goal)
'''
self.ID = str(profileNumber) # agent identifier
self.dest = np.array([profileData.loc[profileNumber]['XD1'],profileData.loc[profileNumber]['YD1']]) # agent destination
if 'XD' in profileData:
self.intDest = np.array([profileData.loc[profileNumber]['XD'],profileData.loc[profileNumber]['YD']]) # intermediate destination
else:
self.intDest = 'none'
self.xpos = profileData.loc[profileNumber]['X0'] # set starting X position for movement algorithm
self.ypos = profileData.loc[profileNumber]['Y0'] # set starting Y position for movement algorithm
#self.color = color # these should be in matplotlib color labels
self.type = profileData.loc[profileNumber]['shipTypes'] # what's it gonna be? # if you're a cargo vessel...
self.Tprime = profileData.loc[profileNumber]['Tprime'] # K-T indices
self.Kprime = profileData.loc[profileNumber]['Kprime']
self.L = profileData.loc[profileNumber]['L'] # values taken from: https://en.wikipedia.org/wiki/Tanker_(ship)
self.B = profileData.loc[profileNumber]['B']
self.T = profileData.loc[profileNumber]['T']
self.DWT = profileData.loc[profileNumber]['DWT']
self.t_start = profileData.loc[profileNumber]['t-start']
self.C_B = 0.80 # block coefficient
''' need decent values for drag coefficient'''
self.C_D = 0.10 # drag coefficient
self.d = 0.65 * self.T # propeller diameter # propeller diameter
#self.d = 0.35 * self.T # propeller diameter # propeller diameter
self.m = self.DWT * 907.185 # vessel mass converted from DWT measurement
self.K_t = 1.2 # propeller thrust coefficient NEEDS TO BE CALIBRATED???
self.A = ((self.L * self.B) + (2 * (self.B * self.T)) + (2 * (self.L * self.T))) * self.C_B # wetted area (m^2) useful for resistance
self.delta_c = np.array([0.0]) # initial rudder heading
self.r = np.array([0.0]) # initial rotational velocity
self.currentPos = np.array([self.xpos,self.ypos,0]) # N, E, D coordinates in array format
self.u = profileData.loc[profileNumber]['v0'] * 0.514444 # get the initial velocity in m/s, convert from knots
self.startVel = profileData.loc[profileNumber]['v0'] * 0.514444
self.desVel = profileData.loc[profileNumber]['V_des']
self.openWaterDesVel = profileData.loc[profileNumber]['V_des']
if route_obstacles is None: # obstacles for route planning - buffered and simplified in a GIS
self.route_obstacles = []
else:
self.route_obstacles = route_obstacles
if nav_obstacles is None: # obstacles as mapped into a GIS - not buffered and not simplified - actual GIS data
self.nav_obstacles = []
else:
self.nav_obstacles = nav_obstacles
self.origDesVel = self.desVel
self.RPS = 5 # current RPS setting (currently set at 300 RPM)
self.rho = 1029 # density of seawater kg/m^3
self.collide = 0
self.c = dBase[0]
self.conn = dBase[1]
self.voyageCounter = 1
# agent background
self.c.execute("INSERT INTO agent VALUES(%s,%s,'%s','%s','%s',%s,%s,%s,%s,%s,%s)"%(self.ID,self.m,str(self.dest),str(self.currentPos),str(self.type),self.L,self.B,self.T,self.Tprime,self.Kprime,self.desVel))
self.conn.commit()
self.crash = False
self.goal = False
self.delta_max = np.radians(35) # maximum rudder differential
self.inertialStopFunc = self.inertialStop(1)
def velRestrict(self):
pos = self.currentPos[:2]
# if the agent is within the mixer... this should be fixed with a polygon and shapely at a later date
if 597171 < pos[0] < 605117 and 4475972 < pos[1] < 4480975:
self.desVel = 10
elif pos[0] < 605117 and pos[1] > 4475026:
self.desVel = 10
#elif pos[0] < 612749 and pos[1] > 4467321:
# self.desVel = 10
else:
self.desVel = self.openWaterDesVel
    size = 50 # scale (m) of the triangular vessel outline
    nullShape = np.array([[0,size,0],[- 0.5 * size,-1 * size,0],[0.5 * size,-1 * size,0]]) # nullShape is the un-rotated outline of the vessel; a simple triangle for now
maxRPS = 30 # maximum propeller shaft RPS
def nextWpt(self):
'''Function identifies the next waypoint for each agent depending upon its current location.
Because the likelihood of an agent actually hitting a waypoint (single point in space),
the agent only has to get within 100 m of the point, before the next waypoint becomes the new destination'''
c = Point(np.array(self.currentPos[:2]))
wpt = Point(self.wpt)
pt = (self.wpt[0],self.wpt[1])
if c.distance(wpt) <= 50:
idx = self.wpts.index(pt)
if wpt != Point(self.dest):
self.wpt = self.wpts[idx + 1]
def Route(self):
'''Function identifies shortest route around obstacles, using a Euclidean Shortest Path
algorithm modified from Hong & Murray (2013)
'''
if len(self.route_obstacles) > 0:
wpts = nx.Graph()
A = Point(self.currentPos[:2])
B = Point(self.dest)
farms = []
for row in self.route_obstacles.iterrows():
farm = row[1]['shape']
buff = row[1]['buff']
#farm = farm.buffer(buff).simplify(0.4, preserve_topology=False)
farms.append(farm) # append simplified farm to list
wpts.add_node(list(A.coords)[0]) # add origin and destination to G*
wpts.add_node(list(B.coords)[0])
AB = LineString([A,B]) # create the line segment AB
# Step 2, if AB crosses obstacle polygon find G* if not add AB to G* we are done
testList = [] # list of nodes to test
tested = [] # list of nodes already tested
tested_edge = []
crossList = [] # list of polygons crossed by the current tested edge
# if original segment crosses a polygon
for poly in farms: # for each polygon in our list
if AB.crosses(poly): # if AB crosses it
crossList.append(poly) # append it to a cross list
# if AB does not cross any polygon, the shortest path to the origin is a straight line
if len(crossList) == 0:
wpts.add_edge(list(A.coords)[0],list(B.coords)[0],dist = AB.length)
print ("AB does not cross any obstacle, G* consists of: {0}".format(wpts))
# if the edge crosses at least 1 polygon, we need to build G*
            else: # otherwise we must build the visibility graph G* around the obstacles
print ("AB crosses at least one obstacle, test all obstacle vertices")
# Part 1, build and test a set of edges from the origin to every obstacle vertex
i = A # part 1, A becomes i
jList = []
for p in crossList: # for every polygon in the cross list
verts = list(p.exterior.coords) # extract exterior coordinates, these are nodes to test
for v in verts:
jList.append(v) # append their vertices to j list
jList = list(set(jList))
del p
testList = []
for j in jList: # for every j in jlist:
j = Point(j)
ij = LineString([i,j]) # create a line segment from i to j
test = True # if true, line segment does not intersect with any other polygon, we add this edge to the graph
for p in crossList:
if ij.crosses(p) or ij.within(p): # if ij does not cross this polygon, or if a polygon does not contain ij
test = False
print ("Ai crosses an obstacle")
break
if test == True:
wpts.add_nodes_from([list(j.coords)[0]])
wpts.add_edge(i.coords[0],j.coords[0],dist = ij.length)
testList.append(j) # let's add j to the test list, becomes i in next round
print ("Ai does not cross any obstacle, waypoint {0} added to G*".format(j.coords[:][0]))
del i, j, jList, p, v, verts, crossList
while testList: # while test list is not empty
for i in testList:
crossList = []
iB = LineString([i,B]) # create line segment to B
for poly in farms: # for every polygon, if iB happens to cross it, add it to the cross list
if iB.crosses(poly) or iB.within(poly):
crossList.append(poly)
del poly
if len(crossList) == 0: # if it turns out iB doesn't cross a polygon we have a winner, add it to G*
wpts.add_edge(i.coords[0],B.coords[0],dist = iB.length)
print ("iB does not cross any obstacle, an edge to the destination has been found and added to G*")
else:
jList = []
for p in crossList: # for every polygon in the cross list
verts = list(p.exterior.coords)
for v in verts:
iv = LineString([i,v])
if iv not in tested_edge:
jList.append(v) # append its vertices to the jlist
jList = list(set(jList))
del p, verts, v
for j in jList: # for every j in jlist:
j = Point(j)
ij = LineString([i,j]) # create a line segment from i to j
tested_edge.append(ij)
test = True
for p in farms:
if ij.crosses(p) or ij.within(p): # if ij does not cross a polygon, or if a polygon does not contain ij
test = False
print ("ij crosses an obstacle")
if test == True:
wpts.add_nodes_from([list(j.coords)[0]])
wpts.add_edge(i.coords[0],j.coords[0],dist = ij.length)
print ("ij does not cross an obstacle, edge added to G*")
if j not in tested:
testList.append(j) # let's add j to the test list, becomes i in next round
tested.append(i)
testList.remove(i)
del crossList
else:
wpts = nx.Graph()
A = Point(self.currentPos[:2])
B = Point(self.dest)
AB = LineString([A,B]) # create the line segment AB
wpts.add_node(list(A.coords)[0]) # add origin and destination to G*
wpts.add_node(list(B.coords)[0])
wpts.add_edge(list(A.coords)[0],list(B.coords)[0],dist = AB.length)
print ("G* built, consisting of waypoints:{0}".format(list(wpts.nodes)))
short = nx.dijkstra_path(wpts,list(A.coords)[0],list(B.coords)[0],weight = "dist")
self.short_route = short
self.c.execute("INSERT INTO route VALUES(%s,'%s')"%(self.ID,str(short)))
self.conn.commit()
print ("Shortest route for agent {0} found".format(self.ID))
print ("Agents route is along the following waypoints:{0}".format(np.round(short,0)))
self.wpts = short
if len(self.wpts) < 2:
self.wpt = self.wpts[0]
else:
self.wpt = self.wpts[1] # identify the first waypoint
posVec = (self.wpt - self.currentPos[:2]) # what is the position vector to the way point from the vessel's current position
posDir = posVec/np.linalg.norm(posVec) # unit vector describing direction of target vessel relative to own
heading = np.arctan2(posDir[1],posDir[0]) # heading in radians
self.prevPos = np.array([self.currentPos[0] - np.cos(heading) * self.u,self.currentPos[1] - np.sin(heading) * self.u,0.0]) # create a previous position for the sake of the simulation
self.psi = np.array([heading]) # what is the heading in radians
def M(self, agents, obs, obsW):
'''destination must be super massive so that it always has an attractive pull on
the own agent unless it is extremely close to another agent. Simply 100
times the sum of all agent masses within an affected region
'''
mList = []
for i in agents:
mList.append(i.m)
for i in obs:
mList.append(obsW)
exp = len(agents) + len(obs)
return np.sum(mList)
def attitude(self):
'''attitude method is called and is a function of the class itself. After
movement model concludes, psi is updated (model limited heading) and should
not equal delta_c (collision avoidance command heading) unless it is within
a physically realistic turning radius.
'''
return np.array([0,0,self.psi + np.pi/2 *-1])
def shapePos(self):
''' When a ship agent is initialized, the original orientation is given due north,
The simulation will update shapePos whenever this method is called,
the vessel shpae will rotate according to model limited heading (psi).
The resultant shape position is rotated according to attitude.
'''
# columns: X, Y , Z
# rows: Bow, Port Stern, Starboard Stern
current = np.array([[self.currentPos[0], self.currentPos[1] + self.size, self.currentPos[2]],
[self.currentPos[0] - self.size * 0.5, self.currentPos[1] - self.size,self.currentPos[2]],
[self.currentPos[0] + self.size * 0.5, self.currentPos[1] - self.size,self.currentPos[2]]])
delta = current - self.nullShape
rotPos = np.zeros(current.shape)
rot = rotMatrix(self.attitude())
# for now this is an ineficient method for rotation, it would be better to use GPU processing
# for this application
for j in np.arange(len(rotPos)):
rotPos[j] = rot.dot(current[j] - delta[j]) + delta[j] # need to translate to null position location (around origin)
#rotPos[j] = ref.dot(rotPos[j])
return rotPos
def inertialStop(self,delta):
        ''' Function wraps the inertial stop velocity function into a numpy vectorized
        function that applies inertialStopVel over an array of delta t's'''
def inertialStopVel(t,m,v0,A,C_D):
'''Function for the velocity of a vessel during inertial stop at time (t).
During inertial stop there is no negative thrust from a reversal in gear,
therefore the only thing slowing the vessel down is drag.
During a meeting on January 5, 2016, Dr. Meyer derived this formula with
Mathematica. Kevin Nebiolo implemented it in Python on October 13, 2016 for use
in an agent based model of the commercial shipping industry
t = model time step
m = mass of vessel
v0 = initial velocity
A = cross sectional area
delta = change in seconds'''
return (m * v0)/(m + A * C_D * t * v0)
vfunc = np.vectorize(inertialStopVel) # vectorize inertialStopVel
maxVel = self.openWaterDesVel
velArray = np.arange(0,maxVel+2.0,0.5)
dispArray = np.array([])
for i in velArray:
# apply vectorized function over a vector of dt's
t = np.linspace(0,7200,7201) # create a vector of model time steps
vel_t = vfunc(t,self.m,i,self.A,self.C_D) # calculate velocity at time (t)
disp_t = vel_t * delta # calculate displacement at time (t)
try:
t_index = np.where(vel_t < 2.0)[0][0] # find the index where the agent is pretty much stopped
except:
t_index = len(t) -1 # if the agent never fully stopped, what was the last time index?
t_at_0V_A = int(t[t_index]) # what time step was that at?
inertialDisp = sum(disp_t[:t_at_0V_A+1])
dispArray = np.append(dispArray,inertialDisp)
return interpolate.interp1d(velArray,dispArray,kind = 'cubic',bounds_error = False)
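    # e.g. the interpolator built above maps current surge velocity (m/s) to the
    # expected inertial stopping displacement (m), as used in F_rep_agn below:
    #   stop_dist = self.inertialStopFunc(self.u).tolist()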
def F_att(self,agents,obs,obsW):
'''
Gravitational attraction between the agent and the destination.
Note, M is supermassive and is equal to the double the sum of all object
masses within the simulation
Newton's law of gravitation specifies that the gravitational force exerted
by a point mass M on an object is equal to (Meyer 2002):
F = - (G*M*r_hat)/(magnitude r)**2
Function Inputs:
the method incporates class variables,
agent list may change depending upon those within close proximity
Works Cited:
Meyer, T. H. (2002). Introduction to Geometrical and Physical Geodesy:
Foundation of Geomatics. Redlands, CA: ESRI Press.
'''
r = (self.currentPos[0:2] - self.wpt)
rhat = r/np.linalg.norm(r)
#att = np.negative((G * self.M(agents,obs,obsW) * rhat)/(np.linalg.norm(r)**2))
att = np.negative((G * self.M(agents,obs,obsW) * rhat))
self.attForce = att
return att
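    # e.g. with the waypoint 1000 m due east of the agent, r = [-1000, 0] and
    # rhat = [-1, 0], so att = -G*M*rhat points east, toward the waypoint.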
def F_rep_agn(self, agents, time):
'''
        Modified gravitational attraction function for repulsion from other ship agents.
        Rather than having a negative, attractive force, repulsion is expressed as a
        positive force by multiplying by -1.
Function uses case logic to identify if a target vessel is a collision threat.
The function also scales the force by distance. Without scaling the distance,
        repulsive force is only strong enough to repel a ship when distances are close.
        This behavior is unsafe, therefore we scale this distance by the sum of the
        length overall of the own and target vessels.
Function Inputs:
G = gravitational constant
agents = list of agents within range
Function also classifies each interaction, decides on the level of evasive
maneuvering required and writes this information to the event log.
Depending upon the interaction with the agent, they may need to slow down.
Some interactions come with high risk (RPS = 3) and the agent will apply full
reverse, while other interactions will only warrant inertial stop (RPS = 2).
When the agent does not need to slow down, RPS = 1
RPS_scen:
3: high risk, crash trajectory polygons overlap, RPS = full astern
2: medium risk, inertial trajectory polygons overlap, RPS = 0
1: low to no risk, vessel aims to achieve desired RPS
Repulsive force logic is a coded value classifying the type of agent-agent
interaction. There can be multiple interaction types, each with their own
repulsive force logic.
repLogic:
1: target-agent (q_curr) within the trajectory polygon of the own-agent, apply repulsive force
2: agents are head on, apply repulsive force
3: own-agent is in line with and behind target-agent, no repulsive force
4: own-agent approaching the port side of the target-agent, apply repulsive force
5: own-agent approaching the starboard side of the target-agent, no repulsive force
6: trajectory polygons of the own and target-agent do not over lap, no repulsive force
7: target-agent is not within the 270 degree swath around own-agent, no repulsive force
8: target-agent is greater than 5 km away, no repulsive force applied
'''
self.interactions = [] # create list of interactions to add at end
self.RPS_rep_scen = [] # RPS scenario dictionary that own agent will compile for interactions with every other agent
# numerical value indicates risk level
# 3 = high risk, crash trajectory polygons overlap, RPS = full astern
# 2 = medium risk, inertial trajectory polygons overlap, RPS = 0
# 1 = low to no risk, vessel aims to achieve desired RPS
self.matchVelLogic = []
self.matchVel = []
repArr = [] # create array to store repulsive forces generate by all agents in simulation
agentsUnder2k = []
if self.u > 0.0:
for i in agents:
if i.ID == self.ID:
rep = np.array([0.0,0.0])
repArr.append(rep)
self.RPS_rep_scen.append(1)
else:
p_curr = self.currentPos[:2]
p_prev = self.prevPos[:2]
q_curr = i.currentPos[:2] # current position of target-agent
q_prev = i.prevPos[:2]
v = (q_curr - p_curr) # vector of the target vessel relative to the own
# v = (p_curr - q_curr) # vector of the target vessel relative to the own
v_prime = (p_curr - q_curr) # vector of the own relative to the target vessel
v_hat = v/np.linalg.norm(v) # unit vector describing direction of target vessel relative to own
v_prime_hat = v_prime/np.linalg.norm(v_prime) # unit vector describing direction of own vessel relative to target
v_hat_ang = np.arctan2(v_hat[1],v_hat[0]) # direction in radians between ships
dist = np.linalg.norm(v) # distance between ships
if dist <= 5 and not self.goal:
actionLogic = 187 # collision: the agents have collided
self.crash = True # the own agent has crashed
rep = (G * i.m * v_hat)/(dist**2)
#rep = np.array([0,0]) # alternatively, apply no repulsive force once the agent has crashed
repLogic = 187 # collision code
repArr.append(rep) # append the repulsive force to an array of repulsive forces
s_o = np.array([0])
c = dist
RPS_scen = 1
# interactions beyond 5 km are ignored
elif 20 < dist <= 5000: # if the target agent is between 20 m and 5 km away
psi_o = p_curr - p_prev
psi_t = q_curr - q_prev
psi_o_prime = psi_o/np.linalg.norm(psi_o) # unit vector in the direction of travel of the own agent
psi_t_prime = psi_t/np.linalg.norm(psi_t) # unit vector in the direction of travel of the target agent
psi_o_ang = self.psi # psi of own agent, heading in radians
tau_o = self.delta_max/5.0 # maximum rudder deflection angle own-agent
tau_t = i.delta_max/5.0 # maximum rudder deflection angle target-agent
#tau_o = 2 # maximum rudder deflection angle own-agent
#tau_t = 2
s_o = self.inertialStopFunc(self.u).tolist()
s_t = i.inertialStopFunc(i.u).tolist()
agentsUnder2k.append(i.ID)
# We have three opportunities to decide whether spending computational resources on a full collision assessment is worthwhile...
if psi_o_ang - np.radians(135) <= v_hat_ang <= psi_o_ang + np.radians(135):
actionLogic = 1
#######################################
# Step 1: Develop trajectory polygons #
#######################################
# calculate the inertial displaced positions of the own (p_s) and target agent (p_t)
p_s = p_curr + s_o * psi_o_prime # inertial displaced position of the own agent, total displacement s_o
q_s = q_curr + s_t * psi_t_prime # inertial displaced position of the target agent, total displacement s_t
# calculate R_tA and R_tB
rot_p_tau_o = np.array([[np.cos(tau_o/2.0), -np.sin(tau_o/2.0)],[np.sin(tau_o/2.0),np.cos(tau_o/2.0)]]) # port rotation matrix for the own agent
rot_p_tau_t = np.array([[np.cos(tau_t/2.0), -np.sin(tau_t/2.0)],[np.sin(tau_t/2.0),np.cos(tau_t/2.0)]]) # port rotation matrix for the target agent
rot_s_tau_o = np.array([[np.cos(-1 * tau_o), -np.sin(-1 * tau_o)],[np.sin(-1 * tau_o),np.cos(-1 * tau_o)]]) # starboard rotation matrix for the own agent
rot_s_tau_t = np.array([[np.cos(-1 * tau_t), -np.sin(-1 * tau_t)],[np.sin(-1 * tau_t),np.cos(-1 * tau_t)]]) # starboard rotation matrix for the target agent
p_s_p = p_curr + s_o * rot_p_tau_o.dot(psi_o_prime) # inertial displaced, port-rotated position of the own agent
q_s_p = q_curr + s_t * rot_p_tau_t.dot(psi_t_prime) # inertial displaced, port-rotated position of the target agent
p_s_s = p_curr + s_o * rot_s_tau_o.dot(psi_o_prime) # inertial displaced, starboard-rotated position of the own agent
q_s_s = q_curr + s_t * rot_s_tau_t.dot(psi_t_prime) # inertial displaced, starboard-rotated position of the target agent
# create trajectory polygons
if self.u == 0.0:
Psi_o = Point(p_curr).buffer(1000)
else:
Psi_o = Polygon([p_curr,p_s_p,p_s,p_s_s]) # create a polygon of all A positions
if i.u == 0.0:
Psi_t = Point(q_curr).buffer(1000) # buffer around the stationary target agent's position
else:
Psi_t = Polygon([q_curr,q_s_p,q_s,q_s_s]) # create a polygon of all B positions
########################################################################
# Step 3: apply case logic to determine if repulsive force is required #
########################################################################
if s_o > 0.0 and s_t > 0.0:
if Psi_o.intersects(Psi_t):
# test for intersection
Psi_c = Psi_o.intersection(Psi_t)
p_0 = Point(p_curr) # create shapely point for current position of own-agent
c = p_0.distance(Psi_c) # calculate the distance from the own-agent to the polygon where trajectory polygons overlap
# Type II interaction: the vessels are head on
if np.radians(170) < (i.psi - self.psi) < np.radians(190) or np.radians(-190) < (i.psi - self.psi) < np.radians(-170):
repLogic = 2 # repulsive force logic - vessels are head on
#rep = (G * i.m * v_hat)/((dist/(self.L + i.L))**2) # apply repulsive force
if c < 300:
RPS_scen = 1 # RPS_scen stays 1; speed is reduced via matchVel instead
self.matchVel.append(5.0)
rep = (G * i.m * v_hat)/((dist/(self.L + i.L))**2) # apply repulsive force
#rep = (G * i.m * v_hat)/(dist**2) # apply repulsive force
elif 300 <= c < 1000:
self.matchVel.append(5.0)
RPS_scen = 1
rep = np.array([0.0,0.0]) # no repulsive force
else:
RPS_scen = 1
self.matchVel.append(10.0)
rep = np.array([0.0,0.0]) # no repulsive force
repArr.append(rep) # append repulsive force to array
# Type I interaction, target agent is within the own agent's trajectory polygon
elif Point(q_curr).within(Psi_o):
repLogic = 1 # repulsive force logic - target agent within
#rep = (G * i.m * v_hat)/((dist/(2 * (self.L + i.L)))**2) # apply repulsive force
if c < 500:
RPS_scen = 3 # full astern thrust
rep = (G * i.m * v_hat)/((dist/(self.L + i.L))**2) # apply repulsive force
elif 500 <= c < 1000:
RPS_scen = 2 # RPS = 0, inertial stop
rep = (G * i.m * v_hat)/(dist**2) # apply repulsive force
else:
self.matchVel.append(5.0)
RPS_scen = 1
rep = np.array([0.0,0.0]) # no repulsive force
repArr.append(rep) # append repulsive force to array
# Type III interaction: own agent is in line and behind the target agent
elif np.all(np.sign(psi_o_prime) == np.sign(v)) and psi_o_ang - np.radians(22.5) <= v_hat_ang <= psi_o_ang + np.radians(22.5):
repLogic = 3 # own agent is in line and behind the target agent
#rep = (G * i.m * v_hat)/((dist/(self.L + i.L))**2) # apply repulsive force
#rep = (G * i.m * v_hat)/((dist/s_o)**2) # apply repulsive force
if c < 500:
RPS_scen = 3 # full astern thrust
elif 500 <= c < 1000:
RPS_scen = 2 # RPS = 0, inertial stop
else:
self.matchVel.append(i.u)
RPS_scen = 1
rep= np.array([0.0,0.0])
repArr.append(rep) # append repulsive force to array
# Type IV interaction: own agent is approaching the port side of the target agent
elif np.sign(psi_o_prime[0] * v_hat[1] - v_hat[0] * psi_o_prime[1]) < 0:
repLogic = 4 # own agent is approaching the port side of the target agent
#rep = (G * i.m * v_hat)/((dist/(self.L + i.L)*2)**2) # apply repulsive force
rep = (G * i.m * v_hat)/((dist/(self.L + i.L))**2) # apply repulsive force
if c < 500:
RPS_scen = 3 # full astern thrust
elif 500 <= c < 1000:
RPS_scen = 2 # RPS = 0, inertial stop
else:
self.matchVel.append(10.0)
RPS_scen = 1
repArr.append(rep) # append repulsive force to array
# Type V interaction: own agent is approaching the starboard side of the target agent
else:
repLogic = 5 # own agent is approaching the starboard side of the target agent
rep = np.array([0.0,0.0]) # stand on vessel does not feel repulsive force
RPS_scen = 1 # the agent solves for RPS to maintain desired velocity
repArr.append(rep)
else:
repLogic = 6 # trajectory polygons of the own and target-agent do not overlap, no repulsive force applied
RPS_scen = 1
rep = np.array([0.0,0.0])
repArr.append(rep)
c = s_o
elif s_o > 0.0 and s_t == 0.0:
if Point(q_curr).within(Psi_o):
p_0 = Point(p_curr) # create shapely point for current position of own-agent
c = p_0.distance(Point(q_curr)) # distance from the own agent to the target agent's current position
repLogic = 1 # repulsive force logic - target agent within
rep = (G * i.m * v_hat)/(dist**2) # apply repulsive force
#rep = (G * i.m * v_hat)/((dist/s_o)**2) # apply repulsive force
if c < 0.05 * s_o:
RPS_scen = 3 # full astern thrust
elif 0.05 * s_o <= c < 0.75 * s_o:
RPS_scen = 2
else:
self.matchVel.append(i.u)
RPS_scen = 1
repArr.append(rep) # append repulsive force to array
else:
repLogic = 6 # trajectory polygons of the own and target-agent do not overlap, no repulsive force applied
RPS_scen = 1
rep = np.array([0.0,0.0])
repArr.append(rep)
c = s_o
else:
repLogic = 99 # own agent has no inertial stopping distance, no repulsive force applied
RPS_scen = 1
rep = np.array([0.0,0.0])
repArr.append(rep)
c = s_o
else:
actionLogic = 0
repLogic = 7 # target-agent is not within the 270 degree swath around the own-agent, no repulsive force applied
rep = np.array([0.0,0.0])
RPS_scen = 1
repArr.append(rep)
else:
actionLogic = 0
repLogic = 8 # target-agent is greater than 5 km away, no repulsive force applied
rep = np.array([0.0,0.0]) # there is no repulsive force from this agent
RPS_scen = 1 # if there are no 2's or 3's the vessel maintains course and desired velocity
repArr.append(rep) # append the repulsive force for this agent to the final array
self.RPS_rep_scen.append(RPS_scen)
# Write interaction data to event log
if actionLogic == 0 or actionLogic == 2:
self.interactions.append((time,self.ID,i.ID,self.psi[0],i.psi[0],v_hat_ang,repLogic,0,dist,0,RPS_scen,np.array2string(rep),self.voyageCounter,self.crash))
else:
self.interactions.append((time,self.ID,i.ID,self.psi[0],i.psi[0],v_hat_ang,repLogic,float(s_o),dist,c,RPS_scen,np.array2string(rep),self.voyageCounter,self.crash))
# tuple indices: 0 time, 1 own ID, 2 target ID, 3 own psi, 4 target psi, 5 v_hat_ang, 6 repLogic, 7 s_o, 8 dist, 9 c, 10 RPS_scen, 11 rep, 12 voyageCounter, 13 crash
self.c.executemany('INSERT INTO interaction VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)',self.interactions)
self.conn.commit()
repArr = np.nan_to_num(np.array(repArr)) # convert nans to numbers
self.agnRepArr = np.sum(repArr,axis = 0)
return np.sum(repArr, axis = 0) # return the sum of the repulsive forces to the simulation
else:
self.agnRepArr = [0.0,0.0]
return [0.0, 0.0]
def F_rep_obs(self, obs, obsW):
'''
Modified gravitational attraction function for repulsion from obstructions.
Rather than having a negative, attractive force, repulsion is expressed as a
positive force by multiplying by -1.
The function uses case logic to identify if an obstruction is a collision threat.
The function also scales the force by distance. Without scaling the distance, the
repulsive force is only strong enough to repel an agent when distances are close.
This behavior is unsafe, therefore we scale the distance by dividing the
collision distance by the inertial stop distance.
Function Inputs:
G = gravitational constant (module-level)
obs = list of obstructions
obsW = obstruction weight, the mass analog used in the force calculation
The function also classifies each interaction, decides on the level of evasive
maneuvering required, and writes this information to the event log.
Depending upon the reaction to the obstacle, the agent may need to slow down.
Some interactions come with high risk (RPS_scen = 3) and the agent will apply full
reverse, while other interactions will only warrant an inertial stop (RPS_scen = 2).
When the agent does not need to slow down, RPS_scen = 1.
RPS_scen:
3: high risk, crash trajectory polygons overlap, RPS = full astern
2: medium risk, inertial trajectory polygons overlap, RPS = 0
1: low to no risk, vessel aims to achieve desired RPS
'''
self.RPS_obs_scen = [] # RPS scenario list that the own agent compiles for interactions with each obstacle
# numerical value indicates risk level
# 3 = high risk, crash trajectory polygons overlap, RPS = full astern
# 2 = medium risk, inertial trajectory polygons overlap, RPS = 0
# 1 = low to no risk, vessel aims to achieve desired RPS
repArr = []
#if len(obs) > 0:
for row in self.nav_obstacles.iterrows():
if row[1]['type'] == 'land' or row[1]['type'] == 'WEA':
if self.u > 1.0:
p_curr = self.currentPos[:2] # create a point of the agent's current position
p_prev = self.prevPos[:2]
psi_o = p_curr - p_prev
p_currPoint = Point(p_curr) # create a point of the agent's current position
# omega = row[1]['shape'].centroid
obsLine = LinearRing(row[1]['shape'].exterior.coords) # boundary of polygon as line
omega = obsLine.interpolate(obsLine.project(p_currPoint)) # identify the nearest point, omega
# sigma = np.array(list(omega.coords))[0] - np.array(list(p_currPoint.coords))[0] # vector describing the position of the own agent from omega
sigma = np.array(list(p_currPoint.coords))[0] - np.array(list(omega.coords))[0] # vector describing the position of the own agent from omega
obsDist = p_currPoint.distance(row[1]['shape'])
if obsDist <= 500:
self.matchVel.append(3.0)
elif 500 < obsDist <= 1000:
self.matchVel.append(5.0)
elif 1000 < obsDist <= 2000:
self.matchVel.append(8.0)
elif 2000 < obsDist <= 3000:
self.matchVel.append(10.0)
sigmahat = sigma/np.linalg.norm(sigma) # unit vector describing direction of own vessel relative to the obstacle
sigmaNorm = np.linalg.norm(sigma)
Omega = row[1]['shape'] # obstacle
coll_dist = p_currPoint.distance(Omega) # distance to obstacle
s_o = self.inertialStopFunc(self.u).tolist()
#s_o = s_o / 2.0
if (psi_o == np.array([0.0,0.0])).all():
Vx = np.cos(self.psi)
Vy = np.sin(self.psi)
psi_o_prime = np.array([Vx, Vy]).flatten() # fall back to the heading as a unit direction vector
else:
psi_o_prime = psi_o/np.linalg.norm(psi_o) # unit vector direction own agent
tau_o = 0.05
# calculate the inertial displaced positions of the own (p_s) and target agent (p_t)
p_s = p_curr + s_o * psi_o_prime # inertial displaced position of the own agent, total displacement s_o
# calculate R_tA and R_tB
rot_p_tau_o = np.array([[np.cos(tau_o), -np.sin(tau_o)],[np.sin(tau_o),np.cos(tau_o)]]) # rotation matrix for agent A at current rate of rotation
rot_s_tau_o = np.array([[np.cos(-1 * tau_o), -np.sin(-1 * tau_o)],[np.sin(-1 * tau_o),np.cos(-1 * tau_o)]]) # rotation matrix for agent A at current rate of rotation
p_s_p = p_curr + s_o * rot_p_tau_o.dot(psi_o_prime) # inertial displaced, port-rotated position of the own agent
p_s_s = p_curr + s_o * rot_s_tau_o.dot(psi_o_prime) # inertial displaced, starboard-rotated position of the own agent
# create trajectory polygons
Psi_o = Polygon([p_curr,p_s_p,p_s,p_s_s]) # create a polygon of all A positions
if Psi_o.area > 0.0:
if Psi_o.intersects(Polygon(row[1]['shape'])):
if coll_dist <= 5:
self.crash = True
rep = np.array([0.0,0.0])
repArr.append(rep)
elif 5 < coll_dist <= 1000:
rep = (G * obsW * sigmahat)/((coll_dist/(5 * self.L))**2)
self.RPS_obs_scen.append(3)
repArr.append(rep)
elif 1000 < coll_dist <= 2000:
#rep = (G * obsW * sigmahat)/(coll_dist**2)
rep = np.array([0.0,0.0])
self.matchVel.append(2.0)
self.RPS_obs_scen.append(1)
repArr.append(rep)
elif 2000 < coll_dist < 5000:
#rep = (G * obsW * sigmahat)/(coll_dist**2)
rep = np.array([0.0,0.0])
self.matchVel.append(5.0)
self.RPS_obs_scen.append(1)
repArr.append(rep)
else:
repArr.append(np.array([0.0,0.0]))
else:
repArr.append(np.array([0.0,0.0]))
else:
repArr.append(np.array([0.0,0.0]))
else:
repArr.append(np.array([0.0,0.0]))
repArr = np.nan_to_num(np.array(repArr))
self.obsRepArr = np.sum(repArr,axis = 0)
return np.sum(repArr,axis = 0)
def RPScommand(self):
'''This function returns the RPS command based upon the interactions with
other agents and obstacles during a time step. The agent collects information
during interactions with all agents and each obstacle that is less than 1 km
away.
If any interaction is high risk then the return is full astern
If any interaction is medium risk then the return is RPS = 0 for inertial stopping
Otherwise the agent solves for RPS after determining the desired acceleration.
'''
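# Derivation sketch for the RPS solve below (using the thrust and resistance
# models defined in this file): the required thrust must supply the desired
# acceleration plus drag, T = m * a_d + R, and propeller thrust is modeled as
# T = K_t * rho * n**2 * d**4, so n = sqrt((m * a_d + R) / (K_t * rho * d**4)).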
def resistance(rho,u,C_D,A):
'''formula for vessel resistance where:
C_D = drag coefficient
A = vessel wetted area
u = agent's current velocity
rho = density of seawater
'''
return 0.5 * rho * u**2 * C_D * A
risk = self.RPS_rep_scen + self.RPS_obs_scen
if np.any(np.equal(risk,np.repeat(3,len(risk)))):
n = -5
elif np.any(np.equal(risk,np.repeat(2,len(risk)))):
n = 0
else:
R = resistance(self.rho, self.u,self.C_D, self.A)
u_0 = np.nan_to_num(self.u) # get agent's velocity
if len(self.matchVel) > 0:
u_1 = np.min(self.matchVel)
if u_1 > self.openWaterDesVel:
u_1 = self.openWaterDesVel
else:
u_1 = self.desVel # get desired velocity
a_d = (u_1 - u_0)/120 # calculate desired acceleration as the change in velocity over a 120 s window
# guard against a negative thrust demand, which would make the sqrt below complex
if np.sign(self.m * a_d + R) == -1:
n = np.sqrt(((self.m * a_d + R)*-1)/(self.K_t * self.rho * self.d**4)) # solve for RPS
else:
n = np.sqrt((self.m * a_d + R)/(self.K_t * self.rho * self.d**4)) # solve for RPS
if n > self.maxRPS:
n = self.maxRPS
# elif n < 0:
# n = 0.0
self.matchVel = []
return n
def surge(self):
'''Surge function adopted from Ueng (2008), allows an agent to increase
or decrease surge velocity as a function of vessel density
'''
def thrust(K_t,rho,n,d):
'''formula for thrust where:
K_t = propeller thrust coefficient
rho = density of seawater
n = agent's current RPS setting
d = propeller diameter
'''
if np.sign(n) == 1:
return K_t * rho * n**2 * d**4
else:
return (K_t * rho * n**2 * d**4) * -1
def resistance(rho,u,C_D,A):
'''formula for vessel resistance where:
C_D = drag coefficient
A = vessel wetted area
u = agent's current velocity
rho = density of seawater
'''
return 0.5 * rho * u**2 * C_D * A
def V(dt,acc):
return acc * dt
risk = self.RPS_rep_scen + self.RPS_obs_scen
R = resistance(self.rho,self.u,self.C_D,self.A) # drag force drag coefficient * wetted area * velocity squared
# if np.round(self.delta_c,1) != 0.0:
# if np.abs(self.delta_c[0]) > np.radians(35):
# rudder_percent = 1.0
# else:
# rudder_percent = self.delta_c[0] / np.radians(35)
# R = R + R ** rudder_percent
T = thrust(self.K_t,self.rho,self.RPS,self.d) # thrust = thrust coefficient * RPS squared * propeller diameter to the 4th power
if self.crash == True:
self.u = 0.0
else:
# if np.any(np.equal(risk,np.repeat(3,len(risk)))):
# acc = ((-1 * thrust) - resistance)/self.m
# else:
# acc = (thrust - resistance)/self.m # acceleration is equal to the sum of surgeforces divided by the vessel's mass
acc = (T - R)/self.m # acceleration is equal to the sum of surge forces divided by the vessel's mass
dV = V(1,acc)
if np.linalg.norm((self.currentPos[:2] - self.dest)) < 500: # if the agent is within 500 m of their destination - it cycles back to the origin and starts over again
self.goal = True
self.voyageCounter = self.voyageCounter + 1
self.currentPos = np.array([0,0,0])
self.wpt = self.wpts[1] # identify the first waypoint
posVec = (self.wpt - self.currentPos[:2]) # position vector from the vessel's current position to the waypoint
posDir = posVec/np.linalg.norm(posVec) # unit vector pointing toward the waypoint
heading = np.arctan2(posDir[1],posDir[0]) # heading in radians
self.prevPos = np.array([self.currentPos[0] - np.cos(heading) * self.u,self.currentPos[1] - np.sin(heading) * self.u,0.0]) # create a previous position for the sake of the simulation
self.psi = np.array([heading]) # what is the heading in radians
self.u = self.startVel
else:
self.u = self.u + dV # the new scalar velocity is equal to the current scalar velocity plus the current scalar acceleration
if self.u < 0.0:
self.u = 0.0
self.RPS_rep_scen = []
self.RPS_obs_scen = []
def move(self):
'''
Movement functions based on Nomoto
displacement function of:
u = surge velocity/forward motion at t0 - will be the result of agent input next...
psi = current heading at t0
theta = command heading at t0
K, T = Nomoto maneuverability indices
delta t is intended to be 1 second, therefore there is no need to multiply by dt
'''
def dPsi(dt,r):
return r * dt
if self.u == 0.0:
self.prevPos = self.currentPos # previous position is now equal to the current position
newX = np.array([self.currentPos[0]])
newY = np.array([self.currentPos[1]])
self.currentPos = np.zeros(3)
self.currentPos = np.array([newX[0],newY[0],0]) # set current position
self.psi = self.psi # heading is unchanged while the vessel is stationary
self.r = self.r # rotational velocity is unchanged while the vessel is stationary
else:
K = (self.Kprime * self.u)/self.L
T = (self.Tprime * self.L)/self.u
# command heading may be larger than maximum allowable rudder range
if np.abs(self.delta_c) > self.delta_max:
self.delta_c = np.sign(self.delta_c) * self.delta_max
# start movement
self.prevPos = self.currentPos # previous position is now equal to the current position
newX = np.array([self.currentPos[0] + self.u * np.cos(self.psi[0])]) # calculate New X
newY = np.array([self.currentPos[1] + self.u * np.sin(self.psi[0])]) # calculate New Y
self.currentPos = np.zeros(3)
self.currentPos = np.array([newX[0],newY[0],0]) # set current position
self.psi = self.psi + dPsi(1,self.r) # set new vessel heading
self.r = self.r + (K * (self.delta_c - self.r))/T # set new vessel rotational velocity
# calculate a dampening force acting against the direction of motion r
self.damp = self.r * -0.02
#self.damp = np.sqrt(self.r**2 - (self.r/(2*self.m))**2)
self.r = self.r + self.damp
if self.goal == True:
self.currentPos = np.array([0,0,0])
def time_step_log(self,time_step,att,rep,obs,direction,RPS):
'''function that logs the results of a time step - rows are batched in memory
because writing to sqlite every time step is slow'''
try:
self.time_log[time_step] = [self.ID,att[0],att[1],rep[0],rep[1],obs[0],obs[1],direction[0],direction[1],self.delta_c[0],RPS,self.u,self.prevPos[0],self.prevPos[1],self.currentPos[0],self.currentPos[1],self.voyageCounter]
except AttributeError:
self.time_log = dict()
self.time_log[time_step] = [self.ID,att[0],att[1],rep[0],rep[1],obs[0],obs[1],direction[0],direction[1],self.delta_c[0],RPS,self.u,self.prevPos[0],self.prevPos[1],self.currentPos[0],self.currentPos[1],self.voyageCounter]
def selfRotation(rotMatrix, coord):
return rotMatrix.dot(coord)
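# A minimal, self-contained sketch (illustrative only, not part of the model)
# of the first-order Nomoto heading update used in move() above. The gain K,
# time constant T, and rudder command are hypothetical demo values.
if __name__ == '__main__':
    import numpy as _np
    K, T, dt = 0.1, 10.0, 1.0 # hypothetical Nomoto indices and timestep [s]
    psi, r = 0.0, 0.0 # initial heading [rad] and rotational velocity [rad/s]
    delta_c = _np.radians(10) # constant commanded rudder angle
    for _ in range(60):
        psi = psi + r * dt # integrate heading, as in move()
        r = r + (K * (delta_c - r)) / T # first-order response toward the rudder command
    print('heading after 60 s: {:.3f} rad'.format(psi))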
|
{"hexsha": "ed30253e0a00c622db21e58acc4d2c69452026e7", "size": 74464, "ext": "py", "lang": "Python", "max_stars_repo_path": "emergent/shipABM_Complexity_V13.py", "max_stars_repo_name": "knebiolo/emergent", "max_stars_repo_head_hexsha": "a6edb20c9907d4122d165eecaeeff33782d24a48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "emergent/shipABM_Complexity_V13.py", "max_issues_repo_name": "knebiolo/emergent", "max_issues_repo_head_hexsha": "a6edb20c9907d4122d165eecaeeff33782d24a48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "emergent/shipABM_Complexity_V13.py", "max_forks_repo_name": "knebiolo/emergent", "max_forks_repo_head_hexsha": "a6edb20c9907d4122d165eecaeeff33782d24a48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 65.2049036778, "max_line_length": 304, "alphanum_fraction": 0.4806080791, "include": true, "reason": "import numpy,import scipy,from scipy,import networkx", "num_tokens": 15281}
|
[STATEMENT]
lemma ltl_llist_of_stream [simp]: "ltl (llist_of_stream xs) = llist_of_stream (stl xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ltl (llist_of_stream xs) = llist_of_stream (stl xs)
[PROOF STEP]
by(simp add: llist_of_stream_def)
|
{"llama_tokens": 112, "file": "Coinductive_Coinductive_Stream", "length": 1}
|
[STATEMENT]
lemma (in domain) pdivides_imp_degree_le:
assumes "subring K R" and "p \<in> carrier (K[X])" "q \<in> carrier (K[X])" "q \<noteq> []"
shows "p pdivides q \<Longrightarrow> degree p \<le> degree q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p pdivides q \<Longrightarrow> degree p \<le> degree q
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. p pdivides q \<Longrightarrow> degree p \<le> degree q
[PROOF STEP]
assume "p pdivides q"
[PROOF STATE]
proof (state)
this:
p pdivides q
goal (1 subgoal):
1. p pdivides q \<Longrightarrow> degree p \<le> degree q
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p pdivides q
[PROOF STEP]
obtain r where r: "polynomial (carrier R) r" "q = poly_mult p r"
[PROOF STATE]
proof (prove)
using this:
p pdivides q
goal (1 subgoal):
1. (\<And>r. \<lbrakk>polynomial (carrier R) r; q = poly_mult p r\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding pdivides_def factor_def univ_poly_mult univ_poly_carrier
[PROOF STATE]
proof (prove)
using this:
\<exists>c\<in>carrier (poly_ring R). q = poly_mult p c
goal (1 subgoal):
1. (\<And>r. \<lbrakk>r \<in> carrier (poly_ring R); q = poly_mult p r\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
polynomial (carrier R) r
q = poly_mult p r
goal (1 subgoal):
1. p pdivides q \<Longrightarrow> degree p \<le> degree q
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
polynomial (carrier R) r
q = poly_mult p r
goal (1 subgoal):
1. p pdivides q \<Longrightarrow> degree p \<le> degree q
[PROOF STEP]
have p: "polynomial (carrier R) p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. polynomial (carrier R) p
[PROOF STEP]
using assms(2) carrier_polynomial[OF assms(1)]
[PROOF STATE]
proof (prove)
using this:
p \<in> carrier (K [X])
polynomial K ?p \<Longrightarrow> polynomial (carrier R) ?p
goal (1 subgoal):
1. polynomial (carrier R) p
[PROOF STEP]
unfolding univ_poly_carrier
[PROOF STATE]
proof (prove)
using this:
p \<in> carrier (K [X])
?p \<in> carrier (K [X]) \<Longrightarrow> ?p \<in> carrier (poly_ring R)
goal (1 subgoal):
1. p \<in> carrier (poly_ring R)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
polynomial (carrier R) p
goal (1 subgoal):
1. p pdivides q \<Longrightarrow> degree p \<le> degree q
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
polynomial (carrier R) p
goal (1 subgoal):
1. p pdivides q \<Longrightarrow> degree p \<le> degree q
[PROOF STEP]
have "p \<noteq> []" and "r \<noteq> []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p \<noteq> [] &&& r \<noteq> []
[PROOF STEP]
using poly_mult_zero(2)[OF polynomial_incl[OF p]] r(2) assms(4)
[PROOF STATE]
proof (prove)
using this:
poly_mult p [] = []
q = poly_mult p r
q \<noteq> []
goal (1 subgoal):
1. p \<noteq> [] &&& r \<noteq> []
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
p \<noteq> []
r \<noteq> []
goal (1 subgoal):
1. p pdivides q \<Longrightarrow> degree p \<le> degree q
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
polynomial (carrier R) r
q = poly_mult p r
polynomial (carrier R) p
p \<noteq> []
r \<noteq> []
[PROOF STEP]
show "degree p \<le> degree q"
[PROOF STATE]
proof (prove)
using this:
polynomial (carrier R) r
q = poly_mult p r
polynomial (carrier R) p
p \<noteq> []
r \<noteq> []
goal (1 subgoal):
1. degree p \<le> degree q
[PROOF STEP]
using poly_mult_degree_eq[OF carrier_is_subring, of p r]
[PROOF STATE]
proof (prove)
using this:
polynomial (carrier R) r
q = poly_mult p r
polynomial (carrier R) p
p \<noteq> []
r \<noteq> []
\<lbrakk>polynomial (carrier R) p; polynomial (carrier R) r\<rbrakk> \<Longrightarrow> degree (poly_mult p r) = (if p = [] \<or> r = [] then 0 else degree p + degree r)
goal (1 subgoal):
1. degree p \<le> degree q
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
degree p \<le> degree q
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1646, "file": null, "length": 20}
|
Set Warnings "-notation-overridden".
Require Import Coq.Program.Basics.
Require Import Coq.Lists.List.
From Equations Require Import Equations.
Unset Equations With Funext.
Require Import Category.Lib.
Require Import Category.Theory.
Require Import Embed.Theory.Utils.
Require Import Embed.Theory.Btree.
Require Import Embed.Theory.Btree.Functor.
Require Import Embed.Theory.Btree.Monad.
Generalizable All Variables.
Set Universe Polymorphism.
Set Nested Proofs Allowed.
Equations num_leaves {A} (xs : @btree A) : nat :=
num_leaves (bnil _) := S O ;
num_leaves (bcons x y) := num_leaves x + num_leaves y.
Global Transparent num_leaves.
Lemma num_leaves_positive {A} (xs : @btree A) : ~(num_leaves xs = O).
Proof.
revert xs.
fix num_leaves_positive 1.
destruct xs.
rewrite num_leaves_equation_1.
auto.
rewrite num_leaves_equation_2.
intro.
pose (Plus.plus_is_O (num_leaves xs1) (num_leaves xs2) H).
destruct a.
exact (num_leaves_positive xs1 H0).
Qed.
Lemma num_leaves_fmap {A B} {f : A -> B} (xs : @btree A) : num_leaves (fmap f xs) = num_leaves xs.
Proof.
revert xs.
fix num_leaves_fmap 1.
destruct xs.
rewrite fmap_btree_equation_1.
repeat (rewrite num_leaves_equation_1).
reflexivity.
rewrite fmap_btree_equation_2.
repeat (rewrite num_leaves_equation_2).
refine (nat_plus_eq _ _).
exact (num_leaves_fmap _).
exact (num_leaves_fmap _).
Qed.
Equations leaves {A} (xs : @btree A) : list A :=
leaves (bnil x) := cons x nil ;
leaves (bcons x y) := leaves x ++ leaves y.
Fixpoint leaves_fmap {A B} (f : A -> B) (xs : @btree A) :
leaves (fmap_btree f xs) = List.map f (leaves xs).
destruct xs.
rewrite fmap_btree_equation_1.
repeat (rewrite leaves_equation_1).
reflexivity.
rewrite fmap_btree_equation_2.
repeat (rewrite leaves_equation_2).
rewrite map_app.
refine (list_app_eq _ _).
exact (leaves_fmap _ _ _ _).
exact (leaves_fmap _ _ _ _).
Qed.
Lemma eq_num_leaves {A} (xs : @btree A) : length (leaves xs) = num_leaves xs.
Proof.
revert xs.
fix eq_num_leaves 1.
destruct xs.
rewrite leaves_equation_1.
rewrite num_leaves_equation_1.
unfold length.
reflexivity.
rewrite leaves_equation_2.
rewrite num_leaves_equation_2.
rewrite app_length.
refine (nat_plus_eq _ _).
exact (eq_num_leaves _).
exact (eq_num_leaves _).
Qed.
Local Open Scope nat_scope.
(* lemma for zip_btree *)
Lemma num_leaves_bcons_length_l {A B} {x y : @btree A} {ys : list B} : num_leaves (bcons x y) = length ys
-> num_leaves x <= length ys.
intro.
rewrite num_leaves_equation_2 in H.
rewrite <- H.
exact (Plus.le_plus_l _ _).
Qed.
Definition num_leaves_firstn {A B} {x y : @btree A} {zs : list B} (pf : num_leaves (bcons x y) = length zs) :
num_leaves x = length (firstn (num_leaves x) zs) := eq_sym
(firstn_length_le zs (num_leaves_bcons_length_l pf)).
Definition num_leaves_skipn {A B} {x y : @btree A} {zs : list B} (pf : num_leaves (bcons x y) = length zs) :
num_leaves y = length (skipn (num_leaves x) zs) := eq_sym
(eq_trans
(skipn_length (num_leaves x) zs)
(eq_sym
(Minus.plus_minus (length zs) (num_leaves x) (num_leaves y) (eq_sym pf))
)
).
(* list_num_leaves xs = sum (map num_leaves xs) *)
Equations list_num_leaves {A} (xs : list (@btree A)) : nat :=
list_num_leaves nil := O ;
list_num_leaves (cons y ys) := num_leaves y + list_num_leaves ys.
Lemma eq_num_leaves_list_num_leaves {A} (xs : @btree A) : num_leaves xs = list_num_leaves (cons xs nil).
Proof.
rewrite list_num_leaves_equation_2.
rewrite list_num_leaves_equation_1.
auto.
Qed.
Lemma list_num_leaves_cons_positive {A} (x : @btree A) (xs : list (@btree A)) :
list_num_leaves (x :: xs) <> 0%nat.
Proof.
revert xs; revert x.
fix list_num_leaves_cons_positive 2.
destruct xs.
rewrite <- eq_num_leaves_list_num_leaves.
exact (num_leaves_positive _).
rewrite list_num_leaves_equation_2.
intro.
refine (list_num_leaves_cons_positive b xs _).
pose (Plus.plus_is_O _ _ H).
destruct a.
exact H1.
Qed.
Lemma list_num_leaves_map_fmap {A B} {xs : list (@btree A)} (f : A → B) :
list_num_leaves (List.map (fmap[btree_Functor] f) xs) = list_num_leaves xs.
Proof.
revert xs.
fix list_num_leaves_map_fmap 1.
destruct xs.
simpl.
reflexivity.
unfold List.map.
repeat (rewrite list_num_leaves_equation_2).
refine (nat_plus_eq _ _).
exact (num_leaves_fmap _).
exact (list_num_leaves_map_fmap xs).
Qed.
Equations total : list nat -> nat :=
total nil := 0%nat;
total (cons x xs) := x + total xs.
Fixpoint total_app (xs ys : list nat) :
total (xs ++ ys) = (total xs + total ys)%nat.
destruct xs.
{
reflexivity.
}
{
rewrite total_equation_2.
simpl.
rewrite total_equation_2.
rewrite <- PeanoNat.Nat.add_assoc.
f_equal.
exact (total_app _ _).
}
Qed.
Fixpoint list_num_leaves_total {A : Type} (xs : list (@btree A)) :
list_num_leaves xs = total (map num_leaves xs).
destruct xs.
{
rewrite list_num_leaves_equation_1.
simpl.
rewrite total_equation_1.
reflexivity.
}
{
rewrite list_num_leaves_equation_2.
simpl.
rewrite total_equation_2.
rewrite (list_num_leaves_total A xs).
reflexivity.
}
Qed.
(* Local Open Scope nat_scope. *)
Lemma list_num_leaves_app {A} (xs ys : list (@btree A)) :
(list_num_leaves xs + list_num_leaves ys)%nat = list_num_leaves (xs ++ ys).
do 3 (rewrite list_num_leaves_total).
rewrite map_app.
rewrite total_app.
reflexivity.
Qed.
Fixpoint num_leaves_join {A} (xs : @btree (@btree A)) :
num_leaves (join_btree xs) = list_num_leaves (leaves xs).
destruct xs.
rewrite join_btree_equation_1.
rewrite leaves_equation_1.
rewrite list_num_leaves_equation_2.
rewrite list_num_leaves_equation_1.
exact (plus_n_O _).
rewrite join_btree_equation_2.
rewrite leaves_equation_2.
rewrite num_leaves_equation_2.
rewrite <- list_num_leaves_app.
refine (nat_plus_eq _ _).
exact (num_leaves_join _ _).
exact (num_leaves_join _ _).
Qed.
Local Open Scope nat_scope.
Definition list_num_leaves_firstn {A B} {z : @btree A} {zs : list (@btree A)} {ws : list B}
(pf : list_num_leaves (z :: zs) = length ws) :
num_leaves z = length (firstn (num_leaves z) ws) := eq_sym
(firstn_length_le
ws
(eq_rect (num_leaves z + list_num_leaves zs) (fun w => num_leaves z <= w) (Plus.le_plus_l (num_leaves z) (list_num_leaves zs)) (length ws) (eq_trans (eq_sym (list_num_leaves_equation_2 A z zs)) pf))
).
Local Close Scope nat_scope.
Definition list_num_leaves_skipn {A B} {z : @btree A} {zs : list (@btree A)} {ws : list B}
(pf : list_num_leaves (z :: zs) = length ws) :
list_num_leaves zs = length (skipn (num_leaves z) ws) := eq_sym
(eq_trans
(skipn_length (num_leaves z) ws)
(eq_sym
(Minus.plus_minus (length ws) (num_leaves z) (list_num_leaves zs) (eq_sym pf))
)
).
|
{"author": "michaeljklein", "repo": "btree-lattice-experiments", "sha": "769670d3c98591a4ddb3854feea22eae554323f5", "save_path": "github-repos/coq/michaeljklein-btree-lattice-experiments", "path": "github-repos/coq/michaeljklein-btree-lattice-experiments/btree-lattice-experiments-769670d3c98591a4ddb3854feea22eae554323f5/Theory/Btree/Leaves.v"}
|
double precision function HAQggvsqanal(j1,j2,j3,j4)
implicit none
include 'constants.f'
c include 'scale.f'
c include 'masses.f'
c include 'deltar.f'
C--- matrix element squared for 0 --> H + a(j1)+q(j2)+g(j3)+g(j4)
c--- implemented according to arXiv:0906.0008, Eq. (2.23)
integer j1,j2,j3,j4,h1,h2,h3
double complex A0ab(2,2,2),A0ba(2,2,2),
& A41ab(2,2,2),A41ba(2,2,2),A43ab(2,2,2),A43ba(2,2,2)
double precision temp,ren,H4prenorm
call Amplo_AQgg(j1,j2,j3,j4,A0ab,A0ba)
call Ampvirt_AQgg(j1,j2,j3,j4,A41ab,A41ba,A43ab,A43ba)
c--- get renormalization factor
ren=H4prenorm()
temp=0d0
do h1=1,2
do h2=1,2
do h3=1,2
A41ab(h1,h2,h3)=A41ab(h1,h2,h3)+ren*A0ab(h1,h2,h3)
A41ba(h1,h2,h3)=A41ba(h1,h2,h3)+ren*A0ba(h1,h2,h3)
c--- Note: A43 receives no renormalization
temp=temp+dble(Dconjg(A0ab(h1,h2,h3))*
. (V*A41ab(h1,h2,h3)-A41ba(h1,h2,h3)+A43ab(h1,h2,h3))
. +Dconjg(A0ba(h1,h2,h3))*
. (V*A41ba(h1,h2,h3)-A41ab(h1,h2,h3)+A43ba(h1,h2,h3)))
enddo
enddo
enddo
c--- Note: additional factor of 1/4 here due to difference between
c--- definition of overall Hgg coupling C^2 (sentence following (2.1))
c--- and our factor, "Asq" in gg_hgg_v.f
HAQggvsqanal=V*temp/4d0
return
end
|
{"hexsha": "a5900c82bb5e4f6303f7627525c106baf42b2cce", "size": 1380, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "MCFM-JHUGen/src/ggHggvirt/HAQggvsqanal.f", "max_stars_repo_name": "tmartini/JHUGen", "max_stars_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:09:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T19:59:36.000Z", "max_issues_repo_path": "MCFM-JHUGen/src/ggHggvirt/HAQggvsqanal.f", "max_issues_repo_name": "tmartini/JHUGen", "max_issues_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2015-06-24T15:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T04:59:32.000Z", "max_forks_repo_path": "MCFM-JHUGen/src/ggHggvirt/HAQggvsqanal.f", "max_forks_repo_name": "tmartini/JHUGen", "max_forks_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2015-05-04T22:15:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:04:40.000Z", "avg_line_length": 30.6666666667, "max_line_length": 70, "alphanum_fraction": 0.602173913, "num_tokens": 572}
|
import numpy as np
def check_intersect(p1, q1, p2, q2):
def on_segment(p, q, r):
# check whether collinear point q lies within the bounding box of segment pr
if q[0] > np.max([p[0], r[0]]):
return False
if q[0] < np.min([p[0], r[0]]):
return False
if q[1] > np.max([p[1], r[1]]):
return False
if q[1] < np.min([p[1], r[1]]):
return False
return True
def orientation(p, q, r):
val = ((q[1] - p[1]) * (r[0] - q[0])) - ((q[0] - p[0]) * (r[1] - q[1]))
if val > 0:
return 'cw'
elif val < 0:
return 'ccw'
else:
return 'col'
o1 = orientation(p1, q1, p2)
o2 = orientation(p1, q1, q2)
o3 = orientation(p2, q2, p1)
o4 = orientation(p2, q2, q1)
if o1 != o2 and o3 != o4:
return True
if o1 == 'col' and on_segment(p1, p2, q1):
return True
if o2 == 'col' and on_segment(p1, q2, q1):
return True
if o3 == 'col' and on_segment(p2, p1, q2):
return True
if o4 == 'col' and on_segment(p2, q1, q2):
return True
return False
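if __name__ == '__main__':
    # Minimal usage sketch with hypothetical coordinates: crossing diagonals
    # should intersect, parallel segments should not.
    print(check_intersect((0, 0), (2, 2), (0, 2), (2, 0))) # True: the diagonals cross
    print(check_intersect((0, 0), (1, 0), (0, 1), (1, 1))) # False: parallel segments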
|
{"hexsha": "4e6014d191f3353525ac383a53163a9f1603a044", "size": 1143, "ext": "py", "lang": "Python", "max_stars_repo_path": "markergen/lineintersect.py", "max_stars_repo_name": "henrykrumb/polaris-marker-generator", "max_stars_repo_head_hexsha": "d2f454e7b6587fe1dce0da72bbe593e2b92186d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "markergen/lineintersect.py", "max_issues_repo_name": "henrykrumb/polaris-marker-generator", "max_issues_repo_head_hexsha": "d2f454e7b6587fe1dce0da72bbe593e2b92186d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "markergen/lineintersect.py", "max_forks_repo_name": "henrykrumb/polaris-marker-generator", "max_forks_repo_head_hexsha": "d2f454e7b6587fe1dce0da72bbe593e2b92186d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5813953488, "max_line_length": 79, "alphanum_fraction": 0.4558180227, "include": true, "reason": "import numpy", "num_tokens": 412}
|
"""
rivers2stratigraphy GUI -- build river stratigraphy interactively
Stratigraphic model based on LAB models, i.e., geometric channel body is
deposited in "matrix" of floodplain mud. The channel is always fixed to the
basin surface and subsidence is only control on vertical stratigraphy.
Horizontal stratigraphy is set by 1) lateral migration (drawn from a pdf)
and dampened for realism, and 2) avulsion that is set to a fixed value.
written by Andrew J. Moodie
amoodie@rice.edu
Feb 2018
"""
# import matplotlib
# matplotlib.use('TkAgg', warn=False)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from .strat import Strat
from .slider_manager import SliderManager
from . import geom, sedtrans, utils
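# A minimal sketch of the dampened lateral migration described in the module
# docstring. This is illustrative only: the real update lives in Strat, and
# the assumption that Df blends the previous rate with a fresh draw from a
# normal pdf is one plausible reading, not the verified implementation.
def _example_dampened_migration(dxdt_prev, dxdtstd=1.0, Df=0.6):
    """Return a hypothetical dampened lateral migration rate [m/yr]."""
    draw = np.random.normal(0, dxdtstd) # candidate rate drawn from the pdf
    return Df * dxdt_prev + (1 - Df) * draw # dampen the change for realism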
class GUI(object):
"""
main GUI object that selects parameters for initialization and
handles creation of all the needed parts of the model. This class is
initialized below by class Runner if this file is run as __main__
"""
def __init__(self):
# initial conditions
config = utils.Config()
# model run params
config.dt = 100 # timestep in yrs
self._paused = False
# setup params
config.Cf = 0.004 # friction coeff
config.D50 = 300*1e-6
config.Beta = 1.5 # exponent to avulsion function
config.Df = 0.6 # dampening factor to lateral migration rate change
config.dxdtstd = 1 # stdev of lateral migration dist, [m/yr]?
# constants
config.conR = 1.65
config.cong = 9.81
config.conrhof = 1000
config.connu = 1.004e-6
config.Rep = geom.Repfun(config.D50, config.conR, config.cong, config.connu) # particle Reynolds num
# water discharge slider params
config.Qw = config.Qwinit = 1000
config.Qwmin = 200
config.Qwmax = 4000
config.Qwstep = 100
# subsidence slider params
config.sig = config.siginit = 2
config.sigmin = 0
config.sigmax = 5
config.sigstep = 0.2
# avulsion timescale slider params
config.Ta = config.Tainit = 500
config.Tamin = config.dt
config.Tamax = 1500
config.Tastep = 10
# yView slider params
config.yView = config.yViewinit = 100
config.yViewmin = 25
config.yViewmax = 250
config.yViewstep = 25
# basin width slider params
config.Bb = config.Bbinit = 4000 # width of belt (m)
config.Bbmin = 1
config.Bbmax = 10
config.Bbstep = 0.5
# additional initializations
config.Bast = 0 # Basin top level
# setup the figure
plt.rcParams['toolbar'] = 'None'
plt.rcParams['figure.figsize'] = 8, 6
self.fig, self.strat_ax = plt.subplots()
self.fig.canvas.manager.set_window_title('SedEdu -- rivers2stratigraphy')
plt.subplots_adjust(left=0.085, bottom=0.1, top=0.95, right=0.5)
self.strat_ax.set_xlabel("channel belt (km)")
self.strat_ax.set_ylabel("stratigraphy (m)")
plt.ylim(-config.yView, 0.1*config.yView)
plt.xlim(-config.Bb/2, config.Bb/2)
self.strat_ax.xaxis.set_major_formatter( plt.FuncFormatter(
lambda v, x: '{:0.0f}'.format(v / 1000)) )
# add sliders
self.config = config
self.sm = SliderManager(self)
def pause_anim(self, event):
"""
pause animation by altering hidden var
"""
if self._paused:
self._paused = False
else:
self._paused = True
class Runner(object):
def __init__(self):
gui = GUI()
# time looping
gui.strat = Strat(gui)
anim = animation.FuncAnimation(gui.fig, gui.strat,
interval=100, blit=False,
save_count=None)
plt.show()
if __name__ == '__main__':
runner = Runner()
|
{"hexsha": "e6184b2a71fdce0bbe8d2d32d23a410ab998cd62", "size": 4054, "ext": "py", "lang": "Python", "max_stars_repo_path": "rivers2stratigraphy/gui.py", "max_stars_repo_name": "amoodie/rivers2stratigraphy", "max_stars_repo_head_hexsha": "3978f0ea4bd087332ee0215f5a003d7f63152598", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-04-13T19:17:14.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-26T04:36:54.000Z", "max_issues_repo_path": "rivers2stratigraphy/gui.py", "max_issues_repo_name": "amoodie/rivers2stratigraphy", "max_issues_repo_head_hexsha": "3978f0ea4bd087332ee0215f5a003d7f63152598", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2018-03-01T03:56:41.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-24T16:27:53.000Z", "max_forks_repo_path": "rivers2stratigraphy/gui.py", "max_forks_repo_name": "amoodie/rivers2stratigraphy", "max_forks_repo_head_hexsha": "3978f0ea4bd087332ee0215f5a003d7f63152598", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5912408759, "max_line_length": 108, "alphanum_fraction": 0.6075481006, "include": true, "reason": "import numpy", "num_tokens": 1034}
|
import time
import matplotlib.pyplot as plt
import numpy as np
import torch
from gst_appsink_display import run_pipeline
def main(args):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(f"Running inference on device: {device}")
model = torch.hub.load("intel-isl/MiDaS", "MiDaS", pretrained=True)
model.to(device)
model.eval()
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
transform = midas_transforms.default_transform
color_map = plt.get_cmap("inferno")
def user_callback(image_data):
if image_data is None:
return None
input_batch = transform(image_data).to(device)
start_time = time.monotonic()
with torch.no_grad():
prediction = model(input_batch)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=image_data.shape[:2],
mode="bicubic",
align_corners=False,
).squeeze()
inference_time_ms = (time.monotonic() - start_time) * 1000
print(f"Inference time: {inference_time_ms:.2f}ms")
output = prediction.cpu().numpy()
depth_min = output.min()
depth_max = output.max()
output = (output - depth_min) / max(depth_max - depth_min, 1e-8) # avoid divide-by-zero on a flat depth map
output = color_map(output)
output = output[:, :, :3]
output = output * 255
output = output.astype(np.uint8)
return np.concatenate((image_data, output), axis=1)
run_pipeline(
user_callback,
src_frame_rate=args.frame_rate,
src_height=args.source_height,
src_width=args.source_width,
binning_level=args.binning_level,
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--source_width", type=int)
parser.add_argument("--source_height", type=int)
parser.add_argument("--frame_rate", type=int)
parser.add_argument("--binning_level", type=int, default=2)
main(parser.parse_args())
|
{"hexsha": "8979cf9fac7e89353ca1358e278a6e4709879df7", "size": 2118, "ext": "py", "lang": "Python", "max_stars_repo_path": "applications/relative-depth.py", "max_stars_repo_name": "BrianOfrim/gst_pyspinsrc", "max_stars_repo_head_hexsha": "0021351d699d2b9563041828a869ce9e770d87b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-04-29T16:31:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-27T13:07:20.000Z", "max_issues_repo_path": "applications/relative-depth.py", "max_issues_repo_name": "BrianOfrim/gst_pyspinsrc", "max_issues_repo_head_hexsha": "0021351d699d2b9563041828a869ce9e770d87b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-08T09:57:34.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-08T19:08:17.000Z", "max_forks_repo_path": "applications/relative-depth.py", "max_forks_repo_name": "BrianOfrim/gst_pyspinsrc", "max_forks_repo_head_hexsha": "0021351d699d2b9563041828a869ce9e770d87b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-05-06T14:36:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-05T20:25:15.000Z", "avg_line_length": 25.5180722892, "max_line_length": 87, "alphanum_fraction": 0.6378659112, "include": true, "reason": "import numpy", "num_tokens": 469}
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import random
import time
import numpy as np
from collections import defaultdict, deque
from quoridor import Quoridor
from policy_value_net import PolicyValueNet
from mcts import MCTSPlayer
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from constant import *
iter_count = 0
writer = SummaryWriter()
class TrainPipeline(object):
def __init__(self, init_model=None):
self.game = Quoridor()
self.learn_rate = 2e-3
self.lr_multiplier = 1.0
self.temp = 1.0
self.n_playout = 200
self.c_puct = 5
self.buffer_size = 10000
self.data_buffer = deque(maxlen=self.buffer_size)
self.play_batch_size = 1
self.kl_targ = 0.02
self.check_freq = 10
self.game_batch_num = 1000
self.best_win_ratio = 0.0
self.pure_mcts_playout_num = 1000
self.old_probs = 0
self.new_probs = 0
self.first_trained = False
if init_model:
self.policy_value_net = PolicyValueNet(model_file=init_model)
else:
self.policy_value_net = PolicyValueNet()
self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn, c_puct=self.c_puct,
n_playout=self.n_playout, is_selfplay=1)
def get_equi_data(self, play_data):
extend_data = []
for i, (state, mcts_prob, winner) in enumerate(play_data):
wall_state = state[:3,:BOARD_SIZE - 1,:BOARD_SIZE - 1]
dist_state1 = np.reshape(state[(6 + (WALL_NUM + 1) * 2), :BOARD_SIZE, :BOARD_SIZE], (1, BOARD_SIZE, BOARD_SIZE))
dist_state2 = np.reshape(state[(7 + (WALL_NUM + 1) * 2), :BOARD_SIZE, :BOARD_SIZE], (1, BOARD_SIZE, BOARD_SIZE))
# horizontally flipped game
flipped_wall_state = []
for i in range(3):
wall_padded = np.fliplr(wall_state[i])
wall_padded = np.pad(wall_padded, (0,1), mode='constant', constant_values=0)
flipped_wall_state.append(wall_padded)
flipped_wall_state = np.array(flipped_wall_state)
player_position = state[3:5, :,:]
flipped_player_position = []
for i in range(2):
flipped_player_position.append(np.fliplr(player_position[i]))
flipped_player_position = np.array(flipped_player_position)
h_equi_state = np.vstack([flipped_wall_state, flipped_player_position, state[5:, :,:]])
h_equi_mcts_prob = np.copy(mcts_prob)
h_equi_mcts_prob[11] = mcts_prob[10] # SE to SW
h_equi_mcts_prob[10] = mcts_prob[11] # SW to SE
h_equi_mcts_prob[9] = mcts_prob[8] # NE to NW
h_equi_mcts_prob[8] = mcts_prob[9] # NW to NE
h_equi_mcts_prob[7] = mcts_prob[6] # EE to WW
h_equi_mcts_prob[6] = mcts_prob[7] # WW to EE
h_equi_mcts_prob[3] = mcts_prob[2] # E to W
h_equi_mcts_prob[2] = mcts_prob[3] # W to E
h_wall_actions = h_equi_mcts_prob[12:12 + (BOARD_SIZE-1) ** 2].reshape(BOARD_SIZE-1, BOARD_SIZE-1)
v_wall_actions = h_equi_mcts_prob[12 + (BOARD_SIZE-1) ** 2:].reshape(BOARD_SIZE-1, BOARD_SIZE -1)
flipped_h_wall_actions = np.fliplr(h_wall_actions)
flipped_v_wall_actions = np.fliplr(v_wall_actions)
h_equi_mcts_prob[12:] = np.hstack([flipped_h_wall_actions.flatten(), flipped_v_wall_actions.flatten()])
# Vertically flipped game
flipped_wall_state = []
for i in range(3):
wall_padded = np.flipud(wall_state[i])
wall_padded = np.pad(wall_padded, (0,1), mode='constant', constant_values=0)
flipped_wall_state.append(wall_padded)
flipped_wall_state = np.array(flipped_wall_state)
flipped_player_position = []
for i in range(2):
flipped_player_position.append(np.flipud(player_position[1-i]))
flipped_player_position = np.array(flipped_player_position)
cur_player = (np.ones((BOARD_SIZE, BOARD_SIZE)) - state[5 + 2* (WALL_NUM+1),:,:]).reshape(-1,BOARD_SIZE, BOARD_SIZE)
v_equi_state = np.vstack([flipped_wall_state, flipped_player_position, state[5+(WALL_NUM+1):5 + 2*(WALL_NUM+1), :,:], state[5:5+(WALL_NUM+1),:,:], cur_player, dist_state2, dist_state1])
# v_equi_state = np.vstack([flipped_wall_state, flipped_player_position, state[5:(5 + (WALL_NUM+1) * 2), :, :], cur_player, state[:(6 + (WALL_NUM + 1) * 2), :, :]])
v_equi_mcts_prob = np.copy(mcts_prob)
v_equi_mcts_prob[11] = mcts_prob[9] # SE to NE
v_equi_mcts_prob[10] = mcts_prob[8] # SW to NW
v_equi_mcts_prob[9] = mcts_prob[11] # NE to SE
v_equi_mcts_prob[8] = mcts_prob[10] # NW to SW
v_equi_mcts_prob[5] = mcts_prob[4] # NN to SS
v_equi_mcts_prob[4] = mcts_prob[5] # SS to NN
v_equi_mcts_prob[1] = mcts_prob[0] # N to S
v_equi_mcts_prob[0] = mcts_prob[1] # S to N
h_wall_actions = v_equi_mcts_prob[12:12 + (BOARD_SIZE-1) ** 2].reshape(BOARD_SIZE-1, BOARD_SIZE-1)
v_wall_actions = v_equi_mcts_prob[12 + (BOARD_SIZE-1) ** 2:].reshape(BOARD_SIZE-1, BOARD_SIZE -1)
flipped_h_wall_actions = np.flipud(h_wall_actions)
flipped_v_wall_actions = np.flipud(v_wall_actions)
v_equi_mcts_prob[12:] = np.hstack([flipped_h_wall_actions.flatten(), flipped_v_wall_actions.flatten()])
## Horizontally-vertically flipped game
wall_state = state[:3,:BOARD_SIZE - 1,:BOARD_SIZE - 1]
flipped_wall_state = []
for i in range(3):
wall_padded = np.fliplr(np.flipud(wall_state[i]))
wall_padded = np.pad(wall_padded, (0,1), mode='constant', constant_values=0)
flipped_wall_state.append(wall_padded)
flipped_wall_state = np.array(flipped_wall_state)
flipped_player_position = []
for i in range(2):
flipped_player_position.append(np.fliplr(np.flipud(player_position[1-i])))
flipped_player_position = np.array(flipped_player_position)
cur_player = (np.ones((BOARD_SIZE, BOARD_SIZE)) - state[5 + 2*(WALL_NUM+1),:,:]).reshape(-1,BOARD_SIZE, BOARD_SIZE)
hv_equi_state = np.vstack([flipped_wall_state, flipped_player_position, state[5 + (WALL_NUM+1):5 + 2*(WALL_NUM+1), :,:], state[5:5+(WALL_NUM+1),:,:], cur_player, dist_state2, dist_state1])
# hv_equi_state = np.vstack([flipped_wall_state, flipped_player_position, state[5:(5 + (WALL_NUM+1) * 2), :, :], cur_player, state[(6 + (WALL_NUM + 1) * 2):, :, :]])
hv_equi_mcts_prob = np.copy(mcts_prob)
hv_equi_mcts_prob[11] = mcts_prob[8] # SE to NW
hv_equi_mcts_prob[10] = mcts_prob[9] # SW to NE
hv_equi_mcts_prob[9] = mcts_prob[10] # NE to SW
hv_equi_mcts_prob[8] = mcts_prob[11] # NW to SE
hv_equi_mcts_prob[7] = mcts_prob[6] # EE to WW
hv_equi_mcts_prob[6] = mcts_prob[7] # WW to EE
hv_equi_mcts_prob[5] = mcts_prob[4] # NN to SS
hv_equi_mcts_prob[4] = mcts_prob[5] # SS to NN
hv_equi_mcts_prob[3] = mcts_prob[2] # E to W
hv_equi_mcts_prob[2] = mcts_prob[3] # W to E
hv_equi_mcts_prob[1] = mcts_prob[0] # N to S
hv_equi_mcts_prob[0] = mcts_prob[1] # S to N
h_wall_actions = hv_equi_mcts_prob[12:12 + (BOARD_SIZE-1) ** 2].reshape(BOARD_SIZE-1, BOARD_SIZE-1)
v_wall_actions = hv_equi_mcts_prob[12 + (BOARD_SIZE-1) ** 2:].reshape(BOARD_SIZE-1, BOARD_SIZE -1)
flipped_h_wall_actions = np.fliplr(np.flipud(h_wall_actions))
flipped_v_wall_actions = np.fliplr(np.flipud(v_wall_actions))
hv_equi_mcts_prob[12:] = np.hstack([flipped_h_wall_actions.flatten(), flipped_v_wall_actions.flatten()])
###########
extend_data.append((state, mcts_prob, winner))
extend_data.append((h_equi_state, h_equi_mcts_prob, winner))
extend_data.append((v_equi_state, v_equi_mcts_prob, winner * -1))
extend_data.append((hv_equi_state, hv_equi_mcts_prob, winner * -1))
return extend_data
def collect_selfplay_data(self, n_games=1):
for i in range(n_games):
winner, play_data = self.game.start_self_play(self.mcts_player, temp=self.temp)
play_data = list(play_data)[:]
self.episode_len = len(play_data)
play_data = self.get_equi_data(play_data)
self.data_buffer.extend(play_data)
print("{}th game finished. Current episode length: {}, Length of data buffer: {}".format(i, self.episode_len, len(self.data_buffer)))
def policy_update(self):
dataloader = DataLoader(self.data_buffer, batch_size=BATCH_SIZE, shuffle=False, pin_memory=True)
valloss_acc = 0
polloss_acc = 0
entropy_acc = 0
for i in range(NUM_EPOCHS):
self.old_probs = self.new_probs
if self.first_trained:
kl = np.mean(np.sum(self.old_probs * (np.log(self.old_probs + 1e-10) - np.log(self.new_probs + 1e-10)), axis=1))
if kl > self.kl_targ * 4:
break
if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
self.lr_multiplier /= 1.5
elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
self.lr_multiplier *= 1.5
for state, mcts_prob, winner in dataloader:
valloss, polloss, entropy = self.policy_value_net.train_step(state, mcts_prob, winner, self.learn_rate * self.lr_multiplier)
self.new_probs, new_v = self.policy_value_net.policy_value(state)
global iter_count
writer.add_scalar("Val Loss/train", valloss.item(), iter_count)
writer.add_scalar("Policy Loss/train", polloss.item(), iter_count)
writer.add_scalar("Entropy/train", entropy, iter_count)
writer.add_scalar("LR Multiplier", self.lr_multiplier, iter_count)
iter_count += 1
valloss_acc += valloss.item()
polloss_acc += polloss.item()
entropy_acc += entropy.item()
self.first_trained = True
valloss_mean = valloss_acc / (len(dataloader) * NUM_EPOCHS)
polloss_mean = polloss_acc / (len(dataloader) * NUM_EPOCHS)
entropy_mean = entropy_acc / (len(dataloader) * NUM_EPOCHS)
#explained_var_old = 1 - np.var(np.array(winner_batch) - old_v.flatten()) / np.var(np.array(winner_batch))
#explained_var_new = 1 - np.var(np.array(winner_batch) - new_v.flatten()) / np.var(np.array(winner_batch))
#print( "kl:{:.5f}, lr_multiplier:{:.3f}, value loss:{}, policy loss:[], entropy:{}".format(
# kl, self.lr_multiplier, valloss, polloss, entropy, explained_var_old, explained_var_new))
return valloss_mean, polloss_mean, entropy_mean
def run(self):
try:
self.collect_selfplay_data(3)
count = 0
for i in range(self.game_batch_num):
self.collect_selfplay_data(self.play_batch_size) # collect_s
print("batch i:{}, episode_len:{}".format(i + 1, self.episode_len))
if len(self.data_buffer) > BATCH_SIZE:
valloss, polloss, entropy = self.policy_update()
print("VALUE LOSS: %0.3f " % valloss, "POLICY LOSS: %0.3f " % polloss, "ENTROPY: %0.3f" % entropy)
#writer.add_scalar("Val Loss/train", valloss.item(), i)
#writer.add_scalar("Policy Loss/train", polloss.item(), i)
#writer.add_scalar("Entory/train", entropy, i)
if (i + 1) % self.check_freq == 0:
count += 1
print("current self-play batch: {}".format(i + 1))
# win_ratio = self.policy_evaluate()
# Add generation to filename
self.policy_value_net.save_model('model_7x7_' + str(count) + '_' + str("%0.3f_" % (valloss+polloss) + str(time.strftime('%Y-%m-%d', time.localtime(time.time())))))
except KeyboardInterrupt:
print('\n\rquit')
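# Minimal sketch (toy 3x3 plane, not the real state tensor) of the symmetry
# augmentation idea in get_equi_data: flipping a board plane left-right must
# be paired with swapping the mirrored action probabilities. The move labels
# here are hypothetical stand-ins for the real action indices.
def _example_flip_augmentation():
    plane = np.arange(9).reshape(3, 3) # toy occupancy plane
    flipped = np.fliplr(plane) # mirrored board state
    probs = {'E': 0.7, 'W': 0.3} # toy move probabilities
    flipped_probs = {'E': probs['W'], 'W': probs['E']} # swap mirrored actions
    return flipped, flipped_probs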
# Start
if __name__ == '__main__':
training_pipeline = TrainPipeline(init_model=None)
training_pipeline.run()
|
{"hexsha": "25ca34da3f193474453d1a93fb7d6bf3d08ee257", "size": 13041, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_sk.py", "max_stars_repo_name": "Clarit7/AlphaZero_Quoridor", "max_stars_repo_head_hexsha": "838763ee5554a98173ae166c41ff52ecddc35424", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_sk.py", "max_issues_repo_name": "Clarit7/AlphaZero_Quoridor", "max_issues_repo_head_hexsha": "838763ee5554a98173ae166c41ff52ecddc35424", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_sk.py", "max_forks_repo_name": "Clarit7/AlphaZero_Quoridor", "max_forks_repo_head_hexsha": "838763ee5554a98173ae166c41ff52ecddc35424", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3571428571, "max_line_length": 201, "alphanum_fraction": 0.5958132045, "include": true, "reason": "import numpy", "num_tokens": 3430}
|
$ a_1 = x $
$ a_2 = \frac{1}{2} x + \frac{\sqrt{3}}{2} y $
```python
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import RegularPolygon
import copy
from matplotlib.animation import FuncAnimation
%matplotlib notebook
```
```python
x, y = sp.symbols('x y')
a1 = x
a2 = x/2 + sp.sqrt(3)/2 * y
```
```python
d_test = {}
d_test[2*a1 + 3*a2] = 1
```
:)
## Part 1
```python
example_input = [
"sesenwnenenewseeswwswswwnenewsewsw",
"neeenesenwnwwswnenewnwwsewnenwseswesw",
"seswneswswsenwwnwse",
"nwnwneseeswswnenewneswwnewseswneseene",
"swweswneswnenwsewnwneneseenw",
"eesenwseswswnenwswnwnwsewwnwsene",
"sewnenenenesenwsewnenwwwse",
"wenwwweseeeweswwwnwwe",
"wsweesenenewnwwnwsenewsenwwsesesenwne",
"neeswseenwwswnwswswnw",
"nenwswwsewswnenenewsenwsenwnesesenew",
"enewnwewneswsewnwswenweswnenwsenwsw",
"sweneswneswneneenwnewenewwneswswnese",
"swwesenesewenwneswnwwneseswwne",
"enesenwswwswneneswsenwnewswseenwsese",
"wnwnesenesenenwwnenwsewesewsesesew",
"nenewswnwewswnenesenwnesewesw",
"eneswnwswnwsenenwnwnwwseeswneewsenese",
"neswnwewnwnwseenwseesewsenwsweewe",
"wseweeenwnesenwwwswnew",
]
```
```python
with open('../inputs/24.txt', 'r') as f:
real_input = f.readlines()
```
```python
def parse_line(line):
i = 0
pos = 0
while i != len(line):
if line[i] == 's':
if line[i+1] == 'w':
pos -= a2
elif line[i+1] == 'e':
pos = pos - a2 + a1
i += 2
elif line[i] == 'n':
if line[i+1] == 'w':
pos = pos + a2 - a1
elif line[i+1] == 'e':
pos += a2
i += 2
elif line[i] == 'w':
pos -= a1
i += 1
elif line[i] == 'e':
pos += a1
i += 1
else:
print('Unknown direction: {}'.format(line[i]))
i+= 1
return pos
print(parse_line('nenenene'))
print(parse_line('eeww'))
print(parse_line('nenw'))
print(parse_line('seswnenw'))
```
2*x + 2*sqrt(3)*y
0
sqrt(3)*y
0
```python
def paint_hexes(lines):
d = {}
for line in lines:
pos = parse_line(line.strip())
if d.get(pos, None):
d[pos] = False
else:
d[pos] = True
return d
```
```python
def count_hexes(d):
return sum(d.values())
```
```python
d = paint_hexes(example_input)
sum(d.values())
```
10
```python
hexes = paint_hexes(real_input)
sum(hexes.values())
```
282
```python
def plot_hexes(hexes, LIM=15):
fig, ax = plt.subplots(1)
ax.set_aspect('equal')
for h in hexes.keys():
xx, yy = h.subs([(x,1),(y,0)]), h.subs([(x,0),(y,1)])
if xx**2 + yy**2 > LIM**2:
continue
hex = RegularPolygon((xx, yy), numVertices=6, radius=0.5,
orientation=np.radians(0),
facecolor='white' if not hexes[h] else 'black', alpha=0.2, edgecolor='w')
ax.add_patch(hex)
ax.set_xlim([-LIM,LIM])
ax.set_ylim([-LIM,LIM])
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
plt.tick_params(
axis='y', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
left=False, # ticks along the bottom edge are off
right=False, # ticks along the top edge are off
labelleft=False) # labels along the bottom edge are off
plt.show()
def plot_ani(hex_history, LIM=60):
fig, ax = plt.subplots(1)
ax.set_aspect('equal')
ax.set_xlim([-LIM,LIM])
ax.set_ylim([-LIM,LIM])
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
plt.tick_params(
axis='y', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
left=False, # ticks along the bottom edge are off
right=False, # ticks along the top edge are off
labelleft=False) # labels along the bottom edge are off
def init():
frames = []
for h in hex_history[0].keys():
xx, yy = h.subs([(x,1),(y,0)]), h.subs([(x,0),(y,1)])
if xx**2 + yy**2 > LIM**2:
continue
hex = RegularPolygon((xx, yy), numVertices=6, radius=0.5,
orientation=np.radians(0),
                                 facecolor='white' if not hex_history[0][h] else 'black', alpha=0.2, edgecolor='w')
frames.append(ax.add_patch(hex))
return frames
def animate(i):
print('Animating frame {}...'.format(i))
frames = []
for h in hex_history[i].keys():
xx, yy = h.subs([(x,1),(y,0)]), h.subs([(x,0),(y,1)])
if xx**2 + yy**2 > LIM**2:
continue
hex = RegularPolygon((xx, yy), numVertices=6, radius=0.5,
orientation=np.radians(0),
facecolor='white' if not hex_history[i][h] else 'black', alpha=0.2, edgecolor='w')
frames.append(ax.add_patch(hex))
return frames
anim = FuncAnimation(fig, animate, init_func=init,
frames=100, interval=20, blit=True)
plt.show()
anim.save('./animation.gif', writer='imagemagick', fps=60)
plot_hexes(hexes)
```
<IPython.core.display.Javascript object>
```
Any black tile with zero or more than 2 black tiles immediately adjacent to it is flipped to white.
Any white tile with exactly 2 black tiles immediately adjacent to it is flipped to black.
```
```python
def count_painted_neighbors(d, key):
return sum([d.get(key+a1, 0),
d.get(key-a1, 0),
d.get(key+a2, 0),
d.get(key-a2, 0),
d.get(key+a1-a2, 0),
d.get(key-a1+a2, 0)])
count_painted_neighbors(hexes, 0)
```
3
```python
def decide_tile(d, key):
count = count_painted_neighbors(d, key)
if d.get(key, False):
if not 1 <= count <= 2:
return False
else:
return True
else:
if count == 2:
return True
else:
return False
decide_tile(hexes, 4*x)
```
True
```python
def apply_round(d, R=60):
new_d = copy.deepcopy(d)
visited = set()
for i in range(-R,R+1):
for j in range(-R,R+1):
coord = a1*i + a2*j
if coord in visited:
continue
else:
visited.add(coord)
new_d[coord] = decide_tile(d, coord)
return new_d
```
```python
plot_hexes(hexes)
new_hexes = apply_round(hexes)
plot_hexes(new_hexes)
```
<IPython.core.display.Javascript object>
<IPython.core.display.Javascript object>
```python
hh = copy.deepcopy(hexes)
hist = [hh]
for i in range(100):
print('Running step {}'.format(i+1))
hh = apply_round(hh, R=30)
hist.append(hh)
```
    Running step 1
    Running step 2
    ...
    Running step 100
```python
sum(hh.values())
```
3445
```python
plot_hexes(hh, 30)
```
<IPython.core.display.Javascript object>
```python
plot_ani(hist)
```
<IPython.core.display.Javascript object>
    Animating frame 0...
    Animating frame 1...
    ...
    Animating frame 99...
```python
```
|
{"hexsha": "f6a2cc00b88f68cc724cafa930013a664d65fca6", "size": 455334, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "aoc2020/python/Problem 24.ipynb", "max_stars_repo_name": "orrinjelo/AdventOfCode2020", "max_stars_repo_head_hexsha": "8a6326bb9926a812142667ee82868d4b59d28f54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-14T21:04:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-14T21:04:21.000Z", "max_issues_repo_path": "aoc2020/python/Problem 24.ipynb", "max_issues_repo_name": "orrinjelo/AdventOfCode2020", "max_issues_repo_head_hexsha": "8a6326bb9926a812142667ee82868d4b59d28f54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aoc2020/python/Problem 24.ipynb", "max_forks_repo_name": "orrinjelo/AdventOfCode2020", "max_forks_repo_head_hexsha": "8a6326bb9926a812142667ee82868d4b59d28f54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 97.4601883562, "max_line_length": 94331, "alphanum_fraction": 0.7559352036, "converted": true, "num_tokens": 3561}
|
! Copyright 2014 College of William and Mary
!
! Licensed under the Apache License, Version 2.0 (the "License");
! you may not use this file except in compliance with the License.
! You may obtain a copy of the License at
!
! http://www.apache.org/licenses/LICENSE-2.0
!
! Unless required by applicable law or agreed to in writing, software
! distributed under the License is distributed on an "AS IS" BASIS,
! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
! See the License for the specific language governing permissions and
! limitations under the License.
!
!****************************************************************************************
! *
! Read in (x,y,time) from station.xyt (time in sec); e.g., casts;
! for 3D variables (surface values for 2D variables) DEFINED AT NODES or ELEM.
! Interpolation in time, and
! add extra times before and after to examine phase errors.
! Works for mixed tri/quad outputs from scribe I/O versions.
! Inputs: (1) nc files;
! (2) station.xyt (bp format): make sure all times (in sec) are after 1st record (to ensure interpolation in time);
! pad extra days before and after if necessary.
! (3) read_output_xyt.in:
! 1st line: variable name (e.g. elev)\n
! 2nd line: invalid value (for out of domain, dry etc)\n
! 3rd line: window (hours) b4 and after the cast, stride (hrs) - used to
!                               examine the phase error. If window=0, each cast is repeated twice and
!                               there are no other extra casts. \n
! 4th line: inode_elem (1: node based; 2: elem based)
! (4) vgrid.in (in this dir or ../)
! Outputs: fort.18; fort.11 (fatal errors); fort.12: nonfatal errors.
! The total # of 'virtual' casts for each actual cast is 2*window/stride+2
!
! ifort -mcmodel=medium -assume byterecl -CB -O2 -o read_output10_xyt.exe ../UtilLib/extract_mod2.f90 ../UtilLib/compute_zcor.f90 ../UtilLib/pt_in_poly_test.f90 read_output10_xyt.f90 -I$NETCDF/include -I$NETCDF_FORTRAN/include -L$NETCDF_FORTRAN/lib -L$NETCDF/lib -lnetcdf -lnetcdff
!****************************************************************************************
!
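!   Illustrative example inputs (hypothetical values, following the formats
!   documented above; not part of the original file):
!     read_output_xyt.in:
!       elev          !1st line: variable name
!       -9999.        !2nd line: invalid value (out of domain, dry etc)
!       2. 0.5        !3rd line: window (hrs) b4/after each cast, stride (hrs)
!       1             !4th line: inode_elem (1: node based; 2: elem based)
!     station.xyt: a header line, the # of casts, then one "id x y time(sec)" per cast:
!       casts
!       2
!       1 350000. 290000. 86400.
!       2 351000. 291000. 90000.
!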
program read_out
use netcdf
use extract_mod2
use compute_zcor
use pt_in_poly_test
character(len=30) :: file63,varname,file62
character(len=12) :: it_char
integer,allocatable :: kbp(:),kbp00(:),node3(:,:),iep(:),iday(:,:),irecord(:,:),irank_read(:), &
&i34(:),elnode(:,:)
real,allocatable :: sigma(:),cs(:),ztot(:),out2(:,:),eta2(:),arco(:,:),ztmp(:),x00(:),y00(:), &
&out4(:),t00(:),times(:,:),sigma_lcl(:,:),ztmp2(:,:),outvar(:,:),dp(:)
real*8,allocatable :: timeout(:),xnd(:),ynd(:)
integer :: nodel(3),dimids(100),idims(100),char_len,start_2d(2),start_3d(3),start_4d(4), &
&count_2d(2),count_3d(3),count_4d(4)
open(10,file='read_output_xyt.in',status='old')
read(10,'(a30)')varname
! Junk used for 3D variables: below bottom; dry spot; no parents
read(10,*)rjunk
read(10,*)window,wstride !in hours
read(10,*)inode_elem
close(10)
if(icomb/=0.and.icomb/=1) stop 'Unknown icomb'
varname=adjustl(varname); len_var=len_trim(varname)
if(wstride==0) stop 'wstride=0'
nextra=2*window/wstride+1 !extra casts in addition to each cast (for phase error)
!... Get basic info from out2d*.nc
!Returned vars: ne,np,ns,nrec,[xnd ynd dp](np),
!elnode,i34,nvrt,h0,dtout,kbp
call get_dims(1,np,ne,ns,nvrt,h0)
allocate(xnd(np),ynd(np),dp(np),kbp(np),i34(ne),elnode(4,ne),stat=istat)
if(istat/=0) stop 'alloc (1)'
call readheader(1,np,ne,ns,kbp,i34,elnode,nrec,xnd,ynd,dp,dtout)
print*, 'After header:',ne,np,ns,nvrt,nrec,i34(ne), &
&elnode(1:i34(ne),ne),h0,xnd(np),ynd(np),dp(np),dtout !,start_time
! Read in station.xyt
open(10,file='station.xyt',status='old')
read(10,*)
read(10,*) nxy
nxy2=nxy*(1+nextra)
allocate(x00(nxy2),y00(nxy2),t00(nxy2),eta2(np),stat=istat)
      if(istat/=0) stop 'Failed to allocate (1)'
do i=1,nxy
read(10,*)k,xtmp,ytmp,ttmp
! Check if time is before first record
if(ttmp<dtout) then
write(11,*)'Time before first record; try to pad extra day:',i,ttmp
stop
endif
indx=(i-1)*(1+nextra)+1
x00(indx)=xtmp
y00(indx)=ytmp
t00(indx)=ttmp !time in sec
!Add extra casts
do j=1,nextra
indx=indx+1
x00(indx)=xtmp
y00(indx)=ytmp
t00(indx)=max(dtout,ttmp-window*3600+(j-1)*wstride*3600) !also need to ensure it's <last time
enddo !j
enddo !i
close(10)
nxy=nxy2
! print*, 'i23d=',i23d,' nrec= ',nrec
! Read in vgrid.in
last_dim=max(np,ne,ns)
allocate(timeout(nrec),ztot(nvrt),sigma(nvrt),sigma_lcl(nvrt,np),kbp00(np),outvar(nvrt,last_dim), &
&node3(nxy,3),arco(nxy,3),iep(nxy))
outvar=-huge(1.0)
call get_vgrid_single('vgrid.in',np,nvrt,ivcor,kz,h_s,h_c,theta_b,theta_f,ztot,sigma,sigma_lcl,kbp)
allocate(ztmp(nvrt),ztmp2(nvrt,3),out2(2,nvrt),out4(nvrt),iday(2,nxy), &
&irecord(2,nxy),times(2,nxy),stat=istat)
      if(istat/=0) stop 'Failed to allocate (2)'
! Calculate kbp00
if(ivcor==1) then
kbp00=kbp
else
do i=1,np
!Use large eta to get true bottom
call zcor_SZ_single(dp(i),1.e8,h0,h_s,h_c,theta_b,theta_f,kz,nvrt,ztot,sigma, &
&ztmp(:),idry2,kbp00(i))
enddo !i
endif !ivcor
!... Find parent element for (x00,y00)
iep=0
! arco=1./3 !initialize for pts without parents
! do l=1,nxy
! node3(l,1:3)=elnode(1:3,1) !initialize for pts without parents
! enddo !l
do i=1,ne
do l=1,nxy
if(iep(l)/=0) cycle
call pt_in_poly_single(i34(i),real(xnd(elnode(1:i34(i),i))), &
&real(ynd(elnode(1:i34(i),i))),x00(l),y00(l),inside,arco(l,1:3),nodel)
if(inside==1) then
iep(l)=i
!print*, 'Found:',l,arco(l,1:3),nodel
node3(l,1:3)=elnode(nodel(1:3),i)
endif !inside
enddo !l; build pts
ifl=0 !flag
do l=1,nxy
if(iep(l)==0) then
ifl=1
exit
endif
enddo !l
if(ifl==0) exit
enddo !i=1,ne
iabort=0
do j=1,nxy
if(iep(j)<=0) then
write(11,*)'Cannot find a parent for pt:',j,x00(j),y00(j)
iabort=1
endif
enddo !j
if(iabort==1) stop 'check fort.11 for pts outside'
!... Compute stack and record # for each pt
do i=1,nxy
! Check if time is before first record
! if(t00(i)<dtout) then
!      write(11,*)'Time before first record; try to pad extra day (0):',i,t00(i)
!!'
! stop
! endif
! Lower and upper bound stacks and record #s for t00(i)
iday(1,i)=(t00(i)-dtout)/nrec/dtout+1
if(iday(1,i)<1) then
write(11,*)'Make sure cast time is after 1st record:',i,t00(i)
stop
else
irecord(1,i)=(t00(i)-(iday(1,i)-1)*nrec*dtout)/dtout
!Bounding record time just b4 the cast time, corresponding to record
!irecord(1,i) in stack iday(1,i)
times(1,i)=((iday(1,i)-1)*nrec+irecord(1,i))*dtout
iday(2,i)=t00(i)/nrec/dtout+1
irecord(2,i)=(t00(i)-(iday(2,i)-1)*nrec*dtout)/dtout+1
!Bounding record time just after the cast time, corresponding to record
!irecord(2,i) in stack iday(2,i). Note that irecord(2,i)
!may<irecord(1,i) (e.g. t00 before 1st record of iday(2,i))
times(2,i)=((iday(2,i)-1)*nrec+irecord(2,i))*dtout
endif
if(irecord(1,i)>nrec.or.irecord(2,i)>nrec) then
write(11,*)'Record # overflow: ',i,irecord(:,i)
stop
endif
if(t00(i)<times(1,i).or.t00(i)>times(2,i)) then
write(11,*)'Wrong time bounds:',i,t00(i),times(:,i),iday(:,i),irecord(:,i)
stop
endif
enddo !i=1,nxy
!... Time iteration
!...
do i=1,nxy
loop1: do l=1,2 !2 times
print*, 'reading stack ',iday(l,i),' for point ',i
write(it_char,'(i12)')iday(l,i)
it_char=adjustl(it_char)
leng=len_trim(it_char)
file62='out2d_'//it_char(1:leng)//'.nc'
iret=nf90_open(trim(adjustl(file62)),OR(NF90_NETCDF4,NF90_NOWRITE),ncid4)
!time is double
iret=nf90_inq_varid(ncid4,'time',itime_id)
iret=nf90_get_var(ncid4,itime_id,timeout,(/1/),(/nrec/))
! print*, 'time=',timeout !,trim(adjustl(file63))
!Find nc file
file63=varname(1:len_var)//'_'//it_char(1:leng)//'.nc'
inquire(file=file63,exist=lexist)
if(lexist) then
i23d=2 !3D var
else
i23d=1 !2D
file63=file62
endif
iret=nf90_open(trim(adjustl(file63)),OR(NF90_NETCDF4,NF90_NOWRITE),ncid)
iret=nf90_inq_varid(ncid,varname(1:len_var),ivarid1)
if(iret/=nf90_NoErr) stop 'Var not found'
iret=nf90_Inquire_Variable(ncid,ivarid1,ndims=ndims,dimids=dimids)
if(ndims>100) stop 'increase dimension of dimids & idims'
do ii=1,ndims
iret=nf90_Inquire_Dimension(ncid,dimids(ii),len=idims(ii))
enddo !ii
npes=idims(ndims-1) !np|ne|ns
if(npes/=np.and.npes/=ne) stop 'can only handle node- or elem-based'
!'
if(idims(ndims)/=nrec) stop 'last dim is not time'
irec=irecord(l,i)
!Get elev
iret=nf90_inq_varid(ncid4,'elevation',itmp)
start_2d(1)=1; start_2d(2)=irec
count_2d(1)=np; count_2d(2)=1
iret=nf90_get_var(ncid4,itmp,eta2,start_2d,count_2d)
if(i23d==1) then !2D
start_2d(1)=1; start_2d(2)=irec
count_2d(1)=npes; count_2d(2)=1
iret=nf90_get_var(ncid,ivarid1,outvar(1,1:npes),start_2d,count_2d)
else !3D
start_3d(1:2)=1; start_3d(3)=irec
count_3d(1)=nvrt; count_3d(2)=npes; count_3d(3)=1
iret=nf90_get_var(ncid,ivarid1,outvar(:,1:npes),start_3d,count_3d)
endif
!Available now: outvar(nvrt,np|ne), eta2(np)
out2(l,:)=0
if(i23d==1) then !2D
do j=1,3 !nodes
nd=node3(i,j)
if(inode_elem==1) then !node
out2(l,1)=out2(l,1)+arco(i,j)*outvar(1,nd)
else !elem
out2(l,1)=outvar(1,iep(i))
endif
enddo !j
else !3D
! Do interpolation
etal=0; dep=0; idry=0
do j=1,3
nd=node3(i,j)
if(eta2(nd)+dp(nd)<h0) idry=1
etal=etal+arco(i,j)*eta2(nd)
dep=dep+arco(i,j)*dp(nd)
! Debug
! write(11,*)i,j,nd,dp(nd),arco(i,j)
enddo !j
if(idry==1) then
out2(:,:)=rjunk
exit loop1
else !element wet
!Compute z-coordinates
if(ivcor==1) then !localized
do j=1,3
nd=node3(i,j)
do k=kbp(nd)+1,nvrt-1
ztmp2(k,j)=(eta2(nd)+dp(nd))*sigma_lcl(k,nd)+eta2(nd)
enddo !k
ztmp2(kbp(nd),j)=-dp(nd) !to avoid underflow
ztmp2(nvrt,j)=eta2(nd) !to avoid underflow
enddo !j
ztmp=0
kbpl=minval(kbp(node3(i,1:3)))
do k=kbpl,nvrt
do j=1,3
nd=node3(i,j)
ztmp(k)=ztmp(k)+arco(i,j)*ztmp2(max(k,kbp(nd)),j)
enddo !j
enddo !k
else if(ivcor==2) then !SZ
call zcor_SZ_single(dep,etal,h0,h_s,h_c,theta_b,theta_f,kz,nvrt,ztot, &
&sigma,ztmp(:),idry2,kbpl)
endif
do k=kbpl,nvrt
do j=1,3
nd=node3(i,j)
kin=max(k,kbp00(nd))
if(inode_elem==1) then !node
out2(l,k)=out2(l,k)+arco(i,j)*outvar(kin,nd)
else !elem; off by half a layer
out2(l,k)=outvar(k,iep(i))
endif !i23d
enddo !j
enddo !k
endif !dry/wet
endif !i23d
enddo loop1 !l=1,2; 2 times
! Interpolate in time
trat=(t00(i)-times(1,i))/(times(2,i)-times(1,i)) !must be [0,1]
if(i23d==1) then !2D
if(iep(i)==0) then !no parents
out4(1)=rjunk
else
out4(1)=out2(1,1)*(1-trat)+out2(2,1)*trat
endif
write(18,'(e16.8,2(1x,f12.3))')t00(i)/86400,out4(1)
else !3D
if(iep(i)==0) then !no parents
out4(:)=rjunk
else
out4(kbpl:nvrt)=out2(1,kbpl:nvrt)*(1-trat)+out2(2,kbpl:nvrt)*trat
!Extend
do k=1,kbpl-1
out4(k)=out4(kbpl)
ztmp(k)=ztmp(kbpl)
enddo !k
endif
do k=nvrt,1,-1
!First of each cast suite is at the actual cast time (followed by b4 and after)
write(18,'(i6,4(1x,f12.3))')i,out4(k),ztmp(k)-ztmp(nvrt),ztmp(k),t00(i)/86400
enddo !k
endif
enddo !i=1,nxy
print*, 'Finished!'
stop
end
|
{"hexsha": "747c18bb13eb7e8c48879c488f9a5123570d839c", "size": 13741, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/Utility/Post-Processing-Fortran/read_output10_xyt.f90", "max_stars_repo_name": "rustychris/schism", "max_stars_repo_head_hexsha": "3754530ef57b3a058906432b4a9fca4a670f395e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2019-08-12T21:48:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T03:08:10.000Z", "max_issues_repo_path": "src/Utility/Post-Processing-Fortran/read_output10_xyt.f90", "max_issues_repo_name": "rustychris/schism", "max_issues_repo_head_hexsha": "3754530ef57b3a058906432b4a9fca4a670f395e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2019-08-19T21:57:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T17:42:01.000Z", "max_forks_repo_path": "src/Utility/Post-Processing-Fortran/read_output10_xyt.f90", "max_forks_repo_name": "rustychris/schism", "max_forks_repo_head_hexsha": "3754530ef57b3a058906432b4a9fca4a670f395e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 51, "max_forks_repo_forks_event_min_datetime": "2019-08-09T20:59:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T15:48:43.000Z", "avg_line_length": 37.543715847, "max_line_length": 281, "alphanum_fraction": 0.5347500182, "num_tokens": 4525}
|
import pickle
import re
import torch
from wafamole.models import PyTorchModelWrapper
import wafamole.models.custom.pytorch_models.utils as ut
from wafamole.utils.check import type_check
from wafamole.exceptions.models_exceptions import (
ModelNotLoadedError,
PyTorchInternalError,
)
import numpy as np
import json
class PyTorchExample(PyTorchModelWrapper):
"""SQLiGoT wrapper"""
def __init__(self, filepath: str):
"""Constructs model by loading pretrained net.
Arguments:
            filepath (str) : the path to the pretrained PyTorch net
Raises:
TypeError: filepath not string
FileNotFoundError: filepath not pointing to anything
            NotPyTorchModelError: filepath not pointing to a PyTorch model
"""
type_check(filepath, str, "filepath")
from wafamole.models.custom.pytorch_models.ModelClass import SentimentLSTM
self.filepath = filepath
        p = re.compile(r'.*ModelWAF(\d+).*')
model_num = p.findall(filepath)
self.model_number = model_num[0]
self.vocabfile = './vocab' + self.model_number + '.json'
f = open(self.vocabfile)
self.vocab_to_int = json.load(f)
vocab_size = len(self.vocab_to_int) + 1 # +1 for the 0 padding
output_size = 1
embedding_dim = 100
hidden_dim = 32
n_layers = 2
net = SentimentLSTM(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
self.load_model(filepath, net)
super(PyTorchExample, self).__init__(self._pytorch_classifier)
def extract_features(self, value: str):
"""Extract feature vector using SQLiGoT extractor.
Arguments:
value (str) : the input SQL query.
Raises:
TypeError: value is not string
ModelNotLoadedError: calling function without having loaded or passed model as arg
Returns:
numpy ndarray : the feature vector
"""
if self._pytorch_classifier is None:
raise ModelNotLoadedError()
type_check(value, str, "value")
# print("Modified String", value)
new_value = ut.PreProc(value, self.model_number, self.vocab_to_int)
# print("pre processed value", new_value)
return new_value
def classify(self, value):
"""Computes the probability of being a sql injection.
Arguments:
value: the input query
Raises:
            ModelNotLoadedError: calling function without having loaded or passed model as arg
            PyTorchInternalError: internal PyTorch exception has been thrown
Returns:
probability of being a sql injection.
"""
# print(value)
if self._pytorch_classifier is None:
raise ModelNotLoadedError()
feature_vector = self.extract_features(value)
# print(feature_vector)
if feature_vector is None:
return 1
feature_vector = torch.from_numpy(feature_vector)
y_pred = ut.predict(self._pytorch_classifier, feature_vector)
# print(y_pred)
return y_pred
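# Illustrative usage (a sketch; the file names are hypothetical -- the wrapper
# expects a checkpoint path matching '.*ModelWAF(\d+).*' and a matching
# ./vocab<N>.json vocabulary file in the working directory):
# model = PyTorchExample("./models/ModelWAF1.pt")
# score = model.classify("admin' OR 1=1 --")  # probability of SQL injection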
|
{"hexsha": "1f2f41fdea2723a929b18b42829bf8c84753f3e9", "size": 3131, "ext": "py", "lang": "Python", "max_stars_repo_path": "wafamole/models/custom/pytorch_models/example_model1.py", "max_stars_repo_name": "SANKEERTH26/waf-a-mole", "max_stars_repo_head_hexsha": "71c59aac7b2c4ee65a2bcfbf89a58e5546b9e26c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wafamole/models/custom/pytorch_models/example_model1.py", "max_issues_repo_name": "SANKEERTH26/waf-a-mole", "max_issues_repo_head_hexsha": "71c59aac7b2c4ee65a2bcfbf89a58e5546b9e26c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wafamole/models/custom/pytorch_models/example_model1.py", "max_forks_repo_name": "SANKEERTH26/waf-a-mole", "max_forks_repo_head_hexsha": "71c59aac7b2c4ee65a2bcfbf89a58e5546b9e26c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9578947368, "max_line_length": 95, "alphanum_fraction": 0.6534653465, "include": true, "reason": "import numpy", "num_tokens": 683}
|
import json
from pathlib import Path
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import *
class LiveLrSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""
Updates learning rate schedule based on config file during the training process.
"""
def __init__(self, check_for_update_interval,
lr_filename='current_lr.conf',
custom_objects={},
initial_schedule=None):
"""
:param check_for_update_interval: The interval the lr will be updated. Measured in **steps**.
:param lr_filename: The file to read config from.
:param custom_objects:
The dict passed to `deserialize` function while loading lr schedule.
This is typically for custom children of tf.keras.optimizers.schedules.LearningRateSchedule.
"""
lr_filename = Path(lr_filename)
self.lr_filename = lr_filename
self.check_for_update_interval = check_for_update_interval
self.custom_objects = custom_objects
self.initial_schedule = initial_schedule
if not lr_filename.exists():
with open(lr_filename, 'w') as file:
file.write(json.dumps(serialize(initial_schedule)))
self.lr_file = open(lr_filename, 'r')
self.base_schedule = initial_schedule
self.logger = tf.get_logger()
def __call__(self, step):
return tf.py_function(self.call, [step], tf.float32)
def load_config(self):
"""
Tries to load config file and update `base_schedule`. Logs failures with logger.error.
"""
try:
self.lr_file.seek(0)
config = json.loads(self.lr_file.read())
self.base_schedule = deserialize(config, self.custom_objects)
self.logger.info('LiveLrSchedule info: %s', 'loaded schedule name: {}'.format(self.base_schedule.__class__.__name__))
except Exception as err:
self.logger.error('LiveLrSchedule error: %s', str(err), exc_info=True)
self.logger.error('LiveLrSchedule keeps schedule unchanged')
def call(self, step):
if step % self.check_for_update_interval == 0:
self.load_config()
return np.float32(self.base_schedule(step))
def get_config(self):
return {
'check_for_update_interval': self.check_for_update_interval,
'lr_filename': str(self.lr_filename),
'custom_objects': self.custom_objects,
'initial_schedule': self.initial_schedule
}
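# Illustrative usage (a minimal sketch; the schedule values and the Keras model
# are hypothetical, not part of the original file):
# schedule = LiveLrSchedule(
#     check_for_update_interval=100,
#     initial_schedule=ExponentialDecay(1e-3, decay_steps=10_000, decay_rate=0.9))
# model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=schedule), loss='mse')
# Editing current_lr.conf while training runs swaps in the new schedule within
# 100 steps; a malformed config is logged and the old schedule is kept.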
|
{"hexsha": "56e987191f33e8e26cd064b638856e0d5d5ae6f8", "size": 2247, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf_livepatch_lr/livepatch_lr.py", "max_stars_repo_name": "andrewerf/tf_livepatch_lr", "max_stars_repo_head_hexsha": "b2f6a513e536e61f39bbf99a8832f4f42719854d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tf_livepatch_lr/livepatch_lr.py", "max_issues_repo_name": "andrewerf/tf_livepatch_lr", "max_issues_repo_head_hexsha": "b2f6a513e536e61f39bbf99a8832f4f42719854d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tf_livepatch_lr/livepatch_lr.py", "max_forks_repo_name": "andrewerf/tf_livepatch_lr", "max_forks_repo_head_hexsha": "b2f6a513e536e61f39bbf99a8832f4f42719854d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5692307692, "max_line_length": 120, "alphanum_fraction": 0.7547841567, "include": true, "reason": "import numpy", "num_tokens": 518}
|
from __future__ import print_function
import torch
import torch.optim as optim
from data.data_loader import CreateDataLoader
import tqdm
import cv2
import yaml
from schedulers import WarmRestart, LinearDecay
import numpy as np
from models.networks import get_nets_multitask, EncoderDecoder
from models.losses import get_loss
from models.models import get_model
from tensorboardX import SummaryWriter
import logging
REPORT_EACH = 10
torch.backends.cudnn.benchmark = True
cv2.setNumThreads(0)
class Trainer:
def __init__(self, config):
self.config = config
self.train_dataset = self._get_datasets(config, 'train')
self.val_dataset = self._get_datasets(config, 'test')
self.best_psnr = 0
self.best_ssim = 0
self.warmup_epochs = config['warmup_num']
def train(self):
self._init_params()
for epoch in range(0, config['num_epochs']):
if (epoch == self.warmup_epochs) and not(self.warmup_epochs == 0):
self.encoder.module.unfreeze()
list_of_params = [x for y in self.decoders for x in y.parameters()]
list_of_params = list_of_params + list(self.encoder.parameters())
self.optimizer_G = self._get_optim(list_of_params, self.config['optimizer']['lr_G'])
self.scheduler_G = self._get_scheduler(self.optimizer_G)
# self.optimizer_G = self._get_optim(self.netG, self.config['optimizer']['lr_G'])
# self.scheduler_G = self._get_scheduler(self.optimizer_G)
train_loss = self._run_epoch(epoch)
val_loss, val_psnr, val_ssim = self._validate(epoch)
self.scheduler_G.step()
val_metric = val_psnr
if val_ssim > self.best_ssim:
self.best_ssim = val_ssim
dict_models = {'encoder': self.encoder.state_dict()}
for i in range(num_of_tasks):
dict_models['decoder' + str(i)] = self.decoders[i].state_dict()
if val_metric > self.best_psnr:
self.best_psnr = val_metric
torch.save(dict_models, 'best_{}.h5'.format(self.config['experiment_desc']))
torch.save(dict_models, 'last_{}.h5'.format(self.config['experiment_desc']))
print(('val_loss={}, val_psnr={}, val_ssim={},best_psnr={}, best_ssim={}\n'.
format(val_loss, val_psnr, val_ssim, self.best_psnr, self.best_ssim)))
logging.debug("Experiment Name: %s, Epoch: %d, Train Loss: %.3f, Val Loss: %.3f, Val PSNR: %.3f, "
"Best PSNR: %.3f, Best SSIM: %.3f" % (self.config['experiment_desc'], epoch, train_loss,
val_loss, val_metric, self.best_psnr, self.best_ssim))
def _run_epoch(self, epoch):
losses_G = []
losses_G_i = {}
losses_vgg_i = {}
losses_adv_i = {}
losses_l1_i = {}
psnrs_i = {}
ssims_i = {}
mean_loss_vgg_i = {}
mean_loss_adv_i = {}
mean_loss_l1_i = {}
mean_psnr_i = {}
mean_ssim_i = {}
mean_loss_G_i = {}
max_len = 0
for type, dataset in self.train_dataset.items():
if len(dataset) > max_len:
max_len = len(dataset)
batches_per_epoch = len(dataset) // dataset.dataloader.batch_size
datasets = {"dataiterators":[]}
for param_group in self.optimizer_G.param_groups:
lr = param_group['lr']
mapping = {}
for type, dataset in self.train_dataset.items():
mapping[str(len(datasets['dataiterators']))] = dataset.dataset.name()
datasets["dataiterators"].append(iter(dataset))
losses_G_i[dataset.dataset.name()] = []
losses_vgg_i[dataset.dataset.name()] = []
losses_adv_i[dataset.dataset.name()] = []
losses_l1_i[dataset.dataset.name()] = []
psnrs_i[dataset.dataset.name()] = []
ssims_i[dataset.dataset.name()] = []
loss_di = {}
print(mapping)
tq = tqdm.tqdm(range(batches_per_epoch))
tq.set_description('Epoch {}, lr {}'.format(epoch, lr))
flag_train = False
for i in tq:
if flag_train:
break
loss_G = 0
for idx, dataset in enumerate(datasets["dataiterators"]):
name = mapping[str(idx)]
try:
data = next(dataset)
except StopIteration:
flag_train = True
print('stopIter')
break
inputs, targets = self.model.get_input(data)
# outputs = self.decoders[idx](self.encoder(inputs))
outputs = inputs + self.decoders[idx](self.encoder(inputs))
outputs = torch.clamp(outputs, min=-1, max=1)
for _ in range(config['D_update_ratio']):
self.optimizers_Di[idx].zero_grad()
loss_di[name] = config['loss']['adv'] * self.criterionD(self.netsD[idx], outputs, targets)
loss_di[name].backward(retain_graph=True)
self.optimizers_Di[idx].step()
loss_adv = config['loss']['adv'] * self.criterionD.get_g_loss(self.netsD[idx], outputs, targets)
loss_content = config['loss']['vgg'] * self.criterionG(outputs, targets)
loss_pix = config['loss']['l1'] * self.criterionG_pix(outputs, targets)
losses_adv_i[name].append(loss_adv.item())
losses_vgg_i[name].append(loss_content.item())
losses_l1_i[name].append(loss_pix.item())
lg1 = loss_content + loss_pix + loss_adv
losses_G_i[name].append(lg1.item())
curr_psnr, curr_ssim = self.model.get_acc(outputs, targets)
psnrs_i[name].append(curr_psnr)
ssims_i[name].append(curr_ssim)
mean_loss_vgg_i[name] = np.mean(losses_vgg_i[name][-REPORT_EACH:])
mean_loss_adv_i[name] = np.mean(losses_adv_i[name][-REPORT_EACH:])
mean_loss_l1_i[name] = np.mean(losses_l1_i[name][-REPORT_EACH:])
mean_psnr_i[name] = np.mean(psnrs_i[name][-REPORT_EACH:])
mean_ssim_i[name] = np.mean(ssims_i[name][-REPORT_EACH:])
mean_loss_G_i[name] = np.mean(losses_G_i[name][-REPORT_EACH:])
if i % 200 == 0:
self.model.visualize_data('train', self.config, data, outputs, i + (batches_per_epoch * epoch),
name)
self.optimizer_G.zero_grad()
loss_G += lg1
if flag_train:
break
loss_G.backward()
self.optimizer_G.step()
losses_G.append(loss_G.item())
mean_loss_G = np.mean(losses_G[-REPORT_EACH:])
if i % 100 == 0:
writer.add_scalar('Train_G_Loss', mean_loss_G, i + (batches_per_epoch * epoch))
for name in mean_loss_G_i.keys():
writer.add_scalar('Train_G_Loss_' + name, mean_loss_G_i[name],
i + (batches_per_epoch * epoch))
writer.add_scalar('Train_G_Loss_vgg_' + name, mean_loss_vgg_i[name],
i + (batches_per_epoch * epoch))
writer.add_scalar('Train_G_Loss_adv_' + name, mean_loss_adv_i[name],
i + (batches_per_epoch * epoch))
writer.add_scalar('Train_G_Loss_L1_' + name, mean_loss_l1_i[name],
i + (batches_per_epoch * epoch))
writer.add_scalar('Train_PSNR_' + name, mean_psnr_i[name],
i + (batches_per_epoch * epoch))
writer.add_scalar('Train_SSIM_' + name, mean_ssim_i[name],
i + (batches_per_epoch * epoch))
tq.set_postfix(loss=self.model.get_loss(mean_loss_G,
np.mean(list(mean_psnr_i.values())),
np.mean(list(mean_ssim_i.values()))))
tq.close()
return np.mean(losses_G)
def _validate(self, epoch):
losses_G = []
losses_G_i = {}
psnrs_i = {}
ssims_i = {}
val_psnr = {}
val_ssim = {}
val_loss_G = {}
max_len = 0
for type, dataset in self.val_dataset.items():
if len(dataset) > max_len:
max_len = len(dataset)
batches_per_epoch = len(dataset) // dataset.dataloader.batch_size
datasets = {"dataiterators":[]}
mapping = {}
for type, dataset in self.val_dataset.items():
mapping[str(len(datasets['dataiterators']))] = dataset.dataset.name()
datasets["dataiterators"].append(iter(dataset))
losses_G_i[dataset.dataset.name()] = []
psnrs_i[dataset.dataset.name()] = []
ssims_i[dataset.dataset.name()] = []
tq = tqdm.tqdm(range(batches_per_epoch))
print('Validation')
tq.set_description('Validation')
# flag_val = False
with torch.no_grad():
for i in tq:
# if flag_val:
# break
loss_G = 0
for idx, dataset in enumerate(datasets["dataiterators"]):
name = mapping[str(idx)]
try:
data = next(dataset)
except StopIteration:
# flag_val = True
# print('stopIterVal')
# break
continue
#print("\n=========", data['A'].size())
inputs, targets = self.model.get_input(data)
# outputs = self.decoders[idx](self.encoder(inputs))
outputs = inputs + self.decoders[idx](self.encoder(inputs))
outputs = torch.clamp(outputs, min=-1, max=1)
loss_adv = config['loss']['adv'] * self.criterionD.get_g_loss(self.netsD[idx], outputs, targets)
loss_content = config['loss']['vgg'] * self.criterionG(outputs, targets)
loss_pix = config['loss']['l1'] * self.criterionG_pix(outputs, targets)
lg1 = loss_content + loss_pix + loss_adv
losses_G_i[name].append(lg1.item())
curr_psnr, curr_ssim = self.model.get_acc(outputs, targets)
psnrs_i[name].append(curr_psnr)
ssims_i[name].append(curr_ssim)
if i%10 == 0:
self.model.visualize_data('val', self.config, data, outputs, i + (batches_per_epoch * epoch),
name)
loss_G += lg1
losses_G.append(loss_G.item())
for name in losses_G_i.keys():
print('entered losses_G_i keys()')
val_psnr[name] = np.mean(psnrs_i[name])
val_ssim[name] = np.mean(ssims_i[name])
val_loss_G[name] = np.mean(losses_G_i[name])
writer.add_scalar('Val_PSNR_' + name, val_psnr[name], epoch)
writer.add_scalar('Val_SSIM_' + name, val_ssim[name], epoch)
writer.add_scalar('Val_Loss_G_' + name, val_loss_G[name], epoch)
val_loss = np.mean(losses_G)
writer.add_scalar('Validation_Loss', val_loss, epoch)
tq.close()
return val_loss, np.mean(list(val_psnr.values())), np.mean(list(val_ssim.values()))
def _get_dataset(self, config, filename):
data_loader = CreateDataLoader(config, filename)
return data_loader.load_data()
def _get_datasets(self, config, filename):
if "datasets" not in config:
return self._get_dataset(config, filename)
else:
train_dataset = {}
for dataset in config["datasets"]:
my_config = {"dataset": {"mode": dataset["type"]}, "batch_size": dataset["batch_size"],
"dataroot_train": dataset["dataroot_train"], "dataroot_val": dataset["dataroot_val"],
"fineSize": dataset["fineSize"], "num_workers": config["num_workers"]}
train_dataset[dataset["type"]] = self._get_dataset(my_config, filename)
return train_dataset
def _get_optim(self, list_of_params, lr):
if self.config['optimizer']['name'] == 'adam':
optimizer = optim.Adam(filter(lambda p: p.requires_grad, list_of_params), lr=lr)
elif self.config['optimizer']['name'] == 'sgd':
optimizer = optim.SGD(filter(lambda p: p.requires_grad, list_of_params), lr=lr)
elif self.config['optimizer']['name'] == 'adadelta':
optimizer = optim.Adadelta(filter(lambda p: p.requires_grad, list_of_params), lr=lr)
else:
raise ValueError("Optimizer [%s] not recognized." % self.config['optimizer']['name'])
return optimizer
def _get_scheduler(self, optimizer):
if self.config['scheduler']['name'] == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min',
patience=self.config['scheduler']['patience'],
factor=self.config['scheduler']['factor'],
min_lr=self.config['scheduler']['min_lr'])
        elif self.config['scheduler']['name'] == 'sgdr':
scheduler = WarmRestart(optimizer)
elif self.config['scheduler']['name'] == 'linear':
scheduler = LinearDecay(optimizer,
min_lr=self.config['scheduler']['min_lr'],
num_epochs=self.config['num_epochs'],
start_epoch=self.config['scheduler']['start_epoch'])
else:
raise ValueError("Scheduler [%s] not recognized." % self.config['scheduler']['name'])
return scheduler
def _init_params(self):
dict_for_G, self.netsD = get_nets_multitask(self.config['model'], self.config)
self.encoder = dict_for_G['encoder']
self.decoders = dict_for_G['decoders']
self.encoder.cuda()
for decoder in self.decoders:
decoder.cuda()
for netD in self.netsD:
netD.cuda()
self.model = get_model(self.config['model'])
self.criterionG, self.criterionG_pix, self.criterionD = get_loss(self.config['model'])
list_of_params = [x for y in self.decoders for x in y.parameters()]
list_of_params = list_of_params + list(self.encoder.parameters())
self.optimizer_G = self._get_optim(list_of_params, self.config['optimizer']['lr_G'])
self.optimizers_Di = [self._get_optim(x.parameters(), self.config['optimizer']['lr_D']) for x in self.netsD]
self.scheduler_G = self._get_scheduler(self.optimizer_G)
self.schedulers_Di = [self._get_scheduler(x) for x in self.optimizers_Di]
if __name__ == '__main__':
with open('config/mtl_solver.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
exp_desc = config['experiment_desc']
num_of_tasks = len(config['datasets'])
logging.basicConfig(filename=exp_desc + '.log', level=logging.DEBUG)
writer = SummaryWriter(exp_desc + '_runs')
trainer = Trainer(config)
trainer.train()
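# Illustrative sketch of config/mtl_solver.yaml (keys inferred from the code
# above; all values are hypothetical placeholders):
# experiment_desc: mtl_deblur
# warmup_num: 3
# num_epochs: 200
# num_workers: 4
# D_update_ratio: 1
# model: {...}                                   # passed to get_nets_multitask/get_loss
# optimizer: {name: adam, lr_G: 0.0001, lr_D: 0.0001}
# scheduler: {name: linear, min_lr: 0.0000001, start_epoch: 50}
# loss: {adv: 0.001, vgg: 1.0, l1: 10.0}
# datasets:
#   - {type: deblur, batch_size: 1, fineSize: 256,
#      dataroot_train: /data/train, dataroot_val: /data/val}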
|
{"hexsha": "d061cfdc2f935a17ba91904f28e45dc139a6ba4e", "size": 12771, "ext": "py", "lang": "Python", "max_stars_repo_path": "multi_task_train.py", "max_stars_repo_name": "t-martyniuk/DeblurGAN", "max_stars_repo_head_hexsha": "4b637bb0e19c446e7dbf63383eb5081a8c3b1804", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-01-14T17:23:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-20T14:47:55.000Z", "max_issues_repo_path": "multi_task_train.py", "max_issues_repo_name": "t-martyniuk/DeblurGAN", "max_issues_repo_head_hexsha": "4b637bb0e19c446e7dbf63383eb5081a8c3b1804", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-05-31T09:15:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-31T08:30:17.000Z", "max_forks_repo_path": "multi_task_train.py", "max_forks_repo_name": "t-martyniuk/DeblurGAN", "max_forks_repo_head_hexsha": "4b637bb0e19c446e7dbf63383eb5081a8c3b1804", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-18T10:21:46.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-18T10:21:46.000Z", "avg_line_length": 35.475, "max_line_length": 110, "alphanum_fraction": 0.6845196148, "include": true, "reason": "import numpy", "num_tokens": 3525}
|
# Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
# other Shroud Project Developers.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
# #######################################################################
#
# Test Python API generated from ownership.yaml.
#
from __future__ import print_function
import numpy as np
import unittest
import ownership
class Ownership(unittest.TestCase):
"""Test tutorial problem"""
def XXsetUp(self):
""" Setting up for the test """
print("FooTest:setUp_:begin")
## do something...
print("FooTest:setUp_:end")
def XXtearDown(self):
"""Cleaning up after the test"""
print("FooTest:tearDown_:begin")
## do something...
print("FooTest:tearDown_:end")
#----------------------------------------
# return scalar
def testReturnIntPtrScalar(self):
"Return pointer as int python scalar"
# deref(scalar)
rv = ownership.ReturnIntPtrScalar()
self.assertIsInstance(rv, int)
self.assertEqual(10, rv)
def testReturnIntPtrPointer(self):
"Return pointer to int numpy scalar"
# deref(pointer)
rv = ownership.ReturnIntPtrPointer()
self.assertIsInstance(rv, np.ndarray)
self.assertEqual('int32', rv.dtype.name)
self.assertEqual(1, rv.size)
self.assertEqual(1, rv)
#----------------------------------------
# return dimension(len) owner(caller)
def testReturnIntPtrDimDefault(self):
"Return pointer to existing int array"
rv = ownership.ReturnIntPtrDimDefault()
self.assertIsInstance(rv, np.ndarray)
self.assertEqual('int32', rv.dtype.name)
self.assertEqual(7, rv.size)
self.assertTrue(all(np.equal(rv, [31,32,33,34,35,36,37])))
#----------------------------------------
# return dimension(len) owner(library)
def testReturnIntPtrDimDefaultNew(self):
"Return pointer to a new int array"
rv = ownership.ReturnIntPtrDimDefaultNew()
self.assertIsInstance(rv, np.ndarray)
self.assertEqual('int32', rv.dtype.name)
self.assertEqual(5, rv.size)
self.assertTrue(all(np.equal(rv, [30,31,32,33,34])))
# creating a new test suite
newSuite = unittest.TestSuite()
# adding a test case
newSuite.addTest(unittest.makeSuite(Ownership))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "2bf80a02959d01e6798f8a64f901d3e0e008face", "size": 2479, "ext": "py", "lang": "Python", "max_stars_repo_path": "regression/run/ownership/python/test.py", "max_stars_repo_name": "ExternalRepositories/shroud", "max_stars_repo_head_hexsha": "86c39d2324d947d28055f9024f52cc493eb0c813", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2017-10-11T17:01:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T21:42:12.000Z", "max_issues_repo_path": "regression/run/ownership/python/test.py", "max_issues_repo_name": "ExternalRepositories/shroud", "max_issues_repo_head_hexsha": "86c39d2324d947d28055f9024f52cc493eb0c813", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2018-03-21T19:34:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T18:13:14.000Z", "max_forks_repo_path": "regression/run/ownership/python/test.py", "max_forks_repo_name": "ExternalRepositories/shroud", "max_forks_repo_head_hexsha": "86c39d2324d947d28055f9024f52cc493eb0c813", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-11-22T14:27:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T08:49:03.000Z", "avg_line_length": 30.6049382716, "max_line_length": 73, "alphanum_fraction": 0.6034691408, "include": true, "reason": "import numpy", "num_tokens": 524}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import cv2
import nrrd
import numpy as np
import argparse
__author__ = 'Alessandro Delmonte'
__email__ = 'delmonte.ale92@gmail.com'
def nothing(_):
pass
def main():
filename = setup()
frames, _ = nrrd.read(filename)
frames = frames.astype('float32')
    frames = 128 * (frames - np.amin(frames)) / np.ptp(frames)  # min-max rescale to [0, 128]
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.createTrackbar('frame', 'image', 0, frames.shape[2] - 1, nothing)
cv2.createTrackbar('low', 'image', 0, int(np.amax(frames)) - 1, nothing)
cv2.createTrackbar('high', 'image', 0, int(np.amax(frames)) - 1, nothing)
while True:
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
f = cv2.getTrackbarPos('frame', 'image')
low = cv2.getTrackbarPos('low', 'image') # 22
high = cv2.getTrackbarPos('high', 'image') # 61
edges = cv2.Canny(frames[:, :, f].astype('uint8'), low, high)
im_show = np.vstack((frames[:, :, f].astype('uint8'), edges))
cv2.imshow('image', im_show)
cv2.destroyAllWindows()
def setup():
parser = argparse.ArgumentParser()
parser.add_argument('Input_Image', help='Name of the input file', type=check_ext)
args = parser.parse_args()
return args.Input_Image
def check_ext(value):
filename, file_extension = os.path.splitext(value)
if file_extension == '.nrrd':
return value
else:
raise argparse.ArgumentTypeError(
"Invalid file extension (file format supported: nrrd): %r" % value)
if __name__ == "__main__":
main()
sys.exit()
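# Example invocation (the file name is hypothetical):
#   python canny.py volume.nrrd
# Drag the 'low'/'high' trackbars to tune the Canny thresholds (the original and
# edge images are stacked in one window); press Esc to quit.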
|
{"hexsha": "b12460674732bdcb249777314343f0c161e9bfa7", "size": 1666, "ext": "py", "lang": "Python", "max_stars_repo_path": "Filters/canny.py", "max_stars_repo_name": "aledelmo/3DSlicer_Plugins", "max_stars_repo_head_hexsha": "918d6ba1ae4e9d5fe2ff01f09bf51e0ffe5a6a00", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Filters/canny.py", "max_issues_repo_name": "aledelmo/3DSlicer_Plugins", "max_issues_repo_head_hexsha": "918d6ba1ae4e9d5fe2ff01f09bf51e0ffe5a6a00", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Filters/canny.py", "max_forks_repo_name": "aledelmo/3DSlicer_Plugins", "max_forks_repo_head_hexsha": "918d6ba1ae4e9d5fe2ff01f09bf51e0ffe5a6a00", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8, "max_line_length": 85, "alphanum_fraction": 0.6218487395, "include": true, "reason": "import numpy", "num_tokens": 467}
|
import _thread as thread
import ast
import io
import json
import os
import sqlite3
import sys
import time
import warnings
from multiprocessing import Process
import numpy as np
import onnxruntime as rt
import torch
import torch.nn.functional as F
from PIL import Image, UnidentifiedImageError
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../"))
import traceback
import torchvision.transforms as transforms
from shared import SharedOptions
class SceneModel(object):
def __init__(self, model_path, cuda=False):
self.sess = rt.InferenceSession(model_path)
self.input_name = self.sess.get_inputs()[0].name
def predict(self, image_tensors):
out = self.sess.run(None, {self.input_name: image_tensors})
out = np.array(out)
torch_out = torch.from_numpy(out).squeeze(1)
torch_out = torch.softmax(torch_out, 1)
return out.argmax(), torch_out.max().item()
def scenerecognition(thread_name, delay):
classes = list()
with open(
os.path.join(SharedOptions.SHARED_APP_DIR, "categories_places365.txt")
) as class_file:
for line in class_file:
classes.append(line.strip().split(" ")[0][3:])
placesnames = tuple(classes)
IMAGE_QUEUE = "scene_queue"
classifier = SceneModel(
os.path.join(SharedOptions.SHARED_APP_DIR, "scene.model"),
SharedOptions.CUDA_MODE,
)
while True:
queue = SharedOptions.db.lrange(IMAGE_QUEUE, 0, 0)
if len(queue) > 0:
SharedOptions.db.ltrim(IMAGE_QUEUE, len(queue), -1)
for req_data in queue:
req_data = json.JSONDecoder().decode(req_data)
img_id = req_data["imgid"]
req_id = req_data["reqid"]
req_type = req_data["reqtype"]
try:
img = Image.open(SharedOptions.TEMP_PATH + img_id).convert("RGB")
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
),
]
)
img = trans(img)
img = img.numpy()
img = np.expand_dims(img, 0).astype(np.float32)
os.remove(SharedOptions.TEMP_PATH + img_id)
cl, conf = classifier.predict(img)
cl = placesnames[cl]
conf = float(conf)
output = {"success": True, "label": cl, "confidence": conf}
except UnidentifiedImageError:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
                    output = {
                        "success": False,
                        "error": "invalid image",
                        "code": 400,
                    }
except Exception:
err_trace = traceback.format_exc()
print(err_trace, file=sys.stderr, flush=True)
output = {"success": False, "error": "invalid image", "code": 500}
finally:
SharedOptions.db.set(req_id, json.dumps(output))
if os.path.exists(SharedOptions.TEMP_PATH + img_id):
os.remove(SharedOptions.TEMP_PATH + img_id)
time.sleep(delay)
p = Process(target=scenerecognition, args=("", SharedOptions.SLEEP_TIME))
p.start()
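# Illustrative request flow (payload fields inferred from the loop above; the
# rpush call assumes SharedOptions.db is Redis-like, which is an assumption):
# SharedOptions.db.rpush("scene_queue", json.dumps(
#     {"imgid": "abc123.jpg", "reqid": "req-1", "reqtype": "scene"}))
# ...after the worker processes the image:
# json.loads(SharedOptions.db.get("req-1"))
# -> {"success": True, "label": "kitchen", "confidence": 0.87}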
|
{"hexsha": "203969e522c17d1566ac3c0faf04eebc83d3522c", "size": 3772, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepstack/intelligencelayer/shared/scene.py", "max_stars_repo_name": "OlafenwaMoses/DeepStack-1", "max_stars_repo_head_hexsha": "0315e48907c36c075da5aa558756786c0d76c1b8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-28T19:38:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-28T19:38:20.000Z", "max_issues_repo_path": "deepstack/intelligencelayer/shared/scene.py", "max_issues_repo_name": "OlafenwaMoses/DeepStack-1", "max_issues_repo_head_hexsha": "0315e48907c36c075da5aa558756786c0d76c1b8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deepstack/intelligencelayer/shared/scene.py", "max_forks_repo_name": "OlafenwaMoses/DeepStack-1", "max_forks_repo_head_hexsha": "0315e48907c36c075da5aa558756786c0d76c1b8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9365079365, "max_line_length": 87, "alphanum_fraction": 0.5448038176, "include": true, "reason": "import numpy", "num_tokens": 766}
|
"""
Abstract supertype for parameters. Theses are wrappers for model parameter values and
metadata that are returned from [`params`](@ref), and used in
`getfield/setfield/getpropery/setproperty` methods and to generate the Tables.jl interface.
They are stripped from the model with [`stripparams`](@ref).
An `AbstractParam` must define a `Base.parent` method that returns a `NamedTuple`, and a
constructor that accepts a `NamedTuple`. It must have a `val` property, and should use
`checkhasval` in its constructor.
"""
abstract type AbstractParam{T} <: AbstractNumbers.AbstractNumber{T} end
function ConstructionBase.setproperties(p::P, patch::NamedTuple) where P <: AbstractParam
fields = ConstructionBase.setproperties(parent(p), patch)
P.name.wrapper(fields)
end
@inline withunits(m, args...) = map(p -> withunits(p, args...), params(m))
@inline function withunits(p::AbstractParam, fn::Symbol=:val)
_applyunits(*, getproperty(p, fn), get(p, :units, nothing))
end
@inline stripunits(m, xs) = map(stripunits, params(m), xs)
@inline function stripunits(p::AbstractParam, x)
_applyunits(/, x, get(p, :units, nothing))
end
# Param might have `nothing` for units
@inline _applyunits(f, x, units) = f(x, units)
@inline _applyunits(f, x, ::Nothing) = x
@inline _applyunits(f, xs::Tuple, units) = map(x -> f(x, units), xs)
@inline _applyunits(f, xs::Tuple, units::Nothing) = xs
@inline _applyunits(f, ::Nothing, units) = nothing
@inline _applyunits(f, ::Nothing, ::Nothing) = nothing
# Base NamedTuple-like interface
Base.keys(p::AbstractParam) = keys(parent(p))
# Base.values has the potential to be confusing, as we
# have a val field in Param. Not sure what to do about this.
Base.values(p::AbstractParam) = values(parent(p))
@inline Base.propertynames(p::AbstractParam) = propertynames(parent(p))
@inline Base.getproperty(p::AbstractParam, x::Symbol) = getproperty(parent(p), x)
@inline Base.get(p::AbstractParam, key::Symbol, default) = get(parent(p), key, default)
@inline Base.getindex(p::AbstractParam, i) = getindex(parent(p), i)
# AbstractNumber interface
Base.convert(::Type{Number}, x::AbstractParam) = AbstractNumbers.number(x)
Base.convert(::Type{P}, x::P) where {P<:AbstractParam} = x
AbstractNumbers.number(p::AbstractParam) = withunits(p)
AbstractNumbers.basetype(::Type{<:AbstractParam{T}}) where T = T
AbstractNumbers.like(::Type{<:AbstractParam}, x) = x
# Flatten.jl defaults defined here: AbstractParam needs to be defined first
const SELECT = AbstractParam
const IGNORE = AbstractDict # What else to blacklist?
# Concrete implementation
"""
Param(p::NamedTuple)
Param(; kw...)
Param(val)
A wrapper type that lets you extract model parameters along with metadata such as
bounds, units, priors, or anything else you want to attach.
The first argument is assigned to the `val` field, and if only keyword arguments are used,
`val` must be one of them. `val` is used as the number value if the model is run
without stripping out the `Param` fields. `stripparams` also uses only the `:val` field.
"""
struct Param{T,P<:NamedTuple} <: AbstractParam{T}
parent::P
end
Param(nt::NT) where {NT<:NamedTuple} = begin
_checkhasval(nt)
Param{typeof(nt.val),NT}(nt)
end
Param(val; kwargs...) = Param((; val=val, kwargs...))
Param(; kwargs...) = Param((; kwargs...))
Base.parent(p::Param) = getfield(p, :parent)
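# A minimal usage sketch (hypothetical values; `bounds` is just an example
# metadata key, not a required field):
#
#   p = Param(5.0; bounds=(0.0, 10.0))
#   p.val        # 5.0
#   p.bounds     # (0.0, 10.0)
#   parent(p)    # (val = 5.0, bounds = (0.0, 10.0))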
# Methods for objects that hold params
params(x) = Flatten.flatten(x, SELECT, IGNORE)
stripparams(x) = hasparam(x) ? Flatten.reconstruct(x, withunits(x), SELECT, IGNORE) : x
# Utils
hasparam(obj) = length(params(obj)) > 0
_checkhasval(nt::NamedTuple{Keys}) where {Keys} = first(Keys) == :val || _novalerror(nt)
# @noinline avoids allocations unless there is actually an error
@noinline _novalerror(nt) = throw(ArgumentError("First field of Param must be :val"))
|
{"hexsha": "098921e422ccbd61ba51722c6bcd02ae68724dae", "size": 3847, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/param.jl", "max_stars_repo_name": "JeffreySarnoff/ModelParameters.jl", "max_stars_repo_head_hexsha": "fa317ec04cd57d134eab1ff44004e604dee681d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/param.jl", "max_issues_repo_name": "JeffreySarnoff/ModelParameters.jl", "max_issues_repo_head_hexsha": "fa317ec04cd57d134eab1ff44004e604dee681d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/param.jl", "max_forks_repo_name": "JeffreySarnoff/ModelParameters.jl", "max_forks_repo_head_hexsha": "fa317ec04cd57d134eab1ff44004e604dee681d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4947368421, "max_line_length": 92, "alphanum_fraction": 0.726800104, "num_tokens": 1022}
|
Require Coq.Arith.PeanoNat.
Require Import Lia.
Require Import Nat.
Import Coq.Arith.Wf_nat. (* needed for "lt_wf_ind" *)
Require Import List.
Require Import SKI.expr.
Require Import SKI.digits.
Require Import SKI.arithmetic_ops.
Require Import SKI.compile.
Require Import SKI.substitution.
Require Import SKI.church_rosser.
Require Import SKI.well_known_combinators.
Notation "[ x ; .. ; y ]" := (cons x .. (cons y nil) .. ).
Fixpoint godel_digits (c : expr) : list nat :=
match c with
| S => [ 1 ]
| K => [ 2 ]
| c1 [+] c2 => [3] ++ (godel_digits c1) ++ (godel_digits c2) ++ [4]
| var n => [ 5 ]
end.
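(* Worked example, by unfolding the definition:
   godel_digits (S [+] K) = [3] ++ [1] ++ [2] ++ [4] = [3; 1; 2; 4]. *)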
(* let l be the godel_digits of a term c, possibly followed by any other digits *)
(* l = godel_list c ++ other *)
(* prefix_term_length l 0 0 = length (godel_digits c) *)
Fixpoint prefix_term_length l acc opens :=
match l with
| 3 :: l' => prefix_term_length l' (Datatypes.S acc) (Datatypes.S opens)
| 4 :: l' => if opens =? 1 then (Datatypes.S acc) else prefix_term_length l' (Datatypes.S acc) (pred opens)
| _ :: l' => if opens =? 0 then 1 else prefix_term_length l' (Datatypes.S acc) opens
| nil => acc
end.
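(* Trace for l = [3; 1; 2; 4] (the digits of S [+] K):
   prefix_term_length [3;1;2;4] 0 0
   = prefix_term_length [1;2;4] 1 1    (a 3 opens a term)
   = prefix_term_length [2;4] 2 1
   = prefix_term_length [4] 3 1
   = 4                                 (opens = 1, so the closing 4 ends the term)
   which is length (godel_digits (S [+] K)). *)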
Lemma prefix_term_length_spec : forall c l acc opens,
prefix_term_length (godel_digits c ++ l) acc (Datatypes.S opens) = prefix_term_length l (acc + length(godel_digits c)) (Datatypes.S opens).
Proof.
intro.
induction c.
*
intros. simpl.
assert (Datatypes.S acc = acc + 1) by lia. rewrite H.
reflexivity.
*
intros. simpl.
assert (Datatypes.S acc = acc + 1) by lia. rewrite H.
reflexivity.
*
intros. simpl.
assert (Datatypes.S acc = acc + 1) by lia. rewrite H.
reflexivity.
*
intros.
unfold godel_digits. fold godel_digits.
simpl.
rewrite <- app_assoc.
rewrite IHc1.
rewrite <- app_assoc.
rewrite IHc2.
simpl.
rewrite app_length.
rewrite app_length.
simpl.
assert (Datatypes.S (Datatypes.S (acc + length (godel_digits c1) + length (godel_digits c2))) =
acc + Datatypes.S (length (godel_digits c1) + (length (godel_digits c2) + 1))) by lia.
rewrite H.
reflexivity.
Qed.
(* if l = godel_digits (c1 [+] c2) *)
(* this function computes the length of godel_digits c1 *)
Definition left_term_length l :=
match l with
| 3 :: l' => prefix_term_length l' 0 0
| _ => 0
end.
Theorem left_term_length_spec : forall c1 c2, left_term_length (godel_digits (c1 [+] c2)) = length (godel_digits c1).
Proof.
intros.
destruct c1; simpl; auto.
rewrite <- app_assoc.
rewrite prefix_term_length_spec. simpl.
rewrite <- app_assoc.
rewrite prefix_term_length_spec. simpl.
rewrite app_length.
rewrite app_length. simpl.
assert (Datatypes.S (length (godel_digits c1_1) + length (godel_digits c1_2)) =
length (godel_digits c1_1) + (length (godel_digits c1_2) + 1)) by lia.
rewrite H.
reflexivity.
Qed.
Definition has_unambiguous_parse l := forall c1, is_const c1 /\ l = godel_digits c1 -> forall c2, l = godel_digits c2 -> c1 = c2.
Theorem list_prefixes_equal : forall lx ly lx' ly' : list nat, length lx = length lx' -> lx ++ ly = lx' ++ ly' -> lx = lx'.
Proof.
intro lx.
induction lx.
*
intros.
destruct lx'. auto.
simpl in H. inversion H.
*
intros.
destruct lx'.
simpl in H. inversion H.
assert (a = n). {
inversion H0. subst. auto.
}
subst.
simpl in H.
inversion H. clear H.
assert (lx = lx'). {
eapply IHlx.
auto.
rewrite <- app_comm_cons in H0.
rewrite <- app_comm_cons in H0.
inversion H0. apply H1.
}
rewrite H. auto.
Qed.
Theorem all_unambiguous_parse : forall l, has_unambiguous_parse l.
Proof.
intros.
remember (length l).
generalize Heqn. clear Heqn.
generalize l. clear l.
apply (lt_wf_ind n).
clear n.
intros.
unfold has_unambiguous_parse.
intros.
destruct H0.
destruct c1.
*
generalize (not_is_const_var n0). intros.
contradiction.
*
subst.
destruct c2; simpl in H1; inversion H1. auto.
*
subst.
destruct c2; simpl in H1; inversion H1. auto.
*
subst.
assert (exists c2_1 c2_2, c2 = c2_1 [+] c2_2). {
remember c2.
rewrite Heqe in H1.
destruct c2; simpl in H1; inversion H1.
exists c2_1, c2_2. auto.
}
destruct H2 as [c2_1 [c2_2] ].
subst.
generalize (left_term_length_spec c1_1 c1_2). intros.
generalize (left_term_length_spec c2_1 c2_2). intros.
rewrite H1 in H2.
rewrite H2 in H3.
clear H2.
simpl in H1.
inversion H1.
assert (godel_digits c1_1 = godel_digits c2_1). {
eapply list_prefixes_equal.
auto.
apply H4.
}
assert (c1_1 = c2_1). {
unfold has_unambiguous_parse in H.
eapply H.
assert (length(godel_digits c1_1) < length(godel_digits (c1_1 [+] c1_2))). {
simpl.
rewrite app_length.
rewrite app_length.
simpl. lia.
}
apply H5.
reflexivity.
split.
apply is_const_app_iff in H0. apply H0. auto. auto.
}
subst.
inversion H1.
apply app_inv_head in H6.
apply app_inv_tail in H6.
assert (c1_2 = c2_2). {
eapply H.
assert (length (godel_digits c1_2) < length (godel_digits (c2_1 [+] c1_2))). {
simpl.
rewrite app_length.
rewrite app_length.
simpl.
lia.
}
apply H5.
reflexivity.
split.
apply is_const_app_iff in H0. apply H0. auto. auto.
}
subst. auto.
Qed.
(* Do not try to compute this. It crashes Coq *)
Definition godel_number (c : expr) : nat :=
digits_to_nat (godel_digits c) 0.
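(* Assuming digits_to_nat reads the digit list in base 10 (consistent with the
   concat10 lemmas below), godel_number (S [+] K) would be
   concat10 3 (concat10 1 (concat10 2 4)) = 3124. *)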
Theorem concat10_neq_0 : forall a b, b <> 0 -> concat10 a b <> 0.
Proof.
intros.
unfold concat10.
unfold not. intros.
apply Plus.plus_is_O in H0.
destruct H0.
rewrite H0 in H1.
simpl in H1.
assert (b = 0) by lia.
apply H. auto.
Qed.
Theorem digits_represent_S : digits_represent (godel_digits S) 1.
Proof.
unfold digits_represent.
split. simpl. auto.
split. unfold list_ub. intros. inversion H. lia.
inversion H0.
auto.
Qed.
Theorem digits_represent_K : digits_represent (godel_digits K) 2.
Proof.
unfold digits_represent.
split. simpl. auto.
split. unfold list_ub. intros. inversion H. lia.
inversion H0.
auto.
Qed.
Theorem digits_represent_var : forall n, digits_represent (godel_digits (var n)) 5.
Proof.
unfold digits_represent.
split. simpl. auto.
split. unfold list_ub. intros. inversion H. lia.
inversion H0.
auto.
Qed.
Theorem digits_represent_app : forall c1 n1 c2 n2,
digits_represent (godel_digits c1) n1 ->
digits_represent (godel_digits c2) n2 ->
digits_represent (godel_digits (c1 [+] c2)) (concat10 3 (concat10 n1 (concat10 n2 4))).
Proof.
intros.
unfold godel_digits. fold godel_digits.
apply app_represents_cat10.
apply single_digit_represents. lia.
apply app_represents_cat10. auto.
apply app_represents_cat10. auto.
apply single_digit_represents. lia. lia.
apply concat10_neq_0. lia.
apply concat10_neq_0.
apply concat10_neq_0. lia.
Qed.
Theorem godel_number_spec c : digits_represent (godel_digits c) (godel_number c).
Proof.
induction c.
*
apply digits_represent_var.
*
apply digits_represent_S.
*
apply digits_represent_K.
*
unfold godel_number.
generalize (digits_represent_app c1 (godel_number c1) c2 (godel_number c2) IHc1 IHc2). intros.
unfold digits_represent in H.
destruct H.
destruct H0.
rewrite H1.
apply digits_represent_app. auto. auto.
Qed.
Definition godel_nxt : nat := godel_number nxt. Opaque godel_nxt.
Definition godel_zro : nat := godel_number zro. Opaque godel_zro.
Theorem godel_number_repSn n :
digits_represent (godel_digits (rep (Datatypes.S n))) (concat10 3 (concat10 godel_nxt (concat10 (godel_number (rep n)) 4))).
Proof.
Transparent rep.
unfold rep. fold rep.
apply digits_represent_app.
apply godel_number_spec.
apply godel_number_spec.
Qed.
Definition next_godelize_action :=
concat10_op [+] rep 3 [+] (concat10_op [+] rep godel_nxt [+] (concat10_op [+] var 0 [+] rep 4)).
Definition next_godelize_op :=
compile_nary 1 next_godelize_action.
Theorem is_const_next_godelize_op : is_const next_godelize_op.
Proof.
unfold is_const.
unfold next_godelize_op.
intros.
assert (n = 0 \/ n <> 0) by lia.
destruct H.
subst.
*
apply compile_nary_removes_vars.
auto.
*
unfold next_godelize_action.
apply compile_nary_intros_no_vars.
generalize is_const_concat10_op. intros.
apply not_contains_var_app_iff. split.
apply not_contains_var_app_iff. split.
auto.
apply is_const_rep_n.
apply not_contains_var_app_iff. split.
apply not_contains_var_app_iff. split.
auto.
apply is_const_rep_n.
apply not_contains_var_app_iff. split.
apply not_contains_var_app_iff. split.
auto.
apply not_contains_var_var_iff. auto.
apply is_const_rep_n.
Qed.
Theorem steps_star_next_godelize_op : forall c,
next_godelize_op [+] c ~>* concat10_op [+] rep 3 [+] (concat10_op [+] rep godel_nxt [+] (concat10_op [+] c [+] rep 4)).
Proof.
intros.
unfold next_godelize_op.
eapply steps_star_trans.
apply steps_star_compile_1ary.
unfold next_godelize_action.
Opaque concat10_op.
Opaque rep.
simpl.
rewrite subst_const.
rewrite subst_const.
rewrite subst_const.
rewrite subst_const.
apply steps_none.
apply is_const_rep_n.
apply is_const_rep_n.
apply is_const_rep_n.
apply is_const_concat10_op.
Qed.
Theorem next_godelize_spec : forall n : nat, next_godelize_op [+] rep (godel_number (rep n)) ~>* rep (godel_number (rep (Datatypes.S n))).
Proof.
intros.
eapply steps_star_trans.
apply steps_star_next_godelize_op.
eapply steps_star_trans.
eapply steps_star_app_r.
eapply steps_star_app_r.
apply concat10_spec.
eapply steps_star_trans.
apply steps_star_app_r.
apply concat10_spec.
eapply steps_star_trans.
apply concat10_spec.
assert (concat10 3 (concat10 godel_nxt (concat10 (godel_number (rep n)) 4)) = godel_number (rep (Datatypes.S n))). {
generalize (godel_number_repSn n). intros.
unfold godel_number at 2.
unfold digits_represent in H. destruct H. destruct H0.
rewrite H1. reflexivity.
}
rewrite H. apply steps_none.
Qed.
Definition godelize_action := eq_zro [+] var 1 [+] rep godel_zro [+] (next_godelize_op [+] (var 0 [+] (prv [+] var 1))).
Definition godelize_preop := compile_nary 2 godelize_action.
Definition godelize_op := sage godelize_preop.
Theorem is_const_godelize_op : is_const godelize_op.
Proof.
unfold is_const.
intros.
assert (n < 2 \/ 2 <= n) as cases by lia.
destruct cases.
*
unfold godelize_op.
apply sage_intros_no_vars.
apply compile_nary_removes_vars. auto.
*
unfold godelize_op.
apply sage_intros_no_vars.
apply compile_nary_intros_no_vars.
unfold godelize_action.
assert (n <> 0) by lia.
assert (n <> 1) by lia.
generalize is_const_next_godelize_op. intros.
generalize is_const_eq_zro. intros.
generalize is_const_prv. intros.
apply not_contains_var_app_iff. split.
apply not_contains_var_app_iff. split.
apply not_contains_var_app_iff. split.
auto.
apply not_contains_var_var_iff. auto.
apply is_const_rep_n.
apply not_contains_var_app_iff. split.
auto.
apply not_contains_var_app_iff. split.
apply not_contains_var_var_iff. auto.
apply not_contains_var_app_iff. split.
auto.
apply not_contains_var_var_iff. auto.
Qed.
Theorem steps_star_godelize_op : forall c, godelize_op [+] c ~>* eq_zro [+] c [+] rep godel_zro [+] (next_godelize_op [+] (godelize_op [+] (prv [+] c))).
Proof.
intros.
unfold godelize_op.
eapply steps_star_trans.
apply steps_star_app_l.
apply steps_star_sage.
unfold godelize_preop at 1.
Opaque godelize_action.
Opaque next_godelize_op.
Opaque sage.
Opaque godelize_preop.
Opaque eq_zro.
Opaque godel_zro.
Opaque prv.
eapply steps_star_trans.
apply steps_star_compile_2ary.
Transparent godelize_action.
unfold godelize_action.
rewrite subst_app_distr.
rewrite subst_app_distr.
rewrite subst_app_distr.
rewrite subst_app_distr.
rewrite subst_app_distr.
simpl.
rewrite subst_const.
rewrite subst_const.
rewrite subst_const.
rewrite subst_const.
apply steps_none.
apply is_const_prv.
apply is_const_next_godelize_op.
apply is_const_rep_n.
apply is_const_eq_zro.
Qed.
Theorem godelize_spec : forall n, godelize_op [+] rep n ~>* rep (godel_number (rep n)).
Proof.
intros.
induction n.
*
eapply steps_star_trans.
apply steps_star_godelize_op.
eapply steps_star_trans.
apply steps_star_app_l.
apply steps_star_app_l.
apply steps_star_eq_zro_0.
eapply steps_star_trans.
apply steps_star_t.
apply steps_none.
*
eapply steps_star_trans.
eapply steps_star_godelize_op.
eapply steps_star_trans.
apply steps_star_app_l.
apply steps_star_app_l.
apply steps_star_eq_zro_Sn.
eapply steps_star_trans.
apply steps_star_f.
eapply steps_star_trans.
eapply steps_star_app_r.
eapply steps_star_app_r.
eapply steps_star_prv_nxt.
eapply steps_star_trans.
apply steps_star_app_r.
apply IHn.
apply next_godelize_spec.
Qed.
Definition normalize_action := concat10_op [+] rep 3 [+] (concat10_op [+] var 0 [+] (concat10_op [+] (godelize_op [+] var 0) [+] rep 4)).
Definition normalize_op := compile_nary 1 normalize_action.
Theorem is_const_normalize_op : is_const normalize_op.
Proof.
unfold is_const.
intros.
unfold normalize_op.
assert (n = 0 \/ n <> 0) as cases by lia.
destruct cases.
*
apply compile_nary_removes_vars. lia.
*
apply compile_nary_intros_no_vars.
unfold normalize_action.
generalize is_const_concat10_op. intros.
generalize is_const_godelize_op. intros.
apply not_contains_var_app_iff. split.
apply not_contains_var_app_iff. split.
auto.
apply is_const_rep_n.
apply not_contains_var_app_iff. split.
apply not_contains_var_app_iff. split.
auto.
apply not_contains_var_var_iff. auto.
apply not_contains_var_app_iff. split.
apply not_contains_var_app_iff. split.
auto.
apply not_contains_var_app_iff. split.
auto.
apply not_contains_var_var_iff. auto.
apply is_const_rep_n.
Qed.
Theorem steps_star_normalize_op : forall c,
normalize_op [+] c ~>* concat10_op [+] rep 3 [+] (concat10_op [+] c [+] (concat10_op [+] (godelize_op [+] c) [+] rep 4)).
Proof.
intros.
eapply steps_star_trans.
unfold normalize_op.
apply steps_star_compile_1ary.
unfold normalize_action.
rewrite subst_app_distr.
rewrite subst_app_distr.
rewrite subst_app_distr.
Opaque concat10_op.
Opaque godelize_op.
simpl.
rewrite subst_const.
rewrite subst_const.
rewrite subst_const.
rewrite subst_const.
apply steps_none.
apply is_const_rep_n.
apply is_const_godelize_op.
apply is_const_rep_n.
apply is_const_concat10_op.
Qed.
Theorem normalize_spec : forall n, normalize_op [+] rep n ~>* rep (concat10 3 (concat10 n (concat10 (godel_number (rep n)) 4))).
Proof.
intros.
eapply steps_star_trans.
apply steps_star_normalize_op.
eapply steps_star_trans.
apply steps_star_app_r.
apply steps_star_app_r.
apply steps_star_app_l.
apply steps_star_app_r.
apply godelize_spec.
eapply steps_star_trans.
apply steps_star_app_r.
apply steps_star_app_r.
apply concat10_spec.
eapply steps_star_trans.
eapply steps_star_app_r.
apply concat10_spec.
apply concat10_spec.
Qed.
Definition sage2 c := B [+] c [+] normalize_op [+] rep (godel_number (B [+] c [+] normalize_op)).
Theorem fixed_point_principle :
forall c, (sage2 c) ~>* c [+] rep (godel_number (sage2 c)).
Proof.
intros.
unfold sage2 at 1.
eapply steps_star_trans.
apply steps_star_B.
eapply steps_star_trans.
apply steps_star_app_r.
apply normalize_spec.
assert (godel_number(sage2 c) = concat10 3 (concat10 (godel_number (B [+] c [+] normalize_op)) (concat10 (godel_number (rep (godel_number (B [+] c [+] normalize_op)))) 4))). {
unfold sage2.
pose digits_represent_app.
assert (digits_represent (godel_digits (B [+] c [+] normalize_op [+] rep (godel_number (B [+] c [+] normalize_op))))
(concat10 3 (concat10 (godel_number (B [+] c [+] normalize_op)) (concat10 (godel_number (rep (godel_number (B [+] c [+] normalize_op)))) 4)))). {
apply digits_represent_app.
apply godel_number_spec.
apply godel_number_spec.
}
generalize (godel_number_spec (B [+] c [+] normalize_op [+] rep (godel_number (B [+] c [+] normalize_op)))). intros.
destruct H. destruct H1. rewrite <- H2.
apply H0.
}
rewrite H.
apply steps_none.
Qed.
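(* Reading of the theorem: sage2 c is self-referential -- it reduces to c
   applied to (the numeral for) its own Godel number. This is the fixed-point
   construction that the Godel sentences below are built on. *)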
Definition is_computable (P : nat -> Prop) := exists c, is_const c /\ (forall n, (P n /\ c [+] rep n ~>* t) \/ (~ P n /\ c [+] rep n ~>* f)).
Theorem is_computable_complement (P : nat-> Prop) : is_computable P -> is_computable (fun n => ~P n).
Proof.
intros.
unfold is_computable.
unfold is_computable in H.
destruct H.
exists (B [+] neg [+] x).
split.
rewrite is_const_app_iff. split.
rewrite is_const_app_iff. split.
apply is_const_B.
apply is_const_neg.
destruct H. auto.
destruct H as [HH H].
intros.
generalize (H n). intros. clear H.
destruct H0. destruct H.
right.
split.
unfold not.
intros. apply H1 in H. apply H.
eapply steps_star_trans.
apply steps_star_B.
eapply steps_star_trans.
apply steps_star_app_r.
apply H0.
apply steps_star_neg_t.
left.
destruct H.
split.
apply H.
eapply steps_star_trans.
apply steps_star_B.
eapply steps_star_trans.
apply steps_star_app_r.
apply H0.
apply steps_star_neg_f.
Qed.
Definition godel_sentence (P : nat -> Prop) (c : expr) := is_const c /\ ((c ~>* t /\ P (godel_number c)) \/ (~ c ~>* t /\ ~P (godel_number c))).
Theorem computable_prop_has_godel_sentence : forall P, is_computable P -> exists g, godel_sentence P g.
Proof.
intros.
unfold is_computable in H.
destruct H as [x [HH H]].
exists (sage2 x).
unfold godel_sentence.
split.
unfold sage2.
rewrite is_const_app_iff.
rewrite is_const_app_iff.
rewrite is_const_app_iff.
split. split. split.
apply is_const_B.
auto.
apply is_const_normalize_op.
apply is_const_rep_n.
generalize (H (godel_number (sage2 x))). intros.
destruct H0.
destruct H0.
clear H.
left.
split.
eapply steps_star_trans.
apply fixed_point_principle.
apply H1. apply H0.
right.
destruct H0.
split.
unfold not.
intros.
assert (t = f). {
eapply unique_normal_form.
apply H2.
eapply steps_star_trans.
apply fixed_point_principle.
apply H1.
apply is_normal_t.
apply is_normal_f.
}
inversion H3.
apply H0.
Qed.
Definition converges_t n := exists c, n = godel_number c /\ c ~>* t.
Theorem converges_t_not_computable : ~is_computable converges_t.
Proof.
unfold not.
intros.
generalize (is_computable_complement converges_t H). intros.
apply computable_prop_has_godel_sentence in H0.
destruct H0.
unfold godel_sentence in H0.
unfold is_computable in H.
destruct H as [c [HH H]].
generalize (H (godel_number x)). intros. clear H.
destruct H0 as [HHH H].
destruct H; destruct H1.
*
destruct H. destruct H0. contradiction.
*
destruct H.
apply H1.
unfold converges_t.
exists x. split; auto.
*
destruct H.
destruct H0.
unfold converges_t in H0.
destruct H0.
destruct H0.
assert (x = x0). {
generalize (godel_number_spec x). intros.
generalize (godel_number_spec x0). intros.
assert (godel_digits x = godel_digits x0). {
eapply digits_rep_unique.
apply H4.
rewrite H0.
auto.
}
generalize (all_unambiguous_parse (godel_digits x0)). intros.
unfold has_unambiguous_parse in H7.
apply H7.
split. auto. auto. auto.
}
subst. contradiction.
*
destruct H.
destruct H0.
contradiction.
Qed.
|
{"author": "markisus", "repo": "coq-ski", "sha": "ef866785105d463e771a16e576faf940e832e91b", "save_path": "github-repos/coq/markisus-coq-ski", "path": "github-repos/coq/markisus-coq-ski/coq-ski-ef866785105d463e771a16e576faf940e832e91b/src/godel.v"}
|
# Library
import math
import datetime
import numpy as np
# constant variables
orbit_days = 365.256363004 # Earth orbit in days
au = 149598261 # The semi-major axis of the orbital ellipse, in km
e = 0.01671123 # Earth orbit elliptical eccentricity
solar_c = 1367 # Solar constant
def el_az_changer(time_raw, lat = 36, lte = 127):
hour = time_raw.hour + time_raw.minute / 60
do = datetime.datetime(time_raw.year, 1, 1)
d1 = datetime.datetime(time_raw.year, time_raw.month, time_raw.day, time_raw.hour)
day_delta = d1 - do
day = day_delta.days
dy = time_raw.year - 1949
leap = round(dy / 4)
    Jd = 2432916.5 + dy * 365 + leap + day + hour / 24  # Julian day
    n = Jd - 2451545.0
    L = 280.460 + 0.9856474 * n  # mean longitude L (0-360 deg)
    while L > 360:  # wrap the longitude back into 0-360 deg
        L = L - 360
    g = 357.528 + 0.9856003 * n  # mean anomaly g (0-360 deg)
    while g > 360:  # wrap the mean anomaly back into 0-360 deg
        g = g - 360
    I = L + 1.915 * math.sin(math.radians(g)) + 0.02 * math.sin(2 * math.radians(g))  # ecliptic longitude I (capital i) (0-360 deg)
    while I > 360:  # wrap the ecliptic longitude back into 0-360 deg
        I = I - 360
    ep = 23.439 - 0.0000004 * n  # obliquity of the ecliptic
    ra = 360 + math.atan2(math.cos(math.radians(ep)) * math.sin(math.radians(I)),
                          math.cos(math.radians(I))) * 180 / np.pi  # right ascension ra (0-360 deg)
    while ra > 360:  # wrap the right ascension back into 0-360 deg
        ra = ra - 360
    dec = math.asin(math.sin(math.radians(ep)) * math.sin(math.radians(I))) * 180 / np.pi  # declination dec
    gmst = 6.697375 + 0.0657098242 * n + hour  # Greenwich mean sidereal time (as an angle)
    gmst = gmst * 15
    while gmst > 360:  # wrap GMST so it stays expressible on a 24-hour clock
        gmst = gmst - 360
    gmst = gmst / 15
    lmst = gmst + lte / 15  # local mean sidereal time at the rail measurement site / lte (longitude), e.g. Seoul: 126.98333
    ha = lmst - ra / 15  # hour angle (-12 < ha < 12)
    ha = ha * 15  # express the hour angle (in hours) as an angle in degrees
    el = math.asin(math.sin(math.radians(dec)) * math.sin(math.radians(lat)) + math.cos(math.radians(dec)) * math.cos(
        math.radians(lat)) * math.cos(math.radians(ha))) * 180 / np.pi  # solar elevation angle el / lat (latitude)
    az = math.asin(-math.cos(math.radians(dec)) * math.sin(math.radians(ha)) / math.cos(
        math.radians(el))) * 180 / np.pi  # solar azimuth angle az
    if math.sin(math.radians(el)) >= math.sin(math.radians(dec)) / math.sin(math.radians(lat)):
        az = 180 - az
    elif math.sin(math.radians(el)) <= math.sin(math.radians(dec)) / math.sin(math.radians(lat)) and ha > 0:
        az = 360 + az
    if az < 0:  # azimuth correction
        az = az + 360
    else:
        while az > 360:
            az = az - 360
    return az, el
def TSI(time_raw):
per_date = datetime.datetime(time_raw.year, 1, 2)
seq_date = time_raw
day_delta = seq_date - per_date
day_delta_raw = day_delta.days
theta = (day_delta_raw * (360 / orbit_days)) * (np.pi / 180)
r = (au * (1 - pow(e, 2))) / (1 + e * math.cos(theta)) / (pow(10, 8))
TSI_raw = solar_c * pow((1.4957911194950864 / r), 2)
return TSI_raw
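if __name__ == "__main__":
    # Minimal sketch: solar azimuth/elevation and TSI for a sample timestamp.
    # The lat/lte values are illustrative (they match the defaults above).
    sample_time = datetime.datetime(2020, 6, 21, 12, 0)
    az, el = el_az_changer(sample_time, lat=36, lte=127)
    print("azimuth: {:.2f} deg, elevation: {:.2f} deg".format(az, el))
    print("TSI: {:.2f} W/m^2".format(TSI(sample_time)))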
|
{"hexsha": "cf13cc5dcd33a3b8867a6f7e28d66ce519635e3f", "size": 3087, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_preprocessor.py", "max_stars_repo_name": "CheolJ/rail_temperature_prediction_amledu", "max_stars_repo_head_hexsha": "5486c95ff43a681d326261bdc57686c1580df6bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-11-23T14:43:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T06:10:55.000Z", "max_issues_repo_path": "feature_preprocessor.py", "max_issues_repo_name": "jw3498/rail_temperature_prediction_amledu", "max_issues_repo_head_hexsha": "5486c95ff43a681d326261bdc57686c1580df6bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-23T14:14:06.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-23T14:33:14.000Z", "max_forks_repo_path": "feature_preprocessor.py", "max_forks_repo_name": "jw3498/rail_temperature_prediction_amledu", "max_forks_repo_head_hexsha": "5486c95ff43a681d326261bdc57686c1580df6bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-24T06:41:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-24T12:56:53.000Z", "avg_line_length": 35.8953488372, "max_line_length": 119, "alphanum_fraction": 0.5704567541, "include": true, "reason": "import numpy", "num_tokens": 1194}
|
import random
import torch
from matplotlib import pyplot as plt
from torch import manual_seed, cuda, backends
import numpy as np
from sklearn.metrics import confusion_matrix
import seaborn as sn
import pandas as pd
class Meter:
def __init__(self):
self.values, self.avg, self.sum, self.cnt = [], 0, 0, 0
def reset(self):
self.values, self.avg, self.sum, self.cnt = [], 0, 0, 0
def update(self, value, k=1):
self.values.append(value)
self.sum += value
self.cnt += k
self.avg = self.sum / self.cnt
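# Usage sketch: Meter keeps a running average, where `value` is added to the
# running sum and `k` to the running count.
#   meter = Meter()
#   meter.update(batch_loss_sum, k=batch_size)  # hypothetical per-batch totals
#   meter.avg  # -> mean per sample so far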
def fix_random_seed(seed=1234):
# Ref.: https://github.com/bentrevett/pytorch-image-classification/blob/master/5_resnet.ipynb
random.seed(seed)
np.random.seed(seed)
manual_seed(seed)
cuda.manual_seed(seed)
backends.cudnn.deterministic = True
def store_txt(path, txt):
with open(path, 'w') as f:
f.write(str(txt))
f.flush()
def createConfusionMatrix(net, loader, num_of_classes, labels=None):
    y_pred, y_true = [], []  # save prediction, ground truth
    # iterate over data
    with torch.no_grad():
        for _, inputs, targets in loader:  # `targets` avoids shadowing the `labels` argument
            output = net(inputs.cuda())  # Feed Network
            output = (torch.max(output, 1)[1]).data.cpu().numpy()
            y_pred.extend(output)  # save prediction
            y_true.extend(targets.data.cpu().numpy())  # save ground truth
    plt.figure(figsize=(11, 8))  # create the figure before drawing the heatmap
    if num_of_classes <= 15:
        if labels is None:
            labels = list(range(num_of_classes))
        # Build confusion matrix
        cf_matrix = confusion_matrix(y_true, y_pred)
        new_matrix = cf_matrix / np.sum(cf_matrix) * num_of_classes
        p = sn.heatmap(np.array(new_matrix), annot=True, vmin=0, vmax=1, fmt='.2f',
                       square=True, cmap="Blues", xticklabels=labels, yticklabels=labels)
    else:
        classes = list(range(num_of_classes))
        # constant for classes
        cf_matrix = confusion_matrix(y_true, y_pred)
        df_cm = pd.DataFrame(cf_matrix / np.sum(cf_matrix) * num_of_classes,
                             index=classes, columns=classes)
        p = sn.heatmap(df_cm, vmin=0, vmax=1, square=True, cmap="Blues")
    # sklearn's confusion_matrix puts ground truth on rows, predictions on columns
    p.set_xlabel('Predicted Class')
    p.set_ylabel('Ground Truth Class')
    p.axhline(y=0, color='k', linewidth=1)
    p.axhline(y=num_of_classes, color='k', linewidth=2)
    p.axvline(x=0, color='k', linewidth=1)
    p.axvline(x=num_of_classes, color='k', linewidth=2)
    plt.tight_layout()
    return p.get_figure()
|
{"hexsha": "856fcccefa16aeefb977da1893a75f622abf80d9", "size": 2513, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/my_utils/util.py", "max_stars_repo_name": "Dodant/Knowledge-Distillation", "max_stars_repo_head_hexsha": "fcaef389632c4f3631d47098cab5f0bcdb232201", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/my_utils/util.py", "max_issues_repo_name": "Dodant/Knowledge-Distillation", "max_issues_repo_head_hexsha": "fcaef389632c4f3631d47098cab5f0bcdb232201", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/my_utils/util.py", "max_forks_repo_name": "Dodant/Knowledge-Distillation", "max_forks_repo_head_hexsha": "fcaef389632c4f3631d47098cab5f0bcdb232201", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.024691358, "max_line_length": 112, "alphanum_fraction": 0.6358933546, "include": true, "reason": "import numpy", "num_tokens": 651}
|
# pylint: disable=no-name-in-module
import io
import re
import inspect
from functools import wraps
from itertools import tee, chain
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.estimator import ModeKeys # noqa
from tensorflow.train import (
BytesList,
Feature,
Features,
Example,
Int64List,
) # noqa
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
from tensorflow.contrib.metrics import confusion_matrix as cm
from tsaplay.constants import TF_DELIMITER, MAX_EMBEDDING_SHARDS
from tsaplay.utils.io import export_run_metadata, cprnt
from tsaplay.utils.data import zero_norm_labels, split_list
from tsaplay.utils.debug import timeit
def embed_sequences(model_fn):
@wraps(model_fn)
def wrapper(self, features, labels, mode, params):
embedded_sequences = {}
embedding_init = self.aux_config.get("embedding_init", "partitioned")
embedding_init_fn = params["_embedding_init"]
num_shards = params["_embedding_num_shards"]
embedding_partitioner = (
tf.fixed_size_partitioner(num_shards)
if embedding_init == "partitioned"
else None
)
embedding_initializer = (
params["_embedding_init"]
if embedding_init in ["variable", "partitioned"]
else None
)
vocab_size = params["_vocab_size"]
dim_size = params["_embedding_dim"]
trainable = params.get("train_embeddings", True)
with tf.variable_scope("embedding_layer", reuse=tf.AUTO_REUSE):
embeddings = tf.get_variable(
"embeddings",
shape=[vocab_size, dim_size],
initializer=embedding_initializer,
partitioner=embedding_partitioner,
trainable=trainable,
dtype=tf.float32,
)
for key, value in features.items():
if "_ids" in key:
component = key.replace("_ids", "")
embdd_key = component + "_emb"
embedded_sequence = tf.nn.embedding_lookup(
params=embeddings, ids=value, partition_strategy="div"
)
embedded_sequences[embdd_key] = embedded_sequence
features.update(embedded_sequences)
spec = model_fn(self, features, labels, mode, params)
if embedding_init == "constant":
def init_embeddings(sess):
sess.run(
embeddings.initializer,
{embeddings.initial_value: embedding_init_fn()},
)
spec = scaffold_init_fn_on_spec(spec, init_embeddings)
return spec
return wrapper
def checkpoints_state_data(model_dir):
chkpt_state = tf.train.get_checkpoint_state(model_dir)
if chkpt_state is not None:
extract_step = lambda p: (
int(re.match(r".*-(?P<step>[0-9]+)", p).groupdict().get("step"))
if re.match(r".*-(?P<step>[0-9]+)", p) is not None
else 0
)
# pylint: disable=no-member
path = chkpt_state.model_checkpoint_path
# pylint: disable=no-member
all_paths = chkpt_state.all_model_checkpoint_paths
return {
"restore_path": path,
"step": extract_step(path),
"all_paths": all_paths,
"all_steps": [extract_step(p) for p in all_paths],
}
return {"step": 0}
def resolve_optimizer(**params):
opt_fn = {
"sgd": tf.train.GradientDescentOptimizer,
"momentum": tf.train.MomentumOptimizer,
"adagrad": tf.train.AdagradOptimizer,
"adadelta": tf.train.AdadeltaOptimizer,
"adam": tf.train.AdamOptimizer,
"rmsprop": tf.train.RMSPropOptimizer,
}[params["optimizer"].lower()]
opt_sig = inspect.signature(opt_fn)
opt_args = {
p.name: params[p.name]
for p in opt_sig.parameters.values()
if p.kind == p.POSITIONAL_OR_KEYWORD and p.name in [*params]
}
return opt_fn(**opt_args)
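# Usage sketch (hypothetical hyperparameters): only keyword arguments that
# appear in the chosen optimizer's constructor are forwarded, so extra keys
# in `params` are ignored safely.
#   opt = resolve_optimizer(optimizer="momentum", learning_rate=0.1, momentum=0.9)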
def sharded_saver(model_fn):
@wraps(model_fn)
def wrapper(self, features, labels, mode, params):
spec = model_fn(self, features, labels, mode, params)
scaffold = spec.scaffold or tf.train.Scaffold()
scaffold._saver = tf.train.Saver( # pylint: disable=W0212
sharded=True,
max_to_keep=self.run_config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=self.run_config.keep_checkpoint_every_n_hours,
)
spec = spec._replace(scaffold=scaffold)
return spec
return wrapper
def scaffold_init_fn_on_spec(spec, new_fn):
if spec.mode != ModeKeys.TRAIN:
return spec
scaffold = spec.scaffold or tf.train.Scaffold()
prev_init = scaffold.init_fn
def new_init_fn(scaffold, sess):
if prev_init is not None:
prev_init(sess)
new_fn(sess)
scaffold._init_fn = lambda sess: new_init_fn(scaffold, sess)
return spec._replace(scaffold=scaffold)
def parse_tf_example(example):
feature_spec = {
"left": tf.VarLenFeature(dtype=tf.string),
"target": tf.VarLenFeature(dtype=tf.string),
"right": tf.VarLenFeature(dtype=tf.string),
"left_ids": tf.VarLenFeature(dtype=tf.int64),
"target_ids": tf.VarLenFeature(dtype=tf.int64),
"right_ids": tf.VarLenFeature(dtype=tf.int64),
"labels": tf.FixedLenFeature(dtype=tf.int64, shape=[]),
}
parsed_example = tf.parse_example([example], features=feature_spec)
features = {
"left": parsed_example["left"],
"target": parsed_example["target"],
"right": parsed_example["right"],
"left_ids": parsed_example["left_ids"],
"target_ids": parsed_example["target_ids"],
"right_ids": parsed_example["right_ids"],
}
labels = tf.squeeze(parsed_example["labels"], axis=0)
return (features, labels)
def make_dense_features(features):
dense_features = {}
for key in features:
if "_ids" in key:
name, _, _ = key.partition("_")
if features.get(name):
dense_features.update(
{name: sparse_sequences_to_dense(features[name])}
)
name_ids = sparse_sequences_to_dense(features[key])
name_lens = get_seq_lengths(name_ids)
dense_features.update(
{name + "_ids": name_ids, name + "_len": name_lens}
)
features.update(dense_features)
return features
def make_input_fn(mode):
def decorator(func):
@wraps(func)
def input_fn(*args, **kwargs):
if mode in ["TRAIN", "EVAL"]:
try:
tfrecords = args[1]
except IndexError:
tfrecords = kwargs.get("tfrecords")
try:
params = args[2]
except IndexError:
params = kwargs.get("params")
def process_dataset(features, labels):
return (args[0].processing_fn(features), labels)
return prep_dataset(
tfrecords=tfrecords,
params=params,
processing_fn=process_dataset,
mode=mode,
)
raise ValueError("Invalid mode: {0}".format(mode))
return input_fn
return decorator
def prep_dataset(tfrecords, params, processing_fn, mode):
shuffle_buffer = params.get("shuffle_buffer", 100000)
parallel_calls = params.get("parallel_calls", 4)
parallel_batches = params.get("parallel_batches", parallel_calls)
prefetch_buffer = params.get("prefetch_buffer", 100)
dataset = tf.data.Dataset.list_files(
file_pattern=tfrecords, shuffle=(mode != "EVAL")
)
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
cycle_length=3,
buffer_output_elements=prefetch_buffer,
prefetch_input_elements=parallel_calls,
)
)
if mode == "TRAIN":
print("Reshuffling each iteration")
dataset = dataset.shuffle(
buffer_size=shuffle_buffer, reshuffle_each_iteration=True
)
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
lambda example: processing_fn(*parse_tf_example(example)),
params["batch-size"],
num_parallel_batches=parallel_batches,
)
)
dataset = dataset.map(
lambda features, labels: (make_dense_features(features), labels),
num_parallel_calls=parallel_calls,
)
if mode == "TRAIN":
# ? epochs == 0 => repeat indefinitely => count = None
dataset = dataset.repeat(count=(params.get("epochs") or None))
return dataset
def sparse_sequences_to_dense(sp_sequences):
if sp_sequences.dtype == tf.string:
default = b""
else:
default = 0
dense = tf.sparse.to_dense(sp_sequences, default_value=default)
needs_squeezing = tf.equal(tf.size(sp_sequences.dense_shape), 3)
dense = tf.cond(
needs_squeezing, lambda: tf.squeeze(dense, axis=1), lambda: dense
)
dense = tf.pad(dense, paddings=[[0, 0], [0, 1]], constant_values=default)
return dense
def sparse_reverse(sp_input):
reversed_indices = tf.reverse(sp_input.indices, axis=[0])
reversed_sp_input = tf.SparseTensor(
reversed_indices, sp_input.values, sp_input.dense_shape
)
return tf.sparse_reorder(reversed_sp_input)
def get_seq_lengths(batched_sequences):
lengths = tf.reduce_sum(tf.sign(batched_sequences), axis=1)
return tf.cast(lengths, tf.int32)
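# get_seq_lengths sketch: for zero-padded id batches, tf.sign marks non-zero
# entries, so [[4, 2, 0], [7, 0, 0]] -> lengths [2, 1].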
def variable_len_batch_mean(input_tensor, seq_lengths, op_name):
with tf.name_scope(name=op_name):
input_sum = tf.reduce_sum(
input_tensor=input_tensor, axis=1, keepdims=True
)
seq_lengths_t = tf.transpose([[seq_lengths]])
seq_lengths_tiled = tf.tile(
seq_lengths_t, multiples=[1, 1, tf.shape(input_sum)[2]]
)
seq_lengths_float = tf.to_float(seq_lengths_tiled)
batched_means = tf.divide(input_sum, seq_lengths_float)
return batched_means
def masked_softmax(logits, mask):
"""
Masked softmax over dim 1, mask broadcasts over dim 2
:param logits: (N, L, T)
:param mask: (N, L)
:return: probabilities (N, L, T)
"""
seq_len = tf.shape(logits)[2]
indices = tf.cast(tf.where(tf.logical_not(mask)), tf.int32)
inf = tf.constant(
np.array([[tf.float32.max]], dtype=np.float32), dtype=tf.float32
)
infs = tf.tile(inf, [tf.shape(indices)[0], seq_len])
infmask = tf.scatter_nd(
indices=indices, updates=infs, shape=tf.shape(logits)
)
masked_sm = tf.nn.softmax(logits - infmask, axis=1)
return masked_sm
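# masked_softmax sketch (hypothetical values): with logits of shape (1, 3, 1)
# [[[1.], [2.], [3.]]] and mask [[True, True, False]], the masked position has
# float32-max subtracted before the softmax, so its probability is ~0 and the
# softmax renormalizes over the first two positions only.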
def gru_cell(**params):
hidden_units = params.get("gru_hidden_units", params.get("hidden_units"))
initializer = params.get("gru_initializer", params.get("initializer"))
bias_initializer = params.get(
"gru_bias_initializer", params.get("bias_initializer")
)
gru = tf.nn.rnn_cell.GRUCell(
num_units=hidden_units,
kernel_initializer=initializer,
bias_initializer=(bias_initializer or initializer),
)
return gru
def lstm_cell(**params):
hidden_units = params.get("lstm_hidden_units", params.get("hidden_units"))
initializer = params.get("lstm_initializer", params.get("initializer"))
initial_bias = params.get("lstm_initial_bias", 1)
lstm = tf.nn.rnn_cell.LSTMCell(
num_units=hidden_units,
initializer=initializer,
forget_bias=initial_bias,
)
return lstm
def l2_regularized_loss(
    labels,
    logits,
    l2_weight,
    variables=None,
    loss_fn=tf.losses.sparse_softmax_cross_entropy,
):
    # A default of None avoids evaluating tf.trainable_variables() at import
    # time, before the model graph has been built.
    if variables is None:
        variables = tf.trainable_variables()
    with tf.name_scope("l2_loss"):
        loss = loss_fn(labels=labels, logits=logits)
        l2_reg = l2_weight * tf.reduce_sum(
            [tf.nn.l2_loss(v) for v in variables], name="l2_reg"
        )
        loss += l2_reg
    return loss
def attention_unit(
h_states,
hidden_units,
seq_lengths,
attn_focus,
init,
bias_init=None,
sp_literal=None,
):
batch_size = tf.shape(h_states)[0]
max_seq_len = tf.shape(h_states)[1]
weights = tf.get_variable(
name="weights",
shape=[hidden_units, hidden_units],
dtype=tf.float32,
initializer=init,
)
bias = tf.get_variable(
name="bias",
shape=[1],
dtype=tf.float32,
initializer=(bias_init or init),
)
weights = tf.expand_dims(input=weights, axis=0)
weights = tf.tile(
input=weights, multiples=[batch_size * max_seq_len, 1, 1]
)
weights = tf.reshape(
tensor=weights, shape=[-1, max_seq_len, hidden_units, hidden_units]
)
h_states = tf.expand_dims(input=h_states, axis=2)
attn_focus = tf.tile(input=attn_focus, multiples=[1, max_seq_len, 1])
attn_focus = tf.expand_dims(input=attn_focus, axis=3)
bias_mask = tf.sequence_mask(
lengths=seq_lengths, maxlen=max_seq_len, dtype=tf.float32
)
bias = bias_mask * bias
bias = tf.reshape(tensor=bias, shape=[batch_size, -1, 1, 1])
f_score = tf.nn.tanh(
tf.einsum("Baij,Bajk,Bakn->Bain", h_states, weights, attn_focus) + bias
)
f_score = tf.squeeze(input=f_score, axis=3)
mask = tf.sequence_mask(lengths=seq_lengths, maxlen=max_seq_len)
attn_vec = masked_softmax(logits=f_score, mask=mask)
# attn_summary_info = tf.tuple([sp_literal, attn_vec])
attn_summary_info = tf.tuple([sp_literal, f_score])
attn_vec = tf.expand_dims(attn_vec, axis=3)
weighted_h_states = tf.einsum("Baij,Bajk->Baik", attn_vec, h_states)
weighted_h_states_sum = tf.reduce_sum(
input_tensor=weighted_h_states, axis=1
)
final_rep = tf.squeeze(input=weighted_h_states_sum, axis=1)
return (
final_rep, # dim: [batch_size, hidden_units*2] (for BiLSTM)
attn_summary_info, # to optionally use for summary heatmaps
)
def append_snapshot(container, new_snap, index):
new_snap = tf.expand_dims(new_snap, axis=0)
total_snaps = tf.shape(container)[0]
batch_diff = tf.shape(container)[1] - tf.shape(new_snap)[1]
new_snap = tf.pad(
new_snap,
paddings=[
[index - 1, total_snaps - index],
[0, batch_diff],
[0, 0],
[0, 0],
],
)
container = tf.add(container, new_snap)
return container
def create_snapshots_container(shape_like, n_snaps):
container = tf.zeros_like(shape_like, dtype=tf.float32)
container = tf.expand_dims(container, axis=0)
container = tf.expand_dims(container, axis=3)
container = tf.tile(container, multiples=[n_snaps, 1, 1, 1])
return container
def zip_attn_snapshots_with_literals(literals, snapshots, num_layers):
max_len = tf.shape(snapshots)[2]
snapshots = tf.transpose(snapshots, perm=[1, 0, 2, 3])
snapshots = tf.reshape(snapshots, shape=[-1, max_len, 1])
# literals = sparse_sequences_to_dense(sp_literals)
literals = tf.expand_dims(literals, axis=1)
literals = tf.tile(literals, multiples=[1, num_layers, 1])
literals = tf.reshape(literals, shape=[-1, max_len])
return literals, snapshots
def bulk_add_to_collection(collection, *variables):
for variable in variables:
tf.add_to_collection(collection, variable)
def generate_attn_heatmap_summary(*attn_infos):
for attn_info in attn_infos:
tf.add_to_collection("ATTENTION", attn_info)
def image_to_summary(name, image):
with io.BytesIO() as output:
image.save(output, "PNG")
png_encoded = output.getvalue()
summary_image = tf.Summary.Image(
height=image.size[1],
width=image.size[0],
colorspace=4, # RGB-A
encoded_image_string=png_encoded,
)
summary = tf.Summary(
value=[tf.Summary.Value(tag=name, image=summary_image)]
)
return summary
def ids_lookup_table(vocab_file_path, oov_buckets=1, vocab_size=None):
return tf.contrib.lookup.index_table_from_file(
vocabulary_file=vocab_file_path,
vocab_size=vocab_size,
key_column_index=0,
value_column_index=1,
num_oov_buckets=oov_buckets,
delimiter="\t",
)
def fetch_lookup_ops(lookup_table, **tokens_lists):
list_lengths = [len(tkn_list) for tkn_list in tokens_lists.values()]
tkns_lsts = sum([tkn_list for tkn_list in tokens_lists.values()], [])
tokens_list = (
TF_DELIMITER.join(tkns_list).encode("utf-8") for tkns_list in tkns_lsts
)
tokens_tensors = (
tf.constant([tkns_list], dtype=tf.string) for tkns_list in tokens_list
)
tokens_sp_tensors = (
tf.string_split(tkn_ten, TF_DELIMITER) for tkn_ten in tokens_tensors
)
string_sp_tensors, id_sp_tensors = tee(tokens_sp_tensors)
string_ops = (
tf.sparse.to_dense(sp_tensor, default_value=b"")
for sp_tensor in string_sp_tensors
)
id_ops = (
tf.sparse.to_dense(lookup_table.lookup(sp_tensor))
for sp_tensor in id_sp_tensors
)
op_generator = chain(string_ops, id_ops)
total = len(tkns_lsts) * 2
op_generator = tqdm(op_generator, total=total, desc="Building Lookup Ops")
ops = [op for op in op_generator]
string_ops, id_ops = split_list(ops, parts=2)
return {
key: str_ops + id_ops
for (key, str_ops, id_ops) in zip(
[*tokens_lists],
split_list(string_ops, counts=list_lengths),
split_list(id_ops, counts=list_lengths),
)
}
@timeit("Executing token ID lookups", "Token IDs generated")
def run_lookups(fetch_dict, metadata_path=None, eager=False):
if tf.executing_eagerly() and not eager:
raise ValueError("Eager execution is not supported.")
run_metadata = tf.RunMetadata()
run_opts = (
tf.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE # pylint: disable=E1101
)
if metadata_path
else None
)
with tf.Session() as sess:
sess.run(tf.tables_initializer())
values_dict = sess.run(
fetch_dict, options=run_opts, run_metadata=run_metadata
)
if metadata_path:
export_run_metadata(run_metadata, path=metadata_path)
return {
key: [value.tolist()[0] for value in values]
for (key, values) in values_dict.items()
}
def make_tf_examples(string_features, int_features, labels):
int_features += [[label] for label in zero_norm_labels(labels)]
string_features = [
Feature(bytes_list=BytesList(value=val)) for val in string_features
]
int_features = [
Feature(int64_list=Int64List(value=val)) for val in int_features
]
all_features = string_features + int_features
return [
Example(
features=Features(
feature={
"left": left,
"target": target,
"right": right,
"left_ids": left_ids,
"target_ids": target_ids,
"right_ids": right_ids,
"labels": label,
}
)
)
for (
left,
target,
right,
left_ids,
target_ids,
right_ids,
label,
) in zip(*split_list(all_features, parts=7))
]
def partitioner_num_shards(vocab_size, max_shards=MAX_EMBEDDING_SHARDS):
for i in range(max_shards, 0, -1):
if vocab_size % i == 0:
return i
return 1
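# Worked example: partitioner_num_shards(30, max_shards=8) == 6 -- the largest
# shard count <= max_shards that divides the vocab size evenly (falls back to 1).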
def embedding_initializer_fn(vectors, num_shards, structure=None):
shape = vectors.shape
partition_size = int(shape[0] / num_shards)
def _init_part_var(shape=shape, dtype=tf.float32, partition_info=None):
part_offset = partition_info.single_offset(shape)
this_slice = part_offset + partition_size
return vectors[part_offset:this_slice]
def _init_var(shape=shape, dtype=tf.float32, partition_info=None):
return vectors
def _init_const():
return vectors
return {
"partitioned": _init_part_var,
"variable": _init_var,
"constant": _init_const,
}.get(structure, _init_part_var)
def metric_variable(shape, dtype, validate_shape=True, name=None):
"""Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES`) collections.
https://github.com/tensorflow/tensorflow/blob/r1.8/tensorflow/python/ops/metrics_impl.py
"""
return variable_scope.variable(
lambda: array_ops.zeros(shape, dtype),
trainable=False,
collections=[
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.METRIC_VARIABLES,
],
validate_shape=validate_shape,
name=name,
)
def streaming_conf_matrix(labels, predictions, num_classes):
conf_mat = metric_variable(
shape=[num_classes, num_classes],
dtype=tf.int64,
validate_shape=False,
name="total_confusion_matrix",
)
up_conf_mat = tf.assign_add(
conf_mat,
cm(labels, predictions, dtype=tf.int64, num_classes=num_classes),
)
return conf_mat, up_conf_mat
# pylint: disable=too-many-locals
def streaming_f1_scores(labels, predictions, num_classes):
y_true = tf.cast(
tf.one_hot(indices=labels, depth=num_classes), dtype=tf.int64
)
y_pred = tf.cast(
tf.one_hot(indices=predictions, depth=num_classes), dtype=tf.int64
)
weights = metric_variable(
shape=[num_classes],
dtype=tf.int64,
validate_shape=False,
name="weights",
)
tp_mac = metric_variable(
shape=[num_classes],
dtype=tf.int64,
validate_shape=False,
name="tp_mac",
)
fp_mac = metric_variable(
shape=[num_classes],
dtype=tf.int64,
validate_shape=False,
name="fp_mac",
)
fn_mac = metric_variable(
shape=[num_classes],
dtype=tf.int64,
validate_shape=False,
name="fn_mac",
)
tp_mic = metric_variable(
shape=[], dtype=tf.int64, validate_shape=False, name="tp_mic"
)
fp_mic = metric_variable(
shape=[], dtype=tf.int64, validate_shape=False, name="fp_mic"
)
fn_mic = metric_variable(
shape=[], dtype=tf.int64, validate_shape=False, name="fn_mic"
)
up_tp_mac = tf.assign_add(
tp_mac, tf.count_nonzero(y_pred * y_true, axis=0)
)
up_fp_mac = tf.assign_add(
fp_mac, tf.count_nonzero(y_pred * (y_true - 1), axis=0)
)
up_fn_mac = tf.assign_add(
fn_mac, tf.count_nonzero((y_pred - 1) * y_true, axis=0)
)
up_tp_mic = tf.assign_add(
tp_mic, tf.count_nonzero(y_pred * y_true, axis=None)
)
up_fp_mic = tf.assign_add(
fp_mic, tf.count_nonzero(y_pred * (y_true - 1), axis=None)
)
up_fn_mic = tf.assign_add(
fn_mic, tf.count_nonzero((y_pred - 1) * y_true, axis=None)
)
up_weights = tf.assign_add(weights, tf.reduce_sum(y_true, axis=0))
updates = tf.group(
up_tp_mac,
up_fp_mac,
up_fn_mac,
up_tp_mic,
up_fp_mic,
up_fn_mic,
up_weights,
)
weights = weights / tf.reduce_sum(weights)
prec_mic = tp_mic / (tp_mic + fp_mic)
prec_mic = tf.where(tf.is_nan(prec_mic), tf.zeros_like(prec_mic), prec_mic)
rec_mic = tp_mic / (tp_mic + fn_mic)
rec_mic = tf.where(tf.is_nan(rec_mic), tf.zeros_like(rec_mic), rec_mic)
f1_mic = 2 * prec_mic * rec_mic / (prec_mic + rec_mic)
f1_mic = tf.where(tf.is_nan(f1_mic), tf.zeros_like(f1_mic), f1_mic)
f1_mic = tf.reduce_mean(f1_mic)
prec_mac = tp_mac / (tp_mac + fp_mac)
prec_mac = tf.where(tf.is_nan(prec_mac), tf.zeros_like(prec_mac), prec_mac)
rec_mac = tp_mac / (tp_mac + fn_mac)
rec_mac = tf.where(tf.is_nan(rec_mac), tf.zeros_like(rec_mac), rec_mac)
f1_mac = 2 * prec_mac * rec_mac / (prec_mac + rec_mac)
f1_mac = tf.where(tf.is_nan(f1_mac), tf.zeros_like(f1_mac), f1_mac)
f1_wei = tf.reduce_sum(f1_mac * weights)
f1_mac = tf.reduce_mean(f1_mac)
return {
"micro-f1": (f1_mic, updates),
"macro-f1": (f1_mac, updates),
"weighted-f1": (f1_wei, updates),
}
|
{"hexsha": "9332f6c5003ffbdbc5ef0a389b7c00f89f3f8cd7", "size": 24572, "ext": "py", "lang": "Python", "max_stars_repo_path": "tsaplay/utils/tf.py", "max_stars_repo_name": "SijanC147/Msc", "max_stars_repo_head_hexsha": "08a6ae6c8755a9a2392d441d8b84cfbb83bee0bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tsaplay/utils/tf.py", "max_issues_repo_name": "SijanC147/Msc", "max_issues_repo_head_hexsha": "08a6ae6c8755a9a2392d441d8b84cfbb83bee0bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tsaplay/utils/tf.py", "max_forks_repo_name": "SijanC147/Msc", "max_forks_repo_head_hexsha": "08a6ae6c8755a9a2392d441d8b84cfbb83bee0bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4219948849, "max_line_length": 92, "alphanum_fraction": 0.634055022, "include": true, "reason": "import numpy", "num_tokens": 5844}
|
from typing import List
import numpy as np
from constants import BERLIN_ZIP_CODES
class GuestResponse:
def __init__(self,
zip_code: int,
num_adults: int,
num_kids: int,
languages: List[str],
has_vaccination: bool,
asks_vaccination: bool,
asks_transportation: bool,
has_pets_allergies: bool,
dietary_restrictions: List[str],
days_since_response: int = 0
):
self.zip_code = zip_code
self.num_adults = num_adults
self.num_kids = num_kids
self.languages = languages
self.has_vaccination = has_vaccination
self.asks_vaccination = asks_vaccination
self.asks_transportation = asks_transportation
self.has_pets_allergies = has_pets_allergies
self.dietary_restrictions = dietary_restrictions
self.days_since_response = days_since_response
self.validate_zipcode(self.zip_code)
self.validate_num_adults(self.num_adults)
def validate_zipcode(self, zip_code):
assert zip_code in BERLIN_ZIP_CODES, f"Value Error: Unknown zip code {zip_code}"
def validate_num_adults(self, num_adults):
        assert type(num_adults) in [int, np.int64], f"Value Error: Number of adults has to be an integer and not {type(num_adults)}"
        assert num_adults < 20, f"Value Error: Number of adults ({num_adults}) is too high"
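# Usage sketch (hypothetical values; the zip code must appear in
# BERLIN_ZIP_CODES for validation to pass):
#   response = GuestResponse(
#       zip_code=10115, num_adults=2, num_kids=1, languages=["de", "en"],
#       has_vaccination=True, asks_vaccination=False, asks_transportation=True,
#       has_pets_allergies=False, dietary_restrictions=["vegetarian"],
#   )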
|
{"hexsha": "3c3fa1d2c829cb3229579cebe8a2979f1d075817", "size": 1500, "ext": "py", "lang": "Python", "max_stars_repo_path": "guest_response.py", "max_stars_repo_name": "eduryev/dinners_for_refugees", "max_stars_repo_head_hexsha": "dba4e1afbb0bb1521a1d127836b0be8fd4832cca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "guest_response.py", "max_issues_repo_name": "eduryev/dinners_for_refugees", "max_issues_repo_head_hexsha": "dba4e1afbb0bb1521a1d127836b0be8fd4832cca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "guest_response.py", "max_forks_repo_name": "eduryev/dinners_for_refugees", "max_forks_repo_head_hexsha": "dba4e1afbb0bb1521a1d127836b0be8fd4832cca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4615384615, "max_line_length": 129, "alphanum_fraction": 0.6433333333, "include": true, "reason": "import numpy", "num_tokens": 354}
|
/*
* Copyright 2010 Vicente J. Botet Escriba
* Copyright 2014 Renato Tegon Forti, Antony Polukhin
* Copyright 2015 Andrey Semashev
* Copyright 2015 Antony Polukhin
*
* Distributed under the Boost Software License, Version 1.0.
* See http://www.boost.org/LICENSE_1_0.txt
*/
#ifndef BOOST_WINAPI_DLL_HPP_INCLUDED_
#define BOOST_WINAPI_DLL_HPP_INCLUDED_
#include <boost/winapi/basic_types.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if BOOST_WINAPI_PARTITION_DESKTOP || BOOST_WINAPI_PARTITION_SYSTEM
#if !defined( BOOST_USE_WINDOWS_H )
extern "C" {
namespace boost { namespace winapi {
#ifdef _WIN64
typedef INT_PTR_ (WINAPI *FARPROC_)();
typedef INT_PTR_ (WINAPI *NEARPROC_)();
typedef INT_PTR_ (WINAPI *PROC_)();
#else
typedef int (WINAPI *FARPROC_)();
typedef int (WINAPI *NEARPROC_)();
typedef int (WINAPI *PROC_)();
#endif // _WIN64
}} // namespace boost::winapi
#if !defined( BOOST_NO_ANSI_APIS )
BOOST_SYMBOL_IMPORT boost::winapi::HMODULE_ WINAPI
LoadLibraryA(boost::winapi::LPCSTR_ lpFileName);
BOOST_SYMBOL_IMPORT boost::winapi::HMODULE_ WINAPI
LoadLibraryExA(
boost::winapi::LPCSTR_ lpFileName,
boost::winapi::HANDLE_ hFile,
boost::winapi::DWORD_ dwFlags
);
BOOST_SYMBOL_IMPORT boost::winapi::HMODULE_ WINAPI
GetModuleHandleA(boost::winapi::LPCSTR_ lpFileName);
BOOST_SYMBOL_IMPORT boost::winapi::DWORD_ WINAPI
GetModuleFileNameA(
boost::winapi::HMODULE_ hModule,
boost::winapi::LPSTR_ lpFilename,
boost::winapi::DWORD_ nSize
);
#endif
BOOST_SYMBOL_IMPORT boost::winapi::HMODULE_ WINAPI
LoadLibraryW(boost::winapi::LPCWSTR_ lpFileName);
BOOST_SYMBOL_IMPORT boost::winapi::HMODULE_ WINAPI
LoadLibraryExW(
boost::winapi::LPCWSTR_ lpFileName,
boost::winapi::HANDLE_ hFile,
boost::winapi::DWORD_ dwFlags
);
BOOST_SYMBOL_IMPORT boost::winapi::HMODULE_ WINAPI
GetModuleHandleW(boost::winapi::LPCWSTR_ lpFileName);
BOOST_SYMBOL_IMPORT boost::winapi::DWORD_ WINAPI
GetModuleFileNameW(
boost::winapi::HMODULE_ hModule,
boost::winapi::LPWSTR_ lpFilename,
boost::winapi::DWORD_ nSize
);
#if !defined( UNDER_CE )
BOOST_SYMBOL_IMPORT boost::winapi::FARPROC_ WINAPI
GetProcAddress(boost::winapi::HMODULE_ hModule, boost::winapi::LPCSTR_ lpProcName);
#else
// On Windows CE there are two functions: GetProcAddressA (since Windows CE 3.0) and GetProcAddressW.
// GetProcAddress is a macro that is _always_ defined to GetProcAddressW.
BOOST_SYMBOL_IMPORT boost::winapi::FARPROC_ WINAPI
GetProcAddressA(boost::winapi::HMODULE_ hModule, boost::winapi::LPCSTR_ lpProcName);
BOOST_SYMBOL_IMPORT boost::winapi::FARPROC_ WINAPI
GetProcAddressW(boost::winapi::HMODULE_ hModule, boost::winapi::LPCWSTR_ lpProcName);
#endif
struct _MEMORY_BASIC_INFORMATION;
#if !defined( BOOST_WINAPI_IS_MINGW )
BOOST_SYMBOL_IMPORT boost::winapi::SIZE_T_ WINAPI
VirtualQuery(
boost::winapi::LPCVOID_ lpAddress,
::_MEMORY_BASIC_INFORMATION* lpBuffer,
boost::winapi::ULONG_PTR_ dwLength
);
#else // !defined( BOOST_WINAPI_IS_MINGW )
BOOST_SYMBOL_IMPORT boost::winapi::DWORD_ WINAPI
VirtualQuery(
boost::winapi::LPCVOID_ lpAddress,
::_MEMORY_BASIC_INFORMATION* lpBuffer,
boost::winapi::DWORD_ dwLength
);
#endif // !defined( BOOST_WINAPI_IS_MINGW )
} // extern "C"
#endif // #if !defined( BOOST_USE_WINDOWS_H )
namespace boost {
namespace winapi {
typedef struct BOOST_MAY_ALIAS MEMORY_BASIC_INFORMATION_ {
PVOID_ BaseAddress;
PVOID_ AllocationBase;
DWORD_ AllocationProtect;
SIZE_T_ RegionSize;
DWORD_ State;
DWORD_ Protect;
DWORD_ Type;
} *PMEMORY_BASIC_INFORMATION_;
#if defined( BOOST_USE_WINDOWS_H )
typedef ::FARPROC FARPROC_;
typedef ::NEARPROC NEARPROC_;
typedef ::PROC PROC_;
const DWORD_ DONT_RESOLVE_DLL_REFERENCES_ = DONT_RESOLVE_DLL_REFERENCES;
const DWORD_ LOAD_WITH_ALTERED_SEARCH_PATH_ = LOAD_WITH_ALTERED_SEARCH_PATH;
#else // defined( BOOST_USE_WINDOWS_H )
const DWORD_ DONT_RESOLVE_DLL_REFERENCES_ = 0x00000001;
const DWORD_ LOAD_WITH_ALTERED_SEARCH_PATH_ = 0x00000008;
#endif // defined( BOOST_USE_WINDOWS_H )
// This one is not defined by MinGW
const DWORD_ LOAD_IGNORE_CODE_AUTHZ_LEVEL_ = 0x00000010;
#if !defined( BOOST_NO_ANSI_APIS )
using ::LoadLibraryA;
using ::LoadLibraryExA;
using ::GetModuleHandleA;
using ::GetModuleFileNameA;
#endif // !defined( BOOST_NO_ANSI_APIS )
using ::LoadLibraryW;
using ::LoadLibraryExW;
using ::GetModuleHandleW;
using ::GetModuleFileNameW;
#if !defined( UNDER_CE )
// For backward compatibility, don't use directly. Use get_proc_address instead.
using ::GetProcAddress;
#else
using ::GetProcAddressA;
using ::GetProcAddressW;
#endif
BOOST_FORCEINLINE FARPROC_ get_proc_address(HMODULE_ hModule, LPCSTR_ lpProcName)
{
#if !defined( UNDER_CE )
return ::GetProcAddress(hModule, lpProcName);
#else
return ::GetProcAddressA(hModule, lpProcName);
#endif
}
BOOST_FORCEINLINE SIZE_T_ VirtualQuery(LPCVOID_ lpAddress, MEMORY_BASIC_INFORMATION_* lpBuffer, ULONG_PTR_ dwLength)
{
return ::VirtualQuery(lpAddress, reinterpret_cast< ::_MEMORY_BASIC_INFORMATION* >(lpBuffer), dwLength);
}
#if !defined( BOOST_NO_ANSI_APIS )
BOOST_FORCEINLINE HMODULE_ load_library(LPCSTR_ lpFileName)
{
return ::LoadLibraryA(lpFileName);
}
BOOST_FORCEINLINE HMODULE_ load_library_ex(LPCSTR_ lpFileName, HANDLE_ hFile, DWORD_ dwFlags)
{
return ::LoadLibraryExA(lpFileName, hFile, dwFlags);
}
BOOST_FORCEINLINE HMODULE_ get_module_handle(LPCSTR_ lpFileName)
{
return ::GetModuleHandleA(lpFileName);
}
BOOST_FORCEINLINE DWORD_ get_module_file_name(HMODULE_ hModule, LPSTR_ lpFilename, DWORD_ nSize)
{
return ::GetModuleFileNameA(hModule, lpFilename, nSize);
}
#endif // #if !defined( BOOST_NO_ANSI_APIS )
BOOST_FORCEINLINE HMODULE_ load_library(LPCWSTR_ lpFileName)
{
return ::LoadLibraryW(lpFileName);
}
BOOST_FORCEINLINE HMODULE_ load_library_ex(LPCWSTR_ lpFileName, HANDLE_ hFile, DWORD_ dwFlags)
{
return ::LoadLibraryExW(lpFileName, hFile, dwFlags);
}
BOOST_FORCEINLINE HMODULE_ get_module_handle(LPCWSTR_ lpFileName)
{
return ::GetModuleHandleW(lpFileName);
}
BOOST_FORCEINLINE DWORD_ get_module_file_name(HMODULE_ hModule, LPWSTR_ lpFilename, DWORD_ nSize)
{
return ::GetModuleFileNameW(hModule, lpFilename, nSize);
}
} // namespace winapi
} // namespace boost
#endif // BOOST_WINAPI_PARTITION_DESKTOP || BOOST_WINAPI_PARTITION_SYSTEM
//
// FreeLibrary is in a different partition set (slightly)
//
#if BOOST_WINAPI_PARTITION_APP || BOOST_WINAPI_PARTITION_SYSTEM
#if !defined(BOOST_USE_WINDOWS_H)
extern "C" {
BOOST_SYMBOL_IMPORT boost::winapi::BOOL_ WINAPI
FreeLibrary(boost::winapi::HMODULE_ hModule);
}
#endif
namespace boost {
namespace winapi {
using ::FreeLibrary;
}
}
#endif // BOOST_WINAPI_PARTITION_APP || BOOST_WINAPI_PARTITION_SYSTEM
#endif // BOOST_WINAPI_DLL_HPP_INCLUDED_
|
{"hexsha": "8b1ecd239ca96007e7655a643caf9728aaec3d0e", "size": 6834, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "thirdparty/boost_1_67_0/boost/winapi/dll.hpp", "max_stars_repo_name": "cfsengineering/tigl", "max_stars_repo_head_hexsha": "abfbb57b82dc6beac7cde212a4cd5e0aed866db8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 918.0, "max_stars_repo_stars_event_min_datetime": "2016-12-22T02:53:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T06:21:35.000Z", "max_issues_repo_path": "thirdparty/boost_1_67_0/boost/winapi/dll.hpp", "max_issues_repo_name": "cfsengineering/tigl", "max_issues_repo_head_hexsha": "abfbb57b82dc6beac7cde212a4cd5e0aed866db8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 203.0, "max_issues_repo_issues_event_min_datetime": "2016-12-27T12:09:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T20:46:55.000Z", "max_forks_repo_path": "thirdparty/boost_1_67_0/boost/winapi/dll.hpp", "max_forks_repo_name": "cfsengineering/tigl", "max_forks_repo_head_hexsha": "abfbb57b82dc6beac7cde212a4cd5e0aed866db8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 122.0, "max_forks_repo_forks_event_min_datetime": "2016-12-22T17:38:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T14:25:49.000Z", "avg_line_length": 28.5941422594, "max_line_length": 116, "alphanum_fraction": 0.780801873, "num_tokens": 1826}
|
"""
Plot figures from npy or logging files saved while training
================================================
*Author*: Yu Zhang, Northwestern Polytechnical University
"""
import matplotlib.pyplot as plt
import os
import numpy as np
import sys
def plot_from_npy(npyfile):
    assert os.path.isfile(npyfile)
    # The .npy file stores a dict of training curves, so pickle loading must
    # be enabled explicitly (required since NumPy 1.16.3).
    info = np.load(npyfile, allow_pickle=True).item()
tr_loss = info['train_loss']
tr_loss_detail = info['train_loss_detail']
tr_acc = info['train_accuracy']
val_loss = info['val_loss']
val_acc = info['val_accuracy']
epochs = len(tr_loss)
x = np.linspace(1, epochs, epochs)
xx = np.linspace(1, epochs, len(tr_loss_detail))
plt.plot(x, tr_loss, ls='-', color='r')
plt.plot(x, tr_acc, ls='-.', color='r')
plt.plot(x, val_loss, ls='-', color='b')
plt.plot(x, val_acc, ls='-.', color='b')
plt.plot(xx, tr_loss_detail, ls='-', color='y')
plt.legend(labels=['tr_loss', 'tr_accuracy', 'val_loss',
'val_accuracy', 'tr_detail_loss'], loc='best')
plt.xlabel('epoch')
plt.ylabel('loss or accuracy')
plt.title('training loss and accuracy')
plt.show()
if __name__ == '__main__':
    if len(sys.argv) < 2:  # sys.argv[0] is the script name itself
        print('Please input at least one log file for plotting')
    else:
        for log_file in sys.argv[1:]:
            plot_from_npy(log_file)
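# Example invocation (the .npy filename here is hypothetical):
#     python plot_figure.py train_info.npy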
|
{"hexsha": "c603dc1a5c1fe970bf85ecfd0e172bd2eb5282b3", "size": 1333, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluate/plot_figure.py", "max_stars_repo_name": "zhangyuygss/WSL", "max_stars_repo_head_hexsha": "c622f606c4b6557b45cec6068713ff05cdb8962a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-01-10T08:29:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-15T09:07:25.000Z", "max_issues_repo_path": "evaluate/plot_figure.py", "max_issues_repo_name": "zhangyuygss/WSL", "max_issues_repo_head_hexsha": "c622f606c4b6557b45cec6068713ff05cdb8962a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluate/plot_figure.py", "max_forks_repo_name": "zhangyuygss/WSL", "max_forks_repo_head_hexsha": "c622f606c4b6557b45cec6068713ff05cdb8962a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-05-28T07:50:20.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-01T15:33:59.000Z", "avg_line_length": 30.2954545455, "max_line_length": 69, "alphanum_fraction": 0.6136534134, "include": true, "reason": "import numpy", "num_tokens": 341}
|
using JuLIP
using JuLIP.Testing
using JuLIP: sigvol_d
using Test
using LinearAlgebra
##
h2("Testing `minimise!` with equilibration with LJ calculator to lattice")
calc = lennardjones(r0=rnn(:Al))
at = bulk(:Al, cubic=true) * 10
X0 = positions(at) |> mat
at = rattle!(at, 0.02)
set_calculator!(at, calc)
x = dofs(at)
println(@test (energy(at) == energy(at, x) == energy(calc, at)
== energy(calc, at, x) )
)
minimise!(at, precond=:id, verbose=2)
X1 = positions(at) |> mat
X0 .-= X0[:, 1]
X1 .-= X1[:, 1]
F = X1 / X0
println("check that the optimiser really converged to a lattice")
@show norm(F'*F - I, Inf)
@show norm(F*X0 - X1, Inf)
@test norm(F*X0 - X1, Inf) < 1e-4
##
h2("same test but large and with Exp preconditioner")
at = bulk(:Al, cubic=true) * (20,20,2)
at = rattle!(at, 0.02)
set_calculator!(at, calc)
minimise!(at, precond = :exp, method = :lbfgs,
robust_energy_difference = true, verbose=2
)
##
h2("Variable Cell Test")
calc = lennardjones(r0=rnn(:Al))
at = set_pbc!(bulk(:Al, cubic=true), true)
set_calculator!(at, calc)
variablecell!(at)
x = dofs(at)
println(@test (energy(at) == energy(at, x) == energy(calc, at)
== energy(calc, at, x) )
)
minimise!(at, verbose = 2)
##
h2("FF preconditioner for StillingerWeber")
at = bulk(:Si, cubic=true) * (10,10,2)
at = set_pbc!(at, true)
at = rattle!(at, 0.02)
set_calculator!(at, StillingerWeber())
P = FF(at, StillingerWeber())
minimise!(at, precond = P, method = :lbfgs,
robust_energy_difference = true, verbose=2)
##
h2("FF preconditioner for EAM")
at = bulk(:W, cubic=true) * (10,10,2)
at = set_pbc!(at, true)
at = rattle!(at, 0.02)
X0 = positions(at)
set_positions!(at, X0)
set_calculator!(at, eam_W)
P = FF(at, eam_W)
minimise!(at, precond = P, method = :lbfgs, robust_energy_difference = true, verbose=2)
## steepest descent
set_positions!(at, X0)
set_calculator!(at, eam_W)
P = FF(at, eam_W)
minimise!(at, precond = P, method = :sd, robust_energy_difference = true, verbose=2)
##
h2("Optimise again with some different stabilisation options")
set_positions!(at, X0)
set_calculator!(at, eam_W)
P = FF(at, eam_W, stab=0.1, innerstab=0.2)
minimise!(at, precond = P, method = :lbfgs, robust_energy_difference = true, verbose=2)
##
h2("for comparison now with Exp")
set_positions!(at, X0)
minimise!(at, precond = :exp, method = :lbfgs, robust_energy_difference = true, verbose=2)
h2("Test optimisation with VariableCell")
# start with a clean `at`
at = bulk(:Al) * 2 # cubic=true,
apply_defm!(at, I + 0.02 * rand(3,3))
calc = lennardjones(r0=rnn(:Al))
set_calculator!(at, calc)
variablecell!(at)
println(@test JuLIP.Testing.fdtest(calc, at, verbose=true, rattle=0.1))
h2("For the initial state, stress/virial is far from 0:")
@show norm(virial(at), Inf)
JuLIP.Solve.minimise!(at, verbose=2)
println("After optimisation, stress/virial should be 0:")
@show norm(virial(at), Inf)
@test norm(virial(at), Inf) < 1e-4
h2("Check sigvol derivative")
println(@test fdtest(c -> JuLIP.sigvol(reshape(c, (3,3))),
c -> JuLIP.sigvol_d(reshape(c, (3,3)))[:],
rand(3,3)))
# TODO: revive this after introducing external potentials
# h2("And now with pressure . . .")
# set_constraint!(at, VariableCell(at, pressure=10.0123))
# JuLIP.Testing.fdtest(calc, at, verbose=true, rattle=0.02)
# at = bulk(:Al) * 2
# set_calculator!(at, calc)
# set_constraint!(at, VariableCell(at, pressure=0.01))
# JuLIP.Solve.minimise!(at, verbose = 2)
# @show norm(virial(at), Inf)
# @show norm(JuLIP.gradient(at), Inf)
# println(@test norm(JuLIP.gradient(at), Inf) < 1e-4)
# @info "note it is correct that virial is O(1) since we applied pressure"
|
{"hexsha": "9c3a9dc21b0090667507ae892c08e8c403d88a0d", "size": 3745, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/testsolve.jl", "max_stars_repo_name": "jamesgardner1421/JuLIP.jl", "max_stars_repo_head_hexsha": "f5340580337a64d35292b91286c96d20d5c172c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-12-27T05:32:23.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-27T05:32:23.000Z", "max_issues_repo_path": "test/testsolve.jl", "max_issues_repo_name": "YangshuaiWang/JuLIP.jl", "max_issues_repo_head_hexsha": "67cfb5c009e3706f5ef7871aa1d886ddae76bd84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/testsolve.jl", "max_forks_repo_name": "YangshuaiWang/JuLIP.jl", "max_forks_repo_head_hexsha": "67cfb5c009e3706f5ef7871aa1d886ddae76bd84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8076923077, "max_line_length": 90, "alphanum_fraction": 0.6576769025, "num_tokens": 1248}
|
[STATEMENT]
lemma bouncing_ball_flow: "g < 0 \<Longrightarrow> h \<ge> 0 \<Longrightarrow>
\<^bold>{\<lambda>s. s$1 = h \<and> s$2 = 0\<^bold>}
(LOOP
((x\<acute>= f g & (\<lambda> s. s$1 \<ge> 0));
(IF (\<lambda> s. s$1 = 0) THEN (2 ::= (\<lambda>s. - s$2)) ELSE skip))
INV (\<lambda>s. 0 \<le> s$1 \<and> 2 \<cdot> g \<cdot> s$1 = 2 \<cdot> g \<cdot> h + s$2 \<cdot> s$2)
) \<^bold>{\<lambda>s. 0 \<le> s$1 \<and> s$1 \<le> h\<^bold>}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<^bold>{\<lambda>s. s $ 1 = h \<and> s $ 2 = 0\<^bold>}LOOP x\<acute>=\<lambda>t. f g & \<lambda>s. 0 \<le> s $ 1 on \<lambda>s. {t. 0 \<le> t} UNIV @ 0 ; (IF (\<lambda>s. s $ 1 = 0) THEN (2 ::= (\<lambda>s. - s $ 2)) ELSE skip) INV (\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2) \<^bold>{\<lambda>s. 0 \<le> s $ 1 \<and> s $ 1 \<le> h\<^bold>}
[PROOF STEP]
apply(rule H_loopI; (rule H_seq[where R="\<lambda>s. 0 \<le> s$1 \<and> 2 \<cdot> g \<cdot> s$1 = 2 \<cdot> g \<cdot> h + s$2 \<cdot> s$2"])?)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<^bold>{\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<^bold>}x\<acute>=\<lambda>t. f g & \<lambda>s. 0 \<le> s $ 1 on \<lambda>s. {t. 0 \<le> t} UNIV @ 0\<^bold>{\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<^bold>}
2. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<^bold>{\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<^bold>}IF (\<lambda>s. s $ 1 = 0) THEN (2 ::= (\<lambda>s. - s $ 2)) ELSE skip\<^bold>{\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<^bold>}
3. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<lceil>\<lambda>s. s $ 1 = h \<and> s $ 2 = 0\<rceil> \<subseteq> \<lceil>\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<rceil>
4. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<lceil>\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<rceil> \<subseteq> \<lceil>\<lambda>s. 0 \<le> s $ 1 \<and> s $ 1 \<le> h\<rceil>
[PROOF STEP]
apply(subst local_flow.sH_g_ode_subset[OF local_flow_ball])
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>s. \<lbrakk>g < 0; 0 \<le> h; s \<in> UNIV\<rbrakk> \<Longrightarrow> 0 \<in> Collect ((\<le>) 0) \<and> is_interval (Collect ((\<le>) 0)) \<and> Collect ((\<le>) 0) \<subseteq> UNIV
2. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<forall>s\<in>UNIV. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2 \<longrightarrow> (\<forall>t\<in>Collect ((\<le>) 0). (\<forall>\<tau>\<in>down (Collect ((\<le>) 0)) t. 0 \<le> (\<chi>i. if i = 1 then g \<cdot> \<tau>\<^sup>2 / 2 + s $ 2 \<cdot> \<tau> + s $ 1 else g \<cdot> \<tau> + s $ 2) $ 1) \<longrightarrow> 0 \<le> (\<chi>i. if i = 1 then g \<cdot> t\<^sup>2 / 2 + s $ 2 \<cdot> t + s $ 1 else g \<cdot> t + s $ 2) $ 1 \<and> 2 \<cdot> g \<cdot> (\<chi>i. if i = 1 then g \<cdot> t\<^sup>2 / 2 + s $ 2 \<cdot> t + s $ 1 else g \<cdot> t + s $ 2) $ 1 = 2 \<cdot> g \<cdot> h + (\<chi>i. if i = 1 then g \<cdot> t\<^sup>2 / 2 + s $ 2 \<cdot> t + s $ 1 else g \<cdot> t + s $ 2) $ 2 \<cdot> (\<chi>i. if i = 1 then g \<cdot> t\<^sup>2 / 2 + s $ 2 \<cdot> t + s $ 1 else g \<cdot> t + s $ 2) $ 2)
3. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<^bold>{\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<^bold>}IF (\<lambda>s. s $ 1 = 0) THEN (2 ::= (\<lambda>s. - s $ 2)) ELSE skip\<^bold>{\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<^bold>}
4. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<lceil>\<lambda>s. s $ 1 = h \<and> s $ 2 = 0\<rceil> \<subseteq> \<lceil>\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<rceil>
5. \<lbrakk>g < 0; 0 \<le> h\<rbrakk> \<Longrightarrow> \<lceil>\<lambda>s. 0 \<le> s $ 1 \<and> 2 \<cdot> g \<cdot> s $ 1 = 2 \<cdot> g \<cdot> h + s $ 2 \<cdot> s $ 2\<rceil> \<subseteq> \<lceil>\<lambda>s. 0 \<le> s $ 1 \<and> s $ 1 \<le> h\<rceil>
[PROOF STEP]
by (auto simp: bb_real_arith)
\<comment> \<open>Refined with annotated dynamics \<close>
|
{"llama_tokens": 2228, "file": "Hybrid_Systems_VCs_KleeneAlgebraTests_HS_VC_KAT_Examples_rel", "length": 3}
|
using ModelConstructors, HDF5, Random, JLD2, FileIO, SMC, Test
include("modelsetup.jl")
path = dirname(@__FILE__)
writing_output = false
if VERSION < v"1.5"
ver = "111"
else
ver = "150"
end
m = setup_linear_model(; regime_switching = true)
m <= Setting(:regime_switching, true, true, "rs", "") # For file output purposes
save = normpath(joinpath(dirname(@__FILE__),"save"))
m <= Setting(:saveroot, save)
savepath = rawpath(m, "estimate", "smc_cloud.jld2")
particle_store_path = rawpath(m, "estimate", "smcsave.h5")
data = h5read("reference/test_data.h5", "rsdata")
@everywhere Random.seed!(42)
println("Estimating Linear Model... (approx. 8 minutes)")
SMC.smc(rs_loglik_fn, m.parameters, data, verbose = :none,
use_fixed_schedule = true, parallel = false,
n_Φ = 120, n_mh_steps = 1, resampling_method = :polyalgo,
data_vintage = "200707", target = 0.25, savepath = savepath,
particle_store_path = particle_store_path, α = .9,
threshold_ratio = .5, smc_iteration = 0,
regime_switching = true, toggle = true)
println("Estimation done!")
test_file = load(rawpath(m, "estimate", "smc_cloud.jld2"))
test_cloud = test_file["cloud"]
test_w = test_file["w"]
test_W = test_file["W"]
mean_para = mean(SMC.get_vals(test_cloud), dims = 2)
true_para = [1., 1., 1., # α1, β1, σ1 (regime 1)
2., 2., 1., # α2, β2, σ2 (regime 1)
3., 3., 1., # α3, β3, σ3 (regime 1)
1., 1., # α1 regimes = 2-3
2., 3., # β1 regimes = 2-3
2., 2., # α2 regimes = 2-3
3., 4., # β2 regimes = 2-3
3., 3., # α3 regimes = 2-3
4., 5.] # β3 regimes = 2-3
if writing_output
jldopen(string("reference/smc_cloud_fix=true_rs=true_version=", ver, ".jld2"), true, true, true, IOStream) do file
write(file, "cloud", test_cloud)
write(file, "w", test_w)
write(file, "W", test_W)
end
end
saved_file = JLD2.jldopen(string("reference/smc_cloud_fix=true_rs=true_version=", ver, ".jld2"), "r")
saved_cloud = saved_file["cloud"]
saved_w = saved_file["w"]
saved_W = saved_file["W"]
####################################################################
cloud_fields = fieldnames(typeof(test_cloud))
@testset "Linear Regression Regime-Switching Parameter Estimates Are Close" begin
@test maximum(abs.(mean_para - true_para)) < .5
end
@testset "ParticleCloud Fields: Linear" begin
@test @test_matrix_approx_eq SMC.get_vals(test_cloud) SMC.get_vals(saved_cloud)
@test @test_matrix_approx_eq SMC.get_loglh(test_cloud) SMC.get_loglh(saved_cloud)
@test length(test_cloud.particles) == length(saved_cloud.particles)
@test test_cloud.tempering_schedule == saved_cloud.tempering_schedule
@test test_cloud.ESS ≈ saved_cloud.ESS
@test test_cloud.stage_index == saved_cloud.stage_index
@test test_cloud.n_Φ == saved_cloud.n_Φ
@test test_cloud.resamples == saved_cloud.resamples
@test test_cloud.c == saved_cloud.c
@test test_cloud.accept == saved_cloud.accept
end
test_particle = test_cloud.particles[1,:]
saved_particle = saved_cloud.particles[1,:]
N = length(test_particle)
@testset "Individual Particle Fields Post-SMC: Linear" begin
@test test_particle[1:SMC.ind_para_end(N)] ≈ saved_particle[1:SMC.ind_para_end(N)]
@test test_particle[SMC.ind_loglh(N)] ≈ saved_particle[SMC.ind_loglh(N)]
@test test_particle[SMC.ind_logprior(N)] ≈ saved_particle[SMC.ind_logprior(N)]
@test test_particle[SMC.ind_old_loglh(N)] == saved_particle[SMC.ind_old_loglh(N)]
@test test_particle[SMC.ind_accept(N)] == saved_particle[SMC.ind_accept(N)]
@test test_particle[SMC.ind_weight(N)] ≈ saved_particle[SMC.ind_weight(N)]
end
@testset "Weight Matrices: Linear" begin
@test @test_matrix_approx_eq test_w saved_w
@test @test_matrix_approx_eq test_W saved_W
end
# Clean output files up
rm(rawpath(m, "estimate", "smc_cloud.jld2"))
rm(rawpath(m, "estimate", "smcsave.h5"))
|
{"hexsha": "a15b9911f3b8f5dff192f651229b55db7f40739a", "size": 4023, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/regime_switching_smc.jl", "max_stars_repo_name": "FRBNY-DSGE/SequentialMonteCarlo.jl", "max_stars_repo_head_hexsha": "de6c3180572bfe397917c69059fc242ba8bfb7ca", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2019-08-14T14:44:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T02:04:58.000Z", "max_issues_repo_path": "test/regime_switching_smc.jl", "max_issues_repo_name": "FRBNY-DSGE/SequentialMonteCarlo.jl", "max_issues_repo_head_hexsha": "de6c3180572bfe397917c69059fc242ba8bfb7ca", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-09-04T18:52:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-21T19:09:46.000Z", "max_forks_repo_path": "test/regime_switching_smc.jl", "max_forks_repo_name": "FRBNY-DSGE/SequentialMonteCarlo.jl", "max_forks_repo_head_hexsha": "de6c3180572bfe397917c69059fc242ba8bfb7ca", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2019-08-23T01:04:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-06T09:06:58.000Z", "avg_line_length": 38.6826923077, "max_line_length": 118, "alphanum_fraction": 0.6659209545, "num_tokens": 1190}
|
Module AssetTheory.
Require Export Coq.Lists.ListSet.
Require Export Coq.Lists.List.
Require Export Coq.Bool.Bool.
Require Export Coq.Classes.RelationClasses.
Require Export maps_def.
Import Maps.
Definition Asset : Type := T.
Definition AssetName : Type := S.
Variable a a1 a2 a3 : Asset.
Variable aSet S1 S2 : set Asset.
(* Decidability *)
Axiom ANdec_eq_axiom :
forall x y: AssetName, {x = y} + {x <> y}.
Axiom Asset_dec_axiom :
forall x y: Asset, {x = y} + {x <> y}.
(*Assumption <Assets refinement>*)
Parameter inline assetRef_func :
set Asset -> set Asset -> Prop.
Inductive wfProduct_ind (aSet : set Asset) : Prop.
(*Axiom <Asset refinement is pre-order>*)
Axiom assetRefinement_axiom:
forall x y z: set Asset, assetRef_func x x /\
assetRef_func x y -> assetRef_func y z -> assetRef_func x z.
(*Axiom 5 <Asset refinement compositionality>*)
Axiom asRefCompositional_axiom :
forall (S1 : set Asset) (S2 : set Asset) (aSet : set Asset),
(assetRef_func S1 S2) /\ wfProduct_ind (union_t_func S1 aSet)
-> (wfProduct_ind (union_t_func S2 aSet))
/\ assetRef_func (union_t_func S1 aSet)
(union_t_func S2 aSet).
End AssetTheory.
|
{"author": "spgroup", "repo": "theory-pl-refinement-coq", "sha": "9587dddac0d6f4792db18629fa1ea3bd3d933abe", "save_path": "github-repos/coq/spgroup-theory-pl-refinement-coq", "path": "github-repos/coq/spgroup-theory-pl-refinement-coq/theory-pl-refinement-coq-9587dddac0d6f4792db18629fa1ea3bd3d933abe/typeclass/Util/assettheory_def.v"}
|
# Summary, Theory and Practice - Differential Equations
> Author: Gil Miranda<br>
> Contact: gilsmneto@gmail.com<br>
> Repo: [@mirandagil](https://github.com/mirandagil/university-courses/analise-numerica-edo-2019-1)<br>
> Bibliographic sources:
* Rosa, R. (2017). <i>Equações Diferenciais</i>.
* Trefethen, L. & Bau, D. (1997) <i>Numerical Linear Algebra</i>. SIAM
* Keith R. Symon (1988) <i>Mecânica</i>. CAMPUS
`last update: 06/04/2019`
---
# What is an ODE
Differential equations appear in many models in physics, chemistry, biology, economics, engineering, and so on. Many phenomena involve the variation of one quantity with respect to another, which naturally leads to models based on differential equations. We may have temporal variations of, for example, the position of an object, the temperature of a material, the concentration of a chemical agent, the concentration of a pollutant or nutrient in a medium, the humidity of the air, the number of inhabitants of a city, the density of bacteria in a culture, the mass density of a gas, the price of a commodity, the exchange rate between currencies, the gross domestic product of a country, etc. Besides temporal variations of these quantities, we may also have variations with respect to other quantities, such as the variation of temperature with respect to position, or the variation of the mass density of a fluid with respect to temperature.
Differential equations are mathematical expressions of certain laws involved in a model; these laws may be, for example, fundamental, such as Newton's second law, empirical, as in chemical reactions, or heuristic, as in population dynamics.
<br><br><br>
A differential equation is an equation whose unknown is a function and which involves derivatives of that sought function. More specifically, we consider an equation of the form
$$
F\left(t,x,\frac{\mathrm{d}x}{\mathrm{d}t}, \dots, \frac{\mathrm{d}^nx}{\mathrm{d}t^n} \right) = 0
$$
where $t$ is an independent variable, $F = F(t, x, x_1,\dots, x_n)$ is a function $F: \mathbb{R}^{n+2} \to \mathbb{R}$, and $x = x(t)$ is a dependent variable, which is the function being sought (the unknown). This is an equation of order $n$, indicating the highest-order derivative present in the equation. For example, the decay equation $\dot{y} + \lambda y = 0$ studied below corresponds to $F(t, y, y_1) = y_1 + \lambda y$, an equation of order $1$.
REFERENCE: Ricardo Rosa. **Equações Diferenciais**, 2017.<br>
http://www.labma.ufrj.br/~rrosa/dvifiles/apostila-ed-maio2017.pdf
Cédric Villani, Fields Medalist, speaking about the motivation and usefulness of differential equations (in English with subtitles)
https://www.youtube.com/watch?v=o9zQpQjfQ80
3Blue1Brown, a math outreach channel, with an explanatory video on ODEs
https://www.youtube.com/watch?v=p_di4Zn4wz4
### A more 'naive' description
Differential equations are equations that model many phenomena of the world we live in; they are tied to the way those phenomena change. When we say we want to solve a differential equation, it is because we want to find out how the phenomenon behaves, or how it will look in the future, while knowing only how it changes: it may change from minute to minute, like rain getting heavier or lighter, or from place to place, like the temperature measured next to the air conditioner in a room versus far away from it in the same room.
---
## Solving ODEs
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
import sympy
from sympy import Function, dsolve, Eq, Derivative, symbols, init_printing, plot, Matrix, exp
```
#### A simple model
Let us first look at a very simple ODE, which models radioactive decay with an initial mass $y_0$ and decay coefficient $\lambda$. Multiplying by the integrating factor $e^{\lambda t}$ turns the left-hand side into an exact derivative:
$$
\dot{y} = -\lambda y\\
\dot{y} + \lambda y = 0\\
e^{\lambda t} (\dot{y} + \lambda y) = 0\\
(e^{\lambda t} y)' = 0 \\
e^{\lambda t} y = c \\
\therefore y = ce^{-\lambda t}
$$
where the initial condition gives $y_0 = c$, since
$$
y(0) = c e^{-\lambda \cdot 0}\\
y(0) = c
$$
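Since `sympy` is imported above but not otherwise used, we can sanity-check this closed form symbolically. A minimal sketch (the symbol names `t`, `lam` and the function name `y` are our own choices):
```python
t, lam = symbols('t lambda', positive=True)
y = Function('y')
# dsolve should recover y(t) = C1*exp(-lambda*t), matching the derivation above
sol = dsolve(Eq(Derivative(y(t), t), -lam * y(t)), y(t))
print(sol)
```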
#### Visualizing solutions
$y_0 = 5, \lambda = 0.3$
```python
def model1(y, t, l):
    # radioactive decay model: dy/dt = -l * y
    return -l*y
ts = np.linspace(0, 20)
ys = odeint(model1, 5, ts, args=(0.3,))
plt.plot(ts, ys, label=0.3)
plt.legend(title='$\lambda$')
plt.ylabel('mass')
plt.xlabel('time')
plt.show()
```
Here we can see the modeling 'power' of an ODE: the plot tells us that an element with mass 5 takes about 20 units of time to decay to essentially zero mass.
Note that changing the coefficient $\lambda$ yields a different solution; to see this, the plot below varies $\lambda$ from 0.1 to 0.5.
```python
lambdas = np.linspace(0.1,0.5,num=5)
ts = np.linspace(0,20)
for l in lambdas:
ys = odeint(model1,5,ts,(l,))
plt.plot(ts,ys,label=str(l))
plt.ylabel('mass')
plt.xlabel('time')
plt.title('Visualization with different $\lambda$')
plt.legend(title='$\lambda$')
plt.show()
```
---
## Higher-order ODEs
Let us look at the modeling of the mass-spring system; once again we have an ODE to solve.
$$
m\frac{\mathrm{d}^2 x}{\mathrm{d}t^2} = -kx\\
\frac{\mathrm{d}^2 x}{\mathrm{d}t^2} = -\frac{k}{m}x
$$
To build some physical and algebraic intuition about what we are dealing with, let us make the change of variables $\omega_0 = \sqrt{\frac{k}{m}}$<br>
The reason will become clear when we reach the solution.<br>
Our ODE therefore now looks like
$$
\frac{\mathrm{d}^2 x}{\mathrm{d}t^2} = -\omega_0^2x
$$
##### Analytic solution
$$
\frac{\mathrm{d}^2 x}{\mathrm{d}t^2} = -\omega_0^2x\\
\frac{\mathrm{d}^2 x}{\mathrm{d}t^2} + \omega_0^2x = 0\\
$$
Writing the equation in terms of the linear differential operator $D = \frac{\mathrm{d}}{\mathrm{d}t}$
$$
(D^2+\omega_0^2)x = 0\\
$$
We can view $(D^2+\omega_0^2)$ as a second-degree polynomial in $D$, and therefore factor it
$$
(D-r_1)(D-r_2)x = 0\\
$$
where $r_n$ are the roots of the polynomial. If we name the term $(D-r_2)x = z$
$$
(D-r_2)x = z\\
\therefore (D-r_1)z = 0
$$
But this is a first-order ODE, whose general solution we already know:
$$
(D-r_1)z = 0\\
\frac{\mathrm{d}}{\mathrm{d}t}z - r_1z = 0\\
\therefore z = c_1 e^{r_1 t}
$$
Substituting back into the equation $(D-r_2)x = z$
$$
(D-r_2)x = z\\
\frac{\mathrm{d}}{\mathrm{d}t} x - r_2 x = c_1 e^{r_1 t}\\
e^{-r_2 t}\left(\frac{\mathrm{d}}{\mathrm{d}t} x - r_2 x\right) = (c_1 e^{r_1 t})e^{-r_2 t}\\
\frac{\mathrm{d}}{\mathrm{d}t}(e^{-r_2 t} x) = c_1 e^{(r_1-r_2) t}\\
e^{-r_2 t} x = \frac{c_1}{r_1 - r_2} e^{(r_1-r_2) t} + c_3\\
\therefore x = c_2 e^{r_1 t} + c_3 e^{r_2 t}
$$
with $r_1 \neq r_2$ and $c_2 = \frac{c_1}{r_1 - r_2}$.<br>
The roots of $(D^2+\omega_0^2) = 0$ are, clearly, $r_1 = i\omega_0, r_2 = -i\omega_0$.<br>
We therefore have two complex roots. Let us recall some properties of complex numbers: for $C \in \mathbb{C}$, we write $C^*$ for the complex conjugate of $C$
$$
C = a + bi \\
C^* = a - bi\\
C + C^* = 2a\\
C - C^* = 2bi
$$
Taking $C_1 = C, C_2 = C^*$, our solution becomes:
$$x(t) = C e^{i\omega_0t} + C^* e^{-i\omega_0t}$$
Writing $C$ in polar coordinates
$$
C = r e^{i\theta} \\
C^* = r e^{-i\theta}\\
r = \sqrt{a^2 + b^2}\\
tg(\theta) = \frac{b}{a}\\
a = r cos(\theta)\\
b = r sen(\theta)\\
\theta \in \left[-\frac{\pi}{2},\dfrac{\pi}{2}\right]\\
$$
To lighten the notation, let $r = \frac{1}{2}A$; using Euler's identity, we can now rewrite:
$$
\begin{align}
x(t) &= \frac{1}{2}A e^{i\theta} e^{i\omega_0t} + \frac{1}{2}A e^{-i\theta}e^{-i\omega_0 t}\\
& = \frac{1}{2}A e^{i(\omega_0t + \theta)} + \frac{1}{2}A e^{-i(\omega_0t + \theta)}\\
& = \frac{1}{2}A \big[cos(\omega_0t + \theta) + i sen(\omega_0t + \theta)\big] + \frac{1}{2} A \big[cos(\omega_0t + \theta) - isen(\omega_0t + \theta)\big] \\
& = \frac{1}{2}A \big[cos(\omega_0t + \theta) + i sen(\omega_0t + \theta) + cos(\omega_0t + \theta) - i sen(\omega_0t + \theta) \big]\\
x(t) &= A cos(\omega_0t + \theta)
\end{align}
$$
where each quantity has a physical meaning:
$$
\begin{align}
A &\to \text{Amplitude of the sinusoid} \\
\omega_0 &\to \text{Angular frequency} \\
\theta &\to \text{Phase, or phase shift, of the sinusoid}
\end{align}
$$
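As a quick numerical sanity check of this closed form, we can integrate the second-order equation as a first-order system (anticipating the vector form below; the values of `w0` and the initial conditions are our own illustrative choices):
```python
# Undamped oscillator x'' = -w0^2 x, integrated as the system (x, v)
def oscillator(state, t, w0):
    x, v = state
    return [v, -w0**2 * x]
w0 = 2.0
x0, v0 = 1.0, 0.0  # these initial conditions give A = x0 and theta = 0
ts = np.linspace(0, 10, 500)
num = odeint(oscillator, [x0, v0], ts, args=(w0,))[:, 0]
ana = x0 * np.cos(w0 * ts)
print(np.max(np.abs(num - ana)))  # should be close to zero
```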
#### Rewriting as a first-order system
We can also represent the oscillator as a system of first-order differential equations (here with an extra damping term $-\omega_1^2 v$ added to the velocity equation)
$$
\begin{cases}
\frac{\mathrm{d}x}{\mathrm{d}t} &= v \\
\frac{\mathrm{d}v}{\mathrm{d}t} &= -\omega_0^2 x - \omega_1^2 v
\end{cases}
$$
Let us rewrite the system of equations as a matrix equation<br>
$$
Ay = \frac{\mathrm{d}y}{\mathrm{d}t}
$$
where
$$
y = \begin{pmatrix} x \\ v \end{pmatrix}, \\
A = \begin{pmatrix} 0 & 1 \\ -\omega_0^2 & - \omega_1^2\end{pmatrix}
$$
Expanding the product, this reads
$$
\frac{\mathrm{d}}{\mathrm{d}t}\begin{pmatrix} x \\ v \end{pmatrix} = \begin{pmatrix} v \\ -\omega_0^2 x - \omega_1^2 v \end{pmatrix}
$$
We can now implement the model in vector form.
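A minimal sketch of this vector-form model integrated with `odeint` (the values of `w0`, `w1` and the initial state are our own illustrative choices):
```python
# Damped oscillator as dy/dt = A y, with state y = (x, v)
w0, w1 = 2.0, 0.5
A = np.array([[0.0, 1.0],
              [-w0**2, -w1**2]])
def vector_model(y, t, A):
    return A.dot(y)
ts = np.linspace(0, 20, 1000)
ys = odeint(vector_model, [1.0, 0.0], ts, args=(A,))
plt.plot(ts, ys[:, 0], label='$x(t)$')
plt.plot(ts, ys[:, 1], label='$v(t)$')
plt.xlabel('time')
plt.legend()
plt.show()
```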
|
{"hexsha": "a16afc86063d877a66f9236ef780fbece11260f2", "size": 61090, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "analise-numerica-edo-2019-1/.ipynb_checkpoints/EDOs-checkpoint.ipynb", "max_stars_repo_name": "mirandagil/university-courses", "max_stars_repo_head_hexsha": "e70ce5262555e84cffb13e53e139e7eec21e8907", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-23T16:39:01.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-23T16:39:01.000Z", "max_issues_repo_path": "analise-numerica-edo-2019-1/.ipynb_checkpoints/EDOs-checkpoint.ipynb", "max_issues_repo_name": "mirandagil/university-courses", "max_issues_repo_head_hexsha": "e70ce5262555e84cffb13e53e139e7eec21e8907", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analise-numerica-edo-2019-1/.ipynb_checkpoints/EDOs-checkpoint.ipynb", "max_forks_repo_name": "mirandagil/university-courses", "max_forks_repo_head_hexsha": "e70ce5262555e84cffb13e53e139e7eec21e8907", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 179.6764705882, "max_line_length": 35016, "alphanum_fraction": 0.8785234899, "converted": true, "num_tokens": 3235}
|
import networkx as nx
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Module-level palette used to color graph nodes by cluster id; plot2d_graph
# indexes into this list, so it must be defined before that function runs.
colors = sns.color_palette('bright', 10)
def plot2d_graph(graph):
pos = nx.get_node_attributes(graph, 'pos')
c = [colors[i % (len(colors))]
for i in nx.get_node_attributes(graph, 'cluster').values()]
if c: # is set
nx.draw(graph, pos, node_color=c, node_size=0.25)
else:
nx.draw(graph, pos, node_size=0.25)
plt.show(block=False)
def plot2d_data(df):
    if (len(df.columns) > 3):
        print("Plot Warning: more than 2-Dimensions!")
    limit = len(df.index)
    xx = []
    yy = []
    cc = []
    for i in range(limit):
        # Column 0 holds "x,y" as a comma-separated string; 'cluster' holds
        # the cluster id assigned to that point.
        valor = str(df[0][i]).split(",")
        xx.append(float(valor[0]))
        yy.append(float(valor[1]))
        cc.append(float(df['cluster'][i]))
    # Build the DataFrame from the parsed columns and color points by cluster.
    dff = pd.DataFrame({'x': xx, 'y': yy, 'cluster': cc})
    dff.plot.scatter(x='x',
                     y='y',
                     c='cluster', colormap='gist_rainbow')
    plt.show()
|
{"hexsha": "c8a14f61d018fa131727e9bd20b1fbf48c3458fc", "size": 1119, "ext": "py", "lang": "Python", "max_stars_repo_path": "visualization.py", "max_stars_repo_name": "champagneSeth/chameleon-clustering-for-python", "max_stars_repo_head_hexsha": "cf271188b28e9a7269dbb90facdc8bbf6f7274e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-06-20T02:14:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T15:48:04.000Z", "max_issues_repo_path": "visualization.py", "max_issues_repo_name": "champagneSeth/chameleon-clustering-for-python", "max_issues_repo_head_hexsha": "cf271188b28e9a7269dbb90facdc8bbf6f7274e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visualization.py", "max_forks_repo_name": "champagneSeth/chameleon-clustering-for-python", "max_forks_repo_head_hexsha": "cf271188b28e9a7269dbb90facdc8bbf6f7274e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-11T07:02:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-05T07:44:24.000Z", "avg_line_length": 28.6923076923, "max_line_length": 68, "alphanum_fraction": 0.5674709562, "include": true, "reason": "import numpy,import networkx", "num_tokens": 303}
|
from __future__ import print_function
from collections import defaultdict, Counter
import colorama
import itertools
colorama.init()
colorama.deinit()
from copy import copy
from StringIO import StringIO
from gflags import (DEFINE_list, DEFINE_float, DEFINE_bool, DEFINE_string,
DuplicateFlagError, FLAGS)
import logging
import numpy as np
import operator
import os
import sys
from textwrap import wrap
from causeway.because_data import CausationInstance, OverlappingRelationInstance
from nlpypline.data import StanfordParsedSentence, Token
from nlpypline.util import (Enum, print_indented, truncated_string,
get_terminal_size, merge_dicts, make_setter,
make_getter)
from nlpypline.util.diff import SequenceDiff
from nlpypline.util.metrics import (ClassificationMetrics, ConfusionMatrix,
AccuracyMetrics, safe_divide,
FloatWithStddev)
np.seterr(divide='ignore') # Ignore nans in division
try:
DEFINE_list(
'iaa_given_connective_ids', [], "Annotation IDs for connectives"
" that were given as gold and should be treated separately for IAA.")
DEFINE_float(
'iaa_min_partial_overlap', 0.5, "Minimum fraction of the larger of two"
" annotations that must be overlapping for the two annotations to be"
" considered a partial match.")
DEFINE_bool('iaa_log_confusion', False, "Log confusion matrices for IAA.")
DEFINE_bool('iaa_log_stats', True, 'Log IAA statistics.')
DEFINE_bool('iaa_log_differences', False,
'Log differing annotations during IAA comparison.')
DEFINE_bool('iaa_log_agreements', False,
'Log agreeing annotations during IAA comparison.')
DEFINE_string('iaa_cause_color', 'Blue',
'ANSI color to use for formatting cause words in IAA'
' comparison output')
DEFINE_string('iaa_effect_color', 'Red',
'ANSI color to use for formatting cause words in IAA'
' comparison output')
DEFINE_string('iaa_means_color', 'Magenta',
'ANSI color to use for formatting means words in IAA'
' comparison output')
DEFINE_bool('iaa_force_color', False,
"Force ANSI color in IAA comparisons even when we're not"
" outputting to a TTY")
DEFINE_bool('iaa_check_punct', False,
'Whether IAA should compare punctuation tokens to determine'
' argument matches')
DEFINE_bool('iaa_log_by_connective', False,
"When logging a stage's results, include per-connective stats")
DEFINE_bool('iaa_log_by_category', False,
"When logging a stage's results, include per-category stats")
DEFINE_bool('iaa_compute_overlapping', True,
"Compute overlapping relations as part of causality IAA stats")
except DuplicateFlagError as e:
logging.warn('Ignoring flag redefinitions; assuming module reload')
def make_annotation_comparator(allow_partial):
def match_annotations(token_list_1, token_list_2):
if allow_partial:
min_partial_overlap = FLAGS.iaa_min_partial_overlap
else:
min_partial_overlap = 1.0
# Just in case we accidentally added tokens to an annotation in the
# wrong order, sort by token position.
sort_key = lambda token: (token.parent_sentence.document_char_offset,
token.index)
offsets_1 = [(token.start_offset, token.end_offset)
for token in sorted(token_list_1, key=sort_key)]
offsets_2 = [(token.start_offset, token.end_offset)
for token in sorted(token_list_2, key=sort_key)]
if offsets_1 == offsets_2:
return True
# No partial matching allowed
if min_partial_overlap == 1.0:
return False
a1_length, a2_length = len(offsets_1), len(offsets_2)
num_overlapping = float(len(set(offsets_1).intersection(set(offsets_2))))
if a1_length > a2_length:
fraction_of_larger_overlapping = num_overlapping / a1_length
else:
fraction_of_larger_overlapping = num_overlapping / a2_length
return fraction_of_larger_overlapping > min_partial_overlap
return match_annotations
compare_annotations_partial = make_annotation_comparator(True)
compare_annotations_exact = make_annotation_comparator(False)
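# Illustrative example (hypothetical spans): with
# FLAGS.iaa_min_partial_overlap = 0.5, an annotation covering token offsets
# {A, B, C} and another covering {B, C, D} share 2 of 3 offsets, so the
# overlapping fraction of the larger annotation is 2/3 > 0.5:
# compare_annotations_partial matches the pair; compare_annotations_exact
# does not.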
def _get_printable_connective_word(word):
if sys.stdout.isatty() or FLAGS.iaa_force_color:
return colorama.Style.BRIGHT + word.upper() + colorama.Style.RESET_ALL
else:
return word.upper()
def _wrapped_sentence_highlighting_instance(instance):
# TODO: use GFlags wrapping machinery?
sentence = instance.sentence
words = [(_get_printable_connective_word(t.original_text)
if t in instance.connective else t.original_text)
for t in sentence.tokens[1:]]
lines = wrap(' '.join(words), 100, subsequent_indent=' ',
break_long_words=False)
return '\n'.join(lines)
class ArgumentMetrics(object):
def __init__(self, span_metrics, jaccard, instance_count):
self.span_metrics = span_metrics
self.jaccard = jaccard
# Track instance counts so that when we add Jaccard indices, we know
# how much to weight each one by to get the right average
self._instance_count = instance_count
def __add__(self, other):
if self.span_metrics is not None and other.span_metrics is not None:
added_span_metrics = self.span_metrics + other.span_metrics
else:
added_span_metrics = None
instance_count = self._instance_count + other._instance_count
# Ignore NaNs and Nones.
if self.jaccard is None or np.isnan(self.jaccard):
added_jaccard = other.jaccard
elif other.jaccard is None or np.isnan(other.jaccard):
added_jaccard = self.jaccard
else:
added_jaccard = safe_divide(self.jaccard * self._instance_count
+ other.jaccard * other._instance_count,
instance_count)
return ArgumentMetrics(added_span_metrics, added_jaccard,
instance_count)
class _RelationMetrics(object):
IDsConsidered = Enum(['GivenOnly', 'NonGivenOnly', 'Both'])
_SAVED_ATTR_NAMES = ['gold_only_instances', 'predicted_only_instances',
'agreeing_instances', 'property_differences',
'argument_differences']
# To be overridden by subclasses
_GOLD_INSTANCES_PROPERTY_NAME = None
_INSTANCE_CLASS = None
# TODO: Refactor order of parameters
def __init__(self, gold, predicted, allow_partial, save_differences,
ids_considered, compare_args, properties_to_compare,
pairwise_only, save_agreements, instances_property_name):
# properties_to_compare is a list of (property_name,
# property_values_enum, should_compare_property) tuples.
assert len(gold) == len(predicted), (
"Cannot compute IAA for different-sized datasets")
if ids_considered is None:
ids_considered = _RelationMetrics.IDsConsidered.Both
self.allow_partial = allow_partial
self._annotation_comparator = make_annotation_comparator(allow_partial)
self.ids_considered = ids_considered
self.save_differences = save_differences
self.save_agreements = save_agreements
self.pairwise_only = pairwise_only
self.instances_property_name = instances_property_name
self.properties_to_compare = properties_to_compare
self.gold_only_instances = []
self.predicted_only_instances = []
self.agreeing_instances = []
self.argument_differences = []
self.property_differences = []
# Compute attributes that take a little more work.
self.connective_metrics, matches = self._match_connectives(
gold, predicted)
for property_name, property_enum, compare_property in (
properties_to_compare):
matrix_attr_name = '%s_matrix' % property_name
if compare_property:
matrix = self._compute_agreement_matrix(matches, property_enum,
property_name, gold)
setattr(self, matrix_attr_name, matrix)
else:
setattr(self, matrix_attr_name, None)
if compare_args:
self._match_arguments(matches, gold)
else:
null_metrics = ArgumentMetrics(None, None, 0)
for arg_num in range(self._INSTANCE_CLASS._num_args):
setattr(self, 'arg%d_metrics' % arg_num, null_metrics)
def __add__(self, other):
if (self.allow_partial != other.allow_partial or
self.properties_to_compare != other.properties_to_compare):
raise ValueError("Can't add binary relation annotation metrics with"
" different comparison criteria")
sum_metrics = copy(self)
# Add recorded instances/differences.
for attr_name in self._SAVED_ATTR_NAMES:
getattr(sum_metrics, attr_name).extend(getattr(other, attr_name))
# Add together submetrics, if they exist
sum_metrics.connective_metrics += other.connective_metrics
submetric_names = ['_'.join([property_name, 'matrix']) for
(property_name, _, _) in self.properties_to_compare]
submetric_names += ['%s_metrics' % arg_type for arg_type
in self._INSTANCE_CLASS.get_arg_types()]
for attr_name in submetric_names:
self_attr = getattr(self, attr_name)
other_attr = getattr(other, attr_name)
if self_attr is not None and other_attr is not None:
attr_value = self_attr + other_attr
else:
attr_value = None
setattr(sum_metrics, attr_name, attr_value)
return sum_metrics
def __get_instances(self, sentence, is_gold):
instances = []
property_name = [self.instances_property_name,
self._GOLD_INSTANCES_PROPERTY_NAME][is_gold]
for instance in getattr(sentence, property_name):
if (# First set of conditions: matches givenness specified
(self.ids_considered == self.IDsConsidered.Both or
(instance.id in FLAGS.iaa_given_connective_ids and
self.ids_considered == self.IDsConsidered.GivenOnly) or
(instance.id not in FLAGS.iaa_given_connective_ids and
self.ids_considered == self.IDsConsidered.NonGivenOnly))
# Second set of conditions: is pairwise if necessary
and (not is_gold or not self.pairwise_only or
(instance.arg0 != None and instance.arg1 != None))):
instances.append(instance)
return instances
@staticmethod
def get_connective_matches(gold_instances, predicted_instances,
allow_partial):
def compare_connectives_exact(instance_1, instance_2):
return compare_annotations_exact(
instance_1.connective, instance_2.connective)
if allow_partial:
def compare_connectives(instance_1, instance_2):
return compare_annotations_partial(
instance_1.connective, instance_2.connective)
else:
compare_connectives = compare_connectives_exact
# Sort instances in case they're somehow out of order, or there are
# multiple annotations with the same connective that may be unordered.
# TODO: is this sufficient? Do we need to worry about, e.g., ordering by
# head, or what happens if arg0 is None?
sort_key = lambda inst: (
inst.connective[0].start_offset,
inst.arg0[0].start_offset if inst.arg0 else 0,
inst.arg1[0].start_offset if inst.arg1 else 0)
matching_instances = []
gold_only_instances = []
predicted_only_instances = []
# If we're allowing partial matches, we don't want any partial
# matches to override full matches. So we first do an exact match,
# and remove the ones that matched from the partial matching.
if allow_partial:
diff = SequenceDiff(gold_instances, predicted_instances,
compare_connectives_exact, sort_key)
matching_pairs = diff.get_matching_pairs()
matching_instances.extend(matching_pairs)
# Instances that were gold-only or predicted-only may still generate
# partial matches.
gold_instances = diff.get_a_only_elements()
predicted_instances = diff.get_b_only_elements()
diff = SequenceDiff(gold_instances, predicted_instances,
compare_connectives, sort_key)
matching_instances.extend(diff.get_matching_pairs())
gold_only_instances.extend(diff.get_a_only_elements())
predicted_only_instances.extend(diff.get_b_only_elements())
return matching_instances, gold_only_instances, predicted_only_instances
def _match_connectives(self, gold, predicted):
matching_instances = []
gold_only_instances = []
predicted_only_instances = []
for gold_sentence, predicted_sentence in zip(gold, predicted):
assert (gold_sentence.original_text ==
predicted_sentence.original_text), (
"Can't compare annotations on non-identical sentences")
gold_instances = self.__get_instances(gold_sentence, True)
predicted_instances = self.__get_instances(predicted_sentence,
False)
sentence_matching, sentence_gold_only, sentence_predicted_only = (
self.get_connective_matches(
gold_instances, predicted_instances, self.allow_partial))
matching_instances.extend(sentence_matching)
gold_only_instances.extend(sentence_gold_only)
predicted_only_instances.extend(sentence_predicted_only)
if (self.ids_considered ==
_RelationMetrics.IDsConsidered.GivenOnly):
assert len(matching_instances) == len(
FLAGS.iaa_given_connective_ids), (
"Didn't find all expected given connectives! Perhaps"
" annotators re-annotated spans with different IDs?")
# Leave connective_metrics as None to indicate that there aren't
# any interesting values here. (Everything should be perfect.)
connective_metrics = None
# "Both" will only affect the connective stats if there are actually
# some given connectives.
elif (self.ids_considered == _RelationMetrics.IDsConsidered.Both
and FLAGS.iaa_given_connective_ids):
connective_metrics = None
else:
connective_metrics = ClassificationMetrics(
len(matching_instances), len(predicted_only_instances),
len(gold_only_instances))
def sentences_by_file(sentences):
by_file = defaultdict(list)
for sentence in sentences:
filename = os.path.split(sentence.source_file_path)[-1]
by_file[filename].append(sentence)
return by_file
if self.save_differences or self.save_agreements:
gold_by_file = sentences_by_file(gold)
if self.save_differences:
predicted_by_file = sentences_by_file(predicted)
self.gold_only_instances = [
(gold_by_file[os.path.split(i.sentence.source_file_path)[-1]]
.index(i.sentence) + 1, i)
for i in gold_only_instances]
self.predicted_only_instances = [
(predicted_by_file[os.path.split(
i.sentence.source_file_path)[-1]]
.index(i.sentence) + 1, i)
for i in predicted_only_instances]
if self.save_agreements:
self.agreeing_instances = [
(gold_by_file[os.path.split(i1.sentence.source_file_path)[-1]]
.index(i1.sentence) + 1, i1)
for i1, _i2 in matching_instances]
return (connective_metrics, matching_instances)
def _get_jaccard(self, matches, arg_property_name):
'''
Returns average Jaccard index across `matches` for property
`arg_property_name`.
'''
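        # Example: gold argument token indices {3, 4, 5} vs. predicted
        # {4, 5, 6} share 2 indices, so that pair's Jaccard index is
        # 2 / (3 + 3 - 2) = 0.5; the mean over all matched pairs is returned.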
iaa_check_punct = FLAGS.iaa_check_punct
jaccard_avg_numerator = 0
def get_arg_indices(instance):
arg = getattr(instance, arg_property_name)
if arg is None:
return []
else:
if not iaa_check_punct:
arg = self._filter_punct_tokens(arg)
return [token.index for token in arg]
for instance_pair in matches:
i1_indices, i2_indices = [get_arg_indices(i) for i in instance_pair]
if i1_indices or i2_indices:
# TODO: This could maybe be done more efficiently by walking
# along each set of indices.
diff = SequenceDiff(i1_indices, i2_indices)
num_matching = len(diff.get_matching_pairs())
match_jaccard = num_matching / float(
len(i1_indices) + len(i2_indices) - num_matching)
else: # both empty arguments; overlap is defined as 1.
match_jaccard = 1.0
jaccard_avg_numerator += match_jaccard
return safe_divide(jaccard_avg_numerator, len(matches))
def _compute_agreement_matrix(self, matches, labels_enum, property_name,
gold_sentences):
labels_1 = []
labels_2 = []
def log_missing(instance, number):
print(property_name,
('property not set in Annotation %d;' % number),
'not including in analysis (sentence: "',
_wrapped_sentence_highlighting_instance(instance).encode(
'utf-8') + '")',
file=sys.stderr)
for instance_1, instance_2 in matches:
property_1 = getattr(instance_1, property_name)
property_2 = getattr(instance_2, property_name)
if property_1 >= len(labels_enum) or property_1 is None:
log_missing(instance_1, 1)
elif property_2 >= len(labels_enum) or property_2 is None:
log_missing(instance_2, 2)
else:
labels_1.append(labels_enum[property_1])
labels_2.append(labels_enum[property_2])
sentence_num = gold_sentences.index(instance_1.sentence) + 1
if property_1 != property_2 and self.save_differences:
self.property_differences.append(
(instance_1, instance_2, labels_enum, sentence_num))
return ConfusionMatrix(labels_1, labels_2)
_MATCH_TYPES = Enum(['TP', 'FP', 'FN', 'Mismatch'])
def _match_instance_args(self, arg_1, arg_2):
# Normalize null args
if arg_1 is None: arg_1 = []
if arg_2 is None: arg_2 = []
# No arguments -> don't count this argument for span match stats
if not arg_1 and not arg_2:
return None
spans_match = self._annotation_comparator(arg_1, arg_2)
if spans_match:
return self._MATCH_TYPES.TP
elif not arg_1: # no gold
return self._MATCH_TYPES.FP
elif not arg_2: # no predicted
return self._MATCH_TYPES.FN
else:
return self._MATCH_TYPES.Mismatch
@staticmethod
def _filter_punct_tokens(tokens):
return [t for t in tokens if t.pos not in Token.PUNCT_TAGS]
def _match_arguments(self, matches, gold=None):
# NOTE: Assumes that self.connectives is available with tp, fp, fn.
iaa_check_punct = FLAGS.iaa_check_punct
arg_match_counts = [Counter() for _ in
range(self._INSTANCE_CLASS._num_args)]
MATCH_TYPES = self._MATCH_TYPES
for instance_1, instance_2 in matches:
if gold is not None:
sentence_num = gold.index(instance_1.sentence) + 1
else:
sentence_num = -1 # No valid sentence number
i1_args = instance_1.get_args()
i2_args = instance_2.get_args()
all_args_match = True
for arg_num in range(self._INSTANCE_CLASS._num_args):
if iaa_check_punct:
first_arg, second_arg = i1_args[arg_num], i2_args[arg_num]
else:
first_arg, second_arg = [
self._filter_punct_tokens(arg) if arg else None
for arg in [i1_args[arg_num], i2_args[arg_num]]]
match_result = self._match_instance_args(first_arg, second_arg)
arg_match_counts[arg_num].update([match_result])
all_args_match = (all_args_match and
match_result in [MATCH_TYPES.TP, None])
# If there's any difference, record it.
if self.save_differences and not all_args_match:
self.argument_differences.append((instance_1, instance_2,
sentence_num))
for arg_num in range(self._INSTANCE_CLASS._num_args):
arg_name = 'arg%d' % arg_num
arg_counts = arg_match_counts[arg_num]
arg_span_metrics = ClassificationMetrics(
arg_counts[MATCH_TYPES.TP],
self.connective_metrics.fp + arg_counts[MATCH_TYPES.FP]
+ arg_counts[MATCH_TYPES.Mismatch],
self.connective_metrics.fn + arg_counts[MATCH_TYPES.FN]
+ arg_counts[MATCH_TYPES.Mismatch])
arg_jaccard = self._get_jaccard(matches, arg_name)
arg_metrics = ArgumentMetrics(arg_span_metrics, arg_jaccard,
len(matches))
setattr(self, arg_name + '_metrics', arg_metrics)
def pp(self, log_confusion=None, log_stats=None, log_differences=None,
log_agreements=None, log_by_connective=None, indent=0,
log_file=sys.stdout):
# Flags aren't available as defaults when the function is created, so
# set the defaults here.
if log_confusion is None:
log_confusion = FLAGS.iaa_log_confusion
if log_stats is None:
log_stats = FLAGS.iaa_log_stats
if log_differences is None:
log_differences = FLAGS.iaa_log_differences
if log_agreements is None:
log_agreements = FLAGS.iaa_log_agreements
if log_by_connective is None:
log_by_connective = FLAGS.iaa_log_by_connective
if log_differences:
colorama.reinit()
if log_agreements:
print_indented(indent, "Agreeing instances:", file=log_file)
for sentence_num, instance in self.agreeing_instances:
self._log_instance_for_connective(
instance, sentence_num, "", indent + 1, log_file)
if log_differences and (
self.gold_only_instances or self.predicted_only_instances
or self.property_differences or self.argument_differences):
print_indented(indent, 'Annotation differences:', file=log_file)
for sentence_num, instance in self.gold_only_instances:
self._log_instance_for_connective(
instance, sentence_num, "Annotator 1 only:", indent + 1,
log_file)
for sentence_num, instance in self.predicted_only_instances:
self._log_instance_for_connective(
instance, sentence_num, "Annotator 2 only:", indent + 1,
log_file)
for property_name, property_enum, comparing_property in (
self.properties_to_compare):
if comparing_property:
self._log_property_differences(property_name, property_enum,
indent + 1, log_file)
self._log_arg_label_differences(indent + 1, log_file)
# Ignore connective-related metrics if we have nothing interesting to
# show there.
printing_connective_metrics = (log_stats and self.connective_metrics)
if printing_connective_metrics or log_confusion:
print_indented(indent, 'Connectives:', file=log_file)
if printing_connective_metrics:
print_indented(indent + 1, self.connective_metrics, file=log_file)
if log_stats or log_confusion:
for property_name, property_enum, comparing_property in (
self.properties_to_compare):
if comparing_property:
matrix_attr_name = '%s_matrix' % property_name
self._log_property_metrics(
property_name, getattr(self, matrix_attr_name),
indent + 1, log_confusion, log_stats, log_file)
# If any argument properties are set, all should be.
if log_stats and self.arg0_metrics is not None:
print_indented(indent, 'Arguments:', file=log_file)
for arg_type in self._INSTANCE_CLASS.get_arg_types():
arg_name = self._INSTANCE_CLASS.arg_names[arg_type]
print_indented(indent + 1, arg_name.title(),
's:' if arg_name[-1] != 's' else ':',
sep='', file=log_file)
print_indented(indent + 2, 'Spans:', file=log_file)
print_indented(
indent + 3,
getattr(self, arg_type + '_metrics').span_metrics,
file=log_file)
print_indented(indent + 2, 'Jaccard index (conditional): ',
getattr(self, arg_type + '_metrics').jaccard,
file=log_file)
if log_differences:
colorama.deinit()
if log_by_connective and any(getattr(self, attr_name)
for attr_name in self._SAVED_ATTR_NAMES):
print(file=log_file)
print_indented(indent, 'Metrics by connective:', file=log_file)
by_connective = self.metrics_by_connective()
print_indented(indent + 1, self._csv_metrics(by_connective),
file=log_file)
def metrics_by_connective(self):
return self.get_aggregate_metrics(stringify_connective)
def get_aggregate_metrics(self, instance_to_category):
metrics = defaultdict(lambda: type(self)([], [], False))
# Compute connective accuracy metrics by category.
for _, instance in self.agreeing_instances:
metrics[instance_to_category(instance)].connective_metrics.tp += 1
for _, instance in self.gold_only_instances:
metrics[instance_to_category(instance)].connective_metrics.fn += 1
for _, instance in self.predicted_only_instances:
metrics[instance_to_category(instance)].connective_metrics.fp += 1
for category_metrics in metrics.values():
category_metrics.connective_metrics._finalize_counts()
# Compute arg match metrics by connective.
arg_diffs_by_category = defaultdict(list)
for instance_1, instance_2, _ in self.argument_differences:
connective = instance_to_category(instance_1)
arg_diffs_by_category[connective].append((instance_1, instance_2))
for connective, conn_matches in arg_diffs_by_category.iteritems():
conn_metrics = metrics[connective]
if self.arg0_metrics is not None: # we're matching arguments
conn_metrics._match_arguments(conn_matches)
else:
null_metrics = ArgumentMetrics(None, None, 0)
for arg_num in range(self._INSTANCE_CLASS._num_args):
setattr(conn_metrics, 'arg%d_metrics' % arg_num,
null_metrics)
return metrics
def __repr__(self):
'''
This is a dumb hack, but it's easier than trying to rewrite all of pp to
operate on strings, and possibly faster too (since then we'd have to
keep copying strings over to concatenate them).
'''
string_buffer = StringIO()
self.pp(indent=0, log_file=string_buffer)
return string_buffer.getvalue()
@staticmethod
def aggregate(metrics_list):
'''
Aggregates IAA statistics. Classification and accuracy metrics are
averaged; confusion matrices are summed.
'''
assert metrics_list, "Can't aggregate empty list of metrics!"
metrics_type = type(metrics_list[0])
aggregated = object.__new__(metrics_type)
aggregated.ids_considered = None
aggregated.save_differences = any(m.save_differences
for m in metrics_list)
# TODO: confirm that all properties_to_compare lists are the same?
aggregated.properties_to_compare = metrics_list[0].properties_to_compare
# Save lists of instances needed for metrics_by_connective.
for attr_name in metrics_type._SAVED_ATTR_NAMES:
if aggregated.save_differences:
all_relevant_instances = itertools.chain.from_iterable(
getattr(m, attr_name) for m in metrics_list)
setattr(aggregated, attr_name, list(all_relevant_instances))
else:
setattr(aggregated, attr_name, [])
aggregated.connective_metrics = (
metrics_list[0].connective_metrics.average(
[m.connective_metrics for m in metrics_list]))
property_matrices = defaultdict(list)
for m in metrics_list:
for property_name, _property_enum, compare_property in (
m.properties_to_compare):
matrix_attr_name = '%s_matrix' % property_name
if not compare_property:
property_matrices[matrix_attr_name] = None
else:
matrices = property_matrices[matrix_attr_name]
if matrices is not None:
matrices.append(getattr(m, matrix_attr_name))
for matrix_name, matrices in property_matrices.iteritems():
try:
aggregated_matrix = reduce(operator.add, matrices)
except TypeError: # happens if matrices is None
aggregated_matrix = None
setattr(aggregated, matrix_name, aggregated_matrix)
for arg_type in metrics_type._INSTANCE_CLASS.get_arg_types():
arg_metrics_attr_name = arg_type + '_metrics'
arg_metrics = object.__new__(ArgumentMetrics)
setattr(aggregated, arg_metrics_attr_name, arg_metrics)
            span_metrics_values = [
                getattr(m, arg_metrics_attr_name).span_metrics
                for m in metrics_list]
span_metrics_values = [v for v in span_metrics_values
if v is not None]
if span_metrics_values:
arg_metrics.span_metrics = span_metrics_values[0].average(
span_metrics_values)
else:
arg_metrics.span_metrics = None
jaccard_values = [getattr(m, arg_metrics_attr_name).jaccard
for m in metrics_list]
jaccard_values = [v for v in jaccard_values if v is not None]
if jaccard_values: # At least some are not None
jaccard_values = [v for v in jaccard_values if not np.isnan(v)]
if jaccard_values:
arg_metrics.jaccard = FloatWithStddev.from_list(
jaccard_values)
else:
arg_metrics.jaccard = np.nan
else:
arg_metrics.jaccard = None
return aggregated
def _log_property_metrics(self, name, matrix, indent, log_confusion,
log_stats, log_file):
print_name = name.title() + 's'
print_indented(indent, print_name, ':', sep='', file=log_file)
if log_confusion:
print_indented(indent + 1, matrix.pretty_format(metrics=log_stats),
file=log_file)
else: # we must be logging just stats
print_indented(indent + 1, matrix.pretty_format_metrics(),
file=log_file)
@staticmethod
def _log_instance_for_connective(instance, sentence_num, msg, indent,
log_file):
filename = os.path.split(instance.sentence.source_file_path)[-1]
print_indented(
indent, msg,
_wrapped_sentence_highlighting_instance(instance).encode('utf-8'),
'(%s:%d)' % (filename, sentence_num),
file=log_file)
@staticmethod
def _print_with_labeled_args(instance, indent, out_file, arg_token_starts,
arg_token_ends):
'''
Prints sentences annotated according to a particular instance.
Connectives are printed in ALL CAPS. If run from a TTY, arguments are
printed in color; otherwise, they're indicated as '/arg0/' and
'*arg1*' (and _arg2_, if applicable).
'''
def get_printable_word(token):
word = token.original_text
if token in instance.connective:
word = _get_printable_connective_word(word)
for arg, token_start, token_end in zip(
instance.get_args(), arg_token_starts, arg_token_ends):
if arg and token in arg:
word = token_start + word + token_end
break
return word
tokens = instance.sentence.tokens[1:] # skip ROOT
# TODO: should this be checking out_file?
if sys.stdout.isatty() or FLAGS.iaa_force_color:
words = [token.original_text
for token in tokens]
# -10 allows viewing later in a slightly smaller terminal/editor.
available_term_width = get_terminal_size()[0] - indent * 4 - 10
else:
words = [get_printable_word(token) for token in tokens]
available_term_width = 75 - indent * 4 # 75 to allow for long words
lines = wrap(' '.join(words), available_term_width,
subsequent_indent=' ', break_long_words=False)
# For TTY, we now have to re-process the lines to add in color and
# capitalizations.
if sys.stdout.isatty() or FLAGS.iaa_force_color:
tokens_processed = 0
for i, line in enumerate(lines):
# NOTE: This assumes no tokens with spaces in them.
words = line.split()
zipped = zip(words, tokens[tokens_processed:])
printable_line = ' '.join([get_printable_word(token)
for _, token in zipped])
print_indented(indent, printable_line.encode('utf-8'))
tokens_processed += len(words)
if i == 0:
indent += 1 # future lines should be printed more indented
else: # non-TTY: we're ready to print
print_indented(indent, *[line.encode('utf-8') for line in lines],
sep='\n', file=out_file)
def _log_arg_label_differences(self, indent, log_file):
if sys.stdout.isatty() or FLAGS.iaa_force_color:
arg_token_starts = [
getattr(colorama.Fore, FLAGS.iaa_cause_color.upper()),
                getattr(colorama.Fore, FLAGS.iaa_effect_color.upper()),
getattr(colorama.Fore, FLAGS.iaa_means_color.upper())]
arg_token_ends = [colorama.Fore.RESET] * 3
else:
arg_token_starts = ['/', '*', '_']
arg_token_ends = arg_token_starts
for instance_1, instance_2, sentence_num in self.argument_differences:
filename = os.path.split(instance_1.sentence.source_file_path)[-1]
connective_text = StanfordParsedSentence.get_text_for_tokens(
                instance_1.connective).encode('utf-8')
print_indented(
indent, 'Arguments differ for connective "', connective_text,
'" (', filename, ':', sentence_num, ')',
' with ', sep='', end='', file=log_file)
arg_types = self._INSTANCE_CLASS.get_arg_types()
for arg_type, arg_token_start, arg_token_end in zip(
arg_types, arg_token_starts, arg_token_ends):
print(arg_token_start, self._INSTANCE_CLASS.arg_names[arg_type],
arg_token_end, sep='', end='', file=log_file)
            if arg_type != arg_types[-1]:
                print(', ', end='', file=log_file)
print(':', file=log_file)
self._print_with_labeled_args(
instance_1, indent + 1, log_file, arg_token_starts,
arg_token_ends)
# print_indented(indent + 1, "vs.", file=log_file)
self._print_with_labeled_args(
instance_2, indent + 1, log_file, arg_token_starts,
arg_token_ends)
def _log_property_differences(self, property_name, property_enum, indent,
log_file):
filtered_differences = [x for x in self.property_differences
if x[2] is property_enum]
for instance_1, instance_2, _, sentence_num in filtered_differences:
values = [property_enum[getattr(instance, property_name)]
for instance in [instance_1, instance_2]]
filename = os.path.split(instance_1.sentence.source_file_path)[-1]
encoded_instance = _wrapped_sentence_highlighting_instance(
instance_1).encode('utf-8')
print_indented(
indent, property_name, 's for connective "',
StanfordParsedSentence.get_text_for_tokens(
                    instance_1.connective).encode('utf-8'),
'" differ: ', values[0], ' vs. ', values[1], ' ',
'(', filename, ':', sentence_num, ': "', encoded_instance, '")',
sep='', file=log_file)
@staticmethod
def _csv_metrics(metrics_dict):
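        # Header key (a hedged reading of the fields used below): TP/FP/FN are
        # connective counts; P/R/F are span precision, recall, and F1, and J is
        # the Jaccard index; the _c columns come from arg0_metrics and the _e
        # columns from arg1_metrics (presumably the cause and effect arguments).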
lines = [',TP,FP,FN,P_c,R_c,F_c,J_c,P_e,R_e,F_e,J_e']
for category, metrics in metrics_dict.iteritems():
csv_metrics = (str(x) for x in [
category,
metrics.connective_metrics.tp,
metrics.connective_metrics.fp,
metrics.connective_metrics.fn,
metrics.arg0_metrics.span_metrics.precision,
metrics.arg0_metrics.span_metrics.recall,
metrics.arg0_metrics.span_metrics.f1,
metrics.arg0_metrics.jaccard,
metrics.arg1_metrics.span_metrics.precision,
metrics.arg1_metrics.span_metrics.recall,
metrics.arg1_metrics.span_metrics.f1,
metrics.arg1_metrics.jaccard])
lines.append(','.join(csv_metrics))
return '\n'.join(lines)
class CausalityMetrics(_RelationMetrics):
_GOLD_INSTANCES_PROPERTY_NAME = 'causation_instances'
_INSTANCE_CLASS = CausationInstance
# TODO: Refactor order of parameters
# TODO: provide both pairwise and non-pairwise stats
def __init__(self, gold, predicted, allow_partial, save_differences=False,
ids_considered=None, compare_degrees=True, compare_types=True,
compare_args=True, pairwise_only=False, save_agreements=False,
compute_overlapping=None,
causations_property_name=_GOLD_INSTANCES_PROPERTY_NAME):
properties_to_compare = [
('degree', CausationInstance.Degrees, compare_degrees),
('type', CausationInstance.CausationTypes, compare_types)]
super(CausalityMetrics, self).__init__(
gold, predicted, allow_partial, save_differences, ids_considered,
compare_args, properties_to_compare, pairwise_only, save_agreements,
causations_property_name)
if compute_overlapping is None:
compute_overlapping = FLAGS.iaa_compute_overlapping
if compute_overlapping:
self.overlapping = OverlappingRelMetrics(
gold, predicted, allow_partial, save_differences,
ids_considered, compare_types, compare_args, pairwise_only,
save_agreements)
else:
self.overlapping = None
def metrics_by_connective(self):
by_connective = super(CausalityMetrics, self).metrics_by_connective()
self._remap_by_connective(by_connective)
return by_connective
@staticmethod
def _remap_by_connective(by_connective):
to_remap = {'for too to': 'too for to', 'for too': 'too for',
'that now': 'now that', 'to for': 'for to', 'give': 'given',
'citizen-sparked': 'spark', 'encouraging': 'encourage',
'have to for to': 'for to have to', 'thank to': 'thanks to',
'on ground of': 'on grounds of', 'precipitating': 'precipitate',
'to need': 'need to', 'to need to': 'need to to',
'to take': 'take to', 'reason be': 'reason',
'result of': 'result' # 'of' and 'be' for old corpus
}
for connective, metrics in by_connective.items():
if connective.startswith('be '): # for old corpus
by_connective[connective[3:]] += metrics
del by_connective[connective]
# print 'Replaced', connective
elif connective in to_remap:
by_connective[to_remap[connective]] += metrics
del by_connective[connective]
# print "Replaced", connective
def metrics_by_connective_category(self):
return self.get_aggregate_metrics(self.get_connective_category)
def pp(self, log_confusion=None, log_stats=None, log_differences=None,
log_agreements=None, log_by_connective=None, log_by_category=None,
indent=0, log_file=sys.stdout):
super(CausalityMetrics, self).pp(
log_confusion, log_stats, log_differences, log_agreements,
log_by_connective, indent, log_file)
if log_by_category is None:
log_by_category = FLAGS.iaa_log_by_category
if log_by_category:
print(file=log_file)
print_indented(indent, 'Metrics by category:', file=log_file)
by_category = self.metrics_by_connective_category()
print_indented(indent + 1, self._csv_metrics(by_category),
file=log_file)
if self.overlapping:
print(file=log_file)
print_indented(indent, 'Overlapping:', file=log_file)
self.overlapping.pp(log_confusion, log_stats, log_differences,
log_agreements, log_by_connective, indent + 1,
log_file)
__connective_types = merge_dicts([
{'CC': 'Conjunctive (coordinating)', 'IN': 'Prepositional',
'MD': 'Verbal', 'TO': 'Prepositional'},
{'JJ' + suffix: 'Adjectival' for suffix in ['', 'R', 'S']},
{'VB' + suffix: 'Verbal' for suffix in ['', 'D', 'G', 'N', 'P', 'Z']},
{'RB' + suffix: 'Adverbial' for suffix in ['', 'R', 'S']},
{'NN' + suffix: 'Nominal' for suffix in ['', 'S', 'P', 'PS']}])
@staticmethod
def get_connective_category(instance):
connective = instance.connective
# Treat if/thens like normal ifs
if len(connective) == 1 or connective[1].lemma == 'then':
connective = connective[0]
if connective.pos == 'IN':
edge_label, _parent = instance.sentence.get_most_direct_parent(
connective)
if edge_label == 'mark' and connective.lemma != 'for':
return 'Conjunctive (subordinating)'
return CausalityMetrics.__connective_types.get(connective.pos,
connective.pos)
connective_head = instance.sentence.get_head(connective)
# Special MWE cases: "because of", "thanks to", "now that", "out of"
if len(connective) == 2:
stringified = stringify_connective(instance)
if stringified == 'because of':
return 'Adverbial'
elif stringified in ['thank to', 'now that']:
return 'Conjunctive (subordinating)'
elif stringified == 'out of':
return 'Prepositional'
# Anything tagged IN or TO is probably an argument realization word. If
# there are non-argument-realization words in there, or if it's a
# copula, it's complex.
if any(t.lemma == 'be'
or (t is not connective_head and (t.pos not in ['IN', 'TO']
or t.lemma in ['as', 'for']))
for t in connective):
return 'Complex'
# A connective that's headed by a preposition or adverb and is otherwise
# all prepositions is complex.
elif connective_head.pos in ['IN', 'TO', 'RB', 'RBR', 'RBS']:
return 'Complex'
elif connective_head.pos.startswith('NN') and len(connective) > 2:
return 'Complex'
else:
conn_type = CausalityMetrics.__connective_types[connective_head.pos]
if (conn_type == 'Adjectival'
and not StanfordParsedSentence.is_contiguous(connective)):
return 'Complex'
else:
return conn_type
def __add__(self, other):
summed = super(CausalityMetrics, self).__add__(other)
if self.overlapping and other.overlapping:
summed.overlapping = self.overlapping + other.overlapping
else:
summed.overlapping = None
return summed
@staticmethod
def aggregate(metrics_list):
aggregated = _RelationMetrics.aggregate(metrics_list)
all_overlapping = [m.overlapping for m in metrics_list]
if None in all_overlapping:
aggregated.overlapping = None
else:
aggregated.overlapping = reduce(operator.add, all_overlapping)
return aggregated
# Map (cause|effect|means)_metrics to argi_metrics.
for underlying_name, arg_name in CausationInstance.arg_names.iteritems():
underlying_name = underlying_name + '_metrics'
getter = make_getter(underlying_name)
setter = make_setter(underlying_name)
setattr(CausalityMetrics, arg_name + '_metrics', property(getter, setter))
class OverlappingRelMetrics(_RelationMetrics):
_INSTANCE_CLASS = OverlappingRelationInstance
_GOLD_INSTANCES_PROPERTY_NAME = 'overlapping_rel_instances'
def __init__(self, gold, predicted, allow_partial, save_differences=False,
ids_considered=None, compare_types=True, compare_args=True,
pairwise_only=False, save_agreements=False,
causations_property_name=_GOLD_INSTANCES_PROPERTY_NAME):
properties_to_compare = [
('type', OverlappingRelationInstance.RelationTypes, compare_types)]
super(OverlappingRelMetrics, self).__init__(
gold, predicted, allow_partial, save_differences, ids_considered,
compare_args, properties_to_compare, pairwise_only, save_agreements,
causations_property_name)
def stringify_connective(instance):
return ' '.join(t.lemma for t in instance.connective)
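# For example, a connective whose tokens have lemmas ['because', 'of'] is
# stringified as 'because of'; metrics_by_connective uses these strings as
# its category keys.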
|
{"hexsha": "018d527c08c32c4b4dc7afcf0482a944d122a481", "size": 48557, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/causeway/because_data/iaa/__init__.py", "max_stars_repo_name": "duncanka/causeway", "max_stars_repo_head_hexsha": "cc5e66d117ccc4e6fba710eac533c926edd7a668", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-10-16T08:16:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T21:55:19.000Z", "max_issues_repo_path": "src/causeway/because_data/iaa/__init__.py", "max_issues_repo_name": "duncanka/causeway", "max_issues_repo_head_hexsha": "cc5e66d117ccc4e6fba710eac533c926edd7a668", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/causeway/because_data/iaa/__init__.py", "max_forks_repo_name": "duncanka/causeway", "max_forks_repo_head_hexsha": "cc5e66d117ccc4e6fba710eac533c926edd7a668", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-04-24T20:38:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-24T13:11:23.000Z", "avg_line_length": 46.5998080614, "max_line_length": 81, "alphanum_fraction": 0.6170274111, "include": true, "reason": "import numpy", "num_tokens": 9930}
|
import MacroTools
const DSL_STATIC_ANNOTATION = :static
const DSL_ARG_GRAD_ANNOTATION = :grad
const DSL_RET_GRAD_ANNOTATION = :grad
const DSL_TRACK_DIFFS_ANNOTATION = :diffs
const DSL_NO_JULIA_CACHE_ANNOTATION = :nojuliacache
struct Argument
name::Symbol
typ::Union{Symbol,Expr}
annotations::Set{Symbol}
default::Union{Some{Any}, Nothing}
end
Argument(name, typ) = Argument(name, typ, Set{Symbol}(), nothing)
Argument(name, typ, annotations) = Argument(name, typ, annotations, nothing)
function parse_annotations(annotations_expr)
annotations = Set{Symbol}()
if isa(annotations_expr, Symbol)
push!(annotations, annotations_expr)
elseif isa(annotations_expr, Expr) && annotations_expr.head == :tuple
for annotation in annotations_expr.args
if !isa(annotation, Symbol)
error("syntax error in annotations_expr at $annotation")
else
push!(annotations, annotation)
end
end
else
error("syntax error in annotations at $annotations")
end
annotations
end
function parse_arg(expr)
if isa(expr, Symbol)
# x
arg = Argument(expr, :Any)
elseif isa(expr, Expr) && expr.head == :(::)
# x::Int
arg = Argument(expr.args[1], expr.args[2])
elseif isa(expr, Expr) && expr.head == :kw
# x::Int=1
sub_arg = parse_arg(expr.args[1])
default = Some(expr.args[2])
        arg = Argument(sub_arg.name, sub_arg.typ, sub_arg.annotations, default)
elseif isa(expr, Expr) && expr.head == :call
# (grad,foo)(x::Int)
annotations_expr = expr.args[1]
sub_arg = parse_arg(expr.args[2])
annotations = parse_annotations(annotations_expr)
arg = Argument(sub_arg.name, sub_arg.typ, annotations, sub_arg.default)
else
dump(expr)
error("syntax error in gen function argument at $expr")
end
arg
end
include("dynamic.jl")
include("static.jl")
function address_from_expression(lhs)
if lhs isa Symbol
QuoteNode(lhs)
else
error("Syntax error: Only a variable or an address expression can appear on the lefthand side of a ~. Invalid left-hand side: $(lhs).")
end
end
function desugar_tildes(expr)
MacroTools.postwalk(expr) do e
if MacroTools.@capture(e, {*} ~ rhs_)
:(@trace($rhs))
elseif MacroTools.@capture(e, {addr_} ~ rhs_)
:(@trace($rhs, $(addr)))
elseif MacroTools.@capture(e, lhs_ ~ rhs_)
addr_expr = address_from_expression(lhs)
:($lhs = @trace($rhs, $(addr_expr)))
else
e
end
end
end
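# A hedged illustration (not in the original source) of what `desugar_tildes`
# rewrites each supported tilde form into:
#
#   x ~ normal(0, 1)       becomes   x = @trace(normal(0, 1), :x)
#   {:y} ~ normal(0, 1)    becomes   @trace(normal(0, 1), :y)
#   {*} ~ foo()            becomes   @trace(foo())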
function parse_gen_function(ast, annotations)
ast = MacroTools.longdef(ast)
if ast.head != :function
error("syntax error at $ast in $(ast.head)")
end
if length(ast.args) != 2
error("syntax error at $ast in $(ast.args)")
end
signature = ast.args[1]
body = desugar_tildes(ast.args[2])
if signature.head == :(::)
(call_signature, return_type) = signature.args
elseif signature.head == :call
(call_signature, return_type) = (signature, :Any)
else
error("syntax error at $(signature)")
end
name = call_signature.args[1]
args = map(parse_arg, call_signature.args[2:end])
static = DSL_STATIC_ANNOTATION in annotations
if static
make_static_gen_function(name, args, body, return_type, annotations)
else
make_dynamic_gen_function(name, args, body, return_type, annotations)
end
end
macro gen(annotations_expr, ast)
# parse the annotations
annotations = parse_annotations(annotations_expr)
# parse the function definition
parse_gen_function(ast, annotations)
end
macro gen(ast)
parse_gen_function(ast, Set{Symbol}())
end
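# Hedged usage sketch (assumes the surrounding Gen runtime provides `normal`
# and the `@trace` machinery; not part of this file):
#
#   @gen function model(x::Float64)
#       slope ~ normal(0, 1)
#       return slope * x
#   end
#
#   @gen (static) function f(x)
#       y ~ normal(x, 1)
#       return y
#   end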
|
{"hexsha": "f3f5bdab58f551e6f90097f6d4c59f741202ee30", "size": 3822, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/dsl/dsl.jl", "max_stars_repo_name": "ali-ramadhan/Gen", "max_stars_repo_head_hexsha": "a24877a703b9944e1a115a67270716b35fe7e8b7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dsl/dsl.jl", "max_issues_repo_name": "ali-ramadhan/Gen", "max_issues_repo_head_hexsha": "a24877a703b9944e1a115a67270716b35fe7e8b7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dsl/dsl.jl", "max_forks_repo_name": "ali-ramadhan/Gen", "max_forks_repo_head_hexsha": "a24877a703b9944e1a115a67270716b35fe7e8b7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.094488189, "max_line_length": 143, "alphanum_fraction": 0.6462585034, "num_tokens": 940}
|
#!/usr/bin/env python2.7
import numpy as np
import subprocess
import sys
# custom imports
from mimLocator import *
from mimDrawer import *
def getCoord(prompt):
    # Pause for the user, then run forward_kinematics.py and parse the
    # X/Y/Z values it prints to stdout.
    raw_input(prompt)
    scriptsPopen = subprocess.Popen(["python", "forward_kinematics.py"],
                                    stdout=subprocess.PIPE)
    scriptsPopen.wait()
    output = scriptsPopen.stdout.read().strip()
    x, y, z = 0, 0, 0
    for l in output.split("\n"):
        if "X" in l:
            x = float(l.split(" ")[-1])
        if "Y" in l:
            y = float(l.split(" ")[-1])
        if "Z" in l:
            z = float(l.split(" ")[-1])
    # Note: Y and Z are swapped here, matching the original per-coordinate
    # parsing blocks.
    return [x, z, y]
def main():
    firstCoord = getCoord("Get first coordinate?: ")
    secondCoord = getCoord("Get second coordinate?: ")
    thirdCoord = getCoord("Get third coordinate?: ")
    print firstCoord, secondCoord, thirdCoord
# firstCoord = [0.623791890947, -0.012931432131, -0.0246224299732]
# secondCoord = [0.744245444214, 0.0430351361199, -0.00974796572852]
# thirdCoord = [0.631311941661, 0.0576380496228, -0.146248721335]
l = Locator(firstCoord, secondCoord, thirdCoord)
m = Drawer(firstCoord, secondCoord, thirdCoord)
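    # quartCircle and scale presumably come from the star-imported mim*
    # helper modules; the path traced below is a quarter circle scaled by 0.1
    # in the located plane.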
path = scale(quartCircle(10)[0], 0.1), scale(quartCircle(10)[1], 0.1)
for i in range(len(path[0])):
tp = l.planeToCartesian(path[0][i], path[1][i])
print tp
scriptsPopen = subprocess.Popen(["python", "inverse_kinematics.py", str(tp[0]), str(tp[1]), str(tp[2]), str(l.q[0]), str(l.q[1]), str(l.q[2]), str(l.q[3])])
scriptsPopen.wait()
#l = Locator([0.704303194185, -0.0689719359237, 0.0323706170901],[0.652674084377, -0.193660960182, 0.0901786136888],[0.839161903371, -0.0809407755815, 0.0937020338332])
##print l.planeToCartesian(1,1)
#print l.planeToCartesian(float(sys.argv[1]), float(sys.argv[2]))
if __name__ == '__main__':
main()
|
{"hexsha": "ca6c25f51b8753af603af900b83f575b04ed3a5b", "size": 2723, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "smdth/mimLab", "max_stars_repo_head_hexsha": "78a49c17a4e103841f49cd4b880561a490682864", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "smdth/mimLab", "max_issues_repo_head_hexsha": "78a49c17a4e103841f49cd4b880561a490682864", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "smdth/mimLab", "max_forks_repo_head_hexsha": "78a49c17a4e103841f49cd4b880561a490682864", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.597826087, "max_line_length": 172, "alphanum_fraction": 0.5714285714, "include": true, "reason": "import numpy", "num_tokens": 809}
|
This single-level, industrial-style building was constructed with a $5 million donation (http://dateline.ucdavis.edu/dl_detail.lasso?id6974) from the Anheuser-Busch Foundation. It is used by Viticulture and Enology for beer-brewing research. It is part of the five-building Robert Mondavi Institute for Wine and Food Science complex.
There is a webcam showing construction on the webcams page.
This building was originally to be called the Anheuser-Busch Brewing and Food Science Laboratory, but the Anheuser-Busch Foundation requested a name change to recognize August A. Busch III's contributions to the art of brewing.
http://www.news.ucdavis.edu/search/news_detail.lasso?id8809 UC Davis News & Information Article on the final bid process
http://wineserver.ucdavis.edu/content.php?id726 Viticulture and Oenology Department Statement on the Bid Process
|
{"hexsha": "b1cee256a0ce4ba61d476fa0e81017e1aa15b2f0", "size": 852, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/August_A._Busch_III_Brewing_and_Food_Science_Laboratory.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/August_A._Busch_III_Brewing_and_Food_Science_Laboratory.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/August_A._Busch_III_Brewing_and_Food_Science_Laboratory.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 71.0, "max_line_length": 326, "alphanum_fraction": 0.8251173709, "num_tokens": 187}
|
'''
PID control for SISO (single input single output) system
'''
import numpy as np
class PID():
def __init__(self, pgain=0, igain=0, dgain=0,
windup=False, method='euler', dt=0.01):
self.e_intg = 0
self.e_prev = 0 # initial guess for differentiator
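        # `windup` doubles as the symmetric saturation limit for the
        # integrator state; leaving it False disables anti-windup clamping.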
self.windup = windup
self.dt = dt
self.p = pgain
self.i = igain
self.d = dgain
        if method == 'euler':
            self.integrate = intg_euler
            self.differentiate = diff_euler
        else:
            raise ValueError("unsupported integration method: %s" % method)
def get(self, e: float) -> float:
dt = self.dt
e_i = self.e_intg
e_d = self.differentiate(e, self.e_prev, dt)
u = self.p * e + self.i * e_i + self.d * e_d
# Update
if self.windup:
self.e_intg = self.int_windup(self.integrate(e, self.e_intg, dt))
else:
self.e_intg = self.integrate(e, self.e_intg, dt)
self.e_prev = e
return u
def int_windup(self, x: float) -> float:
x_max = self.windup
x_min = -self.windup
if x > x_max:
x = x_max
elif x < x_min:
x = x_min
return x
def intg_euler(e, e_intg, dt):
return e_intg + e * dt
def diff_euler(e, e_prev, dt):
return (e - e_prev) / dt
if __name__ == '__main__':
    # Minimal smoke test: gains are passed as separate scalars, and the
    # controller output is obtained via get().
    ctrllr = PID(pgain=1, igain=2, dgain=3)
    e = 100
    print(ctrllr.get(e))
    print(ctrllr.e_intg, ctrllr.e_prev)
    e = -100
    print(ctrllr.get(e))
    print(ctrllr.e_intg, ctrllr.e_prev)
|
{"hexsha": "96dcc7eef8ead40c25106860c41cdffd476f00c2", "size": 1559, "ext": "py", "lang": "Python", "max_stars_repo_path": "fym/agents/PID.py", "max_stars_repo_name": "JungYT/fym", "max_stars_repo_head_hexsha": "d519c50086e3c7793b960e0326c92ed407836790", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-08-23T10:02:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T13:04:43.000Z", "max_issues_repo_path": "fym/agents/PID.py", "max_issues_repo_name": "JungYT/fym", "max_issues_repo_head_hexsha": "d519c50086e3c7793b960e0326c92ed407836790", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 110, "max_issues_repo_issues_event_min_datetime": "2019-08-23T08:09:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-29T06:54:48.000Z", "max_forks_repo_path": "fym/agents/PID.py", "max_forks_repo_name": "JungYT/fym", "max_forks_repo_head_hexsha": "d519c50086e3c7793b960e0326c92ed407836790", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-09-02T03:49:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-10T04:35:40.000Z", "avg_line_length": 23.6212121212, "max_line_length": 77, "alphanum_fraction": 0.5445798589, "include": true, "reason": "import numpy", "num_tokens": 488}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 2 11:00:06 2021
@author: Jose Antonio
"""
import networkx as nx
#G for test small ecore
G_test_small_ecore = nx.MultiDiGraph()
G_test_small_ecore.add_node(0, type = 'EPackage', atts = {'name':'<none>'})
G_test_small_ecore.add_node(1, type = 'EClass', atts = {'name':'<none>',
'abstract':False})
G_test_small_ecore.add_node(2, type = 'EDataType', atts = {'name':'<none>'})
G_test_small_ecore.add_node(3, type = 'EDataType', atts = {'name':'<none>'})
G_test_small_ecore.add_node(4, type = 'EReference', atts = {'name':'<none>',
'containment':True,
'lowerBound':0,
'upperBound':0})
G_test_small_ecore.add_node(5, type = 'EReference', atts = {'name':'<none>',
'containment':False,
'lowerBound':0,
'upperBound':0})
G_test_small_ecore.add_node(6, type = 'EReference', atts = {'name':'<none>',
'containment':False,
'lowerBound':0,
'upperBound':0})
G_test_small_ecore.add_edge(0, 1, type = 'eClassifiers')
G_test_small_ecore.add_edge(0, 2, type = 'eClassifiers')
G_test_small_ecore.add_edge(0, 3, type = 'eClassifiers')
G_test_small_ecore.add_edge(1, 0, type = 'ePackage')
G_test_small_ecore.add_edge(2, 0, type = 'ePackage')
G_test_small_ecore.add_edge(3, 0, type = 'ePackage')
G_test_small_ecore.add_edge(1, 4, type = 'eStructuralFeatures')
G_test_small_ecore.add_edge(1, 5, type = 'eStructuralFeatures')
G_test_small_ecore.add_edge(1, 6, type = 'eStructuralFeatures')
G_test_small_ecore.add_edge(4, 1, type = 'eContainingClass')
G_test_small_ecore.add_edge(5, 1, type = 'eContainingClass')
G_test_small_ecore.add_edge(6, 1, type = 'eContainingClass')
G_test_small_ecore.add_edge(4, 1, type = 'eType')
G_test_small_ecore.add_edge(5, 1, type = 'eType')
G_test_small_ecore.add_edge(6, 1, type = 'eType')
#G for test metafilter in small ecore
G_test_small_ecore_mf = nx.MultiDiGraph()
G_test_small_ecore_mf.add_node(0, type = 'EPackage', atts = {'name':'<none>'})
G_test_small_ecore_mf.add_node(1, type = 'EClass', atts = {'name':'<none>'})
G_test_small_ecore_mf.add_node(4, type = 'EReference', atts = {'name':'<none>',
'containment':True,
'lowerBound':0})
G_test_small_ecore_mf.add_node(5, type = 'EReference', atts = {'name':'<none>',
'containment':False,
'lowerBound':0})
G_test_small_ecore_mf.add_node(6, type = 'EReference', atts = {'name':'<none>',
'containment':False,
'lowerBound':0})
G_test_small_ecore_mf.add_edge(0, 1, type = 'eClassifiers')
G_test_small_ecore_mf.add_edge(1, 4, type = 'eStructuralFeatures')
G_test_small_ecore_mf.add_edge(1, 5, type = 'eStructuralFeatures')
G_test_small_ecore_mf.add_edge(1, 6, type = 'eStructuralFeatures')
G_test_small_ecore_mf.add_edge(4, 1, type = 'eType')
G_test_small_ecore_mf.add_edge(5, 1, type = 'eType')
G_test_small_ecore_mf.add_edge(6, 1, type = 'eType')
#G for yakindu
G_yak = nx.MultiDiGraph()
G_yak.add_node(0, type = 'Statechart')
G_yak.add_node(1, type = 'Region')
G_yak.add_node(2, type = 'Entry')
G_yak.add_node(3, type = 'State')
G_yak.add_node(4, type = 'Transition')
G_yak.add_edge(0, 1, type = 'regions')
G_yak.add_edge(1, 2, type = 'vertices')
G_yak.add_edge(1, 3, type = 'vertices')
G_yak.add_edge(2, 4, type = 'outgoingTransitions')
G_yak.add_edge(4, 2, type = 'source')
G_yak.add_edge(4, 3, type = 'target')
G_yak.add_edge(3, 4, type = 'incomingTransitions')
#G yak inconsistency
G_yak_inco = nx.MultiDiGraph()
G_yak_inco.add_node(0, type = 'Statechart')
G_yak_inco.add_node(1, type = 'Region')
G_yak_inco.add_node(2, type = 'State')
G_yak_inco.add_node(3, type = 'Transition')
G_yak_inco.add_node(4, type = 'Region')
G_yak_inco.add_edge(0, 1, type = 'regions')
G_yak_inco.add_edge(1, 2, type = 'vertices')
G_yak_inco.add_edge(2, 4, type = 'regions')
G_yak_inco.add_edge(2, 3, type = 'outgoingTransitions')
G_yak_inco.add_edge(3, 2, type = 'source')
G_yak_inco.add_edge(3, 2, type = 'target')
G_yak_inco.add_edge(2, 3, type = 'incomingTransitions')
#G yak inconsistency
G_yak_inco_2 = nx.MultiDiGraph()
G_yak_inco_2.add_node(0, type = 'Statechart')
G_yak_inco_2.add_node(1, type = 'Region')
G_yak_inco_2.add_node(2, type = 'Entry')
G_yak_inco_2.add_node(3, type = 'State')
G_yak_inco_2.add_node(4, type = 'Transition')
G_yak_inco_2.add_node(5, type = 'Entry')
G_yak_inco_2.add_node(6, type = 'Transition')
G_yak_inco_2.add_node(7, type = 'Choice')
G_yak_inco_2.add_node(8, type = 'Transition')
G_yak_inco_2.add_edge(0, 1, type = 'regions')
G_yak_inco_2.add_edge(1, 2, type = 'vertices')
G_yak_inco_2.add_edge(1, 3, type = 'vertices')
G_yak_inco_2.add_edge(1, 5, type = 'vertices')
G_yak_inco_2.add_edge(1, 7, type = 'vertices')
G_yak_inco_2.add_edge(2, 4, type = 'outgoingTransitions')
G_yak_inco_2.add_edge(4, 2, type = 'source')
G_yak_inco_2.add_edge(4, 3, type = 'target')
G_yak_inco_2.add_edge(3, 4, type = 'incomingTransitions')
G_yak_inco_2.add_edge(5, 6, type = 'outgoingTransitions')
G_yak_inco_2.add_edge(5, 6, type = 'source')
G_yak_inco_2.add_edge(6, 2, type = 'target')
G_yak_inco_2.add_edge(2, 6, type = 'incomingTransitions')
G_yak_inco_2.add_edge(5, 8, type = 'outgoingTransitions')
G_yak_inco_2.add_edge(5, 8, type = 'source')
G_yak_inco_2.add_edge(8, 7, type = 'target')
G_yak_inco_2.add_edge(7, 8, type = 'incomingTransitions')
#G for yakindu inconsistency
G_yak_inco_3 = nx.MultiDiGraph()
G_yak_inco_3.add_node(0, type = 'Statechart')
G_yak_inco_3.add_node(1, type = 'Region')
G_yak_inco_3.add_node(2, type = 'Entry')
G_yak_inco_3.add_node(3, type = 'State')
G_yak_inco_3.add_node(4, type = 'Exit')
G_yak_inco_3.add_node(5, type = 'Transition')
G_yak_inco_3.add_edge(0, 1, type = 'regions')
G_yak_inco_3.add_edge(1, 2, type = 'vertices')
G_yak_inco_3.add_edge(1, 3, type = 'vertices')
G_yak_inco_3.add_edge(4, 5, type = 'outgoingTransitions')
G_yak_inco_3.add_edge(5, 4, type = 'source')
G_yak_inco_3.add_edge(5, 3, type = 'target')
G_yak_inco_3.add_edge(3, 5, type = 'incomingTransitions')
#G for yakindu metafilter
G_yak_meta = nx.MultiDiGraph()
G_yak_meta.add_node(0, type = 'Statechart')
G_yak_meta.add_node(1, type = 'Region')
G_yak_meta.add_node(2, type = 'Entry')
G_yak_meta.add_node(3, type = 'State')
G_yak_meta.add_node(4, type = 'Transition')
G_yak_meta.add_edge(0, 1, type = 'regions')
G_yak_meta.add_edge(1, 2, type = 'vertices')
G_yak_meta.add_edge(1, 3, type = 'vertices')
G_yak_meta.add_edge(2, 4, type = 'outgoingTransitions')
G_yak_meta.add_edge(4, 3, type = 'target')
#G for yak for shapes
G_yak_shape = nx.MultiDiGraph()
G_yak_shape.add_node(2, type = 'Entry')
G_yak_shape.add_node(3, type = 'State')
G_yak_shape.add_node(4, type = 'Transition')
G_yak_shape.add_node(5, type = 'Transition')
G_yak_shape.add_node(6, type = 'Transition')
G_yak_shape.add_node(7, type = 'State')
G_yak_shape.add_edge(4, 2, type = 'source')
G_yak_shape.add_edge(4, 3, type = 'target')
G_yak_shape.add_edge(5, 3, type = 'source')
G_yak_shape.add_edge(5, 7, type = 'target')
G_yak_shape.add_edge(6, 7, type = 'source')
G_yak_shape.add_edge(6, 3, type = 'target')
#G for rds
G_rds = nx.MultiDiGraph()
G_rds.add_node(0, type = 'Database', atts = {'name':'model'})
G_rds.add_node(1, type = 'Table', atts = {'name':'CAMPUS_DEFAULTS'})
G_rds.add_node(2, type = 'Column', atts = {'name':'Name'})
G_rds.add_node(3, type = 'Column', atts = {'name':'column'})
G_rds.add_edge(0, 1, type = 'elements')
G_rds.add_edge(1, 2, type = 'columns')
G_rds.add_edge(1, 3, type = 'columns')
### RDS inconsistency
rds_inco = nx.MultiDiGraph()
rds_inco.add_node(0, type = 'Table')
rds_inco.add_node(1, type = 'Index')
rds_inco.add_node(2, type = 'IndexColumn')
rds_inco.add_node(3, type = 'Column')
rds_inco.add_node(4, type = 'Table')
rds_inco.add_node(9, type = 'Database')
rds_inco.add_edge(9, 0, type = 'elements')
rds_inco.add_edge(9, 6, type = 'elements')
rds_inco.add_edge(9, 8, type = 'elements')
rds_inco.add_edge(0, 1, type = 'indexes')
rds_inco.add_edge(1, 2, type = 'indexColumns')
rds_inco.add_edge(2, 3, type = 'column')
rds_inco.add_edge(4, 3, type = 'columns')
rds_inco.add_node(5, type = 'Column')
rds_inco.add_node(6, type = 'Reference')
rds_inco.add_node(7, type = 'Column')
rds_inco.add_node(8, type = 'Reference')
rds_inco.add_edge(0, 5, type = 'columns')
rds_inco.add_edge(0, 7, type = 'columns')
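# NOTE: node 9 was added above as 'Database'; networkx's add_node on an
# existing node updates its attributes, so the re-declaration below
# overwrites its type to 'Reference' (kept as in the original fixture).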
rds_inco.add_node(9, type = 'Reference')
rds_inco.add_edge(5, 6, type = 'primaryReferences')
rds_inco.add_edge(6, 7, type = 'foreignKeyColumns')
rds_inco.add_edge(6, 5, type = 'primaryKeyColumns')
rds_inco.add_edge(7, 6, type = 'foreignReferences')
rds_inco.add_edge(5, 8, type = 'primaryReferences')
rds_inco.add_edge(8, 7, type = 'foreignKeyColumns')
rds_inco.add_edge(8, 5, type = 'primaryKeyColumns')
rds_inco.add_edge(7, 8, type = 'foreignReferences')
rds_inco.add_edge(5, 9, type = 'primaryReferences')
rds_inco.add_edge(9, 5, type = 'foreignKeyColumns')
rds_inco.add_edge(9, 5, type = 'primaryKeyColumns')
rds_inco.add_edge(5, 9, type = 'foreignReferences')
#G ecore insconsistent
G_ecore_inco = nx.MultiDiGraph()
G_ecore_inco.add_node(0, type = 'EPackage')
G_ecore_inco.add_node(1, type = 'EClass')
G_ecore_inco.add_node(2, type = 'EDataType')
G_ecore_inco.add_node(3, type = 'EDataType')
G_ecore_inco.add_node(4, type = 'EReference')
G_ecore_inco.add_node(5, type = 'EReference')
G_ecore_inco.add_node(6, type = 'EReference')
G_ecore_inco.add_node(7, type = 'EClass')
G_ecore_inco.add_node(8, type = 'EClass')
G_ecore_inco.add_node(9, type = 'EAttribute')
G_ecore_inco.add_edge(0, 1, type = 'eClassifiers')
G_ecore_inco.add_edge(0, 2, type = 'eClassifiers')
G_ecore_inco.add_edge(0, 3, type = 'eClassifiers')
G_ecore_inco.add_edge(1, 0, type = 'ePackage')
G_ecore_inco.add_edge(2, 0, type = 'ePackage')
G_ecore_inco.add_edge(3, 0, type = 'ePackage')
G_ecore_inco.add_edge(0, 7, type = 'eClassifiers')
G_ecore_inco.add_edge(0, 8, type = 'eClassifiers')
G_ecore_inco.add_edge(7, 0, type = 'ePackage')
G_ecore_inco.add_edge(8, 0, type = 'ePackage')
G_ecore_inco.add_edge(1, 4, type = 'eStructuralFeatures')
G_ecore_inco.add_edge(1, 5, type = 'eStructuralFeatures')
G_ecore_inco.add_edge(1, 6, type = 'eStructuralFeatures')
G_ecore_inco.add_edge(4, 1, type = 'eContainingClass')
G_ecore_inco.add_edge(5, 1, type = 'eContainingClass')
G_ecore_inco.add_edge(6, 1, type = 'eContainingClass')
G_ecore_inco.add_edge(6, 8, type = 'eContainingClass')
G_ecore_inco.add_edge(9, 8, type = 'eContainingClass')
G_ecore_inco.add_edge(4, 1, type = 'eType')
G_ecore_inco.add_edge(5, 1, type = 'eType')
G_ecore_inco.add_edge(1, 7, type = 'eSuperTypes')
G_ecore_inco.add_edge(7, 8, type = 'eSuperTypes')
G_ecore_inco.add_edge(8, 1, type = 'eSuperTypes')
G_ecore_inco.add_edge(4, 5, type = 'eOpposite')
G_ecore_inco.add_edge(4, 4, type = 'eOpposite')
G_ecore_inco_2 = nx.MultiDiGraph()
G_ecore_inco_2.add_node(0, type = 'EPackage')
G_ecore_inco_2.add_node(1, type = 'EClass')
G_ecore_inco_2.add_node(2, type = 'EDataType')
G_ecore_inco_2.add_node(3, type = 'EDataType')
G_ecore_inco_2.add_node(4, type = 'EReference')
G_ecore_inco_2.add_node(5, type = 'EReference')
G_ecore_inco_2.add_node(6, type = 'EReference')
G_ecore_inco_2.add_node(9, type = 'EAttribute')
G_ecore_inco_2.add_node(7, type = 'EClass')
G_ecore_inco_2.add_node(8, type = 'EClass')
G_ecore_inco_2.add_edge(0, 1, type = 'eClassifiers')
G_ecore_inco_2.add_edge(0, 2, type = 'eClassifiers')
G_ecore_inco_2.add_edge(0, 3, type = 'eClassifiers')
G_ecore_inco_2.add_edge(1, 0, type = 'ePackage')
G_ecore_inco_2.add_edge(2, 0, type = 'ePackage')
G_ecore_inco_2.add_edge(3, 0, type = 'ePackage')
G_ecore_inco_2.add_edge(0, 7, type = 'eClassifiers')
G_ecore_inco_2.add_edge(0, 8, type = 'eClassifiers')
G_ecore_inco_2.add_edge(7, 0, type = 'ePackage')
G_ecore_inco_2.add_edge(8, 0, type = 'ePackage')
G_ecore_inco_2.add_edge(1, 4, type = 'eStructuralFeatures')
G_ecore_inco_2.add_edge(7, 5, type = 'eStructuralFeatures')
G_ecore_inco_2.add_edge(8, 6, type = 'eStructuralFeatures')
G_ecore_inco_2.add_edge(8, 9, type = 'eStructuralFeatures')
G_ecore_inco_2.add_edge(4, 1, type = 'eContainingClass')
G_ecore_inco_2.add_edge(5, 7, type = 'eContainingClass')
G_ecore_inco_2.add_edge(6, 8, type = 'eContainingClass')
G_ecore_inco_2.add_edge(9, 8, type = 'eContainingClass')
G_ecore_inco_2.add_edge(4, 2, type = 'eType')
G_ecore_inco_2.add_edge(5, 8, type = 'eType')
G_ecore_inco_2.add_edge(6, 1, type = 'eType')
G_ecore_inco_2.add_edge(9, 1, type = 'eType')
G_ecore_inco_2.add_edge(1, 7, type = 'eSuperTypes')
G_ecore_inco_2.add_edge(7, 8, type = 'eSuperTypes')
G_ecore_inco_2.add_edge(8, 1, type = 'eSuperTypes')
G_ecore_inco_2.add_edge(4, 5, type = 'eOpposite')
G_ecore_inco_2.add_edge(5, 4, type = 'eOpposite')
G_ecore_inco_correct = nx.MultiDiGraph()
G_ecore_inco_correct.add_node(0, type = 'EPackage')
G_ecore_inco_correct.add_node(1, type = 'EClass')
G_ecore_inco_correct.add_node(2, type = 'EDataType')
G_ecore_inco_correct.add_node(3, type = 'EDataType')
G_ecore_inco_correct.add_node(4, type = 'EReference')
G_ecore_inco_correct.add_node(5, type = 'EReference')
G_ecore_inco_correct.add_node(6, type = 'EReference')
G_ecore_inco_correct.add_node(7, type = 'EClass')
G_ecore_inco_correct.add_node(8, type = 'EClass')
G_ecore_inco_correct.add_edge(0, 1, type = 'eClassifiers')
G_ecore_inco_correct.add_edge(0, 2, type = 'eClassifiers')
G_ecore_inco_correct.add_edge(0, 3, type = 'eClassifiers')
G_ecore_inco_correct.add_edge(1, 0, type = 'ePackage')
G_ecore_inco_correct.add_edge(2, 0, type = 'ePackage')
G_ecore_inco_correct.add_edge(3, 0, type = 'ePackage')
G_ecore_inco_correct.add_edge(0, 7, type = 'eClassifiers')
G_ecore_inco_correct.add_edge(0, 8, type = 'eClassifiers')
G_ecore_inco_correct.add_edge(7, 0, type = 'ePackage')
G_ecore_inco_correct.add_edge(8, 0, type = 'ePackage')
G_ecore_inco_correct.add_edge(1, 4, type = 'eStructuralFeatures')
G_ecore_inco_correct.add_edge(7, 5, type = 'eStructuralFeatures')
G_ecore_inco_correct.add_edge(8, 6, type = 'eStructuralFeatures')
G_ecore_inco_correct.add_edge(4, 1, type = 'eContainingClass')
G_ecore_inco_correct.add_edge(5, 7, type = 'eContainingClass')
G_ecore_inco_correct.add_edge(6, 8, type = 'eContainingClass')
G_ecore_inco_correct.add_edge(4, 7, type = 'eType')
G_ecore_inco_correct.add_edge(5, 1, type = 'eType')
G_ecore_inco_correct.add_edge(6, 1, type = 'eType')
G_ecore_inco_correct.add_edge(4, 5, type = 'eOpposite')
G_ecore_inco_correct.add_edge(5, 4, type = 'eOpposite')
##### graphs for test edits
##########################graph2 - add a reference in the same class
G_wo_ref_itself = nx.MultiDiGraph()
G_wo_ref_itself.add_node(0, type = 'EPackage')
G_wo_ref_itself.add_node(1, type = 'EClass', ids = {0,1})
G_wo_ref_itself.add_node(4, type = 'EReference')
G_wo_ref_itself.add_node(5, type = 'EReference')
G_wo_ref_itself.add_node(6, type = 'EReference')
G_wo_ref_itself.add_edge(0, 1, type = 'eClassifiers')
G_wo_ref_itself.add_edge(1, 0, type = 'ePackage')
G_wo_ref_itself.add_edge(1, 4, type = 'eStructuralFeatures')
G_wo_ref_itself.add_edge(1, 5, type = 'eStructuralFeatures')
G_wo_ref_itself.add_edge(1, 6, type = 'eStructuralFeatures')
G_wo_ref_itself.add_edge(4, 1, type = 'eContainingClass')
G_wo_ref_itself.add_edge(5, 1, type = 'eContainingClass')
G_wo_ref_itself.add_edge(6, 1, type = 'eContainingClass')
G_wo_ref_itself.add_edge(4, 1, type = 'eType')
G_wo_ref_itself.add_edge(5, 1, type = 'eType')
G_wo_ref_itself.add_edge(6, 1, type = 'eType')
############################ add a reference that connects two classes
G_wo_ref = nx.MultiDiGraph()
G_wo_ref.add_node(0, type = 'EPackage')
G_wo_ref.add_node(1, type = 'EClass', ids = {0})
G_wo_ref.add_node(2, type = 'EClass', ids = {1})
G_wo_ref.add_node(4, type = 'EReference')
G_wo_ref.add_node(5, type = 'EReference')
G_wo_ref.add_node(6, type = 'EReference')
G_wo_ref.add_edge(0, 1, type = 'eClassifiers')
G_wo_ref.add_edge(1, 0, type = 'ePackage')
G_wo_ref.add_edge(0, 2, type = 'eClassifiers')
G_wo_ref.add_edge(2, 0, type = 'ePackage')
G_wo_ref.add_edge(1, 4, type = 'eStructuralFeatures')
G_wo_ref.add_edge(1, 5, type = 'eStructuralFeatures')
G_wo_ref.add_edge(1, 6, type = 'eStructuralFeatures')
G_wo_ref.add_edge(4, 1, type = 'eContainingClass')
G_wo_ref.add_edge(5, 1, type = 'eContainingClass')
G_wo_ref.add_edge(6, 1, type = 'eContainingClass')
G_wo_ref.add_edge(4, 1, type = 'eType')
G_wo_ref.add_edge(5, 1, type = 'eType')
G_wo_ref.add_edge(6, 1, type = 'eType')
############################ to check false
G_with_one_id = nx.MultiDiGraph()
G_with_one_id.add_node(0, type = 'EPackage')
G_with_one_id.add_node(1, type = 'EClass', ids = {0})
G_with_one_id.add_node(2, type = 'EClass')
G_with_one_id.add_node(4, type = 'EReference')
G_with_one_id.add_node(5, type = 'EReference')
G_with_one_id.add_node(6, type = 'EReference')
G_with_one_id.add_edge(0, 1, type = 'eClassifiers')
G_with_one_id.add_edge(1, 0, type = 'ePackage')
G_with_one_id.add_edge(0, 2, type = 'eClassifiers')
G_with_one_id.add_edge(2, 0, type = 'ePackage')
G_with_one_id.add_edge(1, 4, type = 'eStructuralFeatures')
G_with_one_id.add_edge(1, 5, type = 'eStructuralFeatures')
G_with_one_id.add_edge(1, 6, type = 'eStructuralFeatures')
G_with_one_id.add_edge(4, 1, type = 'eContainingClass')
G_with_one_id.add_edge(5, 1, type = 'eContainingClass')
G_with_one_id.add_edge(6, 1, type = 'eContainingClass')
G_with_one_id.add_edge(4, 1, type = 'eType')
G_with_one_id.add_edge(5, 1, type = 'eType')
G_with_one_id.add_edge(6, 1, type = 'eType')
############################ to check false
G_with_nonsenseid = nx.MultiDiGraph()
G_with_nonsenseid.add_node(0, type = 'EPackage')
G_with_nonsenseid.add_node(1, type = 'EClass', ids = {0})
G_with_nonsenseid.add_node(2, type = 'EClass')
G_with_nonsenseid.add_node(4, type = 'EReference', ids = {1})
G_with_nonsenseid.add_node(5, type = 'EReference')
G_with_nonsenseid.add_node(6, type = 'EReference')
G_with_nonsenseid.add_edge(0, 1, type = 'eClassifiers')
G_with_nonsenseid.add_edge(1, 0, type = 'ePackage')
G_with_nonsenseid.add_edge(0, 2, type = 'eClassifiers')
G_with_nonsenseid.add_edge(2, 0, type = 'ePackage')
G_with_nonsenseid.add_edge(1, 4, type = 'eStructuralFeatures')
G_with_nonsenseid.add_edge(1, 5, type = 'eStructuralFeatures')
G_with_nonsenseid.add_edge(1, 6, type = 'eStructuralFeatures')
G_with_nonsenseid.add_edge(4, 1, type = 'eContainingClass')
G_with_nonsenseid.add_edge(5, 1, type = 'eContainingClass')
G_with_nonsenseid.add_edge(6, 1, type = 'eContainingClass')
G_with_nonsenseid.add_edge(4, 1, type = 'eType')
G_with_nonsenseid.add_edge(5, 1, type = 'eType')
G_with_nonsenseid.add_edge(6, 1, type = 'eType')
#########################graph5
G_expected_added_ref_itself = nx.MultiDiGraph()
G_expected_added_ref_itself.add_node(0, type = 'EPackage')
G_expected_added_ref_itself.add_node(1, type = 'EClass')
G_expected_added_ref_itself.add_node(4, type = 'EReference')
G_expected_added_ref_itself.add_node(5, type = 'EReference')
G_expected_added_ref_itself.add_node(6, type = 'EReference')
G_expected_added_ref_itself.add_node(7, type = 'EReference')
G_expected_added_ref_itself.add_edge(0, 1, type = 'eClassifiers')
G_expected_added_ref_itself.add_edge(1, 0, type = 'ePackage')
G_expected_added_ref_itself.add_edge(1, 4, type = 'eStructuralFeatures')
G_expected_added_ref_itself.add_edge(1, 5, type = 'eStructuralFeatures')
G_expected_added_ref_itself.add_edge(1, 6, type = 'eStructuralFeatures')
G_expected_added_ref_itself.add_edge(1, 7, type = 'eStructuralFeatures')
G_expected_added_ref_itself.add_edge(4, 1, type = 'eContainingClass')
G_expected_added_ref_itself.add_edge(5, 1, type = 'eContainingClass')
G_expected_added_ref_itself.add_edge(6, 1, type = 'eContainingClass')
G_expected_added_ref_itself.add_edge(7, 1, type = 'eContainingClass')
G_expected_added_ref_itself.add_edge(4, 1, type = 'eType')
G_expected_added_ref_itself.add_edge(5, 1, type = 'eType')
G_expected_added_ref_itself.add_edge(6, 1, type = 'eType')
G_expected_added_ref_itself.add_edge(7, 1, type = 'eType')
############################ add a reference that connects two classes
G_expected_added_ref = nx.MultiDiGraph()
G_expected_added_ref.add_node(0, type = 'EPackage')
G_expected_added_ref.add_node(1, type = 'EClass')
G_expected_added_ref.add_node(2, type = 'EClass')
G_expected_added_ref.add_node(4, type = 'EReference')
G_expected_added_ref.add_node(5, type = 'EReference')
G_expected_added_ref.add_node(6, type = 'EReference')
G_expected_added_ref.add_node(7, type = 'EReference')
G_expected_added_ref.add_edge(0, 1, type = 'eClassifiers')
G_expected_added_ref.add_edge(1, 0, type = 'ePackage')
G_expected_added_ref.add_edge(0, 2, type = 'eClassifiers')
G_expected_added_ref.add_edge(2, 0, type = 'ePackage')
G_expected_added_ref.add_edge(1, 4, type = 'eStructuralFeatures')
G_expected_added_ref.add_edge(1, 5, type = 'eStructuralFeatures')
G_expected_added_ref.add_edge(1, 6, type = 'eStructuralFeatures')
G_expected_added_ref.add_edge(1, 7, type = 'eStructuralFeatures')
G_expected_added_ref.add_edge(4, 1, type = 'eContainingClass')
G_expected_added_ref.add_edge(5, 1, type = 'eContainingClass')
G_expected_added_ref.add_edge(6, 1, type = 'eContainingClass')
G_expected_added_ref.add_edge(7, 1, type = 'eContainingClass')
G_expected_added_ref.add_edge(4, 1, type = 'eType')
G_expected_added_ref.add_edge(5, 1, type = 'eType')
G_expected_added_ref.add_edge(6, 1, type = 'eType')
G_expected_added_ref.add_edge(7, 2, type = 'eType')
############################ add eSuperType
G_expected_added_super = nx.MultiDiGraph()
G_expected_added_super.add_node(0, type = 'EPackage')
G_expected_added_super.add_node(1, type = 'EClass')
G_expected_added_super.add_node(2, type = 'EClass')
G_expected_added_super.add_node(4, type = 'EReference')
G_expected_added_super.add_node(5, type = 'EReference')
G_expected_added_super.add_node(6, type = 'EReference')
G_expected_added_super.add_edge(0, 1, type = 'eClassifiers')
G_expected_added_super.add_edge(1, 0, type = 'ePackage')
G_expected_added_super.add_edge(0, 2, type = 'eClassifiers')
G_expected_added_super.add_edge(2, 0, type = 'ePackage')
G_expected_added_super.add_edge(1, 4, type = 'eStructuralFeatures')
G_expected_added_super.add_edge(1, 5, type = 'eStructuralFeatures')
G_expected_added_super.add_edge(1, 6, type = 'eStructuralFeatures')
G_expected_added_super.add_edge(4, 1, type = 'eContainingClass')
G_expected_added_super.add_edge(5, 1, type = 'eContainingClass')
G_expected_added_super.add_edge(6, 1, type = 'eContainingClass')
G_expected_added_super.add_edge(4, 1, type = 'eType')
G_expected_added_super.add_edge(5, 1, type = 'eType')
G_expected_added_super.add_edge(6, 1, type = 'eType')
G_expected_added_super.add_edge(1, 2, type = 'eSuperTypes')
################ initial graph
G_initial = nx.MultiDiGraph()
G_initial.add_node(0, type = 'EPackage')
G_initial.add_node(1, type = 'EClass')
G_initial.add_edge(0, 1, type = 'eClassifiers')
G_initial.add_edge(1, 0, type = 'ePackage')
############### for graph2sequence
G_g2s = nx.MultiDiGraph()
G_g2s.add_node(0, type = 'EPackage')
G_g2s.add_node(1, type = 'EClass')
G_g2s.add_node(2, type = 'EClass')
G_g2s.add_node(4, type = 'EReference')
G_g2s.add_edge(0, 1, type = 'eClassifiers')
G_g2s.add_edge(1, 0, type = 'ePackage')
G_g2s.add_edge(0, 2, type = 'eClassifiers')
G_g2s.add_edge(2, 0, type = 'ePackage')
G_g2s.add_edge(1, 4, type = 'eStructuralFeatures')
G_g2s.add_edge(4, 1, type = 'eContainingClass')
G_g2s.add_edge(4, 1, type = 'eType')
G_g2s.add_edge(1, 2, type = 'eSuperTypes')
############### for graph2sequence with inv
G_g2s_inv = nx.MultiDiGraph()
G_g2s_inv.add_node(0, type = 'EPackage')
G_g2s_inv.add_node(1, type = 'EClass')
G_g2s_inv.add_node(2, type = 'EClass')
G_g2s_inv.add_node(4, type = 'EReference')
G_g2s_inv.add_edge(0, 1, type = 'eClassifiers')
G_g2s_inv.add_edge(1, 0, type = 'ePackage')
G_g2s_inv.add_edge(0, 2, type = 'eClassifiers')
G_g2s_inv.add_edge(2, 0, type = 'ePackage')
G_g2s_inv.add_edge(1, 4, type = 'eStructuralFeatures')
G_g2s_inv.add_edge(4, 1, type = 'eContainingClass')
G_g2s_inv.add_edge(4, 1, type = 'eType')
G_g2s_inv.add_edge(1, 2, type = 'eSuperTypes')
G_g2s_inv.add_edge(1, 4, type = 'eType_inv')
G_g2s_inv.add_edge(2, 1, type = 'eSuperTypes_inv')
############### for testEcoregraph
G_testEcore = nx.MultiDiGraph()
G_testEcore.add_node(0, type = 'EPackage')
G_testEcore.add_node(1, type = 'EClass')
G_testEcore.add_node(2, type = 'EClass')
G_testEcore.add_node(3, type = 'EClass')
G_testEcore.add_edge(0, 1, type = 'eClassifiers')
G_testEcore.add_edge(1, 0, type = 'ePackage')
G_testEcore.add_edge(0, 2, type = 'eClassifiers')
G_testEcore.add_edge(2, 0, type = 'ePackage')
G_testEcore.add_edge(0, 3, type = 'eClassifiers')
G_testEcore.add_edge(3, 0, type = 'ePackage')
G_testEcore.add_edge(1, 2, type = 'eSuperTypes')
################ patterns for add a reference
pattern1_ref = nx.MultiDiGraph()
pattern1_ref.add_node(0, type = ['EClass'], ids = {0,1})
pattern1_ref.add_node(1, type = 'EReference')
pattern1_ref.add_edge(0, 1, type = 'eStructuralFeatures')
pattern1_ref.add_edge(1, 0, type = 'eContainingClass')
pattern1_ref.add_edge(1, 0, type = 'eType')
pattern2_ref = nx.MultiDiGraph()
pattern2_ref.add_node(0, type = ['EClass'], ids = {0})
pattern2_ref.add_node(1, type = 'EReference')
pattern2_ref.add_node(2, type = ['EClass'], ids = {1})
pattern2_ref.add_edge(0, 1, type = 'eStructuralFeatures')
pattern2_ref.add_edge(1, 0, type = 'eContainingClass')
pattern2_ref.add_edge(1, 2, type = 'eType')
################ patterns for add eSuperTypes
pattern1_st = nx.MultiDiGraph()
pattern1_st.add_node(0, type = ['EClass'], ids = {0})
pattern1_st.add_node(1, type = ['EClass'], ids = {1})
pattern1_st.add_edge(0, 1, type = 'eSuperTypes')
################ patterns for addClass
pattern1_ac = nx.MultiDiGraph()
pattern1_ac.add_node(0, type = ['EPackage'], ids = {0})
pattern1_ac.add_node(1, type = 'EClass')
pattern1_ac.add_edge(0, 1, type = 'eClassifiers')
pattern1_ac.add_edge(1, 0, type = 'ePackage')
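# Hedged usage sketch (not part of the original fixtures): patterns such as
# pattern1_ac can be located in a host graph with networkx's VF2 matcher.
# Pattern nodes store `type` as a list of allowed types while host nodes
# store a plain string, so the node matcher below checks membership; the
# `ids` sets on pattern nodes are ignored here.
if __name__ == '__main__':
    from networkx.algorithms import isomorphism
    def node_match(host_attrs, pattern_attrs):
        allowed = pattern_attrs.get('type')
        host_type = host_attrs.get('type')
        if isinstance(allowed, list):
            return host_type in allowed
        return host_type == allowed
    edge_match = isomorphism.categorical_multiedge_match('type', None)
    gm = isomorphism.MultiDiGraphMatcher(G_initial, pattern1_ac,
                                         node_match=node_match,
                                         edge_match=edge_match)
    # G_initial is exactly an EPackage <-> EClass pair, so this prints True.
    print(gm.subgraph_is_isomorphic())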
|
{"hexsha": "3cb678c32c6aaeac7f456b4836d5d0a999029dbb", "size": 27007, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/graphs4test.py", "max_stars_repo_name": "Antolin1/DMG-Python", "max_stars_repo_head_hexsha": "ba3942e13006e1a32f3fe9f1b29615311f667274", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/graphs4test.py", "max_issues_repo_name": "Antolin1/DMG-Python", "max_issues_repo_head_hexsha": "ba3942e13006e1a32f3fe9f1b29615311f667274", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/graphs4test.py", "max_forks_repo_name": "Antolin1/DMG-Python", "max_forks_repo_head_hexsha": "ba3942e13006e1a32f3fe9f1b29615311f667274", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5492307692, "max_line_length": 79, "alphanum_fraction": 0.7041507757, "include": true, "reason": "import networkx", "num_tokens": 9194}
|
# Minimum spanning tree (MST) algorithms
#
# Long Le
# University of Illinois
#
import numpy as np
import matplotlib.pyplot as plt
from pqdict import minpq
class Node:
def __init__(self,x):
self.val = x
class Edge:
def __init__(self,n0,n1,w):
self.ePts = set([n0,n1]) # end points
self.w = w # weight
# union-find set
class UnionFindSet:
def __init__(self,iterable):
self.memMap = {} # membership map
self.id = 0 # membership ID
for item in iterable:
self.memMap[item] = self.id
self.id += 1
def find(self,a):
return self.memMap.get(a,None)
def union(self,a,b):
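        # Naive O(n) union: relabel every member of either set with a fresh id.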
aId = self.memMap[a]
bId = self.memMap[b]
for key,val in self.memMap.items():
if val == aId or val == bId:
self.memMap[key] = self.id # new id
self.id += 1
def Prim(nodes,edges):
# https://en.wikipedia.org/wiki/Prim's_algorithm
C = minpq() # cheapest cost of a connection to a node
E = {} # the edge providing that cheapest connection
for node in nodes:
C[node] = np.infty
E[node] = None
# forest
edgesF = set()
while len(C) > 0:
node = C.pop()
        if E[node] is not None:
edgesF.add(E[node])
for edge in edges:
u,v = edge.ePts
if u == node:
ngb = v
elif v == node:
ngb = u
else:
continue
if ngb in C:
if edge.w < C[ngb]:
C[ngb] = edge.w
E[ngb] = edge
return edgesF
def Kruskal(nodes,edges):
# https://en.wikipedia.org/wiki/Kruskal's_algorithm
# sort all edges in non-decreasing order of weights
edgesSort = minpq()
for edge in edges:
edgesSort[edge] = edge.w
edgesF = set()
ufset = UnionFindSet(nodes)
for edge in edgesSort.popkeys():
if len(edgesF) == len(nodes)-1:
break
u,v = edge.ePts
if ufset.find(u) != ufset.find(v):
edgesF.add(edge)
ufset.union(u,v)
return edgesF
def visualize(nodes,edges,locMap):
plt.figure(figsize=(10,10))
for node in nodes:
loc = locMap[node]
plt.scatter(loc[0],loc[1],lw=32)
plt.annotate(str(node.val),xy=loc,xytext=(loc[0]+.1,loc[1]+.1),fontsize=15)
for edge in edges:
u,v = edge.ePts
locU = locMap[u]
locV = locMap[v]
#print('loc = %s' % loc)
add_label(plt.plot([locU[0],locV[0]],[locU[1],locV[1]])[0],'%.2f' % edge.w)
    plt.gca().axison = False
plt.show()
return
def add_label(line,label,size=20,color=None):
if color is None:
color = line.get_color()
xdata = line.get_xdata()
ydata = line.get_ydata()
xStart = np.percentile(xdata,55)
xEnd = np.percentile(xdata,45)
yStart = np.percentile(ydata,55)
yEnd = np.percentile(ydata,45)
    line.axes.annotate(label,
            xy=((xEnd+xStart)/2,(yEnd+yStart)/2),size=size,color=color)
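# Minimal usage sketch, assuming undirected edges and hashable Node objects
# (both hold for the classes above); the 4-node graph is illustrative only.
if __name__ == '__main__':
    ns = [Node(i) for i in range(4)]
    es = [Edge(ns[0],ns[1],1.0), Edge(ns[1],ns[2],2.0), Edge(ns[2],ns[3],0.5),
          Edge(ns[0],ns[3],4.0), Edge(ns[0],ns[2],3.0)]
    mstP = Prim(ns,es)
    mstK = Kruskal(ns,es)
    # both trees have |V|-1 = 3 edges; on this graph they share total weight 3.5
    print(sum(e.w for e in mstP), sum(e.w for e in mstK))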
|
{"hexsha": "6848c2158450215e865a8d574ca8945e81297549", "size": 3078, "ext": "py", "lang": "Python", "max_stars_repo_path": "mst/mst.py", "max_stars_repo_name": "longle2718/randProbs", "max_stars_repo_head_hexsha": "f22735dd14cc31bf459aafd06367a68bac89df68", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-01-19T22:30:27.000Z", "max_stars_repo_stars_event_max_datetime": "2017-01-19T22:30:27.000Z", "max_issues_repo_path": "mst/mst.py", "max_issues_repo_name": "longle2718/fundaCS", "max_issues_repo_head_hexsha": "f22735dd14cc31bf459aafd06367a68bac89df68", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mst/mst.py", "max_forks_repo_name": "longle2718/fundaCS", "max_forks_repo_head_hexsha": "f22735dd14cc31bf459aafd06367a68bac89df68", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.624, "max_line_length": 83, "alphanum_fraction": 0.5454840806, "include": true, "reason": "import numpy", "num_tokens": 866}
|
import tensorflow as tf
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.model_selection import KFold
from bayes_opt import BayesianOptimization
from tqdm import tqdm
from attrdict import AttrDict
from sklearn.metrics import classification_report, log_loss
import functools
import inspect
import gc
import csv
import contextlib
from timeit import default_timer as timer
import pandas as pd
import numpy as np
from .base_dataloader import DataLoader
tf.logging.set_verbosity(tf.logging.WARN)
class MeanPoolModel(BaseEstimator, ClassifierMixin):
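    """Sketch of intent, inferred from the code below rather than documented
    upstream: mean-pool pronoun/A/B span vectors from two encoders, concatenate
    them with a pretrained feature vector, and score a 3-way softmax
    (A, B, NEITHER) through a small dense head with dropout, batch
    normalization and label smoothing."""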
def __init__(self, *args, ckpt_path='best_model', **kwargs):
self.ckpt_path = ckpt_path
self.init_graph(*args, **kwargs)
def init_graph(self, X, batch_size, **model_params):
self.batch_size = batch_size
n_dim_x = len(X[0].values[0])
n_dim_q = len(X[1].values[0])
n_dim_p = len(X[2].values[0])
n_dim_c = len(X[3].values[0])
# n_dim_c = len(X[3].values[0][0])
        # Keep only kwargs that create_graph accepts (train_evaluate* and
        # hyperopt also forward training-only params such as dropout rates),
        # and coerce sampled hidden sizes to ints.
        valid = set(inspect.signature(self.create_graph).parameters)
        model_params = {k: (int(v) if k.startswith('n_hidden') else v)
                        for k, v in model_params.items() if k in valid}
self.model = self.create_graph(
None,
n_dim_x=n_dim_x,
n_dim_q=n_dim_q,
n_dim_p=n_dim_p,
n_dim_c=n_dim_c,
**model_params)
self.init = tf.initializers.global_variables()
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
self.sess = sess = tf.Session(config=config)
self.saver = tf.train.Saver()
def fit(self,
X,
y=None,
X_val=None,
y_val=None,
batch_size=32,
n_epochs=50,
patience=5,
dropout_rate_l0=0.5,
dropout_rate_l1=0.5,
dropout_rate_l2=0.5,
dropout_rate_l3=0.5,
            verbose=1,
            **unused_kwargs):  # absorbs architecture params forwarded by train_evaluate*
start = timer()
sess = self.sess
        sess.run(self.init)
print('model creation time: ', timer()-start)
start = timer()
train_dl = DataLoader(X, batch_size, shuffle=True)
print('data loader time: ', timer()-start)
start = timer()
model = self.model
best_score = 2
best_probs = None
since_best = 0
for epoch in range(n_epochs):
pbar = tqdm(desc='Trn', total=len(X)+len(X_val)) if verbose else contextlib.suppress()
with pbar:
loss = []
for idx, (batch_x, batch_q, batch_p, batch_y, seq_lens) in enumerate(train_dl):
loss_, _ = sess.run([model.loss,
model.train_op],
feed_dict={model.d_X: batch_x,
model.d_Q: batch_q,
model.d_P: batch_p,
model.d_y: batch_y,
model.d_seq_lens: seq_lens,
model.d_dropout_l0: dropout_rate_l0,
model.d_dropout_l1: dropout_rate_l1,
model.d_dropout_l2: dropout_rate_l2,
model.d_dropout_l3: dropout_rate_l3})
loss.append(loss_)
if verbose:
pbar.set_description('Trn {:2d}, Loss={:.3f}, Val-Loss={:.3f}'.format(epoch, np.mean(loss), np.inf))
pbar.update(len(batch_x))
trn_loss = np.mean(loss)
loss, y_true, probs = self.predict(X_val, epoch=epoch, trn_loss=trn_loss, pbar=pbar)
score = log_loss(np.array(y_true), np.array(probs))
if score < best_score:
best_score = score
best_probs = probs
since_best = 0
self.saver.save(sess, 'tmp/best_model.ckpt')
else:
since_best += 1
if verbose:
pbar.set_description('Trn {:2d}, Loss={:.3f}, Val-Loss={:.3f}'.format(epoch, trn_loss, score))
pbar.update(len(X_val))
if since_best > patience:
break
if verbose:
print('Best score on validation set: ', best_score)
self.best_score = best_score
self.saver.restore(self.sess, 'tmp/best_model.ckpt')
return self
def predict(self,
X,
y=None,
epoch=None,
trn_loss=None,
pbar=None,
                verbose=1,
                **unused_kwargs):  # tolerates extra params forwarded by callers
# if verbose and pbar is None:
# pbar_ = pbar = tqdm(desc='Predict', total=len(X))
# else:
# pbar_ = contextlib.suppress()
# with pbar_:
test_dl = DataLoader(X, self.batch_size)
loss, y_true, probs = [], [], []
for idx, (batch_x, batch_q, batch_p, batch_y, seq_lens) in enumerate(test_dl):
loss_, y_true_, probs_ = self.sess.run([self.model.loss,
self.model.d_y,
self.model.probs],
feed_dict={self.model.d_X:batch_x,
self.model.d_Q: batch_q,
self.model.d_P: batch_p,
self.model.d_y: batch_y,
self.model.d_seq_lens: seq_lens,
self.model.d_dropout_l0: 0.0,
self.model.d_dropout_l1: 0.0,
self.model.d_dropout_l2: 0.0,
self.model.d_dropout_l3: 0.0})
loss.append(loss_)
y_true += y_true_.tolist()
probs += probs_.tolist()
# if verbose:
# if trn_loss is not None:
# pbar.set_description('Trn {:2d}, Loss={:.3f}, Val-Loss={:.3f}'.format(epoch, trn_loss, np.mean(loss)))
# else:
# pbar.set_description('Predict, Loss={:.3f}'.format(np.mean(loss)))
#
# pbar.update(len(batch_x))
return loss, np.array(y_true), probs
def train_evaluate(self,
X,
X_val=None,
X_tst=None,
batch_size=32,
verbose=1,
return_probs=False,
n_trials=None,
**parameters):
self.init_graph(X, batch_size, device='gpu', **parameters)
self.fit(X,
X_val=X_val,
verbose=verbose,
batch_size=batch_size,
**parameters)
_, y_true_val, probs_val = self.predict(X_val, batch_size, verbose=verbose, **parameters)
probs_tst = None
if X_tst is not None:
_, y_true_tst, probs_tst = self.predict(X_tst, batch_size, verbose=verbose, **parameters)
if verbose:
print('Validation score: ', self.best_score)
if X_tst is not None:
print('Test score: ', log_loss(y_true_tst, probs_tst))
if return_probs:
return AttrDict(locals())
return -self.best_score
def train_evaluate_cv(self,
X,
X_val=None,
X_tst=[],
n_folds=5,
n_trials=1,
batch_size=32,
seed=None,
verbose=1,
return_probs=True,
**parameters):
if X_val is not None:
X = pd.concat([X, X_val], axis = 0).reset_index(drop=True)
# self.init_graph(X, batch_size, **parameters)
folds = KFold(n_splits=n_folds, random_state=seed, shuffle=True)
probs = [[] for X in X_tst]
y_true = None
scores = []
for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
start = timer()
X_trn, X_val = X.loc[train_index], X.loc[valid_index]
scores_ = []
for i in range(n_trials):
self.init_graph(X, batch_size, **parameters)
self.fit(X_trn, X_val=X_val, verbose=verbose, batch_size=batch_size, **parameters)
scores_.append(self.best_score)
for j, X_ in enumerate(X_tst):
_, y_true_, probs_ = self.predict(X_, batch_size, verbose=verbose, **parameters)
if j==0:
y_true = y_true_
probs[j].append(probs_)
scores.append(np.mean(scores_))
if verbose:
print('Fold {} done in {}'.format(fold_n, timer()-start))
start = timer()
probs_raw = probs.copy()
probs = np.mean(probs[0], axis=0)
score = log_loss(y_true, probs)
if verbose > 0:
print('Validation scores: ', scores)
print('Mean val score: {} +/- {}'.format(np.mean(scores), np.std(scores)))
if X_tst is not None:
print('Test score: ', score)
if return_probs:
return AttrDict(locals())
return -score
def repeated_cv(self,
X,
X_val=None,
X_tst=None,
n_trials=5,
                    seed=None,
                    verbose=1,
                    return_probs=True,
                    **kwargs):
        if not isinstance(seed, (list, tuple)):
            # broadcast a single seed (or None) across all trials
            seed = [seed]*n_trials
probs = []
probs_raw = []
scores = []
for i in range(n_trials):
start = timer()
res = self.train_evaluate_cv(X,
X_val,
X_tst,
                                         seed=seed[i],
                                         verbose=verbose,
                                         return_probs=True,
                                         **kwargs)
probs.append(res.probs)
probs_raw += res.probs_raw
y_true = res.y_true
scores.append(res.score)
if verbose:
print('Trial {} done in {}'.format(i, timer()-start))
start = timer()
probs = np.mean(probs, axis=0)
if return_probs:
print('Repeated bag scores: ', scores)
print('Repeated bag mean: {} +/- {}'.format(np.mean(scores), np.std(scores)))
print('CV Bagged score: ', log_loss(y_true, probs))
return AttrDict(locals())
return -log_loss(y_true, probs)
def hyperopt(self,
fn,
X,
X_val=None,
X_tst=None,
n_trials=5,
init_points=5,
n_iter=20,
batch_size=64,
params={'n_hidden_l1': (32, 256),
'n_hidden_l2': (32, 256),
'dropout_rate': (.1, .8)},
verbose=0,
seed=None,
**kwargs):
self.bo = bo = BayesianOptimization(
f=functools.partial(fn,
X,
X_val,
X_tst,
n_trials=n_trials,
batch_size=batch_size,
verbose=verbose-1,
seed=seed,
return_probs=False),
pbounds=params,
# random_state=1,
**kwargs
)
bo.maximize(
init_points=init_points,
n_iter=n_iter,
)
print(bo.max)
def create_graph(self,
batch_size=None,
seq_len=None,
n_hidden_x=1024,
n_hidden_q=1024,
n_hidden_p=1024,
n_hidden_l1=59,
n_hidden_l2=59,
n_hidden_l3=59,
l2_l1=0.05,
l2_l2=0.05,
l2_l3=0.05,
label_smoothing=0.1,
data_type=tf.float32,
activation='tanh'):
# np.random.seed(0)
# tf.set_random_seed(0)
tf.reset_default_graph()
d_X = tf.placeholder(data_type, [batch_size, n_hidden_x])
d_Q = tf.placeholder(data_type, [batch_size, n_hidden_q])
d_P = tf.placeholder(data_type, [batch_size, n_hidden_p])
d_y = tf.placeholder(tf.int32, [batch_size, 3])
d_seq_lens = tf.placeholder(tf.int32, [batch_size])
d_dropout_l0 = tf.placeholder(tf.float32, None)
d_dropout_l1 = tf.placeholder(tf.float32, None)
d_dropout_l2 = tf.placeholder(tf.float32, None)
d_dropout_l3 = tf.placeholder(tf.float32, None)
with tf.name_scope('dense_concat_layers'):
X = tf.concat([d_X, d_P, d_Q], axis=-1)
X = tf.keras.layers.Dropout(d_dropout_l0)(X)
X = tf.layers.dense(X, n_hidden_l1, activation=None)
X = tf.layers.batch_normalization(X)
X = tf.nn.relu(X)
X = tf.keras.layers.Dropout(d_dropout_l1)(X)
# X = tf.layers.dense(X, n_hidden_l2, activation=None)
# X = tf.layers.batch_normalization(X)
# X = tf.nn.relu(X)
# X = tf.keras.layers.Dropout(d_dropout_l2)(X)
y_hat = tf.layers.dense(X, 3, name = 'output', kernel_regularizer = tf.contrib.layers.l2_regularizer(l2_l3))
with tf.name_scope('loss'):
probs = tf.nn.softmax(y_hat, axis=-1)
# label smoothing works
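            # with label_smoothing=eps, tf.losses.softmax_cross_entropy rescales
            # the one-hot targets to y*(1-eps) + eps/num_classes (num_classes=3
            # here), which penalizes over-confident logits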
loss = tf.losses.softmax_cross_entropy(d_y, logits=y_hat, label_smoothing=label_smoothing)
loss = tf.reduce_mean(loss)
with tf.name_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss)
return AttrDict(locals())
@staticmethod
def mean_featurizer(X, X_plus):
batch_x = []
batch_q = []
batch_p = []
seq_lens = []
batch_y = []
for idx, row in X.iterrows():
plus_row = X_plus.loc[idx]
x = np.array(row.bert)
q = np.array(plus_row.plus)
p = row.pretrained
pronoun_vec = x[row.pronoun_offset_token]
a_vec = x[row.a_span[0]:row.a_span[1]+1]
b_vec = x[row.b_span[0]:row.b_span[1]+1]
x = np.hstack((pronoun_vec, np.mean(a_vec, axis=0), np.mean(b_vec, axis=0))).reshape(-1)
pronoun_vec = q[plus_row.pronoun_offset_token]
a_vec = q[plus_row.a_span[0]:plus_row.a_span[1]+1]
b_vec = q[plus_row.b_span[0]:plus_row.b_span[1]+1]
q = np.hstack((pronoun_vec, np.mean(a_vec, axis=0), np.mean(b_vec, axis=0))).reshape(-1)
batch_q.append(q)
batch_x.append(x)
batch_p.append(p)
seq_lens.append(len(row.tokens))
batch_y.append(np.array([row.a_coref, row.b_coref, (row.a_coref == 0 and row.b_coref == 0)]))
return pd.DataFrame([batch_x, batch_q, batch_p, batch_y, seq_lens]).T
|
{"hexsha": "902eff9c41a33e8dbd7a731efbe824aa8f5844dd", "size": 16706, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/apex_tf/mean_pool_model.py", "max_stars_repo_name": "airxiechao/gap", "max_stars_repo_head_hexsha": "1262bb7063da95011479839b4ccb4d9ed2e97020", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2019-06-08T11:45:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T15:02:34.000Z", "max_issues_repo_path": "models/apex_tf/mean_pool_model.py", "max_issues_repo_name": "airxiechao/gap", "max_issues_repo_head_hexsha": "1262bb7063da95011479839b4ccb4d9ed2e97020", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-12-07T02:03:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:30:20.000Z", "max_forks_repo_path": "models/apex_tf/mean_pool_model.py", "max_forks_repo_name": "airxiechao/gap", "max_forks_repo_head_hexsha": "1262bb7063da95011479839b4ccb4d9ed2e97020", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-06-11T03:44:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T20:43:10.000Z", "avg_line_length": 38.4045977011, "max_line_length": 124, "alphanum_fraction": 0.450616545, "include": true, "reason": "import numpy", "num_tokens": 3457}
|
"""
    compile(abmOriginal::Union{Agent,Array{Agent}}; platform="cpu", integrator="Euler", integratorMedium="FTCS", neighbors="full", save="RAM", debug=false, user_=true)

Takes an Agent (or an array of Agents) and constructs the function in charge of the evolution of the model.
"""
function compile(abmOriginal::Union{Agent,Array{Agent}}; platform="cpu", integrator::String = "Euler", integratorMedium::String = "FTCS", neighbors::String="full", save::String = "RAM", debug = false, user_=true)
abm = deepcopy(abmOriginal)
p = Program_(abm)
p.integrator = integrator
p.integratorMedium = integratorMedium
p.neighbors = neighbors
#Update
updates_!(p)
#Neighbours declare
    arguments_!(p,platform)
#Declare all the agent properties related functions, arguments, code...
addCleanInteraction_!(p,platform)
addParameters_!(p,platform)
addCopyInitialisation_!(p,platform)
    addIntegrator_!(p,platform)
addUpdateGlobal_!(p,platform)
addUpdateLocal_!(p,platform)
addUpdateLocalInteraction_!(p,platform)
addCheckBounds_!(p,platform)
addUpdateMediumInteraction_!(p,platform)
    addIntegratorMedium_!(p,platform)
addUpdate_!(p,platform)
#Saving
    addSaving_!(p,platform)
if platform == "gpu"
gpuConf = :()
end
program = quote
function (com::Community;
dt::Real, tMax::Real,
nMax::Integer=com.N,
dtSave::Real=dt,
tSave::Real=0, saveFile::String="")
#Promoting to the correct type
dt = $FLOAT(dt)
tMax = $FLOAT(tMax)
t = $FLOAT(com.t)
N = $INT(com.N)
#Declaration of variables
$(p.declareVar)
#Declaration of functions
$(p.declareF)
#Execution of the program
$(p.execInit)
while t <= (tMax-dt)
$(p.execInloop)
t += dt
end
$(p.execAfter)
return $(p.returning)
end
end
if platform == "cpu"
program = postwalk(x->@capture(x,@platformAdapt v_(ARGS__)) ? :($v($(ARGS...))) : x, program)
elseif platform == "gpu"
program = postwalk(x->@capture(x,@platformAdapt v_(ARGS__)) ? :(kernel_ = @cuda launch = false $v($(ARGS...));
prop_ = AgentBasedModels.configurator_(kernel_,N);
kernel_($(ARGS...);threads=prop_[1],blocks=prop_[2])) : x, program)
program = cudaAdapt_(program)
end
program = postwalk(x->@capture(x,v_(g_)) && g == :ARGS_ ? :($v($(p.args...))) : x, program)
program = postwalk(x->@capture(x,v_(g_;h__)) && g == :ARGS_ ? :($v($(p.args...);$(h...))) : x, program)
program = randomAdapt_(p,program,platform)
programugly = gensym_ids(program)
program = flatten(programugly)
if debug == true
println(prettify(programugly))
end
model = Model(abm,program,Main.eval(program))
return model
end
|
{"hexsha": "9d5cb2f5fe34fb433a63623928eef53d7222311a", "size": 3193, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/model/compile.jl", "max_stars_repo_name": "gatocor/AgentBasedModels.jl", "max_stars_repo_head_hexsha": "b552cbff9e13660670782754bf25b0f334cb1e70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-13T10:36:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T14:48:30.000Z", "max_issues_repo_path": "src/model/compile.jl", "max_issues_repo_name": "gatocor/AgentBasedModels.jl", "max_issues_repo_head_hexsha": "b552cbff9e13660670782754bf25b0f334cb1e70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model/compile.jl", "max_forks_repo_name": "gatocor/AgentBasedModels.jl", "max_forks_repo_head_hexsha": "b552cbff9e13660670782754bf25b0f334cb1e70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3333333333, "max_line_length": 212, "alphanum_fraction": 0.5684309427, "num_tokens": 806}
|
"""Some helper function for PyGeoHydro."""
from typing import Any, Dict
import async_retriever as ar
import defusedxml.ElementTree as etree
import numpy as np
import pandas as pd
def nlcd_helper() -> Dict[str, Any]:
"""Get legends and properties of the NLCD cover dataset.
Notes
-----
The following references have been used:
- https://github.com/jzmiller1/nlcd
- https://www.mrlc.gov/data-services-page
- https://www.mrlc.gov/data/legends/national-land-cover-database-2016-nlcd2016-legend
Returns
-------
dict
Years where data is available and cover classes and categories, and roughness estimations.
"""
base_url = "https://www.mrlc.gov/downloads/sciweb1/shared/mrlc/metadata"
base_path = "eainfo/detailed/attr/attrdomv/edom"
def _get_xml(layer):
root = etree.fromstring(ar.retrieve([f"{base_url}/{layer}.xml"], "text")[0])
return root, root.findall(f"{base_path}/edomv"), root.findall(f"{base_path}/edomvd")
root, edomv, edomvd = _get_xml("NLCD_2019_Land_Cover_Science_Product_L48_20210604")
cover_classes = {}
for t, v in zip(edomv, edomvd):
cover_classes[t.text] = v.text
clist = [i.split() for i in root.find("eainfo/overview/eadetcit").text.split("\n")[2:]]
colors = {
int(c): (float(r) / 255.0, float(g) / 255.0, float(b) / 255.0) for c, r, g, b in clist
}
_, edomv, edomvd = _get_xml("nlcd_2019_impervious_descriptor_l48_20210604")
descriptors = {}
for t, v in zip(edomv, edomvd):
tag = t.text.split(" - ")
descriptors[tag[0]] = v.text if tag[-1].isnumeric() else f"{tag[-1]}: {v.text}"
cyear = [2019, 2016, 2013, 2011, 2008, 2006, 2004, 2001]
nlcd_meta = {
"cover_years": cyear,
"impervious_years": cyear,
"descriptor_years": cyear,
"canopy_years": [2016, 2011],
"classes": cover_classes,
"categories": {
"Background": ("127",),
"Unclassified": ("0",),
"Water": ("11", "12"),
"Developed": ("21", "22", "23", "24"),
"Barren": ("31",),
"Forest": ("41", "42", "43", "45", "46"),
"Shrubland": ("51", "52"),
"Herbaceous": ("71", "72", "73", "74"),
"Planted/Cultivated": ("81", "82"),
"Wetlands": ("90", "95"),
},
"descriptors": descriptors,
"roughness": {
"11": 0.001,
"12": 0.022,
"21": 0.0404,
"22": 0.0678,
"23": 0.0678,
"24": 0.0404,
"31": 0.0113,
"41": 0.36,
"42": 0.32,
"43": 0.4,
"45": 0.4,
"46": 0.24,
"51": 0.24,
"52": 0.4,
"71": 0.368,
"72": np.nan,
"81": 0.325,
"82": 0.16,
"90": 0.086,
"95": 0.1825,
},
"colors": colors,
}
return nlcd_meta
def nwis_errors() -> pd.DataFrame:
"""Get error code lookup table for USGS sites that have daily values."""
return pd.read_html("https://waterservices.usgs.gov/rest/DV-Service.html")[0]
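if __name__ == "__main__":
    # Illustrative usage sketch, not a test: both helpers hit remote services
    # (mrlc.gov and waterservices.usgs.gov), so network access is assumed.
    meta = nlcd_helper()
    print(meta["cover_years"])
    print(meta["classes"].get("41"))
    print(nwis_errors().head())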
|
{"hexsha": "d2f143f4e48ee68c9e72f65e023c88196e15bdc0", "size": 3188, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygeohydro/helpers.py", "max_stars_repo_name": "cheginit/hydrodata", "max_stars_repo_head_hexsha": "3c11051604b350543bf2f411e40f3479773fa190", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2020-02-06T04:08:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-06T18:17:57.000Z", "max_issues_repo_path": "pygeohydro/helpers.py", "max_issues_repo_name": "cheginit/hydrodata", "max_issues_repo_head_hexsha": "3c11051604b350543bf2f411e40f3479773fa190", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-11T04:51:18.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-06T14:46:50.000Z", "max_forks_repo_path": "pygeohydro/helpers.py", "max_forks_repo_name": "cheginit/hydrodata", "max_forks_repo_head_hexsha": "3c11051604b350543bf2f411e40f3479773fa190", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-02-06T15:33:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-01T01:55:27.000Z", "avg_line_length": 32.202020202, "max_line_length": 98, "alphanum_fraction": 0.5323086575, "include": true, "reason": "import numpy", "num_tokens": 981}
|
SUBROUTINE MULTIMODEFLOQUETMATRIX_SP_C(ATOM__C,NM,NF,MODES_NUM,FIELDS_C,INFO)!VALUES_,ROW_INDEX_,COLUMN_,SP,INFO)
! THIS SUBROUTINE BUILDS THE MULTIMODE FLOQUET MATRIX
!ATOM_ (IN) : type of quantum system
!NM (IN) : number of modes
!NF (IN) : number of driving fields
!MODES_NUM (IN) : vector indicating the number of harmonics of each driving field (mode)
!FIELDS_C (IN) : Fields
  ! THE FOLLOWING VARIABLES ARE DECLARED AS GLOBAL ALLOCATABLE ARRAYS. THIS SUBROUTINE SETS THEIR VALUES AND SIZES.
!VALUES_ (OUT) : Hamiltonian values
!ROW_INDEX_ (OUT) : vector indicating the row position of values
!COLUMN_ (OUT) : vector indicating the column position of the values
!INFO (INOUT) : error flag. INFO=0 means there is no error
USE TYPES_C !(modes.f90)
USE MERGINGARRAYS !(utils.f90)
USE SPARSE_INTERFACE !(sparse_utils.f90)
USE MODES_4F ! DEFINED IN modes_C.f90, declares atom_,coupling, values__, row_index__, column__
IMPLICIT NONE
INTEGER , INTENT(IN) :: NM,NF
TYPE(MODE_C), DIMENSION(NF), INTENT(INout) :: FIELDS_C
TYPE(ATOM_C), INTENT(IN) :: ATOM__C
INTEGER, DIMENSION(NM), INTENT(IN) :: MODES_NUM
INTEGER, INTENT(INOUT) :: INFO
! COMPLEX*16, DIMENSION(:), ALLOCATABLE :: VALUES__
! INTEGER, DIMENSION(:), ALLOCATABLE :: COLUMN__
! INTEGER, DIMENSION(:), ALLOCATABLE :: ROW_INDEX__
!INTEGER, INTENT(OUT) :: SP
! write(*,*) 'FORTRAN FLOQUETMATRIX_SP SAYS',NM,NF,MODES_NUM!, COULPLIG(3)%OMEGA
IF (INFO.EQ.0 .or. INFO.EQ.6) THEN
CALL MULTIMODEFLOQUETMATRIX_SP(ATOM_,NM,NF,MODES_NUM,COUPLING,VALUES__,ROW_INDEX__,COLUMN__,INFO)
! WRITE(*,*) "FORTRAN MULTIMODEHAMILTONAISPC SAYS: SIZE(VALUES__,1) =)",SIZE(VALUES__,1),SIZE(ROW_INDEX__,1)
H_FLOQUET_SIZE = SIZE(ROW_INDEX__,1)-1
END IF
END SUBROUTINE MULTIMODEFLOQUETMATRIX_SP_C ! _SP sparse packing
SUBROUTINE MULTIMODEFLOQUETMATRIX_PYTHON_SP_C(ATOM__C,NM,NF,MODES_NUM,FIELDS_C,MMF_DIM,INFO)! RESULT(MMF_DIM)
! THIS SUBROUTINE BUILDS THE MULTIMODE FLOQUET MATRIX
!ATOM_ (IN) : type of quantum system
!NM (IN) : number of modes
!NF (IN) : number of driving fields
!MODES_NUM (IN) : vector indicating the number of harmonics of each driving field (mode)
!FIELDS_C (IN) : Fields
  ! THE FOLLOWING VARIABLES ARE DECLARED AS GLOBAL ALLOCATABLE ARRAYS. THIS SUBROUTINE SETS THEIR VALUES AND SIZES.
!VALUES_ (OUT) : Hamiltonian values
!ROW_INDEX_ (OUT) : vector indicating the row position of values
!COLUMN_ (OUT) : vector indicating the column position of the values
!INFO (INOUT) : error flag. INFO=0 means there is no error
USE TYPES_C !(modes.f90)
USE MERGINGARRAYS !(utils.f90)
USE SPARSE_INTERFACE !(sparse_utils.f90)
USE MODES_4F ! DEFINED IN modes_C.f90, declares atom_,coupling, values__, row_index__, column__
IMPLICIT NONE
INTEGER , INTENT(IN) :: NM,NF
TYPE(MODE_C), DIMENSION(NF), INTENT(INout) :: FIELDS_C
TYPE(ATOM_C), INTENT(IN) :: ATOM__C
INTEGER, DIMENSION(NM), INTENT(IN) :: MODES_NUM
INTEGER, DIMENSION(4), INTENT(OUT) :: MMF_DIM
INTEGER, INTENT(INOUT) :: INFO
!INTEGER, DIMENSION(3) :: MMF_DIM
! INTEGER MMF_DIM
! WRITE(*,*) INFO
IF (INFO.EQ.0 .OR. INFO.EQ.6) THEN
CALL MULTIMODEFLOQUETMATRIX_SP(ATOM_,NM,NF,MODES_NUM,COUPLING,VALUES__,ROW_INDEX__,COLUMN__,INFO)
H_FLOQUET_SIZE = INFO!SIZE(ROW_INDEX__,1)-1
MMF_DIM(1) = SIZE(VALUES__,1)
MMF_DIM(2) = SIZE(ROW_INDEX__,1)
MMF_DIM(3) = SIZE(COLUMN__,1)
MMF_DIM(4) = H_FLOQUET_SIZE
!write(*,*) MMF_DIM
END IF
END SUBROUTINE MULTIMODEFLOQUETMATRIX_PYTHON_SP_C
!END FUNCTION MULTIMODEFLOQUETMATRIX_PYTHON_SP_C ! _SP sparse packing
SUBROUTINE GET_H_FLOQUET_SP_C(h_floquet_size_,VALUES,ROW_INDEX,COLUMN,INFO)
!SUBROUTINE GET_H_FLOQUET_SP_C(h_floquet_size_,VALUES)!, VALUES,ROW_INDEX,COLUMN,INFO)
USE MODES_4F ! DEFINED IN modes_C.f90, declares atom_,coupling, values__, row_index__, column__
IMPLICIT NONE
INTEGER, DIMENSION(3), INTENT(IN) :: h_floquet_size_
COMPLEX*16,DIMENSION(h_floquet_size_(1)), intent(OUT) :: VALUES
INTEGER, DIMENSION(h_floquet_size_(2)), intent(OUT) :: ROW_INDEX
INTEGER, DIMENSION(h_floquet_size_(3)), intent(OUT) :: COLUMN
INTEGER, INTENT(INOUT) :: INFO
!write(*,*) h_floquet_size_
!write(*,*) values__
!write(*,*) size(values__,1),h_floquet_size_
VALUES = VALUES__
ROW_INDEX = ROW_INDEX__-1
COLUMN = COLUMN__-1
  IF(INFO.EQ.-1) THEN ! THIS MEANS WE SHOULD USE AN EXTERNAL TOOL TO DIAGONALIZE THE HAMILTONIAN
IF(ALLOCATED(VALUES__)) DEALLOCATE(VALUES__)
IF(ALLOCATED(COLUMN__)) DEALLOCATE(COLUMN__)
IF(ALLOCATED(ROW_INDEX__)) DEALLOCATE(ROW_INDEX__)
INFO = 0
END IF
END SUBROUTINE GET_H_FLOQUET_SP_C
|
{"hexsha": "94eb5a5ecb3e55d335b07d21d34c5345a0c7ba77", "size": 5166, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/MultimodeHamiltonian_SP_C.f90", "max_stars_repo_name": "gsinuco/OPENMMF", "max_stars_repo_head_hexsha": "2cc0d0f2a4ded895c189050c38dbf2e8985e2d55", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-05-12T19:28:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T05:37:17.000Z", "max_issues_repo_path": "src/MultimodeHamiltonian_SP_C.f90", "max_issues_repo_name": "gsinuco/OPENMMF", "max_issues_repo_head_hexsha": "2cc0d0f2a4ded895c189050c38dbf2e8985e2d55", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MultimodeHamiltonian_SP_C.f90", "max_forks_repo_name": "gsinuco/OPENMMF", "max_forks_repo_head_hexsha": "2cc0d0f2a4ded895c189050c38dbf2e8985e2d55", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-05-12T19:28:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-16T21:09:32.000Z", "avg_line_length": 44.5344827586, "max_line_length": 113, "alphanum_fraction": 0.6637630662, "num_tokens": 1609}
|
#Thanks to : Satwik Bhattamishra
"""
Graph Regularized NMF:
[3] Cai, D., He, X., Han, J., & Huang, T. S. (2011). Graph regularized
nonnegative matrix factorization for data representation. IEEE Transactions
on Pattern Analysis and Machine Intelligence, 33(8), 1548-1560.
"""
import numpy as np
from numpy import random
import numpy.linalg as LA
import scipy.sparse as sp
from sys import exit
from NMF_Base import NMFBase
from metrics import clustering_metrics
class GNMF(NMFBase):
"""
Attributes
----------
W : matrix of basis vectors
H : matrix of coefficients
frob_error : frobenius norm
"""
def compute_graph(self, weight_type='heat-kernel', param=0.3):
if weight_type == 'heat-kernel':
samples = np.matrix(self.X.T)
sigma= param
A= np.zeros((samples.shape[0], samples.shape[0]))
for i in range(A.shape[0]):
for j in range(A.shape[1]):
A[i][j]= np.exp(-(LA.norm(samples[i] - samples[j] ))/sigma )
return A
elif weight_type == 'dot-weighting':
samples = np.matrix(self.X.T)
A= np.zeros((samples.shape[0], samples.shape[0]))
for i in range(A.shape[0]):
for j in range(A.shape[1]):
A[i][j]= np.dot(samples[i],samples[j])
return A
    # `param` (heat-kernel width) is kept from the original signature because
    # the manifold-graph fallback branch below uses it.
    def compute_factors(self, max_iter=100, lmd=0, weight_type='heat-kernel', adj_type='clique', param=0.3, A=None, labels=None):
if self.check_non_negativity():
pass
else:
print("The given matrix contains negative values")
exit()
if not hasattr(self,'W'):
self.initialize_w()
if not hasattr(self,'H'):
self.initialize_h()
if adj_type in ['clique', 'HyperAdj', 'precomputed']:
print('Using graph / Hypergraph adjacency or clique')
D = np.matrix(np.diag(np.asarray(A).sum(axis=0))) # degree matrix for clique or HyperAdj
# check = np.matrix(np.diag(np.asarray(A).sum(axis=1)))
# checking = check - D
# print('====================checking==================')
# print(checking.sum())
# exit()
elif adj_type=='HyperNcut':
            print('Using the hypergraph Laplacian (GNMFL), so the identity matrix is used instead of D')
            D = sp.eye(A.shape[0]).toarray() # identity matrix when using the hypergraph Laplacian
else:
print('building manifold graph')
A = self.compute_graph(weight_type, param) # building manifold graph from node attributes
D = np.matrix(np.diag(np.asarray(A).sum(axis=0)))
self.frob_error = np.zeros(max_iter)
for i in range(max_iter):
print('iter: {}'.format(i))
print('using original paper update')
self.update_w(lmd, A, D)
self.update_h(lmd, A, D)
self.frob_error[i] = self.frobenius_norm()
# h = self.H
# #e = gnmf.frob_error
# H = h.T
predict_labels = np.asarray(np.argmax(self.H.T, axis=1)).squeeze()
# print(predict_labels.shape)
cm = clustering_metrics(labels, predict_labels)
cm.evaluationClusterModelFromLabel()
# ac[i], nm[i], f1[i], pre[i], adj_s[i], rec[i] = cm.evaluationClusterModelFromLabel()
# mod[i] = modularity(predict_labels, adj)
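    # The two rules below are the multiplicative updates of Cai et al. (2011),
    # written for the orientation used here (X: d x n, W: d x k, H: k x n):
    #   H^T <- H^T * (lmd*A*H^T + X^T*W) / (lmd*D*H^T + H^T*(W^T*W))
    #   W   <- W   * (X*H^T) / (W*(H*H^T))
    # lmd*(D - A) is the graph-Laplacian penalty that pulls the embeddings of
    # linked samples together.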
def update_h(self, lmd, A, D):
eps = 2**-8
h_num = lmd*np.dot(A, self.H.T)+ np.dot(self.X.T, self.W )
h_den = lmd*np.dot(D, self.H.T)+np.dot(self.H.T, np.dot(self.W.T, self.W))
self.H = np.multiply(self.H.T, (h_num+eps)/(h_den+eps))
self.H = self.H.T
self.H[self.H <= 0] = eps
self.H[np.isnan(self.H)] = eps
def update_w(self, lmd, A, D):
XH = self.X.dot(self.H.T)
WHtH = self.W.dot(self.H.dot(self.H.T)) + 2**-8
self.W *= XH
self.W /= WHtH
|
{"hexsha": "9c201f255a50e1187f19bfb5c0b2cbc910caa62c", "size": 3682, "ext": "py", "lang": "Python", "max_stars_repo_path": "GNMF_hyper_and_Graph.py", "max_stars_repo_name": "BarakeelFanseu/GRAC_CIKM", "max_stars_repo_head_hexsha": "3cbdbbb6c4902653f633c6d8f1c80f370b2938cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GNMF_hyper_and_Graph.py", "max_issues_repo_name": "BarakeelFanseu/GRAC_CIKM", "max_issues_repo_head_hexsha": "3cbdbbb6c4902653f633c6d8f1c80f370b2938cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GNMF_hyper_and_Graph.py", "max_forks_repo_name": "BarakeelFanseu/GRAC_CIKM", "max_forks_repo_head_hexsha": "3cbdbbb6c4902653f633c6d8f1c80f370b2938cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.765625, "max_line_length": 130, "alphanum_fraction": 0.6265616513, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 1063}
|
module Metrics
# Module for DL Metrics
export mae, mse, msle, male, r2_score, adjusted_r2_score
export binary_accuracy, confusion_matrix, categorical_accuracy, sparse_categorical, top_k_categorical, top_k_sparse_categorical, precision, recall, sensitivity, detection_rate, f_beta_score, specificity, false_alarm_rate, cohen_kappa, statsfromTFPN, classwise_stats, global_stats
export IoU, PSNR
export bleu_score, rouge, rouge_l_summary_level
export ranking_stats_k, avg_precision
export report_stats
using StatsBase, DataFrames
using DataStructures: OrderedDict
include("./CV_Metrics/CVMetrics.jl")
include("./NLP_Metrics/NLPMetrics.jl")
include("Classification.jl")
include("Regression.jl")
include("Ranking_n_Statistical.jl")
include("utils.jl")
end
|
{"hexsha": "0a19197bd624252b7c3f9e112302d6d268da8589", "size": 764, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Metrics.jl", "max_stars_repo_name": "yuehhua/Metrics.jl", "max_stars_repo_head_hexsha": "6dc6fd6155afe551dd6424debdf7f034e68acb29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-06-02T14:09:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T00:08:26.000Z", "max_issues_repo_path": "src/Metrics.jl", "max_issues_repo_name": "yuehhua/Metrics.jl", "max_issues_repo_head_hexsha": "6dc6fd6155afe551dd6424debdf7f034e68acb29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-12-22T06:28:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T02:47:49.000Z", "max_forks_repo_path": "src/Metrics.jl", "max_forks_repo_name": "yuehhua/Metrics.jl", "max_forks_repo_head_hexsha": "6dc6fd6155afe551dd6424debdf7f034e68acb29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-13T11:32:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T06:04:19.000Z", "avg_line_length": 28.2962962963, "max_line_length": 282, "alphanum_fraction": 0.8246073298, "num_tokens": 187}
|
// Created by xufeiwang on 21/12/19.
#include <cstdlib>
#include <iostream>
#include <RcppArmadillo.h>
#define _USE_MATH_DEFINES
#include <cmath>
#include <ctime>
#include <vector>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <utility>
#include <armadillo>
using namespace std;
using namespace Rcpp;
using namespace arma;
double log_res(float m, mat xx, colvec xy, double yy)
{
if (det(xx) < 1e-10)
return (log(yy));
double res = yy - as_scalar(trans(xy) * solve(xx, xy));
return (log(res) - log(m));
}
double compute_loss(mat xx, colvec xy, double yy, colvec beta)
{
return (yy - 2 * as_scalar(trans(xy) * beta) + as_scalar(trans(beta) * (xx * beta)));
}
int index(int n, int i, int j)
{
int ind = (2*n-i+1)*i/2+j-i-1;
return (ind);
}
void M_cal(NumericVector* F, vector<int>* S, int p, double gamma)
{
NumericVector M(p+1,0.0);
for (int i=1; i<p+1; i++)
{
M[i] = gamma +(*F)[i-1];
}
for (int i=2;i<p+1; i++)
{
for (int k=1; k<i; k++)
{
double temp = gamma + M[k] +(*F)[index(p,k,i)];
if (temp > M[i])
{
M[i] = temp;
(*S)[i] = k;
}
}
}
return;
}
// [[Rcpp::export]]
NumericVector knots_selection_cpp(NumericMatrix X, NumericVector y, int m, double lam0, NumericVector Knots, NumericVector u)
{
int n = X.nrow();
int l = X.ncol();
NumericMatrix SXX(n+1, l*l);
NumericMatrix SXy(n+1, l);
NumericVector Syy(n+1);
for (int j=0; j<l*l; j++)
SXX(0, j) = 0;
for (int j=0; j<l; j++)
SXy(0, j) = 0;
Syy[0] = 0;
for (int i=1; i<n+1; i++)
{
SXy(i, 0) = SXy(i-1, 0) + y[i-1];
Syy[i] = Syy[i-1] + y[i-1] * y[i-1];
for (int j=0; j<l; j++) {
SXy(i, j) = SXy(i-1, j) + X(i-1, j) * y[i-1];
for (int k=0; k<l; k++)
SXX(i, j*l+k) = SXX(i-1, j*l+k) + X(i-1, j) * X(i-1, k);
}
}
int p;
if (m == 0)
p = Knots.size() + 1;
else
p = n / m;
NumericVector num(p+1, 0.0);
if (m == 0) {
int ind = 0;
for (int i=0; i<n; i++) {
if (u[i] > Knots[ind]) {
num[ind] = i;
ind = ind + 1;
}
num[p] = n;
}
} else {
int r = n - p * m;
int ind = 0 ;
for (int i=1;i<p+1;i++) {
if (i <= r)
ind += m+1;
else
ind += m;
num[i] = ind;
}
}
int len = p*(p+1)/2;
NumericVector *ls = new NumericVector(len, 0.0);
vector<int> *S = new vector<int>(p+1, 0);
NumericMatrix SXX_simple(p+1, l*l);
NumericMatrix SXy_simple(p+1, l);
NumericVector Syy_simple(p+1);
for (int i=1;i<p+1;i++) {
int ind = int(num[i]);
for (int k=0; k<l*l; k++)
SXX_simple(i,k) = SXX(ind,k);
for (int k=0; k<l; k++)
SXy_simple(i,k) = SXy(ind,k);
Syy_simple[i] = Syy[ind];
}
for (int i=0; i<p; i++) {
for (int j=i+1; j<p+1; j++){
mat xx(l, l);
colvec xy(l);
for (int k=0; k<l; k++)
xy[k] = SXy_simple(j, k) - SXy_simple(i, k);
for (int k1=0; k1<l; k1++)
for (int k2=0;k2<l; k2++)
xx(k1, k2) = SXX_simple(j, k1*l+k2) - SXX_simple(i, k1*l+k2);
(*ls)[index(p,i,j)] = -(num[j]-num[i])/2.0*log_res(num[j]-num[i], xx, xy, Syy_simple[j] - Syy_simple[i]);
}
}
double lam = -lam0*log(n)/2;
M_cal(ls, S, p, lam);
int slice = 1;
int temp = (*S)[p];
NumericVector knots(1);
knots[0] = n;
while (temp != 0)
{
knots.push_back(num[temp]);
temp = (*S)[temp];
slice += 1;
}
delete ls;
delete S;
return (knots);
}
|
{"hexsha": "000040ce24070e3a3148d386fa45436a46a98f74", "size": 3500, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/knots_selection.cpp", "max_stars_repo_name": "wangxf0106/vcmasf", "max_stars_repo_head_hexsha": "55f2b09a4d4d290a90d08fb12bcccf45c599bd37", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/knots_selection.cpp", "max_issues_repo_name": "wangxf0106/vcmasf", "max_issues_repo_head_hexsha": "55f2b09a4d4d290a90d08fb12bcccf45c599bd37", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/knots_selection.cpp", "max_forks_repo_name": "wangxf0106/vcmasf", "max_forks_repo_head_hexsha": "55f2b09a4d4d290a90d08fb12bcccf45c599bd37", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1518987342, "max_line_length": 125, "alphanum_fraction": 0.5077142857, "num_tokens": 1350}
|
# Hough transform circle detection
import cv2
import sys
import numpy as np
src = cv2.imread('HappyFish.jpg')
if src is None:
print('no img')
sys.exit()
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
# blurring first and softening the detail a little makes detection work well
# the blurring step has to be tuned carefully
blr = cv2.GaussianBlur(gray, (0,0), 1.0)
def on_trackbar(pos):
rmin = cv2.getTrackbarPos('minRadius', 'img')
rmax = cv2.getTrackbarPos('maxRadius', 'img')
th = cv2.getTrackbarPos('threshold', 'img')
    th = max(th, 1)  # param2 (accumulator threshold) must stay positive
    circles = cv2.HoughCircles(blr, cv2.HOUGH_GRADIENT, 1, 50, param1=120, param2=th,
                               minRadius=rmin, maxRadius=rmax)
    dst = src.copy()
    if circles is not None:  # HoughCircles returns None when no circle is found
        for i in range(circles.shape[1]):
            cx, cy, radius = circles[0][i]
            cv2.circle(dst, (int(cx), int(cy)), int(radius), (0, 0, 255), 2, cv2.LINE_AA)
    cv2.imshow('img', dst)
cv2.imshow('img', src)
cv2.createTrackbar('minRadius', 'img', 0, 100, on_trackbar)
cv2.createTrackbar('maxRadius', 'img', 0, 150, on_trackbar)
cv2.createTrackbar('threshold', 'img', 0, 100, on_trackbar)
cv2.setTrackbarPos('minRadius', 'img', 10)
cv2.setTrackbarPos('maxRadius', 'img', 20)
cv2.setTrackbarPos('threshold', 'img', 10)
cv2.waitKey()
cv2.destroyAllWindows()
|
{"hexsha": "3eb14797081a139e7f7cec24e9067ba07636c754", "size": 1151, "ext": "py", "lang": "Python", "max_stars_repo_path": "TIL/HoughCircles.py", "max_stars_repo_name": "FLY-CODE77/opencv", "max_stars_repo_head_hexsha": "5644e6c1ef43d81efb54ccde6c06f1adf000fb96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-23T14:29:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-23T14:29:24.000Z", "max_issues_repo_path": "TIL/HoughCircles.py", "max_issues_repo_name": "FLY-CODE77/opencv", "max_issues_repo_head_hexsha": "5644e6c1ef43d81efb54ccde6c06f1adf000fb96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TIL/HoughCircles.py", "max_forks_repo_name": "FLY-CODE77/opencv", "max_forks_repo_head_hexsha": "5644e6c1ef43d81efb54ccde6c06f1adf000fb96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1590909091, "max_line_length": 121, "alphanum_fraction": 0.6655082537, "include": true, "reason": "import numpy", "num_tokens": 430}
|
/**
* Copyright (c) 2016, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Boost Software License - Version 1.0
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (the "Software") to use, reproduce, display, distribute,
* execute, and transmit the Software, and to prepare derivative works of the
* Software, and to permit third-parties to whom the Software is furnished to
* do so, all subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#define BOOST_TEST_MODULE stringTests
#define BOOST_TEST_MAIN
#include <chrono>
#include <iostream>
#include <map>
#include <stdexcept>
#include <boost/mpl/list.hpp>
#include <boost/test/unit_test.hpp>
#include <hadoken/string/algorithm.hpp>
#include <hadoken/string/string_view.hpp>
#include <hadoken/string/wildcard.hpp>
#include <hadoken/utility/range.hpp>
#include "test_helpers.hpp"
BOOST_AUTO_TEST_CASE(string_view_simple) {
using namespace hadoken;
const char* msg = "hello bob #42~ێ";
string_view empty, truncated(msg, 5), full(msg);
BOOST_CHECK_EQUAL(empty.empty(), true);
BOOST_CHECK_EQUAL(truncated.empty(), false);
BOOST_CHECK_EQUAL(full.empty(), false);
BOOST_CHECK_EQUAL(empty.size(), 0);
BOOST_CHECK_EQUAL(empty.size(), empty.length());
BOOST_CHECK_EQUAL(truncated.size(), 5);
BOOST_CHECK_EQUAL(truncated.size(), truncated.length());
BOOST_CHECK_EQUAL(full.size(), std::string(msg).size());
string_view new_view_copied = full;
string_view truncated_copy = truncated;
string_view new_view_moved = std::move(truncated_copy);
BOOST_CHECK_EQUAL(full, new_view_copied);
BOOST_CHECK_EQUAL(new_view_moved, truncated);
std::ostringstream ss;
ss << truncated;
BOOST_CHECK_EQUAL(ss.str(), to_string(truncated));
}
BOOST_AUTO_TEST_CASE(string_view_compare_substr) {
using namespace hadoken;
const char* msg = "hello bob #42~ێ";
string_view msg_view(msg);
std::string msg_str(msg);
string_view msg_view_str(msg_str);
BOOST_CHECK_EQUAL(msg_view.compare(0, 6, msg_str.substr(0, 6)), 0);
BOOST_CHECK_EQUAL(msg_view.compare(msg_view_str), 0);
BOOST_CHECK_EQUAL(msg_view.compare(0, /* way larger */ 1024, msg_view_str), 0);
BOOST_CHECK_EQUAL(msg_view.compare(1 , 1024, msg_view_str), -1);
}
BOOST_AUTO_TEST_CASE(string_tokenize_view) {
using namespace hadoken;
const char* msg = "hello bob #42~ێ tada";
std::vector<std::string> res = string::split_string(msg, " ");
BOOST_CHECK_EQUAL(res.size(), 4);
std::string reconstruct = string::join(res, " ");
BOOST_CHECK_EQUAL(std::string(msg), reconstruct);
}
void test_wildcard(const std::string & wildcard, const std::string & str, bool result){
const bool res = hadoken::match_wildcard(wildcard, str);
// std::cout << " test " << wildcard << " " << str << "\n";
BOOST_CHECK_EQUAL(res, result);
}
BOOST_AUTO_TEST_CASE(wildcard_simple) {
auto t1 = std::chrono::steady_clock::now();
auto maker = std::make_tuple<std::string, std::string, bool>;
std::vector<std::tuple<std::string, std::string, bool> > test_cases = {
{ maker("hello*ld", "hello world", true) },
{ maker("dude" , "hello world", false)},
{ maker("h**", "hello world", true) },
{ maker("*hello*", "hello world", true) },
{ maker("world*", "hello world", false) },
{ maker("*", "hello world", true) },
{ maker("**dude**", "hello world", false) },
{ maker("**blabla", "blafdfd", false) },
{ maker("***", "hello*world*", true) },
{ maker("**deb", "debug1", false) },
{ maker("hello wo*rld", "hello world", true) },
{ maker("hello world*", "hello world", true) }
};
for(const auto & t : test_cases){
test_wildcard(std::get<0>(t), std::get<1>(t), std::get<2>(t));
}
auto t2 = std::chrono::steady_clock::now();
std::cout << "time taken wildcard " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count())
<< " µs" << std::endl;
}
BOOST_AUTO_TEST_CASE(wildcard_stack_explosion_test) {
std::string message, pattern, bad_pattern;
message = random_string_generator(16 * 1024 * 1024, 42);
std::cout << " " << message.size() << std::endl;
pattern = message;
pattern[pattern.size() / 2] = '*';
pattern[pattern.size() / 4] = '*';
pattern[pattern.size() / 8] = '*';
bad_pattern = pattern;
bad_pattern.back() = bad_pattern.back() + 1;
auto t1 = std::chrono::steady_clock::now();
BOOST_CHECK(hadoken::match_wildcard(pattern, message));
BOOST_CHECK(hadoken::match_wildcard(bad_pattern, message) == false);
auto t2 = std::chrono::steady_clock::now();
std::cout << "time taken "
<< double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000.0 / 1000.0 << " s"
<< std::endl;
}
|
{"hexsha": "a9fe454878aef5e00c8a848d9cd7c047c6621b89", "size": 5915, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/unit/test_string.cpp", "max_stars_repo_name": "adevress/hadoken", "max_stars_repo_head_hexsha": "c501c53b14dfd256ae745d417b6417855b77ed05", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-08-11T17:35:30.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-06T01:21:17.000Z", "max_issues_repo_path": "tests/unit/test_string.cpp", "max_issues_repo_name": "adevress/hadoken", "max_issues_repo_head_hexsha": "c501c53b14dfd256ae745d417b6417855b77ed05", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2017-08-29T13:32:12.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-04T08:40:23.000Z", "max_forks_repo_path": "tests/unit/test_string.cpp", "max_forks_repo_name": "adevress/hadoken", "max_forks_repo_head_hexsha": "c501c53b14dfd256ae745d417b6417855b77ed05", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2016-11-16T10:18:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T04:35:59.000Z", "avg_line_length": 31.6310160428, "max_line_length": 121, "alphanum_fraction": 0.6710059172, "num_tokens": 1484}
|
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): none yet.
*
* ***** END GPL LICENSE BLOCK *****
* Start up of the Blender Player on GHOST.
*/
/** \file gameengine/GamePlayer/GPG_ghost.cpp
* \ingroup player
*/
#include <boost/algorithm/string.hpp>
#ifdef __linux__
# ifdef __alpha__
# include <signal.h>
# endif /* __alpha__ */
#endif /* __linux__ */
#include "BKE_addon.h"
#include "BKE_appdir.h"
#include "BKE_blender.h"
#include "BKE_blendfile.h"
#include "BKE_brush.h"
#include "BKE_cachefile.h"
#include "BKE_callbacks.h"
#include "BKE_context.h"
#include "BKE_font.h"
#include "BKE_global.h"
#include "BKE_gpencil_modifier.h"
#include "BKE_icons.h"
#include "BKE_idtype.h"
#include "BKE_image.h"
#include "BKE_keyconfig.h"
#include "BKE_lib_remap.h"
#include "BKE_main.h"
#include "BKE_mask.h"
#include "BKE_material.h"
#include "BKE_mball_tessellate.h"
#include "BKE_modifier.h"
#include "BKE_node.h"
#include "BKE_report.h"
#include "BKE_screen.h"
#include "BKE_shader_fx.h"
#include "BKE_sound.h"
#include "BKE_studiolight.h"
#include "BKE_subdiv.h"
#include "BKE_tracking.h"
#include "BKE_volume.h"
#include "BLF_api.h"
#include "BLI_blenlib.h"
#include "BLI_mempool.h"
#include "BLI_system.h"
#include "BLI_task.h"
#include "BLI_timer.h"
#include "BLO_readfile.h"
#include "BLO_runtime.h"
#include "BLT_lang.h"
#include "BPY_extern_python.h"
#include "BPY_extern_run.h"
#include "CLG_log.h"
#include "DEG_depsgraph.h"
#include "DNA_genfile.h"
#include "DNA_space_types.h"
#include "DRW_engine.h"
#include "ED_datafiles.h"
#include "ED_gpencil.h"
#include "ED_keyframes_edit.h"
#include "ED_keyframing.h"
#include "ED_render.h"
#include "ED_screen.h"
#include "ED_space_api.h"
#include "ED_undo.h"
#include "ED_util.h"
#include "GHOST_ISystem.h"
#include "GHOST_Path-api.h"
#include "GPU_context.h"
#include "GPU_init_exit.h"
#include "GPU_material.h"
#include "IMB_imbuf.h"
#include "MEM_CacheLimiterC-Api.h"
#include "MEM_guardedalloc.h"
#include "RE_engine.h"
#include "RE_pipeline.h"
#include "RE_texture.h"
#include "RNA_define.h"
#include "SEQ_clipboard.h"
#include "UI_interface.h"
#include "UI_resources.h"
#include "wm.h"
#include "WM_api.h"
#include "wm_event_system.h"
#include "wm_message_bus.h"
#include "wm_surface.h"
#include "wm_window.h"
#include "CM_Message.h"
#include "KX_Globals.h"
#include "KX_PythonInit.h"
#include "LA_PlayerLauncher.h"
#include "LA_SystemCommandLine.h"
#ifdef __APPLE__
extern "C" int GHOST_HACK_getFirstFile(char buf[]);
#endif
#ifdef WIN32
# include <windows.h>
# if !defined(DEBUG)
# include <wincon.h>
# endif // !defined(DEBUG)
# if defined(_MSC_VER) && defined(_M_X64)
# include <math.h> /* needed for _set_FMA3_enable */
# endif
# include "utfconv.h"
#endif // WIN32
#ifdef WITH_SDL_DYNLOAD
# include "sdlew.h"
#endif
#ifdef WITH_GAMEENGINE_BPPLAYER
# include "SpindleEncryption.h"
#endif // WITH_GAMEENGINE_BPPLAYER
const int kMinWindowWidth = 100;
const int kMinWindowHeight = 100;
static void mem_error_cb(const char *errorStr)
{
fprintf(stderr, "%s", errorStr);
fflush(stderr);
}
#ifdef WIN32
typedef enum {
SCREEN_SAVER_MODE_NONE = 0,
SCREEN_SAVER_MODE_PREVIEW,
SCREEN_SAVER_MODE_SAVER,
SCREEN_SAVER_MODE_CONFIGURATION,
SCREEN_SAVER_MODE_PASSWORD,
} ScreenSaverMode;
static ScreenSaverMode scr_saver_mode = SCREEN_SAVER_MODE_NONE;
static HWND scr_saver_hwnd = nullptr;
static BOOL scr_saver_init(int argc, char **argv)
{
scr_saver_mode = SCREEN_SAVER_MODE_NONE;
scr_saver_hwnd = nullptr;
BOOL ret = false;
int len = ::strlen(argv[0]);
if (len > 4 && !::stricmp(".scr", argv[0] + len - 4)) {
scr_saver_mode = SCREEN_SAVER_MODE_CONFIGURATION;
ret = true;
if (argc >= 2) {
if (argc >= 3) {
scr_saver_hwnd = (HWND)(INT_PTR)::atoi(argv[2]);
}
if (!::stricmp("/c", argv[1])) {
scr_saver_mode = SCREEN_SAVER_MODE_CONFIGURATION;
if (scr_saver_hwnd == nullptr)
scr_saver_hwnd = ::GetForegroundWindow();
}
else if (!::stricmp("/s", argv[1])) {
scr_saver_mode = SCREEN_SAVER_MODE_SAVER;
}
else if (!::stricmp("/a", argv[1])) {
scr_saver_mode = SCREEN_SAVER_MODE_PASSWORD;
}
else if (!::stricmp("/p", argv[1]) || !::stricmp("/l", argv[1])) {
scr_saver_mode = SCREEN_SAVER_MODE_PREVIEW;
}
}
}
return ret;
}
# define SCR_SAVE_MOUSE_MOVE_THRESHOLD 15
static HWND found_ghost_window_hwnd;
static GHOST_IWindow *ghost_window_to_find;
static WNDPROC ghost_wnd_proc;
static POINT scr_save_mouse_pos;
static LRESULT CALLBACK screenSaverWindowProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
BOOL close = false;
switch (uMsg) {
case WM_MOUSEMOVE: {
POINT pt;
GetCursorPos(&pt);
LONG dx = scr_save_mouse_pos.x - pt.x;
LONG dy = scr_save_mouse_pos.y - pt.y;
if (abs(dx) > SCR_SAVE_MOUSE_MOVE_THRESHOLD || abs(dy) > SCR_SAVE_MOUSE_MOVE_THRESHOLD) {
close = true;
}
scr_save_mouse_pos = pt;
break;
}
case WM_LBUTTONDOWN:
case WM_MBUTTONDOWN:
case WM_RBUTTONDOWN:
case WM_KEYDOWN:
close = true;
}
if (close)
PostMessage(hwnd, WM_CLOSE, 0, 0);
return CallWindowProc(ghost_wnd_proc, hwnd, uMsg, wParam, lParam);
}
BOOL CALLBACK findGhostWindowHWNDProc(HWND hwnd, LPARAM lParam)
{
GHOST_IWindow *p = (GHOST_IWindow *)GetWindowLongPtr(hwnd, GWLP_USERDATA);
BOOL ret = true;
if (p == ghost_window_to_find) {
found_ghost_window_hwnd = hwnd;
ret = false;
}
return ret;
}
static HWND findGhostWindowHWND(GHOST_IWindow *window)
{
found_ghost_window_hwnd = nullptr;
ghost_window_to_find = window;
EnumWindows(findGhostWindowHWNDProc, NULL);
return found_ghost_window_hwnd;
}
static GHOST_IWindow *startScreenSaverPreview(GHOST_ISystem *system,
HWND parentWindow,
const bool stereoVisual)
{
RECT rc;
if (GetWindowRect(parentWindow, &rc)) {
int windowWidth = rc.right - rc.left;
int windowHeight = rc.bottom - rc.top;
const char *title = "";
GHOST_GLSettings glSettings = {0};
if (stereoVisual) {
glSettings.flags |= GHOST_glStereoVisual;
}
GHOST_IWindow *window = system->createWindow(title,
0,
0,
windowWidth,
windowHeight,
GHOST_kWindowStateMinimized,
GHOST_kDrawingContextTypeOpenGL,
glSettings);
if (!window) {
CM_Error("could not create main window");
exit(-1);
}
HWND ghost_hwnd = findGhostWindowHWND(window);
if (!ghost_hwnd) {
CM_Error("could find main window");
exit(-1);
}
SetParent(ghost_hwnd, parentWindow);
LONG_PTR style = GetWindowLongPtr(ghost_hwnd, GWL_STYLE);
LONG_PTR exstyle = GetWindowLongPtr(ghost_hwnd, GWL_EXSTYLE);
RECT adjrc = {0, 0, windowWidth, windowHeight};
AdjustWindowRectEx(&adjrc, style, false, exstyle);
style = (style & (~(WS_POPUP | WS_OVERLAPPEDWINDOW | WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU |
WS_THICKFRAME | WS_MINIMIZEBOX | WS_MAXIMIZEBOX | WS_TILEDWINDOW))) |
WS_CHILD;
SetWindowLongPtr(ghost_hwnd, GWL_STYLE, style);
SetWindowPos(ghost_hwnd,
nullptr,
adjrc.left,
adjrc.top,
0,
0,
SWP_NOZORDER | SWP_NOSIZE | SWP_NOACTIVATE);
/* Check the size of the client rectangle of the window and resize the window
* so that the client rectangle has the size requested.
*/
window->setClientSize(windowWidth, windowHeight);
return window;
}
return nullptr;
}
#endif // WIN32
static GHOST_IWindow *startFullScreen(GHOST_ISystem *system,
int width,
int height,
int bpp,
int frequency,
const bool stereoVisual,
const int alphaBackground,
bool useDesktop)
{
GHOST_TUns32 sysWidth = 0, sysHeight = 0;
system->getMainDisplayDimensions(sysWidth, sysHeight);
// Create the main window
GHOST_DisplaySetting setting;
setting.xPixels = (useDesktop) ? sysWidth : width;
setting.yPixels = (useDesktop) ? sysHeight : height;
setting.bpp = bpp;
setting.frequency = frequency;
GHOST_IWindow *window = nullptr;
system->beginFullScreen(setting, &window, stereoVisual, alphaBackground);
window->setCursorVisibility(false);
/* note that X11 ignores this (it uses a window internally for fullscreen) */
window->setState(GHOST_kWindowStateFullScreen);
return window;
}
#ifdef WIN32
static GHOST_IWindow *startScreenSaverFullScreen(GHOST_ISystem *system,
int width,
int height,
int bpp,
int frequency,
const bool stereoVisual,
const int alphaBackground)
{
GHOST_IWindow *window = startFullScreen(
system, width, height, bpp, frequency, stereoVisual, alphaBackground, 0);
HWND ghost_hwnd = findGhostWindowHWND(window);
if (ghost_hwnd != nullptr) {
GetCursorPos(&scr_save_mouse_pos);
ghost_wnd_proc = (WNDPROC)GetWindowLongPtr(ghost_hwnd, GWLP_WNDPROC);
SetWindowLongPtr(ghost_hwnd, GWLP_WNDPROC, (uintptr_t)screenSaverWindowProc);
}
return window;
}
#endif // WIN32
static GHOST_IWindow *startWindow(GHOST_ISystem *system,
const char *title,
int windowLeft,
int windowTop,
int windowWidth,
int windowHeight,
const bool stereoVisual,
const int alphaBackground)
{
GHOST_GLSettings glSettings = {0};
// Create the main window
// std::string title ("Blender Player - GHOST");
if (stereoVisual)
glSettings.flags |= GHOST_glStereoVisual;
if (alphaBackground)
glSettings.flags |= GHOST_glAlphaBackground;
GHOST_IWindow *window = system->createWindow(title,
windowLeft,
windowTop,
windowWidth,
windowHeight,
GHOST_kWindowStateNormal,
GHOST_kDrawingContextTypeOpenGL,
glSettings);
if (!window) {
CM_Error("could not create main window");
exit(-1);
}
/* Check the size of the client rectangle of the window and resize the window
* so that the client rectangle has the size requested.
*/
window->setClientSize(windowWidth, windowHeight);
window->setCursorVisibility(false);
return window;
}
static GHOST_IWindow *startEmbeddedWindow(GHOST_ISystem *system,
const char *title,
const GHOST_TEmbedderWindowID parentWindow,
const bool stereoVisual,
const int alphaBackground)
{
GHOST_TWindowState state = GHOST_kWindowStateNormal;
GHOST_GLSettings glSettings = {0};
if (stereoVisual)
glSettings.flags |= GHOST_glStereoVisual;
if (alphaBackground)
glSettings.flags |= GHOST_glAlphaBackground;
if (parentWindow != 0)
state = GHOST_kWindowStateEmbedded;
GHOST_IWindow *window = system->createWindow(
title, 0, 0, 0, 0, state, GHOST_kDrawingContextTypeOpenGL, glSettings, false, parentWindow);
if (!window) {
CM_Error("could not create main window");
exit(-1);
}
return window;
}
static void usage(const std::string &program, bool isBlenderPlayer)
{
std::string example_filename = "";
std::string example_pathname = "";
#ifdef _WIN32
const std::string consoleoption = "[-c] ";
#else
const std::string consoleoption = "";
#endif
if (isBlenderPlayer) {
example_filename = "filename.blend";
#ifdef _WIN32
example_pathname = "c:\\";
#else
example_pathname = "/home/user/";
#endif
}
  CM_Message(std::endl);
CM_Message("usage: " << program << " [--options] " << example_filename << std::endl);
CM_Message("Available options are: [-w [w h l t]] [-f [fw fh fb ff]] "
<< consoleoption << "[-g gamengineoptions] "
<< "[-s stereomode] [-m aasamples]");
CM_Message("Optional parameters must be passed in order.");
CM_Message("Default values are set in the blend file." << std::endl);
CM_Message(" -h: Prints this command summary" << std::endl);
CM_Message(" -w: display in a window");
CM_Message(" --Optional parameters--");
CM_Message(" w = window width");
CM_Message(" h = window height");
CM_Message(" l = window left coordinate");
CM_Message(" t = window top coordinate");
CM_Message(" Note: To define 'w' or 'h', both must be used."
<< "Also, to define 'l' or 't', all four parameters must be used.");
CM_Message(" Example: -w or -w 500 300 or -w 500 300 0 0" << std::endl);
CM_Message(" -f: start game in fullscreen mode");
CM_Message(" --Optional parameters--");
CM_Message(" fw = fullscreen mode pixel width (use 0 to detect automatically)");
CM_Message(" fh = fullscreen mode pixel height (use 0 to detect automatically)");
CM_Message(
" fb = fullscreen mode bits per pixel (default unless set in the blend file: 32)");
CM_Message(
" ff = fullscreen mode frequency (default unless set in the blend file: 60)");
CM_Message(" Note: To define 'fw'' or 'fh'', both must be used.");
CM_Message(" Example: -f or -f 1024 768 or -f 0 0 16 or -f 1024 728 16 30"
<< std::endl);
CM_Message(" -s: start player in stereoscopy mode (requires 3D capable hardware)");
CM_Message(
" stereomode: nostereo (default unless stereo is set in the blend file)");
CM_Message(" anaglyph (Red-Blue glasses)");
CM_Message(" sidebyside (Left Right)");
CM_Message(" syncdoubling (Above Below)");
CM_Message(" 3dtvtopbottom (Squashed Top-Bottom for passive glasses)");
CM_Message(" interlace (Interlace horizontally)");
CM_Message(" vinterlace (Vertical interlace for autostereo display)");
CM_Message(" hwpageflip (Quad buffered shutter glasses)");
CM_Message(" Example: -s sidebyside or -s vinterlace" << std::endl);
CM_Message(" -m: maximum anti-aliasing (eg. 2,4,8,16)" << std::endl);
CM_Message(" -n: maximum anisotropic filtering (eg. 2,4,8,16)" << std::endl);
CM_Message(" -i: parent window's ID" << std::endl);
#ifdef _WIN32
CM_Message(" -c: keep console window open" << std::endl);
#endif
CM_Message(" -d: debugging options:");
CM_Message(" memory Debug memory leaks");
CM_Message(" gpu Debug gpu error and warnings" << std::endl);
CM_Message(" -g: game engine options:" << std::endl);
CM_Message(" Name Default Description");
CM_Message(" ------------------------------------------------------------------------");
CM_Message(" fixedtime 0 \"Enable all frames\"");
CM_Message(" wireframe 0 Wireframe render");
CM_Message(" show_framerate 0 Show the frame rate");
CM_Message(" show_properties 0 Show debug properties");
CM_Message(" show_profile 0 Show profiling information");
CM_Message(" show_bounding_box 0 Show debug bounding box volume");
CM_Message(" show_armatures 0 Show debug armatures");
CM_Message(" show_camera_frustum 0 Show debug camera frustum volume");
CM_Message(
" show_shadow_frustum 0 Show debug light shadow frustum volume");
CM_Message(" ignore_deprecation_warnings 1 Ignore deprecation warnings"
<< std::endl);
CM_Message(" -p: override python main loop script");
CM_Message(std::endl);
CM_Message(
" - : all arguments after this are ignored, allowing python to access them from sys.argv");
CM_Message(std::endl);
CM_Message("example: " << program << " -w 320 200 10 10 -g noaudio " << example_pathname
<< example_filename);
CM_Message("example: " << program << " -g show_framerate = 0 " << example_pathname
<< example_filename);
CM_Message("example: " << program << " -i 232421 -m 16 " << example_pathname
<< example_filename);
}
static void get_filename(int argc, char **argv, char *filename)
{
#ifdef __APPLE__
/* On Mac we park the game file (called game.blend) in the application bundle.
* The executable is located in the bundle as well.
* Therefore, we can locate the game relative to the executable.
*/
int srclen = ::strlen(argv[0]);
int len = 0;
char *gamefile = nullptr;
filename[0] = '\0';
if (argc > 1) {
if (BLI_exists(argv[argc - 1])) {
BLI_strncpy(filename, argv[argc - 1], FILE_MAX);
}
if (::strncmp(argv[argc - 1], "-psn_", 5) == 0) {
static char firstfilebuf[512];
if (GHOST_HACK_getFirstFile(firstfilebuf)) {
BLI_strncpy(filename, firstfilebuf, FILE_MAX);
}
}
}
srclen -= ::strlen("MacOS/Blenderplayer");
if (srclen > 0) {
len = srclen + ::strlen("Resources/game.blend");
gamefile = new char[len + 1];
::strcpy(gamefile, argv[0]);
::strcpy(gamefile + srclen, "Resources/game.blend");
if (BLI_exists(gamefile))
BLI_strncpy(filename, gamefile, FILE_MAX);
delete[] gamefile;
}
#else
filename[0] = '\0';
if (argc > 1)
BLI_strncpy(filename, argv[argc - 1], FILE_MAX);
#endif  // __APPLE__
}
static BlendFileData *load_game_data(const char *progname,
char *filename = nullptr,
char *relativename = nullptr)
{
ReportList reports;
BlendFileData *bfd = nullptr;
BKE_reports_init(&reports, RPT_STORE);
/* try to load ourself, will only work if we are a runtime */
if (BLO_is_a_runtime(progname)) {
bfd = BLO_read_runtime(progname, &reports);
if (bfd) {
bfd->type = BLENFILETYPE_RUNTIME;
BLI_strncpy(bfd->main->name, progname, sizeof(bfd->main->name));
}
}
else {
bfd = BLO_read_from_file(progname, BLO_READ_SKIP_NONE, &reports);
}
if (!bfd && filename) {
bfd = load_game_data(filename);
if (!bfd) {
CM_Error("loading " << filename << " failed: ");
BKE_reports_print(&reports, RPT_ERROR);
}
}
BKE_reports_clear(&reports);
return bfd;
}
/// Return true when the exit code asks to quit the engine.
static bool quitGame(KX_ExitRequest exitcode)
{
  // Exit the game engine if we are not restarting the game or loading another file.
return (exitcode != KX_ExitRequest::RESTART_GAME &&
exitcode != KX_ExitRequest::START_OTHER_GAME);
}
#ifdef WITH_GAMEENGINE_BPPLAYER
static BlendFileData *load_encrypted_game_data(const char *filename, std::string encryptKey)
{
ReportList reports;
BlendFileData *bfd = NULL;
char *fileData = NULL;
int fileSize;
std::string localPath(SPINDLE_GetFilePath());
BKE_reports_init(&reports, RPT_STORE);
if (filename == NULL) {
return NULL;
}
if (!localPath.empty() && !encryptKey.empty()) {
// Load file and decrypt.
fileData = SPINDLE_DecryptFromFile(filename, &fileSize, encryptKey.c_str(), 0);
}
if (fileData) {
bfd = BLO_read_from_memory(fileData, fileSize, BLO_READ_SKIP_USERDEF, &reports);
delete[] fileData;
}
if (!bfd) {
BKE_reports_print(&reports, RPT_ERROR);
}
BKE_reports_clear(&reports);
return bfd;
}
#endif // WITH_GAMEENGINE_BPPLAYER
static void wm_init_reports(bContext *C)
{
ReportList *reports = CTX_wm_reports(C);
BLI_assert(!reports || BLI_listbase_is_empty(&reports->list));
BKE_reports_init(reports, RPT_STORE);
}
static void wm_free_reports(bContext *C)
{
ReportList *reports = CTX_wm_reports(C);
BKE_reports_clear(reports);
}
static void callback_clg_fatal(void *fp)
{
BLI_system_backtrace((FILE *)fp);
}
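/* Find the first View3D region that has region data and bind its screen, area,
 * region and the given scene to the context, so that the player starts with a
 * valid 3D viewport context. */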
static void InitBlenderContextVariables(bContext *C, wmWindowManager *wm, Scene *scene)
{
ARegion *ar;
wmWindow *win;
for (win = (wmWindow *)wm->windows.first; win; win = win->next) {
bScreen *screen = WM_window_get_active_screen(win);
if (!screen) {
continue;
}
for (ScrArea *sa = (ScrArea *)screen->areabase.first; sa; sa = sa->next) {
if (sa->spacetype == SPACE_VIEW3D) {
ListBase *regionbase = &sa->regionbase;
for (ar = (ARegion *)regionbase->first; ar; ar = ar->next) {
if (ar->regiontype == RGN_TYPE_WINDOW) {
if (ar->regiondata) {
CTX_wm_screen_set(C, screen);
CTX_wm_area_set(C, sa);
CTX_wm_region_set(C, ar);
CTX_data_scene_set(C, scene);
win->scene = scene;
return;
}
}
}
}
}
}
}
int main(int argc,
#ifdef WIN32
char **UNUSED(argv_c)
#else
char **argv
#endif
)
{
int i;
int argc_py_clamped = argc; /* use this so python args can be added after ' - ' */
bool error = false;
SYS_SystemHandle syshandle = SYS_GetSystem();
bool fullScreen = false;
bool fullScreenParFound = false;
bool windowParFound = false;
#ifdef WIN32
bool closeConsole = true;
#endif
#ifdef WITH_GAMEENGINE_BPPLAYER
bool useLocalPath = false;
std::string hexKey;
#endif // WITH_GAMEENGINE_BPPLAYER
RAS_Rasterizer::StereoMode stereomode = RAS_Rasterizer::RAS_STEREO_NOSTEREO;
bool stereoWindow = false;
bool stereoParFound = false;
int windowLeft = 100;
int windowTop = 100;
int windowWidth = 640;
int windowHeight = 480;
GHOST_TUns32 fullScreenWidth = 0;
GHOST_TUns32 fullScreenHeight = 0;
GHOST_IWindow *window = nullptr;
int fullScreenBpp = 32;
int fullScreenFrequency = 60;
GHOST_TEmbedderWindowID parentWindow = 0;
  bool isBlenderPlayer =
      false;  // true when launching from blender or the command line, false for the bundled player
int validArguments = 0;
bool samplesParFound = false;
std::string pythonControllerFile;
GHOST_TUns16 aasamples = 0;
int alphaBackground = 0;
#ifdef WIN32
char **argv;
int argv_num;
/* We delay loading of openmp so we can set the policy here. */
# if defined(_MSC_VER)
_putenv_s("OMP_WAIT_POLICY", "PASSIVE");
# endif
/* FMA3 support in the 2013 CRT is broken on Vista and Windows 7 RTM (fixed in SP1). Just disable
* it. */
# if defined(_MSC_VER) && defined(_M_X64)
_set_FMA3_enable(0);
# endif
/* Win32 Unicode Args */
/* NOTE: cannot use guardedalloc malloc here, as it's not yet initialized
* (it depends on the args passed in, which is what we're getting here!)
*/
{
wchar_t **argv_16 = CommandLineToArgvW(GetCommandLineW(), &argc);
argv = (char **)malloc(argc * sizeof(char *));
for (argv_num = 0; argv_num < argc; argv_num++) {
argv[argv_num] = alloc_utf_8_from_16(argv_16[argv_num], 0);
}
LocalFree(argv_16);
}
#endif /* WIN32 */
#ifdef __linux__
# ifdef __alpha__
signal(SIGFPE, SIG_IGN);
# endif /* __alpha__ */
#endif /* __linux__ */
#ifdef WITH_SDL_DYNLOAD
sdlewInit();
#endif
BlendFileData *bfd = nullptr;
/* Initialize logging */
CLG_init();
CLG_fatal_fn_set(callback_clg_fatal);
bContext *C = CTX_create();
BKE_appdir_program_path_init(argv[0]);
BKE_tempdir_init(nullptr);
// We don't use threads directly in the BGE, but we need to call this so things like
// freeing up GPU_Textures works correctly.
BLI_threadapi_init();
BLI_thread_put_process_on_fast_node();
DNA_sdna_current_init();
BKE_blender_globals_init(); /* blender.c */
MEM_CacheLimiter_set_disabled(true);
BKE_cachefiles_init();
BKE_idtype_init();
BKE_appdir_init();
BLI_task_scheduler_init();
IMB_init();
BKE_images_init();
BKE_modifier_init();
BKE_gpencil_modifier_init();
BKE_shaderfx_init();
BKE_volumes_init();
DEG_register_node_types();
BKE_brush_system_init();
RE_texture_rng_init();
BKE_callback_global_init();
RNA_init();
GHOST_CreateSystemPaths();
BKE_addon_pref_type_init();
BKE_keyconfig_pref_type_init();
wm_operatortype_init();
wm_operatortypes_register();
wm_gizmotype_init();
wm_gizmogrouptype_init();
WM_paneltype_init(); /* Lookup table only. */
WM_menutype_init();
WM_uilisttype_init();
ED_undosys_type_init();
BKE_library_callback_free_notifier_reference_set(
WM_main_remove_notifier_reference); /* library.c */
BKE_library_callback_remap_editor_id_reference_set(
WM_main_remap_editor_id_reference); /* library.c */
BKE_spacedata_callback_id_remap_set(ED_spacedata_id_remap); /* screen.c */
DEG_editors_set_update_cb(ED_render_id_flush_update, ED_render_scene_update);
ED_spacetypes_init(); /* editors/space_api/spacetype.c */
ED_file_init(); /* for fsmenu */
// Setup builtin font for BLF (mostly copied from creator.c, wm_init_exit.c and
// interface_style.c)
BLF_init();
BLT_lang_init();
BLT_lang_set("");
/* Init icons before reading .blend files for preview icons, which can
* get triggered by the depsgraph. This is also done in background mode
* for scripts that do background processing with preview icons. */
BKE_icons_init(BIFICONID_LAST);
  /* reports can't be initialized before the wm,
* but keep before file reading, since that may report errors */
wm_init_reports(C);
WM_msgbus_types_init();
/* Studio-lights needs to be init before we read the home-file,
* otherwise the versioning cannot find the default studio-light. */
BKE_studiolight_init();
ED_spacemacros_init();
BKE_node_system_init();
// We load our own G_MAIN, so free the one that BKE_blender_globals_init() gives us
BKE_main_free(G_MAIN);
G_MAIN = nullptr;
#ifdef WITH_FFMPEG
IMB_ffmpeg_init();
#endif
/* background render uses this font too */
BKE_vfont_builtin_register(datatoc_bfont_pfb, datatoc_bfont_pfb_size);
const bool unique = false;
BLF_load_default(unique);
if (blf_mono_font == -1)
blf_mono_font = BLF_load_mono_default(true);
// Parse command line options
#if defined(DEBUG)
CM_Debug("argv[0] = '" << argv[0] << "'");
#endif
#ifdef WIN32
if (scr_saver_init(argc, argv)) {
switch (scr_saver_mode) {
case SCREEN_SAVER_MODE_CONFIGURATION:
MessageBox(scr_saver_hwnd,
"This screen saver has no options that you can set",
"Screen Saver",
MB_OK);
break;
case SCREEN_SAVER_MODE_PASSWORD:
/* This is W95 only, which we currently do not support.
* Fall-back to normal screen saver behavior in that case... */
case SCREEN_SAVER_MODE_SAVER:
fullScreen = true;
fullScreenParFound = true;
break;
case SCREEN_SAVER_MODE_PREVIEW:
case SCREEN_SAVER_MODE_NONE:
/* This will actually be handled somewhere below... */
break;
}
}
#endif
UI_theme_init_default();
UserDef *user_def = BKE_blendfile_userdef_from_defaults();
BKE_blender_userdef_data_set_and_free(user_def);
BKE_sound_init_once();
// Initialize a default material for meshes without materials.
BKE_materials_init();
  /* When running as blenderplayer, the last argument can't be parsed as an option
   * since it has to be the filename; otherwise the file is bundled in the runtime. */
isBlenderPlayer = !BLO_is_a_runtime(argv[0]);
if (isBlenderPlayer)
validArguments = argc - 1;
else
validArguments = argc;
/* Parsing command line arguments (can be set from WM_OT_blenderplayer_start) */
#if defined(DEBUG)
CM_Debug("parsing command line arguments...");
CM_Debug("num of arguments is: " << validArguments - 1); //-1 because i starts at 1
#endif
for (i = 1; (i < validArguments) && !error
#ifdef WIN32
&& scr_saver_mode == SCREEN_SAVER_MODE_NONE
#endif
;)
{
#if defined(DEBUG)
CM_Debug("argv[" << i << "] = '" << argv[i] << "'");
#endif
if (argv[i][0] == '-') {
/* ignore all args after " - ", allow python to have own args */
if (argv[i][1] == '\0') {
argc_py_clamped = i;
break;
}
switch (argv[i][1]) {
case 'g': // game engine options (show_framerate, fixedtime, etc)
{
i++;
if (i <= validArguments) {
char *paramname = argv[i];
// Check for single value versus assignment
if (i + 1 <= validArguments && (*(argv[i + 1]) == '=')) {
i++;
if (i + 1 <= validArguments) {
i++;
// Assignment
SYS_WriteCommandLineInt(syshandle, paramname, atoi(argv[i]));
SYS_WriteCommandLineFloat(syshandle, paramname, atof(argv[i]));
SYS_WriteCommandLineString(syshandle, paramname, argv[i]);
#if defined(DEBUG)
CM_Debug(paramname << " = '" << argv[i] << "'");
#endif
i++;
}
else {
error = true;
CM_Error("argument assignment " << paramname << " without value.");
}
}
else {
// SYS_WriteCommandLineInt(syshandle, argv[i++], 1);
}
}
break;
}
case 'd': // debug on
{
++i;
if (strcmp(argv[i], "gpu") == 0) {
G.debug |= G_DEBUG_GPU | G_DEBUG;
++i;
}
else if (strcmp(argv[i], "memory") == 0) {
G.debug |= G_DEBUG;
CM_Debug("Switching to fully guarded memory allocator.");
MEM_use_guarded_allocator();
MEM_set_memory_debug();
#ifndef NDEBUG
BLI_mempool_set_memory_debug();
#endif
++i;
}
else {
CM_Error("debug mode '" << argv[i] << "' unrecognized.");
}
break;
}
#ifdef WITH_GAMEENGINE_BPPLAYER
case 'L': {
// Find the requested base file directory.
if (!useLocalPath) {
SPINDLE_SetFilePath(&argv[i][2]);
useLocalPath = true;
}
i++;
break;
}
case 'K': {
// Find and set keys
hexKey = SPINDLE_FindAndSetEncryptionKeys(argv, i);
i++;
break;
}
#endif // WITH_GAMEENGINE_BPPLAYER
case 'f': // fullscreen mode
{
i++;
fullScreen = true;
fullScreenParFound = true;
if ((i + 2) <= validArguments && argv[i][0] != '-' && argv[i + 1][0] != '-') {
fullScreenWidth = atoi(argv[i++]);
fullScreenHeight = atoi(argv[i++]);
if ((i + 1) <= validArguments && argv[i][0] != '-') {
fullScreenBpp = atoi(argv[i++]);
if ((i + 1) <= validArguments && argv[i][0] != '-')
fullScreenFrequency = atoi(argv[i++]);
}
}
else if ((i + 1) <= validArguments && argv[i][0] != '-' && argv[i + 1][0] != '-') {
error = true;
CM_Error("to define fullscreen width or height, both options must be used.");
}
break;
}
case 'w': // display in a window
{
i++;
fullScreen = false;
windowParFound = true;
// Parse window position and size options
if ((i + 2) <= validArguments && argv[i][0] != '-' && argv[i + 1][0] != '-') {
windowWidth = atoi(argv[i++]);
windowHeight = atoi(argv[i++]);
if ((i + 2) <= validArguments && argv[i][0] != '-' && argv[i + 1][0] != '-') {
windowLeft = atoi(argv[i++]);
windowTop = atoi(argv[i++]);
}
else if ((i + 1) <= validArguments && argv[i][0] != '-' && argv[i + 1][0] != '-') {
error = true;
CM_Error(
"to define the window left or right coordinates, both options must be used.");
}
}
else if ((i + 1) <= validArguments && argv[i][0] != '-' && argv[i + 1][0] != '-') {
error = true;
CM_Error("to define the window's width or height, both options must be used.");
}
break;
}
case 'h': // display help
{
usage(argv[0], isBlenderPlayer);
return 0;
break;
}
case 'i': // parent window ID
{
i++;
if ((i + 1) <= validArguments)
parentWindow = (GHOST_TEmbedderWindowID)atoll(argv[i++]);
else {
error = true;
CM_Error("too few options for parent window argument.");
}
#if defined(DEBUG)
CM_Debug("XWindows ID = " << int(parentWindow));
#endif // defined(DEBUG)
break;
}
case 'm': // maximum anti-aliasing (eg. 2,4,8,16)
{
i++;
samplesParFound = true;
if ((i + 1) <= validArguments)
aasamples = atoi(argv[i++]);
else {
error = true;
CM_Error("no argument supplied for -m");
}
break;
}
case 'n': {
++i;
if ((i + 1) <= validArguments) {
U.anisotropic_filter = atoi(argv[i++]);
}
else {
error = true;
CM_Error("no argument supplied for -n");
}
break;
}
case 'c': // keep console (windows only)
{
i++;
#ifdef WIN32
closeConsole = false;
#endif
break;
}
case 's': // stereo mode
{
i++;
if ((i + 1) <= validArguments) {
stereoParFound = true;
if (!strcmp(argv[i],
"nostereo")) // may not be redundant if the file has different setting
{
stereomode = RAS_Rasterizer::RAS_STEREO_NOSTEREO;
}
// only the hardware pageflip method needs a stereo window
else if (!strcmp(argv[i], "hwpageflip")) {
stereomode = RAS_Rasterizer::RAS_STEREO_QUADBUFFERED;
stereoWindow = true;
}
else if (!strcmp(argv[i], "syncdoubling"))
stereomode = RAS_Rasterizer::RAS_STEREO_ABOVEBELOW;
else if (!strcmp(argv[i], "3dtvtopbottom"))
stereomode = RAS_Rasterizer::RAS_STEREO_3DTVTOPBOTTOM;
else if (!strcmp(argv[i], "anaglyph"))
stereomode = RAS_Rasterizer::RAS_STEREO_ANAGLYPH;
else if (!strcmp(argv[i], "sidebyside"))
stereomode = RAS_Rasterizer::RAS_STEREO_SIDEBYSIDE;
else if (!strcmp(argv[i], "interlace"))
stereomode = RAS_Rasterizer::RAS_STEREO_INTERLACED;
else if (!strcmp(argv[i], "vinterlace"))
stereomode = RAS_Rasterizer::RAS_STEREO_VINTERLACE;
#if 0
// // future stuff
// else if (!strcmp(argv[i], "stencil")
// stereomode = RAS_STEREO_STENCIL;
#endif
else {
error = true;
CM_Error("stereomode '" << argv[i] << "' unrecognized.");
}
i++;
}
else {
error = true;
CM_Error("too few options for stereo argument.");
}
break;
}
case 'a': // allow window to blend with display background
{
i++;
alphaBackground = 1;
break;
}
case 'p': {
++i;
pythonControllerFile = argv[i++];
break;
}
default: // not recognized
{
CM_Warning("unknown argument: " << argv[i++]);
break;
}
}
}
else {
i++;
}
}
if ((windowWidth < kMinWindowWidth) || (windowHeight < kMinWindowHeight)) {
error = true;
CM_Error("window size too small.");
}
if (error) {
usage(argv[0], isBlenderPlayer);
return 0;
}
GHOST_ISystem *system = nullptr;
#ifdef WIN32
if (scr_saver_mode != SCREEN_SAVER_MODE_CONFIGURATION)
#endif
{
// Create the system
if (GHOST_ISystem::createSystem() == GHOST_kSuccess) {
system = GHOST_ISystem::getSystem();
BLI_assert(system);
if (!fullScreenWidth || !fullScreenHeight)
system->getMainDisplayDimensions(fullScreenWidth, fullScreenHeight);
// process first batch of events. If the user
      // drops a file on top of the blenderplayer icon, we
// receive an event with the filename
system->processEvents(0);
// this bracket is needed for app (see below) to get out
// of scope before GHOST_ISystem::disposeSystem() is called.
{
KX_ExitRequest exitcode = KX_ExitRequest::NO_REQUEST;
std::string exitstring = "";
bool firstTimeRunning = true;
char filename[FILE_MAX];
char pathname[FILE_MAX];
char *titlename;
get_filename(argc_py_clamped, argv, filename);
if (filename[0])
BLI_path_abs_from_cwd(filename, sizeof(filename));
// fill the GlobalSettings with the first scene files
// those may change during the game and persist after using Game Actuator
GlobalSettings gs;
#ifdef WITH_PYTHON
PyObject *globalDict = nullptr;
#endif // WITH_PYTHON
DRW_engines_register();
#ifdef WITH_PYTHON
initGamePlayerPythonScripting(argc, argv, C);
#endif
bool first_time_window = true;
do {
// Read the Blender file
// if we got an exitcode 3 (KX_ExitRequest::START_OTHER_GAME) load a different file
if (exitcode == KX_ExitRequest::START_OTHER_GAME ||
exitcode == KX_ExitRequest::RESTART_GAME) {
/* This normally exits/close the GHOST_IWindow */
if (bfd) {
/* Hack to not free the win->ghosting AND win->gpu_ctx when we restart/load new
* .blend */
CTX_wm_window(C)->ghostwin = nullptr;
/* Hack to not free wm->message_bus when we restart/load new .blend */
CTX_wm_manager(C)->message_bus = nullptr;
BLO_blendfiledata_free(bfd);
}
char basedpath[FILE_MAX];
// base the actuator filename relative to the last file
if (exitcode == KX_ExitRequest::RESTART_GAME) {
            /* We have weird issues with exitstring (a "~" in the exitstring messes up the
             * path when we use the Game Restart actuator).
             * Instead of using exitstring we can recycle filename.
             * This is a temporary fix rather than a proper one: it remains to be understood
             * why the exitstring can be corrupted when blenderplayer is started from blender
             * (but not when it is started from Visual Studio).
             */
BLI_strncpy(basedpath, filename[0] ? filename : NULL, sizeof(basedpath));
}
else {
BLI_strncpy(basedpath, exitstring.c_str(), sizeof(basedpath));
}
BLI_path_abs(basedpath, pathname);
bfd = load_game_data(basedpath);
if (!bfd) {
// just add "//" in front of it
char temppath[FILE_MAX] = "//";
BLI_strncpy(temppath + 2, basedpath, FILE_MAX - 2);
BLI_path_abs(temppath, pathname);
bfd = load_game_data(temppath);
}
}
else {
#ifdef WITH_GAMEENGINE_BPPLAYER
if (useLocalPath) {
bfd = load_encrypted_game_data(filename[0] ? filename : NULL, hexKey);
// The file is valid and it's the original file name.
if (bfd) {
remove(filename);
KX_SetOrigPath(bfd->main->name);
}
}
else
#endif // WITH_GAMEENGINE_BPPLAYER
{
bfd = load_game_data(BKE_appdir_program_path(), filename[0] ? filename : NULL);
// The file is valid and it's the original file name.
if (bfd) {
                /* Without this step, bmain->name can end up as ".blend~";
                 * as the cause is not understood and the bug has been reported,
                 * we ensure the extension is ".blend", otherwise globalDict
                 * runs into issues. (youle)
                 */
char *blend_name = bfd->main->name;
BLI_path_extension_ensure(blend_name, FILE_MAX, ".blend");
KX_SetOrigPath(blend_name);
}
}
}
#if defined(DEBUG)
CM_Debug("game data loaded from " << filename);
#endif
if (!bfd) {
usage(argv[0], isBlenderPlayer);
error = true;
exitcode = KX_ExitRequest::QUIT_GAME;
}
else {
          /* Setting options according to the blend file if not overridden on the command line */
#ifdef WIN32
# if !defined(DEBUG)
if (closeConsole) {
system->toggleConsole(0); // Close a console window
}
# endif // !defined(DEBUG)
#endif // WIN32
Main *maggie = bfd->main;
Scene *scene = bfd->curscene;
CTX_data_main_set(C, maggie);
CTX_data_scene_set(C, scene);
G.main = maggie;
G_MAIN = G.main;
if (firstTimeRunning) {
G.fileflags = bfd->fileflags;
gs.glslflag = scene->gm.flag;
}
titlename = maggie->name;
// Check whether the game should be displayed full-screen
if ((!fullScreenParFound) && (!windowParFound)) {
// Only use file settings when command line did not override
if ((scene->gm.playerflag & GAME_PLAYER_FULLSCREEN)) {
fullScreen = true;
fullScreenWidth = scene->gm.xplay;
fullScreenHeight = scene->gm.yplay;
fullScreenFrequency = scene->gm.freqplay;
fullScreenBpp = scene->gm.depth;
}
else {
fullScreen = false;
windowWidth = scene->gm.xplay;
windowHeight = scene->gm.yplay;
}
}
// Check whether the game should be displayed in stereo
if (!stereoParFound) {
// Only use file settings when command line did not override
if (scene->gm.stereoflag == STEREO_ENABLED) {
switch (scene->gm.stereomode) {
case STEREO_QUADBUFFERED: {
stereomode = RAS_Rasterizer::RAS_STEREO_QUADBUFFERED;
break;
}
case STEREO_ABOVEBELOW: {
stereomode = RAS_Rasterizer::RAS_STEREO_ABOVEBELOW;
break;
}
case STEREO_INTERLACED: {
stereomode = RAS_Rasterizer::RAS_STEREO_INTERLACED;
break;
}
case STEREO_ANAGLYPH: {
stereomode = RAS_Rasterizer::RAS_STEREO_ANAGLYPH;
break;
}
case STEREO_SIDEBYSIDE: {
stereomode = RAS_Rasterizer::RAS_STEREO_SIDEBYSIDE;
break;
}
case STEREO_VINTERLACE: {
stereomode = RAS_Rasterizer::RAS_STEREO_VINTERLACE;
break;
}
case STEREO_3DTVTOPBOTTOM: {
stereomode = RAS_Rasterizer::RAS_STEREO_3DTVTOPBOTTOM;
break;
}
}
if (stereomode == RAS_Rasterizer::RAS_STEREO_QUADBUFFERED)
stereoWindow = true;
}
}
else {
scene->gm.stereoflag = STEREO_ENABLED;
}
if (!samplesParFound)
aasamples = scene->gm.aasamples;
BLI_strncpy(pathname, maggie->name, sizeof(pathname));
if (firstTimeRunning) {
firstTimeRunning = false;
if (fullScreen) {
#ifdef WIN32
if (scr_saver_mode == SCREEN_SAVER_MODE_SAVER) {
window = startScreenSaverFullScreen(system,
fullScreenWidth,
fullScreenHeight,
fullScreenBpp,
fullScreenFrequency,
stereoWindow,
alphaBackground);
}
else
#endif
{
window = startFullScreen(
system,
fullScreenWidth,
fullScreenHeight,
fullScreenBpp,
fullScreenFrequency,
stereoWindow,
alphaBackground,
(scene->gm.playerflag & GAME_PLAYER_DESKTOP_RESOLUTION));
}
}
else {
#ifdef __APPLE__
            // On Macs, show the executable name instead of the 'game.blend' name
char tempname[1024], *appstring;
::strcpy(tempname, titlename);
appstring = strstr(tempname, ".app/");
if (appstring) {
appstring[2] = 0;
titlename = &tempname[0];
}
#endif
// Strip the path so that we have the name of the game file
std::string path = titlename;
std::vector<std::string> parts;
#ifndef WIN32
boost::split(parts, path, boost::is_any_of("/"));
#else // WIN32
boost::split(parts, path, boost::is_any_of("\\"));
#endif // WIN32
std::string title;
if (parts.size()) {
title = parts[parts.size() - 1];
std::vector<std::string> sublastparts;
boost::split(sublastparts, title, boost::is_any_of("."));
if (sublastparts.size() > 1) {
title = sublastparts[0];
}
}
else {
title = "blenderplayer";
}
#ifdef WIN32
if (scr_saver_mode == SCREEN_SAVER_MODE_PREVIEW) {
window = startScreenSaverPreview(system, scr_saver_hwnd, stereoWindow);
}
else
#endif
{
const char *strtitle = title.c_str();
if (parentWindow != 0)
window = startEmbeddedWindow(
system, strtitle, parentWindow, stereoWindow, alphaBackground);
else
window = startWindow(system,
strtitle,
windowLeft,
windowTop,
windowWidth,
windowHeight,
stereoWindow,
alphaBackground);
}
}
/* wm context */
wmWindowManager *wm = (wmWindowManager *)G_MAIN->wm.first;
wmWindow *win = (wmWindow *)wm->windows.first;
CTX_wm_manager_set(C, wm);
CTX_wm_window_set(C, win);
}
wmWindowManager *wm = (wmWindowManager *)bfd->main->wm.first;
wmWindow *win = (wmWindow *)wm->windows.first;
CTX_wm_manager_set(C, wm);
CTX_wm_window_set(C, win);
InitBlenderContextVariables(C, wm, bfd->curscene);
wm_window_ghostwindow_blenderplayer_ensure(wm, win, window, first_time_window);
/* The following is needed to run some bpy operators in blenderplayer */
ED_screen_refresh_blenderplayer(wm, win);
if (first_time_window) {
/* We need to have first an ogl context bound and it's done
* in wm_window_ghostwindow_blenderplayer_ensure.
*/
WM_init_opengl_blenderplayer(G_MAIN, system, win);
}
first_time_window = false;
          // This argc can't be argc_py_clamped, since python uses it.
LA_PlayerLauncher launcher(system,
window,
maggie,
scene,
&gs,
stereomode,
aasamples,
argc,
argv,
pythonControllerFile,
C);
#ifdef WITH_PYTHON
if (!globalDict) {
globalDict = PyDict_New();
}
launcher.SetPythonGlobalDict(globalDict);
#endif // WITH_PYTHON
launcher.InitEngine();
// Enter main loop
launcher.EngineMainLoop();
exitcode = launcher.GetExitRequested();
exitstring = launcher.GetExitString();
gs = *launcher.GetGlobalSettings();
          /* Delete the globalDict before freeing the launcher, because the launcher calls
           * Py_Finalize(), which disallows any python calls afterwards.
           */
if (quitGame(exitcode)) {
#ifdef WITH_PYTHON
            // If globalDict is nullptr, python was never initialized.
if (globalDict) {
PyDict_Clear(globalDict);
Py_DECREF(globalDict);
}
#endif
}
launcher.ExitEngine();
}
          /* Refer to WM_exit_ext() and BKE_blender_free();
           * these are not called in the player, but we need to match some of their behavior here.
           * If the order of function calls or blender's state doesn't match that of blender
           * proper, we may run into trouble later on. */
WM_jobs_kill_all(CTX_wm_manager(C));
for (wmWindow *win = (wmWindow *)CTX_wm_manager(C)->windows.first; win;
win = win->next) {
CTX_wm_window_set(C, win); /* needed by operator close callbacks */
WM_event_remove_handlers(C, &win->handlers);
WM_event_remove_handlers(C, &win->modalhandlers);
ED_screen_exit(C, win, WM_window_get_active_screen(win));
}
} while (!quitGame(exitcode));
}
}
else {
error = true;
CM_Error("couldn't create a system.");
}
}
DRW_engines_free();
if ((U.pref_flag & USER_PREF_FLAG_SAVE) && ((G.f & G_FLAG_USERPREF_NO_SAVE_ON_EXIT) == 0)) {
if (U.runtime.is_dirty) {
BKE_blendfile_userdef_write_all(NULL);
}
}
const char *imports[] = {"addon_utils", NULL};
BPY_run_string_eval(C, imports, "addon_utils.disable_all()");
BLI_timer_free();
WM_paneltype_clear();
BKE_addon_pref_type_free();
BKE_keyconfig_pref_type_free();
BKE_materials_exit();
wm_operatortype_free();
wm_surfaces_free();
wm_dropbox_free();
WM_menutype_free();
WM_uilisttype_free();
/* all non-screen and non-space stuff editors did, like editmode */
if (C) {
Main *bmain = CTX_data_main(C);
ED_editors_exit(bmain, true);
}
ED_undosys_type_free();
BKE_mball_cubeTable_free();
/* render code might still access databases */
RE_FreeAllRender();
RE_engines_exit();
ED_preview_free_dbase(); /* frees a Main dbase, before BKE_blender_free! */
if (CTX_wm_manager(C)) {
/* Before BKE_blender_free! - since the ListBases get freed there. */
wm_free_reports(C);
}
SEQ_clipboard_free(); /* sequencer.c */
BKE_tracking_clipboard_free();
BKE_mask_clipboard_free();
BKE_vfont_clipboard_free();
BKE_node_clipboard_free();
#ifdef WITH_COMPOSITOR
COM_deinitialize();
#endif
BKE_subdiv_exit();
BKE_image_free_unused_gpu_textures();
BKE_blender_free(); /* blender.c, does entire library and spacetypes */
// free_matcopybuf();
  if (bfd) {
    if (bfd->user) {
      MEM_freeN(bfd->user);
    }
    MEM_freeN(bfd);
  }
/* G_MAIN == bfd->main, it gets referenced in free_nodesystem so we can't have a dangling pointer
*/
G_MAIN = nullptr;
ANIM_fcurves_copybuf_free();
ANIM_drivers_copybuf_free();
ANIM_driver_vars_copybuf_free();
ANIM_fmodifiers_copybuf_free();
ED_gpencil_anim_copybuf_free();
ED_gpencil_strokes_copybuf_free();
/* free gizmo-maps after freeing blender,
* so no deleted data get accessed during cleaning up of areas. */
wm_gizmomaptypes_free();
wm_gizmogrouptype_free();
wm_gizmotype_free();
BLF_exit();
DRW_opengl_context_enable_ex(false);
GPU_pass_cache_free();
GPU_exit();
DRW_opengl_context_disable_ex(false);
DRW_opengl_context_destroy();
if (window) {
system->disposeWindow(window);
}
// Dispose the system
GHOST_ISystem::disposeSystem();
#ifdef WITH_INTERNATIONAL
BLT_lang_free();
#endif
ANIM_keyingset_infos_exit();
#ifdef WITH_PYTHON
BPY_python_end();
#endif
ED_file_exit(); /* for fsmenu */
BKE_icons_free(); // In UI_exit
BKE_blender_userdef_data_free(&U, false);
RNA_exit(); /* should be after BPY_python_end so struct python slots are cleared */
SYS_DeleteSystem(syshandle);
wm_ghost_exit();
GPU_backend_exit();
CTX_free(C);
GHOST_DisposeSystemPaths();
DNA_sdna_current_free();
BLI_threadapi_exit();
BLI_task_scheduler_exit();
/* No need to call this early, rather do it late so that other
* pieces of Blender using sound may exit cleanly, see also T50676. */
BKE_sound_exit();
BKE_appdir_exit();
CLG_exit();
BKE_blender_atexit();
int totblock = MEM_get_memory_blocks_in_use();
if (totblock != 0) {
CM_Error("totblock: " << totblock);
MEM_set_error_callback(mem_error_cb);
MEM_printmemlist();
}
wm_autosave_delete();
BKE_tempdir_session_purge();
#ifdef WIN32
while (argv_num) {
free(argv[--argv_num]);
}
free(argv);
argv = nullptr;
#endif
return error ? -1 : 0;
}
|
{"hexsha": "3273261da85a50254acb98f7866db633954a910e", "size": 56664, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "source/gameengine/GamePlayer/GPG_ghost.cpp", "max_stars_repo_name": "lordloki/upbge", "max_stars_repo_head_hexsha": "18d0f5419cc1338ecf739362afef56bd96b42cfb", "max_stars_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-01-11T10:02:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T10:02:21.000Z", "max_issues_repo_path": "source/gameengine/GamePlayer/GPG_ghost.cpp", "max_issues_repo_name": "lordloki/upbge", "max_issues_repo_head_hexsha": "18d0f5419cc1338ecf739362afef56bd96b42cfb", "max_issues_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/gameengine/GamePlayer/GPG_ghost.cpp", "max_forks_repo_name": "lordloki/upbge", "max_forks_repo_head_hexsha": "18d0f5419cc1338ecf739362afef56bd96b42cfb", "max_forks_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8158338012, "max_line_length": 105, "alphanum_fraction": 0.576697727, "num_tokens": 13508}
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.models.research.glow_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensor2tensor import problems
from tensor2tensor.data_generators import cifar # pylint: disable=unused-import
from tensor2tensor.models.research import glow
from tensor2tensor.utils import registry # pylint: disable=unused-import
import tensorflow as tf
MODES = tf.estimator.ModeKeys
class GlowModelTest(tf.test.TestCase):
def batch(self, one_shot_iterator, batch_size=16):
x_batch, y_batch = [], []
for _ in range(batch_size):
curr = one_shot_iterator.get_next()
x_batch.append(curr['inputs'])
y_batch.append(curr['targets'])
return tf.stack(x_batch), tf.stack(y_batch)
def test_glow(self):
with tf.Graph().as_default():
hparams = glow.glow_hparams()
model = glow.Glow(hparams, tf.estimator.ModeKeys.TRAIN)
cifar_problem = problems.problem('image_cifar10_plain_random_shift')
train_dataset = cifar_problem.dataset(MODES.TRAIN)
one_shot = train_dataset.make_one_shot_iterator()
x_batch, y_batch = self.batch(one_shot)
features = {'inputs': x_batch, 'targets': y_batch}
_, obj_dict = model.body(features)
objective = obj_dict['training']
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
obj_np = sess.run(objective)
mean_obj = np.mean(obj_np)
        # Check that one forward pass does not produce NaN/Inf, i.e.
        # initialization etc. works as expected.
is_undefined = np.isnan(mean_obj) or np.isinf(mean_obj)
self.assertTrue(not is_undefined)
def test_glow_inference(self):
hparams = glow.glow_hparams()
hparams.depth = 15
hparams.n_levels = 2
curr_dir = tempfile.mkdtemp()
# Training pipeline
with tf.Graph().as_default():
model = glow.Glow(hparams, tf.estimator.ModeKeys.TRAIN)
cifar_problem = problems.problem('image_cifar10_plain_random_shift')
train_dataset = cifar_problem.dataset(MODES.TRAIN)
one_shot = train_dataset.make_one_shot_iterator()
x_batch, y_batch = self.batch(one_shot)
features = {'inputs': x_batch, 'targets': y_batch}
model_path = os.path.join(curr_dir, 'model')
model(features)
with tf.Session() as session:
saver = tf.train.Saver()
session.run(tf.global_variables_initializer())
z = session.run([model.z])
mean_z = np.mean(z)
is_undefined = np.isnan(mean_z) or np.isinf(mean_z)
self.assertTrue(not is_undefined)
saver.save(session, model_path)
# Inference pipeline
with tf.Graph().as_default():
model = glow.Glow(hparams, tf.estimator.ModeKeys.PREDICT)
cifar_problem = problems.problem('image_cifar10_plain_random_shift')
test_dataset = cifar_problem.dataset(MODES.EVAL)
one_shot = test_dataset.make_one_shot_iterator()
x_batch, y_batch = self.batch(one_shot)
features = {'inputs': x_batch, 'targets': y_batch}
model_path = os.path.join(curr_dir, 'model')
predictions = model.infer(features)
with tf.Session() as session:
saver = tf.train.Saver()
saver.restore(session, model_path)
predictions_np = session.run(predictions)
self.assertTrue(np.all(predictions_np <= 255))
self.assertTrue(np.all(predictions_np >= 0))
if __name__ == '__main__':
tf.test.main()
|
{"hexsha": "43557cbf71be9235bc1de260c2690dbdf4b92631", "size": 4121, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensor2tensor/models/research/glow_test.py", "max_stars_repo_name": "gonglinyuan/t2tTest", "max_stars_repo_head_hexsha": "c100fa85d581922b212b45a9fdf4f07cd1752a56", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-14T16:56:54.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-17T23:16:21.000Z", "max_issues_repo_path": "tensor2tensor/models/research/glow_test.py", "max_issues_repo_name": "gonglinyuan/t2tTest", "max_issues_repo_head_hexsha": "c100fa85d581922b212b45a9fdf4f07cd1752a56", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensor2tensor/models/research/glow_test.py", "max_forks_repo_name": "gonglinyuan/t2tTest", "max_forks_repo_head_hexsha": "c100fa85d581922b212b45a9fdf4f07cd1752a56", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4636363636, "max_line_length": 80, "alphanum_fraction": 0.7078379034, "include": true, "reason": "import numpy", "num_tokens": 957}
|
'''
Takes a dot product in parallel.
Example usage:
$ mpirun -n 4 python.exe dot.py 1000
Assumes n is divisible by SIZE
command line arguments: n, the length of the vector to dot with itself
'''
from mpi4py import MPI
import numpy as np
from sys import argv
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.Get_size()
ROOT = 0
n = int(argv[1])
if RANK == ROOT:
x = np.linspace(0, 100, n)
y = np.linspace(20, 300, n)
else:
x, y = None, None
# Prepare variables
local_n = n // SIZE
if n % SIZE != 0:
    print("The number of processors must evenly divide n.")
    COMM.Abort()
local_x = np.zeros(local_n)
local_y = np.zeros(local_n)
COMM.Scatter(x, local_x)
COMM.Scatter(y, local_y)
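# Each process now owns one contiguous slice of x and y; the local partial
# dot products are summed onto the root process by the reduction below.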
local_dot_product = np.dot(local_x, local_y)
buf = np.array(local_dot_product)
result_buf = np.zeros(1) if RANK == ROOT else None
COMM.Reduce(buf, result_buf, MPI.SUM)
if RANK == ROOT:
    print("Parallel Dot Product: " + str(result_buf[0]))
    print("Serial Dot Product: " + str(np.dot(x, y)))
|
{"hexsha": "7113d034faa031e77fb11ef59fc57485032265db", "size": 1010, "ext": "py", "lang": "Python", "max_stars_repo_path": "Labs/MPICollectiveCommunication/dot.py", "max_stars_repo_name": "jessicaleete/numerical_computing", "max_stars_repo_head_hexsha": "cc71f51f35ca74d00e617af3d1a0223e19fb9a68", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2016-10-18T19:54:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-09T20:12:38.000Z", "max_issues_repo_path": "Labs/MPICollectiveCommunication/dot.py", "max_issues_repo_name": "jessicaleete/numerical_computing", "max_issues_repo_head_hexsha": "cc71f51f35ca74d00e617af3d1a0223e19fb9a68", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Labs/MPICollectiveCommunication/dot.py", "max_forks_repo_name": "jessicaleete/numerical_computing", "max_forks_repo_head_hexsha": "cc71f51f35ca74d00e617af3d1a0223e19fb9a68", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-05-14T16:07:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-20T09:05:06.000Z", "avg_line_length": 19.8039215686, "max_line_length": 70, "alphanum_fraction": 0.6831683168, "include": true, "reason": "import numpy", "num_tokens": 297}
|
\SetAPI{J-C}
\section{ambeth.merge.entityfactory.type}
\label{configuration:AmbethMergeEntityfactoryType}
\ClearAPI
Defines which IEntityFactory should be used. Has to be a fully qualified class name. If not specified a default IEntityFactory will be used. For more information see \refname{feature:EntityFactory}.
%% GENERATED USAGE REFERENCE - DO NOT EDIT
\begin{longtable}{ l l } \hline \textbf{Used in bean} & \textbf{Module} \\
\endhead
\hline
\type{com.koch.ambeth.merge.ioc.MergeModule} &
\prettyref{module:Merge} \\
\hline
\end{longtable}
%% GENERATED USAGE REFERENCE END
\type{com.koch.ambeth.merge.config.MergeConfigurationConstants.EntityFactoryType}
\begin{lstlisting}[style=Props,caption={Usage example for \textit{ambeth.merge.entityfactory.type}}]
ambeth.merge.entityfactory.type=
\end{lstlisting}
|
{"hexsha": "1ae5369dc50e9d06d55c7e3e1c20c2fbfa342482", "size": 905, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/reference-manual/tex/configuration/AmbethMergeEntityfactoryType.tex", "max_stars_repo_name": "Dennis-Koch/ambeth", "max_stars_repo_head_hexsha": "8552b210b8b37d3d8f66bdac2e094bf23c8b5fda", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/reference-manual/tex/configuration/AmbethMergeEntityfactoryType.tex", "max_issues_repo_name": "Dennis-Koch/ambeth", "max_issues_repo_head_hexsha": "8552b210b8b37d3d8f66bdac2e094bf23c8b5fda", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-04-24T06:55:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-21T23:15:36.000Z", "max_forks_repo_path": "doc/reference-manual/tex/configuration/AmbethMergeEntityfactoryType.tex", "max_forks_repo_name": "Dennis-Koch/ambeth", "max_forks_repo_head_hexsha": "8552b210b8b37d3d8f66bdac2e094bf23c8b5fda", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-10-28T14:05:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-08T12:54:51.000Z", "avg_line_length": 43.0952380952, "max_line_length": 198, "alphanum_fraction": 0.7812154696, "num_tokens": 261}
|
(* Title: HOL/Auth/n_germanSymIndex_lemma_inv__29_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSymIndex Protocol Case Study*}
theory n_germanSymIndex_lemma_inv__29_on_rules imports n_germanSymIndex_lemma_on_inv__29
begin
section{*All lemmas on causal relation between inv__29*}
lemma lemma_inv__29_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__29 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqSVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__0Vsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__1Vsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqEVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__29) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__29) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
{"author": "lyj238Gmail", "repo": "newParaVerifier", "sha": "5c2d49bf8e6c46c60efa53c98b0ba5c577d59618", "save_path": "github-repos/isabelle/lyj238Gmail-newParaVerifier", "path": "github-repos/isabelle/lyj238Gmail-newParaVerifier/newParaVerifier-5c2d49bf8e6c46c60efa53c98b0ba5c577d59618/examples/n_germanSymIndex/n_germanSymIndex_lemma_inv__29_on_rules.thy"}
|
import logging
from pathlib import Path
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import yaml
def parse_config(config_file):
with open(config_file, "rb") as f:
config = yaml.safe_load(f)
return config
def set_logger(log_path):
"""
Read more about logging: https://www.machinelearningplus.com/python/python-logging-guide/
Args:
log_path [str]: eg: "../log/train.log"
"""
log_path = Path(log_path)
log_path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(log_path, mode="w")
formatter = logging.Formatter(
"%(asctime)s : %(levelname)s : %(name)s : %(message)s")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info(f"Finished logger configuration!")
return logger
def time_train(x):
"""
It flags if the train is delayed, too delayed, or on time.
"""
if x <= 60:
return '0_on_time'
    if 60 < x <= 180:
return '1_late'
return '2_too_late'
# Adding a custom metric
def calculate_rMSE(y, y_pred):
"""
Calculating the rMSE in percentage
"""
return mean_squared_error(y_pred, y) / np.var(y)
|
{"hexsha": "568f6261ae524a073cbc5dad8e5c3a683f97d906", "size": 1310, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/utility.py", "max_stars_repo_name": "jdpinedaj/CFLDelays", "max_stars_repo_head_hexsha": "27c2f3d0cf39654b21dc600c3cc3f9b6b8aaeb99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/utility.py", "max_issues_repo_name": "jdpinedaj/CFLDelays", "max_issues_repo_head_hexsha": "27c2f3d0cf39654b21dc600c3cc3f9b6b8aaeb99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/utility.py", "max_forks_repo_name": "jdpinedaj/CFLDelays", "max_forks_repo_head_hexsha": "27c2f3d0cf39654b21dc600c3cc3f9b6b8aaeb99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1923076923, "max_line_length": 93, "alphanum_fraction": 0.6702290076, "include": true, "reason": "import numpy", "num_tokens": 316}
|
import numpy as np
import random
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.distributions import Categorical
import dgl
from enviroment.ChemEnv import ChemEnv
from enviroment.Utils import selfLoop
from models import init_weights_recursive, BaseLine, CriticSqueeze
# Global compute device; collected tensors are moved here before training.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class PPOTrainer:
def __init__(self,
env: ChemEnv,
batch_size: int,
timesteps_per_batch: int,
clip: float,
a_lr: float,
c_lr: float,
n_updates_per_iteration: int,
max_timesteps_per_episode: int,
gamma: float,
actor: nn.Module,
writer):
"""PPO Initialization
Args:
            env (ChemEnv): environment that adheres to the OpenAI gym interface
            batch_size (int): mini-batch size; not in the original paper, but batching improved performance
timesteps_per_batch (int): how many steps to take before we train on the new data
clip (float): how much to clip gradients
a_lr (float): actor learning rate
c_lr (float): critic learning rate
n_updates_per_iteration (int): how many times we train through the experience before generating new ones
max_timesteps_per_episode (int): the largest number of steps we can take in an episode
gamma (float): discount factor
actor (nn.Module): actor module
writer ([type]): logger for tensorboard stuff
"""
print("correcto")
self.writer = writer
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(batch_size, timesteps_per_batch, clip, a_lr,
c_lr, n_updates_per_iteration, max_timesteps_per_episode, gamma)
# Extract environment information
self.env = env
input_dim = env.num_node_feats
# Initialize actor and critic networks
self.critic = CriticSqueeze(input_dim, 300)
self.actor = actor
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(
), lr=self.a_lr, eps=1e-5, weight_decay=.001)
self.critic_optim = Adam(self.critic.parameters(
), lr=self.c_lr, eps=1e-5, weight_decay=.001)
self.actor.apply(init_weights_recursive)
self.critic.apply(init_weights_recursive)
self.batch_iter = 0
def to_device(self):
"""put actor and critic onto gpu
"""
self.actor.cuda()
self.critic.cuda()
def assignActor(self, new_actor):
self.actor = new_actor
def _init_hyperparameters(self, batch_size, timesteps_per_batch, clip, a_lr, c_lr, n_updates_per_iteration, max_timesteps_per_episode, gamma):
self.batch_size = batch_size
self.timesteps_per_batch = timesteps_per_batch
self.clip = clip
self.a_lr = a_lr
self.max_timesteps_per_episode = max_timesteps_per_episode
self.c_lr = c_lr
self.n_updates_per_iteration = n_updates_per_iteration
self.gamma = gamma
def learn(self, total_timesteps):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
total_timesteps - the total number of timesteps to train for
Return:
None
"""
t_so_far = 0
# ALG STEP 2
while t_so_far < total_timesteps:
            print(f"timesteps so far: {t_so_far}")
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout()
            print(f"timesteps collected this batch: {np.sum(batch_lens)}")
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts, batch_rtgs)
A_k = batch_rtgs.to(device) - V.detach()
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
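            # Normalizing advantages to zero mean / unit std reduces gradient
            # variance; the 1e-10 guards against division by zero.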
# This is the loop where we update our network for some n times
train_data_tuple = []
for i in range(len(batch_obs)):
single_tuple = (
batch_obs[i], batch_acts[i], batch_rtgs[i], batch_log_probs[i], A_k[i])
train_data_tuple.append(single_tuple)
random.shuffle(train_data_tuple)
batchlet_obs, batchlet_acts, batchlet_rtgs, batchlet_log_probs, A_k_let = zip(
*train_data_tuple)
failed_outer = False
i = 0
# ALG STEP 6 & 7
for _ in range(self.n_updates_per_iteration):
random.shuffle(train_data_tuple)
i = 0
batchlet_obs, batchlet_acts, batchlet_rtgs, batchlet_log_probs, A_k_let = zip(
*train_data_tuple)
failed_outer = False
while i < len(batch_obs)-65:
# get batches
batchlet_obs_slice = batchlet_obs[i: (i+self.batch_size)]
batchlet_acts_slice = torch.stack(
batchlet_acts[i: (i+self.batch_size)], 0).to(device)
batchlet_rtgs_slice = torch.stack(
batchlet_rtgs[i: (i+self.batch_size)], 0).to(device)
batchlet_log_probs_slice = torch.stack(
batchlet_log_probs[i: (i+self.batch_size)], 0).to(device)
batchlet_A_k_slice = torch.stack(
A_k_let[i: (i+self.batch_size)], 0).to(device)
# failed if KL is too high
failed = self.train_on_batch(batchlet_obs_slice, batchlet_acts_slice, batchlet_rtgs_slice,
batchlet_log_probs_slice, batchlet_A_k_slice)
if failed:
failed_outer = True
break
i += self.batch_size
if failed_outer:
break
def train_on_batch(self, batch_obs, batch_acts, batch_rtgs, batch_log_probs, A_k):
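        """Run a single PPO update on one mini-batch.

        Returns True ("failed") when the approximate KL divergence between the
        old and current policy exceeds the early-stopping threshold, telling
        the caller to abandon the remaining update epochs for this rollout.
        """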
V, curr_log_probs = self.evaluate(batch_obs, batch_acts, batch_rtgs)
failed = False
kl_approx = torch.mean(batch_log_probs - curr_log_probs)
self.writer.add_scalar('Approximate KL', kl_approx, self.batch_iter)
if kl_approx > .06:
failed = True
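        # Importance ratio r_t = pi_new(a|s) / pi_old(a|s), computed in log
        # space for numerical stability.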
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
surr1 = ratios * A_k
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
actor_loss = (-torch.min(surr1, surr2)).mean()
critic_loss = nn.MSELoss()(V, batch_rtgs)
self.writer.add_scalar('Actor Loss', actor_loss, self.batch_iter)
        self.writer.add_scalar('Critic Loss', critic_loss, self.batch_iter)
self.batch_iter += 1
#
self.critic_optim.zero_grad()
critic_loss.backward(retain_graph=True)
torch.nn.utils.clip_grad_norm_(self.critic.parameters(), .5)
self.critic_optim.step()
self.actor_optim.zero_grad()
actor_loss.backward()
torch.nn.utils.clip_grad_norm_(self.actor.parameters(), .5)
self.actor_optim.step()
return failed
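    # Worked example of the clipped surrogate above (illustrative numbers only):
    # with clip = 0.2, an advantage A = 1.0 and a probability ratio of 1.5,
    # surr1 = 1.5 * 1.0 = 1.5 while surr2 = clamp(1.5, 0.8, 1.2) * 1.0 = 1.2,
    # so the objective takes min(1.5, 1.2) = 1.2 and the update is capped.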
def generate_graphs(self, num_graphs):
graph_list = []
for i in range(num_graphs):
obs = self.env.reset()
for ep_t in range(self.max_timesteps_per_episode):
action, log_prob = self.get_action(obs)
obs, rew, done, reward_dict = self.env.step(action[0])
if done:
graph_list.append(selfLoop(self.env.stateSpaceGraph))
break
return (graph_list)
def inference(self, verbose=False):
reward = 0
obs = self.env.reset()
for ep_t in range(self.max_timesteps_per_episode):
action, log_prob = self.get_action(obs, True)
print(action)
obs, rew, done, reward_dict = self.env.step(action[0], verbose)
reward += rew
if done:
break
return self.env.StateSpace
def rollout(self):
"""
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
        # Keep simulating until we've run at least the specified number of timesteps per batch
self.batch_reward_plot = 0
'''Plotting Variables'''
total_reward = 0
total_disc_reward = 0
total_hist_reward = 0
num_episodes = 0
while t < self.timesteps_per_batch:
num_episodes += 1
ep_rews = [] # rewards collected per episode
obs = self.env.reset()
done = False
reward_for_episode = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
# If render is specified, render the environment
# self.env.render()
final = False
if ep_t == self.max_timesteps_per_episode-1: # final step of the generation
final = True
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append((obs[0].clone(), obs[1], obs[2]))
# Calculate action and make a step in the env.
# Note that rew is short for reward.
action, log_prob = self.get_action(obs)
obs, rew, done, reward_dict = self.env.step(
action[0], final_step=final)
reward_for_episode += rew
total_disc_reward += reward_dict['model_reward']
total_hist_reward += reward_dict['property_reward']
total_reward += rew # track total rewards to get reward per step
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
# If the environment tells us the episode is terminated, break
if done:
break
self.batch_reward_plot += reward_for_episode
# Track episodic lengths and rewards
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
# Reshape data as tensors in the shape specified in function description, before returning
batch_acts = torch.tensor(batch_acts, dtype=torch.float)
batch_log_probs = torch.tensor(
batch_log_probs, dtype=torch.float).flatten()
# ALG STEP 4
batch_rtgs = self.compute_rtgs(batch_rews)
# Log the episodic returns and episodic lengths in this batch.
print("roll out")
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num episodes per batch, num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
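    # Worked example (illustrative): for a single episode with rewards
    # [1, 0, 2] and gamma = 0.5, iterating in reverse gives rewards-to-go of
    # [1 + 0.5 * (0 + 0.5 * 2), 0 + 0.5 * 2, 2] = [1.5, 1.0, 2.0].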
def get_action(self, obs, mask_on=False):
"""
        Queries an action from the actor network; should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
#test_out = self.Batch_norm_edge1([obs[0]],[obs[1]],[obs[2]])[0]
test_spin = self.actor(dgl.add_self_loop(dgl.remove_self_loop(obs[0])), torch.cat(
[obs[1]], 0).to(device), torch.cat([obs[2]], dim=0), mask=mask_on)
test_dist = Categorical(test_spin)
test_action = test_dist.sample()
test_log_prob = test_dist.log_prob(test_action)
return test_action.detach().cpu().numpy(), test_log_prob.detach()
def evaluate(self, batch_obs, batch_acts, batch_rtgs):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
batch_rtgs - the rewards-to-go calculated in the most recently collected
batch as a tensor. Shape: (number of timesteps in batch)
"""
values = []
log_prob_list = []
batch_form_obs = [[], [], []]
for i in range(len(batch_obs)):
batch_form_obs[0].append(selfLoop(batch_obs[i][0]))
batch_form_obs[1].append(batch_obs[i][1].to(device))
batch_form_obs[2].append(batch_obs[i][2].to(device))
graph_batch = dgl.batch(batch_form_obs[0])
CC = self.critic(graph_batch.to(device), torch.cat(batch_form_obs[1], 0).to(
device), torch.cat(batch_form_obs[2], 0).to(device))
A_new = self.actor(dgl.batch(batch_form_obs[0]), torch.cat(
batch_form_obs[1], 0).to(device), torch.cat(batch_form_obs[2], 0).to(device))
new_dist = Categorical(A_new)
new_log_prob = new_dist.log_prob(batch_acts.to(device).squeeze())
return CC.squeeze(), new_log_prob
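# A minimal, hypothetical usage sketch (the enclosing class name is assumed to
# be PPO after this file's name, and keyword names are inferred from
# _init_hyperparameters; the real __init__ signature is defined above):
#
#   model = PPO(env=env, actor=actor, batch_size=64, timesteps_per_batch=2048,
#               clip=0.2, a_lr=3e-4, c_lr=1e-3, n_updates_per_iteration=5,
#               max_timesteps_per_episode=200, gamma=0.99)
#   model.to_device()  # only if a GPU is available
#   model.learn(total_timesteps=100_000)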
|
{"hexsha": "48f97a51fa31a3115990e84e7db9dfee12ad76ba", "size": 15655, "ext": "py", "lang": "Python", "max_stars_repo_path": "CLEAN/PPO.py", "max_stars_repo_name": "tsteternlieb/DrugDesignThesis", "max_stars_repo_head_hexsha": "2ab00826dbfd2567db5a9054731bd7d49ff12126", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-29T13:14:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T20:03:09.000Z", "max_issues_repo_path": "CLEAN/PPO.py", "max_issues_repo_name": "tsteternlieb/DrugDesignThesis", "max_issues_repo_head_hexsha": "2ab00826dbfd2567db5a9054731bd7d49ff12126", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-02-04T00:39:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T18:37:09.000Z", "max_forks_repo_path": "CLEAN/PPO.py", "max_forks_repo_name": "tsteternlieb/DrugDesignThesis", "max_forks_repo_head_hexsha": "2ab00826dbfd2567db5a9054731bd7d49ff12126", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1852731591, "max_line_length": 146, "alphanum_fraction": 0.5946981795, "include": true, "reason": "import numpy", "num_tokens": 3369}
|
# This Python 3 environment comes with many helpful analytics libraries installed.
# It is defined by the Kaggle/Docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load.
import os
# for dirname, _, filenames in os.walk('./1024data'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# "Save & Run All"을 사용하여 버전을 만들 때 출력으로 보존되는 현재 디렉터리(/kaggle/working/)에 최대 5GB까지 쓸 수 있습니다.
# /kaggle/temp/에 임시 파일을 쓸 수도 있지만 현재 세션 외부에 저장되지는 않습니다.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from keras.models import load_model
# Input data files are available in the read-only ".../input/" directory.
# For example, pressing Run or Shift+Enter will list all files under the input directory.
'''
smoking 1996
nonsmok 1279
len(trainX) 2603
len(testX) 651
===========
3254
len(imagePaths) 3254
'''
'''
# model 1 # model 2
precision recall f1-score support precision recall f1-score support
not_smoking 0.79 0.82 0.81 255 not_smoking 0.82 0.83 0.83 255
smoking 0.88 0.86 0.87 396 smoking 0.89 0.89 0.89 396
accuracy 0.85 651 accuracy 0.86 651
macro avg 0.84 0.84 0.84 651 macro avg 0.86 0.86 0.86 651
weighted avg 0.85 0.85 0.85 651 weighted avg 0.87 0.86 0.86 651
# model 3 # model 4
precision recall f1-score support precision recall f1-score support
not_smoking 0.83 0.84 0.84 439 not_smoking 0.82 0.85 0.83 227
smoking 0.88 0.87 0.87 571 smoking 0.87 0.85 0.86 281
accuracy 0.86 1010 accuracy 0.85 508
macro avg 0.85 0.85 0.85 1010 macro avg 0.85 0.85 0.85 508
weighted avg 0.86 0.86 0.86 1010 weighted avg 0.85 0.85 0.85 508
'''
def run():
dataset_path = './1024data/after'
model_store_dir = '2.model'
BS = 32
imagePaths = list(paths.list_images(dataset_path))
data = []
labels = []
for imagePath in imagePaths:
# label = non_smoking or smoking
label = imagePath.split(os.path.sep)[-2]
image = load_img(imagePath, target_size=(224, 224))
image = img_to_array(image)
image = preprocess_input(image)
data.append(image)
labels.append(label)
data = np.array(data, dtype="float32")
print(data.shape) # (3254, 224, 224, 3)
print(type(data))
model = load_model('./case3/1.model') #, custom_objects={"InstanceNormalization": InstanceNormalization}
predIdxs = model.predict(data, batch_size=BS)
    predIdxs = np.argmax(predIdxs, axis=1)  # row-wise > 3254, 224, 3
# model.summary()
for i in range(len(data)):
if i % 100 == 0:
print("labels: " + str(labels[i]) + " predict: " + str(predIdxs[i]))
# print(classification_report(testY.argmax(axis=1), predIdxs,target_names=lb.classes_))
#
# N = EPOCHS
#
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
# plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
# plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
# plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
# plt.title("Training Loss and Accuracy")
# plt.xlabel("Epoch #")
# plt.ylabel("Loss/Accuracy")
# plt.legend(loc="lower left")
# plt.savefig('ploy.jpg')
if __name__ == '__main__':
run()
|
{"hexsha": "f15d1f6b50b0d1f6253721858f716691e6323aac", "size": 4827, "ext": "py", "lang": "Python", "max_stars_repo_path": "Model/predict.py", "max_stars_repo_name": "Paransaik/Capstone", "max_stars_repo_head_hexsha": "d2721dd1cac9c4b0dbdf6cb577977166af2854bb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-01-08T12:05:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T14:44:24.000Z", "max_issues_repo_path": "Model/predict.py", "max_issues_repo_name": "Paransaik/Realtime-Smoking-Detection", "max_issues_repo_head_hexsha": "d2721dd1cac9c4b0dbdf6cb577977166af2854bb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Model/predict.py", "max_forks_repo_name": "Paransaik/Realtime-Smoking-Detection", "max_forks_repo_head_hexsha": "d2721dd1cac9c4b0dbdf6cb577977166af2854bb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.243902439, "max_line_length": 109, "alphanum_fraction": 0.5989227263, "include": true, "reason": "import numpy", "num_tokens": 1438}
|
import pandas as pd
import numpy as np
import math
import pdb
def average(series):
a1 = sum(series)
b1 = len(series)
c1 = a1/b1
"""
print(c1)
"""
return c1
"""
implements the average of a pandas series from scratch
suggested functions:
len(list)
sum(list)
you should get the same result as calling .mean() on your series
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.mean.html
    See numpy documentation for implementation details:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
"""
"""
things = [1,3,5,7,9]
average(things)
"""
def standard_deviation(series):
a1 = average(series)
sum1 = 0
for i in range(0,len(series)):
sum1 = sum1 + (series[i]-a1)**2
b1 = sum1 / (len(series)-1)
c1 = math.sqrt(b1)
"""
print(c1)
"""
return c1
"""
implements the sample standard deviation of a series from scratch
you may need a for loop and your average function
also the function math.sqrt
you should get the same result as calling .std() on your data
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.std.html
    See numpy documentation for implementation details:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html
"""
"""
things = [1,3,5,7,9]
standard_deviation(things)
print(np.std(things))
"""
def bubbleSort(arr):
n = len(arr)
for i in range(n):
for j in range(0, n-i-1):
if arr[j] > arr[j+1] :
arr[j], arr[j+1] = arr[j+1], arr[j]
return arr
def median(series):
"""
finds the median of the series from scratch
    you may need to sort your values and use
    the modulo operator
this number should be the same as calling .median() on your data
    See numpy documentation for implementation details:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.median.html
https://pandas.pydata.org/pandas-docs/version/0.23.0/generated/pandas.Series.median.html
"""
a1 = bubbleSort(series)
print(a1)
b1 = len(a1)
med = 0
    if b1 % 2 == 0:
        # average the two middle elements of the sorted list
        med = (a1[b1//2 - 1] + a1[b1//2]) / 2
    else:
        med = a1[b1//2]
return med
"""
things = [3,5,9,7,1]
median(things)
"""
|
{"hexsha": "34d39185b3b5b569853fdb849ed3b8d8d7409dec", "size": 2349, "ext": "py", "lang": "Python", "max_stars_repo_path": "eda_hw.py", "max_stars_repo_name": "fanyiwen9596/applied_ds", "max_stars_repo_head_hexsha": "c30a0193dd90640db10c4640d822d4caab86bf53", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-31T17:03:49.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-31T17:03:49.000Z", "max_issues_repo_path": "eda_hw.py", "max_issues_repo_name": "fanyiwen9596/applied_ds", "max_issues_repo_head_hexsha": "c30a0193dd90640db10c4640d822d4caab86bf53", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eda_hw.py", "max_forks_repo_name": "fanyiwen9596/applied_ds", "max_forks_repo_head_hexsha": "c30a0193dd90640db10c4640d822d4caab86bf53", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1, "max_line_length": 92, "alphanum_fraction": 0.6181353768, "include": true, "reason": "import numpy", "num_tokens": 655}
|
// AirMap Platform SDK
// Copyright © 2018 AirMap, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the License);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an AS IS BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <airmap/platform/linux/xdg.h>
#include <boost/algorithm/string.hpp>
#include <cstdlib>
#include <stdexcept>
namespace fs = boost::filesystem;
namespace xdg = airmap::platform::linux_::xdg;
namespace {
fs::path throw_if_not_absolute(const fs::path& p) {
if (p.has_root_directory())
return p;
throw std::runtime_error{"Directores MUST be absolute."};
}
namespace env {
std::string get(const std::string& key, const std::string& default_value) {
if (auto value = std::getenv(key.c_str()))
return value;
return default_value;
}
std::string get_or_throw(const std::string& key) {
if (auto value = std::getenv(key.c_str())) {
return value;
}
throw std::runtime_error{key + " not set in environment"};
}
constexpr const char* xdg_data_home{"XDG_DATA_HOME"};
constexpr const char* xdg_data_dirs{"XDG_DATA_DIRS"};
constexpr const char* xdg_config_home{"XDG_CONFIG_HOME"};
constexpr const char* xdg_config_dirs{"XDG_CONFIG_DIRS"};
constexpr const char* xdg_cache_home{"XDG_CACHE_HOME"};
constexpr const char* xdg_runtime_dir{"XDG_RUNTIME_DIR"};
} // namespace env
namespace impl {
class BaseDirSpecification : public xdg::BaseDirSpecification {
public:
static const BaseDirSpecification& instance() {
static const BaseDirSpecification spec;
return spec;
}
BaseDirSpecification() {
}
const xdg::Data& data() const override {
return data_;
}
const xdg::Config& config() const override {
return config_;
}
const xdg::Cache& cache() const override {
return cache_;
}
const xdg::Runtime& runtime() const override {
return runtime_;
}
private:
xdg::Data data_;
xdg::Config config_;
xdg::Cache cache_;
xdg::Runtime runtime_;
};
} // namespace impl
} // namespace
fs::path xdg::Data::home() const {
auto v = env::get(env::xdg_data_home, "");
if (v.empty())
return throw_if_not_absolute(fs::path{env::get_or_throw("HOME")} / ".local" / "share");
return throw_if_not_absolute(fs::path(v));
}
std::vector<fs::path> xdg::Data::dirs() const {
auto v = env::get(env::xdg_data_dirs, "");
if (v.empty())
return {fs::path{"/usr/local/share"}, fs::path{"/usr/share"}};
std::vector<std::string> tokens;
tokens = boost::split(tokens, v, boost::is_any_of(":"));
std::vector<fs::path> result;
for (const auto& token : tokens) {
result.push_back(throw_if_not_absolute(fs::path(token)));
}
return result;
}
fs::path xdg::Config::home() const {
auto v = env::get(env::xdg_config_home, "");
if (v.empty())
return throw_if_not_absolute(fs::path{env::get_or_throw("HOME")} / ".config");
return throw_if_not_absolute(fs::path(v));
}
std::vector<fs::path> xdg::Config::dirs() const {
auto v = env::get(env::xdg_config_dirs, "");
if (v.empty())
return {fs::path{"/etc/xdg"}};
std::vector<std::string> tokens;
tokens = boost::split(tokens, v, boost::is_any_of(":"));
std::vector<fs::path> result;
for (const auto& token : tokens) {
fs::path p(token);
result.push_back(throw_if_not_absolute(p));
}
return result;
}
fs::path xdg::Cache::home() const {
auto v = env::get(env::xdg_cache_home, "");
if (v.empty())
return throw_if_not_absolute(fs::path{env::get_or_throw("HOME")} / ".cache");
return throw_if_not_absolute(fs::path(v));
}
fs::path xdg::Runtime::dir() const {
  auto v = env::get(env::xdg_runtime_dir, "");
if (v.empty()) {
    // We do not fall back gracefully and instead throw, leaving it to calling
    // code to handle the case of a missing, safe, user-specific runtime directory.
throw std::runtime_error{"Runtime directory not set"};
}
return throw_if_not_absolute(fs::path(v));
}
std::shared_ptr<xdg::BaseDirSpecification> xdg::BaseDirSpecification::create() {
return std::make_shared<impl::BaseDirSpecification>();
}
const xdg::Data& xdg::data() {
return impl::BaseDirSpecification::instance().data();
}
const xdg::Config& xdg::config() {
return impl::BaseDirSpecification::instance().config();
}
const xdg::Cache& xdg::cache() {
return impl::BaseDirSpecification::instance().cache();
}
const xdg::Runtime& xdg::runtime() {
return impl::BaseDirSpecification::instance().runtime();
}
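// A minimal usage sketch (illustrative only; assumes the airmap headers above
// are on the include path and that HOME is set in the environment):
//
//   #include <airmap/platform/linux/xdg.h>
//   #include <iostream>
//
//   int main() {
//     namespace xdg = airmap::platform::linux_::xdg;
//     try {
//       std::cout << xdg::config().home().string() << std::endl;
//     } catch (const std::runtime_error& e) {
//       std::cerr << e.what() << std::endl;
//     }
//   }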
|
{"hexsha": "63558019c83b601d39bd1f550698b5f3b3f73c46", "size": 4817, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/airmap/platform/linux/xdg.cpp", "max_stars_repo_name": "YUNEEC/platform-sdk", "max_stars_repo_head_hexsha": "5670c5096087e836ecdbde38ae401cbfa7fa5fc7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2018-09-05T14:35:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-04T07:17:40.000Z", "max_issues_repo_path": "src/airmap/platform/linux/xdg.cpp", "max_issues_repo_name": "YUNEEC/platform-sdk", "max_issues_repo_head_hexsha": "5670c5096087e836ecdbde38ae401cbfa7fa5fc7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 31.0, "max_issues_repo_issues_event_min_datetime": "2018-09-06T13:04:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-15T22:46:50.000Z", "max_forks_repo_path": "src/airmap/platform/linux/xdg.cpp", "max_forks_repo_name": "YUNEEC/platform-sdk", "max_forks_repo_head_hexsha": "5670c5096087e836ecdbde38ae401cbfa7fa5fc7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12.0, "max_forks_repo_forks_event_min_datetime": "2018-11-09T10:09:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-08T05:28:12.000Z", "avg_line_length": 27.683908046, "max_line_length": 91, "alphanum_fraction": 0.6923396305, "num_tokens": 1272}
|
condition1 <= ( ( not ( ( OR_NORx and ( not '0' ) ) or ( ( not OR_NORx ) and '0' ) ) ) and ( not ( ( OR_NORy and ( not '0' ) ) or ( ( not OR_NORy ) and '0' ) ) ) );
OR_NORF1 <= ( condition1 and ( '0' ) ) or ( ( not condition1 ) and ( '1' ) );
condition2 <= ( ( not ( ( OR_NORx and ( not '0' ) ) or ( ( not OR_NORx ) and '0' ) ) ) and ( not ( ( OR_NORy and ( not '0' ) ) or ( ( not OR_NORy ) and '0' ) ) ) );
OR_NORF2 <= ( condition2 and ( '1' ) ) or ( ( not condition2 ) and ( '0' ) );
|
{"hexsha": "5d1fa92aa05f851f3a11b43c488a52bc00a3a636", "size": 487, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "tests/v/staff.out/synthesize/or_nor.f", "max_stars_repo_name": "Deshiuu/351lab-code-copy", "max_stars_repo_head_hexsha": "4d1fdf1f119c6798332c662dee99dd29d7a01520", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/v/staff.out/synthesize/or_nor.f", "max_issues_repo_name": "Deshiuu/351lab-code-copy", "max_issues_repo_head_hexsha": "4d1fdf1f119c6798332c662dee99dd29d7a01520", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/v/staff.out/synthesize/or_nor.f", "max_forks_repo_name": "Deshiuu/351lab-code-copy", "max_forks_repo_head_hexsha": "4d1fdf1f119c6798332c662dee99dd29d7a01520", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 81.1666666667, "max_line_length": 164, "alphanum_fraction": 0.4928131417, "num_tokens": 200}
|
from typing import Any, Optional, Tuple, Callable, overload
import numpy as np
from . import vdbfusion_pybind
class VDBVolume:
def __init__(
self,
voxel_size: float,
sdf_trunc: float,
space_carving: bool = False,
):
self._volume = vdbfusion_pybind._VDBVolume(
voxel_size=np.float32(voxel_size),
sdf_trunc=np.float32(sdf_trunc),
space_carving=space_carving,
)
# Passthrough all data members from the C++ API
self.voxel_size = self._volume._voxel_size
self.sdf_trunc = self._volume._sdf_trunc
self.space_carving = self._volume._space_carving
self.pyopenvdb_support_enabled = self._volume.PYOPENVDB_SUPPORT_ENABLED
if self.pyopenvdb_support_enabled:
self.tsdf = self._volume._tsdf
self.weights = self._volume._weights
def __repr__(self) -> str:
return (
f"VDBVolume with:\n"
f"voxel_size = {self.voxel_size}\n"
f"sdf_trunc = {self.sdf_trunc}\n"
f"space_carving = {self.space_carving}\n"
)
@overload
def integrate(
self,
points: np.ndarray,
extrinsic: np.ndarray,
weighting_function: Callable[[float], float],
) -> None:
...
@overload
def integrate(self, points: np.ndarray, extrinsic: np.ndarray, weight: float) -> None:
...
@overload
def integrate(self, points: np.ndarray, extrinsic: np.ndarray) -> None:
...
@overload
def integrate(self, grid, weighting_function: Callable[[float], float]) -> None:
...
@overload
def integrate(self, grid, weight: float) -> None:
...
@overload
def integrate(self, grid) -> None:
...
def integrate(
self,
points: Optional[np.ndarray] = None,
extrinsic: Optional[np.ndarray] = None,
grid: Optional[Any] = None,
weight: Optional[float] = None,
weighting_function: Optional[Callable[[float], float]] = None,
) -> None:
if grid is not None:
if not self.pyopenvdb_support_enabled:
raise NotImplementedError("Please compile with PYOPENVDB_SUPPORT_ENABLED")
if weighting_function is not None:
return self._volume._integrate(grid, weighting_function)
if weight is not None:
return self._volume._integrate(grid, weight)
return self._volume._integrate(grid)
else:
            assert isinstance(points, np.ndarray), "points must be np.ndarray(n, 3)"
assert points.dtype == np.float64, "points dtype must be np.float64"
            assert isinstance(extrinsic, np.ndarray), "origin/extrinsic must be np.ndarray"
assert extrinsic.dtype == np.float64, "origin/extrinsic dtype must be np.float64"
assert extrinsic.shape in [
(3,),
(3, 1),
(4, 4),
], "origin/extrinsic must be a (3,) array or a (4,4) matrix"
_points = vdbfusion_pybind._VectorEigen3d(points)
if weighting_function is not None:
return self._volume._integrate(_points, extrinsic, weighting_function)
if weight is not None:
return self._volume._integrate(_points, extrinsic, weight)
self._volume._integrate(_points, extrinsic)
@overload
def update_tsdf(
self, sdf: float, ijk: np.ndarray, weighting_function: Optional[Callable[[float], float]]
) -> None:
...
@overload
def update_tsdf(self, sdf: float, ijk: np.ndarray) -> None:
...
def update_tsdf(
self,
sdf: float,
ijk: np.ndarray,
weighting_function: Optional[Callable[[float], float]] = None,
) -> None:
if weighting_function is not None:
return self._volume._update_tsdf(sdf, ijk, weighting_function)
return self._volume._update_tsdf(sdf, ijk)
def extract_triangle_mesh(self, fill_holes: bool = True, min_weight: float = 0.0) -> Tuple:
"""Returns a the vertices and triangles representing the constructed the TriangleMesh.
If you can afford to use Open3D as dependency just pass the output of this function to the
TriangleMesh constructor from Open3d.
vertices, triangles = integrator.extract_triangle_mesh()
mesh = o3d.geometry.TriangleMesh(
o3d.utility.Vector3dVector(vertices),
o3d.utility.Vector3iVector(triangles),
)
"""
vertices, triangles = self._volume._extract_triangle_mesh(fill_holes, min_weight)
return np.asarray(vertices), np.asarray(triangles)
def extract_vdb_grids(self, out_file: str) -> None:
"""For now, write the internal map representation to a file.
Contains both D(x) and W(x) grids.
"""
self._volume._extract_vdb_grids(out_file)
def prune(self, min_weight: float):
"""Use the W(x) weights grid to cleanup the generated signed distance field according to a
minimum weight threshold.
This function is ideal to cleanup the TSDF grid:D(x) before exporting it.
"""
return self._volume._prune(min_weight)
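# A minimal usage sketch (illustrative; `scan` is assumed to be an (n, 3)
# float64 numpy array of points and `pose` a (4, 4) float64 extrinsic matrix):
#
#   volume = VDBVolume(voxel_size=0.1, sdf_trunc=0.3, space_carving=False)
#   volume.integrate(points=scan, extrinsic=pose)
#   vertices, triangles = volume.extract_triangle_mesh(min_weight=5.0)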
|
{"hexsha": "c37957af691b8102d512bf59d9dae42915f7f2c6", "size": 5289, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/vdbfusion/pybind/vdb_volume.py", "max_stars_repo_name": "saurabh1002/vdbfusion", "max_stars_repo_head_hexsha": "e5c010931ea08eeb852854092057cf65f0f8bc7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 119, "max_stars_repo_stars_event_min_datetime": "2022-02-08T15:25:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T12:16:35.000Z", "max_issues_repo_path": "src/vdbfusion/pybind/vdb_volume.py", "max_issues_repo_name": "saurabh1002/vdbfusion", "max_issues_repo_head_hexsha": "e5c010931ea08eeb852854092057cf65f0f8bc7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2022-02-09T07:54:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T03:12:47.000Z", "max_forks_repo_path": "src/vdbfusion/pybind/vdb_volume.py", "max_forks_repo_name": "saurabh1002/vdbfusion", "max_forks_repo_head_hexsha": "e5c010931ea08eeb852854092057cf65f0f8bc7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2022-02-08T15:33:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T02:47:27.000Z", "avg_line_length": 35.4966442953, "max_line_length": 98, "alphanum_fraction": 0.6182643222, "include": true, "reason": "import numpy", "num_tokens": 1238}
|
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import sys
from torch.autograd import Variable
import math
import torch.nn.functional as F
from torchsummary import summary
POOLSIZE = 2
DROPOUT_RATE = .25
def init_weights(m):
if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def initialize(m):
if isinstance(m, (nn.Conv1d)):
nn.init.kaiming_uniform_(m.weight, mode="fan_in", nonlinearity="relu")
class Deconv(nn.Module):
def __init__(self, in_chan, out_chan, kernel, stride, padding):
super(Deconv, self).__init__()
self.layers = []
self.layers.append(nn.Conv1d(in_chan, out_chan, kernel_size=kernel, stride=1, padding=padding))
self.layers.append(nn.BatchNorm1d(out_chan))
self.layers.append(nn.LeakyReLU())
self.layers.append(nn.Dropout(.2))
self.layers.append(nn.Conv1d(out_chan, out_chan, kernel_size=kernel, stride=stride, padding=padding))
self.layers.append(nn.BatchNorm1d(out_chan))
self.layers.append(nn.LeakyReLU())
self.layers.append(nn.Dropout(.2))
self.layers = nn.Sequential(*self.layers)
self.layers.apply(initialize)
def forward(self, x):
out = self.layers(x)
return out
class SingleDeconv(nn.Module):
def __init__(self, in_chan, out_chan, kernel, stride, padding):
super(SingleDeconv, self).__init__()
self.layers = []
self.layers.append(nn.Conv1d(in_chan, out_chan, kernel_size=kernel, stride=1, padding=padding))
self.layers.append(nn.BatchNorm1d(out_chan))
self.layers.append(nn.LeakyReLU())
self.layers.append(nn.Dropout(.2))
self.layers = nn.Sequential(*self.layers)
self.layers.apply(initialize)
def forward(self, x):
out = self.layers(x)
return out
class SeqUNetIshCorrect(nn.Module):
def __init__(self, in_channels=6, n_classes=1):
super(SeqUNetIshCorrect, self).__init__()
padding = 0
stride = 2
self.conv1 = Deconv(1 , int(in_channels), kernel=3, stride=stride, padding=padding)
self.conv2 = Deconv(int(in_channels), in_channels*2, kernel=3, stride=stride, padding=padding)
self.conv3 = Deconv(in_channels*2, in_channels*4, kernel=3, stride=stride, padding=padding)
self.conv4 = Deconv(in_channels*4, in_channels*8, kernel=3, stride=stride, padding=padding)
self.conv5 = Deconv(in_channels*8, in_channels*16, kernel=3, stride=stride, padding=padding)
self.transposedConv6 = nn.ConvTranspose1d(in_channels*16, in_channels*8, kernel_size=stride, stride=stride, padding=padding)
self.transposedConv7 = nn.ConvTranspose1d(in_channels*8, in_channels*4, kernel_size=stride, stride=stride, padding=padding)
self.transposedConv8 = nn.ConvTranspose1d(in_channels*4, in_channels*2, kernel_size=stride, stride=stride, padding=padding)
self.transposedConv9 = nn.ConvTranspose1d(in_channels*2, in_channels*1, kernel_size=stride, stride=stride, padding=padding)
self.conv6 = SingleDeconv(in_channels*16, in_channels*8, kernel=3, stride=1, padding=padding)
self.conv7 = SingleDeconv(in_channels*8, in_channels*4, kernel=3, stride=1, padding=padding) # 8 from trans conv and 4 from same res
self.conv8 = SingleDeconv(in_channels*4, in_channels*2, kernel=3, stride=1, padding=padding) # x from trans conv and 2 from same res
        # uppermost of second U part
self.conv9 = Deconv(int(in_channels*2), in_channels*1, kernel=3, stride=stride, padding=padding) # x from trans conv and 1 from same res
# go down again
self.convDown1 = Deconv(int(in_channels*3), in_channels*2, kernel=3, stride=stride, padding=padding)
self.convDown2 = Deconv(in_channels*6, in_channels*4, kernel=3, stride=stride, padding=padding)
self.convDown3 = Deconv(in_channels*12, in_channels*8, kernel=3, stride=stride, padding=padding)
self.convDown4 = Deconv(in_channels*24, in_channels*16, kernel=3, stride=stride, padding=padding)
# tail part
self.convDown5 = Deconv(in_channels*16, in_channels*32, kernel=3, stride=stride, padding=padding)
self.convDown6 = Deconv(in_channels*32, in_channels*32, kernel=3, stride=stride, padding=padding)
self.convDown7 = Deconv(in_channels*32, in_channels*32, kernel=3, stride=stride, padding=padding)
self.convDown8 = Deconv(in_channels*32, in_channels*32, kernel=3, stride=stride, padding=padding)
self.convDown9 = Deconv(in_channels*32, in_channels*32, kernel=3, stride=stride, padding=padding)
self.convDown10 = Deconv(in_channels*32, 512, kernel=3, stride=stride, padding=padding)
self.output_avg = nn.AvgPool1d(11)
self.fc = nn.Linear(512, n_classes)
        torch.nn.init.xavier_uniform_(self.fc.weight)
def forward(self, x):
c1 = self.conv1(x)
c2 = self.conv2(c1)
c3 = self.conv3(c2)
c4 = self.conv4(c3)
c5 = self.conv5(c4)
# expansive
u6 = self.transposedConv6(c5)
u6 = torch.cat((u6, c4[:,:,2:-1]), axis=1) # sum to 10
c6 = self.conv6(u6)
u7 = self.transposedConv7(c6)
u7 = torch.cat((u7, c3[:,:,7:-7]), axis=1) # sum to 10
c7 = self.conv7(u7)
u8 = self.transposedConv8(c7)
u8 = torch.cat((u8, c2[:,:,18:-18]), axis=1) # sum to 10
c8 = self.conv8(u8)
u9 = self.transposedConv9(c8)
u9 = torch.cat((u9, c1[:,:,40:-39]), axis=1) # sum to 10
c9 = self.conv9(u9)
# and way down we go
newthrough1 = torch.cat((c9, c8[:,:,1:-1]), axis=1) # sum to 10
c10 = self.convDown1(newthrough1)
newthrough2 = torch.cat((c10, c7[:,:,2:-2]), axis=1) # sum to 10
c11 = self.convDown2(newthrough2)
newthrough3 = torch.cat((c11, c6[:,:,3:-2]), axis=1) # sum to 10
c12 = self.convDown3(newthrough3)
newthrough4 = torch.cat((c12, c5[:,:,3:-2]), axis=1) # sum to 10
c13 = self.convDown4(newthrough4)
# standalone three layer deeper than rest of network
c14 = self.convDown5(c13)
c15 = self.convDown6(c14)
c16 = self.convDown7(c15)
c17 = self.convDown8(c16)
c18 = self.convDown9(c17)
c19 = self.convDown10(c18)
output = self.output_avg(c19)
output = self.fc(output.permute(0,2,1))
return output.view(output.shape[0],-1)
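# Illustrative instantiation sketch (the exact 1D input length this
# architecture accepts depends on the hard-coded crop offsets in forward()
# and is not verified here):
#
#   model = SeqUNetIshCorrect(in_channels=6, n_classes=1)
#   # logits = model(x)  # x: (batch, 1, sequence_length)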
|
{"hexsha": "e5b3cbda5517a748180c499d890b0814535723b2", "size": 6641, "ext": "py", "lang": "Python", "max_stars_repo_path": "clmr/models/preliminary_models/SeqUNetIshCorrect.py", "max_stars_repo_name": "Marcel-Velez/CLMR", "max_stars_repo_head_hexsha": "730bd9078756650a53b4c6438b29e5aeb2c15134", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "clmr/models/preliminary_models/SeqUNetIshCorrect.py", "max_issues_repo_name": "Marcel-Velez/CLMR", "max_issues_repo_head_hexsha": "730bd9078756650a53b4c6438b29e5aeb2c15134", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clmr/models/preliminary_models/SeqUNetIshCorrect.py", "max_forks_repo_name": "Marcel-Velez/CLMR", "max_forks_repo_head_hexsha": "730bd9078756650a53b4c6438b29e5aeb2c15134", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3244680851, "max_line_length": 144, "alphanum_fraction": 0.6488480651, "include": true, "reason": "import numpy", "num_tokens": 1856}
|
"""
Module Basis3DHex
Includes DG core functions.
"""
module Basis3DHex
export vandermonde_3D, grad_vandermonde_3D
export nodes_3D, equi_nodes_3D, quad_nodes_3D
using Basis1D
using CommonUtils
using LinearAlgebra
"""
vandermonde_2D(N, r)
Initialize the 2D Vandermonde matrix of order N "Legendre" polynomials at
nodes (r,s)
# Examples
```jldoctest
"""
function vandermonde_3D(N, r, s, t)
Np = convert(Int,(N+1)^3)
sk = 1
V = zeros(length(r), Np);
for i=0:N
for j=0:N
for k=0:N
V[:,sk] = jacobiP(r, 0, 0, i).*jacobiP(s, 0, 0, j).*jacobiP(t, 0, 0, k)
sk += 1
end
end
end
return V
end
"""
grad_vandermonde_3D(N, r, s, t)
# Examples
```jldoctest
"""
function grad_vandermonde_3D(N, r, s, t)
Np = convert(Int,(N+1)^3)
sk = 1
Vr = zeros(length(r), Np);
Vs = zeros(length(r), Np);
Vt = zeros(length(r), Np);
for i=0:N
for j=0:N
for k=0:N
Vr[:,sk] = grad_jacobiP(r, 0, 0, i).*jacobiP(s, 0, 0, j).*jacobiP(t,0,0,k)
Vs[:,sk] = jacobiP(r, 0, 0, i).*grad_jacobiP(s, 0, 0, j).*jacobiP(t,0,0,k)
Vt[:,sk] = jacobiP(r, 0, 0, i).*jacobiP(s, 0, 0, j).*grad_jacobiP(t,0,0,k)
sk += 1
end
end
end
return Vr, Vs, Vt
end
# ===================================================
"""
nodes_3D(N)
# Examples
```jldoctest
"""
function nodes_3D(N)
r1D,w1D = gauss_lobatto_quad(0,0,N)
return vec.(meshgrid(r1D,r1D,r1D))
end
"""
equi_nodes_3D(N)
Compute optimized interpolation nodes using blend & warp method on equilateral
triangles for polynomial of order N, with Np points
# Examples
```jldoctest
"""
function equi_nodes_3D(N)
r1D = LinRange(-1,1,N+1)
return vec.(meshgrid(r1D,r1D,r1D))
end
"""
quad_nodes_3D(N)
# Examples
```jldoctest
"""
function quad_nodes_3D(N)
r1D,w1D = gauss_quad(0,0,N)
r,s,t = vec.(meshgrid(r1D,r1D,r1D))
wr,ws,wt = vec.(meshgrid(w1D,w1D,w1D))
w = @. wr*ws*wt
return r,s,t,w
end
end
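# Illustrative usage sketch (assumes Basis1D and CommonUtils are available):
#   r, s, t, w = quad_nodes_3D(3)           # tensor-product Gauss nodes/weights
#   V = vandermonde_3D(3, r, s, t)          # size (length(r), (3+1)^3)
#   Vr, Vs, Vt = grad_vandermonde_3D(3, r, s, t)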
|
{"hexsha": "29f7127b8aaeb08e77a389b182a1b19805251cd4", "size": 2096, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Basis3DHex.jl", "max_stars_repo_name": "jlchan/ESDG-CNS", "max_stars_repo_head_hexsha": "a1ed7ef8e9c4270692ab76e938d0f5b0b44d5298", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-11-09T20:59:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T16:48:18.000Z", "max_issues_repo_path": "src/Basis3DHex.jl", "max_issues_repo_name": "jlchan/ESDG-CNS", "max_issues_repo_head_hexsha": "a1ed7ef8e9c4270692ab76e938d0f5b0b44d5298", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Basis3DHex.jl", "max_forks_repo_name": "jlchan/ESDG-CNS", "max_forks_repo_head_hexsha": "a1ed7ef8e9c4270692ab76e938d0f5b0b44d5298", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-11T04:15:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T23:57:17.000Z", "avg_line_length": 18.3859649123, "max_line_length": 90, "alphanum_fraction": 0.5596374046, "num_tokens": 788}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This software is under a BSD license. See LICENSE.txt for details.
from datatank_py.DTStructuredGrid2D import DTStructuredGrid2D, _squeeze2d
import numpy as np
class DTStructuredMesh2D(object):
"""2D structured mesh object.
This class corresponds to DataTank's DTStructuredMesh2D.
"""
dt_type = ("2D Structured Mesh",)
"""Type strings allowed by DataTank"""
def __init__(self, values, grid=None):
"""
:param values: 2D array of values
:param grid: DTStructuredGrid2D object (defaults to unit grid) or the name of a previously saved grid
Note that the values array must be ordered as (y, x) for compatibility
with the grid and DataTank.
"""
super(DTStructuredMesh2D, self).__init__()
values = _squeeze2d(values)
shape = np.shape(values)
assert len(shape) == 2, "values array must be 2D"
        if not isinstance(grid, basestring):
            if grid is None:
                grid = DTStructuredGrid2D(range(shape[1]), range(shape[0]))
            assert shape == grid.shape(), "grid shape %s != value shape %s" % (grid.shape(), shape)
self._grid = grid
self._values = values
def grid(self):
""":returns: a :class:`datatank_py.DTStructuredGrid2D.DTStructuredGrid2D` instance"""
return self._grid
def values(self):
""":returns: a 2D numpy array of values at each grid node"""
return self._values
def __dt_type__(self):
return "2D Structured Mesh"
def __str__(self):
return self.__dt_type__() + ":\n " + str(self._grid) + "\n" + " Values:\n " + str(self._values)
def __dt_write__(self, datafile, name):
datafile.write_anonymous(self._grid, name)
datafile.write_anonymous(self._values, name + "_V")
def write_with_shared_grid(self, datafile, name, grid_name, time, time_index):
"""Allows saving a single grid and sharing it amongst different time
values of a variable.
:param datafile: a :class:`datatank_py.DTDataFile.DTDataFile` open for writing
:param name: the mesh variable's name
:param grid_name: the grid name to be shared (will not be visible in DataTank)
:param time: the time value for this step (DataTank's ``t`` variable)
:param time_index: the corresponding integer index of this time step
        This is an advanced technique, but it can give significant space savings in
a data file. It's not widely implemented, since it's not clear yet if this
is the best API.
"""
if grid_name not in datafile:
datafile.write_anonymous(self._grid, grid_name)
datafile.write_anonymous(self.__dt_type__(), "Seq_" + name)
varname = "%s_%d" % (name, time_index)
datafile.write_anonymous(grid_name, varname)
datafile.write_anonymous(self._values, varname + "_V")
datafile.write_anonymous(np.array((time,)), varname + "_time")
@classmethod
def from_data_file(self, datafile, name):
grid = DTStructuredGrid2D.from_data_file(datafile, name)
values = datafile[name + "_V"]
return DTStructuredMesh2D(values, grid=grid)
if __name__ == '__main__':
from DTDataFile import DTDataFile
with DTDataFile("test/structured_mesh2D.dtbin", truncate=True) as df:
xvals = np.exp(np.array(range(18), dtype=np.float) / 5)
yvals = np.exp(np.array(range(20), dtype=np.float) / 5)
grid = DTStructuredGrid2D(xvals, yvals)
values = np.zeros(len(xvals) * len(yvals))
for i in xrange(len(values)):
values[i] = i
        # DataTank indexes differently from numpy; the grid is y,x ordered
values = values.reshape(grid.shape())
mesh = DTStructuredMesh2D(values, grid=grid)
df["2D mesh"] = mesh
|
{"hexsha": "ca7ffb111df9716f1bf571b4daf6b4a736e95bf4", "size": 4176, "ext": "py", "lang": "Python", "max_stars_repo_path": "datatank_py/DTStructuredMesh2D.py", "max_stars_repo_name": "amaxwell/datatank_py", "max_stars_repo_head_hexsha": "69404b23e456b23db8ef2e59b484283f40dbb9ec", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-07-03T18:58:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-23T12:49:26.000Z", "max_issues_repo_path": "datatank_py/DTStructuredMesh2D.py", "max_issues_repo_name": "amaxwell/datatank_py", "max_issues_repo_head_hexsha": "69404b23e456b23db8ef2e59b484283f40dbb9ec", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datatank_py/DTStructuredMesh2D.py", "max_forks_repo_name": "amaxwell/datatank_py", "max_forks_repo_head_hexsha": "69404b23e456b23db8ef2e59b484283f40dbb9ec", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2857142857, "max_line_length": 109, "alphanum_fraction": 0.6051245211, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1009}
|
"""State of a Bayesian quadrature method."""
from typing import Optional, Tuple
import numpy as np
from probnum.quad._integration_measures import IntegrationMeasure
from probnum.quad.kernel_embeddings import KernelEmbedding
from probnum.randprocs.kernels import Kernel
from probnum.random_variables import Normal
# pylint: disable=too-few-public-methods, too-many-instance-attributes, too-many-arguments
class BQInfo:
"""Collect and stores information about the BQ loop.
Parameters
----------
iteration :
Iteration of the loop.
nevals :
Number of evaluations collected.
has_converged :
True if the BQ loop fulfils a stopping criterion, otherwise False.
"""
def __init__(
self,
iteration: int = 0,
nevals: int = 0,
has_converged: bool = False,
):
self.iteration = iteration
self.nevals = nevals
self.has_converged = has_converged
def update_iteration(self, batch_size: int) -> None:
"""Update the quantities tracking iteration info.
Parameters
----------
batch_size:
Number of points added in each iteration.
"""
self.iteration += 1
self.nevals += batch_size
class BQState:
"""Container for the quantities defining the BQ problem and the BQ loop state.
Parameters
----------
measure :
The integration measure.
kernel :
The kernel used for BQ.
integral_belief :
Normal distribution over the value of the integral.
info:
Information about the loop status.
batch_size:
Size of the batch when acquiring new nodes.
nodes:
All locations at which function evaluations are available.
fun_evals:
Function evaluations at nodes.
"""
def __init__(
self,
measure: IntegrationMeasure,
kernel: Kernel,
integral_belief: Optional[Normal] = None,
previous_integral_beliefs: Tuple[Normal] = (),
info: Optional[BQInfo] = None,
batch_size: int = 1,
nodes: Optional[np.ndarray] = None,
fun_evals: Optional[np.ndarray] = None,
gram: np.ndarray = np.array([[]]),
kernel_means: np.ndarray = np.array([]),
):
self.measure = measure
self.kernel = kernel
self.kernel_embedding = KernelEmbedding(kernel, measure)
self.integral_belief = integral_belief
self.previous_integral_beliefs = previous_integral_beliefs
self.input_dim = measure.input_dim
self.batch_size = batch_size
if nodes is None:
self.nodes = np.empty((0, self.input_dim))
self.fun_evals = np.array([])
else:
self.nodes = nodes
self.fun_evals = fun_evals
if info is None:
info = BQInfo(nevals=self.fun_evals.size)
self.info = info
self.gram = gram
self.kernel_means = kernel_means
@classmethod
def from_new_data(
cls,
nodes: np.ndarray,
fun_evals: np.ndarray,
integral_belief: Normal,
prev_state: "BQState",
gram: np.ndarray,
kernel_means: np.ndarray,
) -> "BQState":
r"""Initialize state from updated data
Parameters
----------
nodes:
All locations at which function evaluations are available.
fun_evals:
Function evaluations at nodes.
integral_belief :
Normal distribution over the value of the integral.
prev_state:
Previous state of the BQ loop.
gram :
The Gram matrix of the given nodes.
kernel_means :
The kernel means at the given nodes.
"""
return cls(
measure=prev_state.measure,
kernel=prev_state.kernel,
integral_belief=integral_belief,
previous_integral_beliefs=prev_state.previous_integral_beliefs
+ (prev_state.integral_belief,),
info=prev_state.info,
batch_size=prev_state.batch_size,
nodes=nodes,
fun_evals=fun_evals,
gram=gram,
kernel_means=kernel_means,
)
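# Illustrative sketch (assumes `measure` and `kernel` are valid probnum
# objects, e.g. an IntegrationMeasure and a randprocs kernel):
#
#   state = BQState(measure=measure, kernel=kernel)
#   state.info.update_iteration(batch_size=state.batch_size)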
|
{"hexsha": "8166cf50892c7b84ec03df0e0661da1335aee1f8", "size": 4241, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/probnum/quad/solvers/bq_state.py", "max_stars_repo_name": "feimeng93/probnum", "max_stars_repo_head_hexsha": "4e46273c0157d26b9be2a7a415ccf69a3691ec22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/probnum/quad/solvers/bq_state.py", "max_issues_repo_name": "feimeng93/probnum", "max_issues_repo_head_hexsha": "4e46273c0157d26b9be2a7a415ccf69a3691ec22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/probnum/quad/solvers/bq_state.py", "max_forks_repo_name": "feimeng93/probnum", "max_forks_repo_head_hexsha": "4e46273c0157d26b9be2a7a415ccf69a3691ec22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2482758621, "max_line_length": 90, "alphanum_fraction": 0.6099976421, "include": true, "reason": "import numpy", "num_tokens": 901}
|
import LMT
variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)]
example {a1 a2 a3 : A I E} :
(((a1).write i1 (v3)).write i3 (v3)) ≠ (((a1).write i3 (v3)).write i1 (v3)) → False := by
arr
|
{"author": "abdoo8080", "repo": "ar-project", "sha": "303af2d62cf8c8fe996c9670f9fe5a0cc90e5bb8", "save_path": "github-repos/lean/abdoo8080-ar-project", "path": "github-repos/lean/abdoo8080-ar-project/ar-project-303af2d62cf8c8fe996c9670f9fe5a0cc90e5bb8/Test/Lean/Test37.lean"}
|
"""pymoku example: Basic Laser Lock Box
This example demonstrates how you can configure the laser lock box
instrument
(c) 2019 Liquid Instruments Pty. Ltd.
"""
from pymoku import Moku
from pymoku.instruments import LaserLockBox
from scipy import signal
def gen_butterworth(corner_frequency):
"""
    Generate coefficients for a second-order Butterworth low-pass filter.
Corner frequencies for laser lock box second harmonic filtering should be
in the range: 1 kHz < corner frequency < 31.25 MHz.
"""
sample_rate = 31.25e6
normalised_corner = corner_frequency / (sample_rate / 2)
b, a = signal.butter(2, normalised_corner, 'low', analog=False)
coefficient_array = [[1.0, b[0], b[1], b[2], -a[1], -a[2]],
[1.0, 1.0, 0.0, 0.0, 0.0, 0.0]]
return coefficient_array
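# Note: each row of the coefficient array is one filter section laid out as
# [gain, b0, b1, b2, -a1, -a2] (the leading 1.0 appears to be a per-section
# gain); the second row here is an identity (pass-through) section.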
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('Moku')
try:
i = m.deploy_or_connect(LaserLockBox)
# set local oscillator, auxiliary and scan generators
i.set_local_oscillator(source='internal', frequency=0, phase=90,
pll_auto_acq=False)
i.set_aux_sine(amplitude=1.0, frequency=10e3, phase=0, sync_to_lo=False,
output='out1')
i.set_scan(frequency=1e3, phase=0, output='out2', amplitude=1.0,
waveform='triangle')
# configure PIDs:
i.set_pid_by_gain(1, g=1, kp=1)
i.set_pid_by_gain(2, g=1, kp=1)
# configure second harmonic rejection low pass filter
coef_array = gen_butterworth(1e4)
i.set_custom_filter(coef_array)
finally:
# Close the connection to the Moku device
    # This ensures network resources are released correctly
m.close()
|
{"hexsha": "7859bc259412f39a1da20ff907ce15d5b81305d4", "size": 1725, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/laser_lock_box_basic.py", "max_stars_repo_name": "liquidinstruments/pymoku", "max_stars_repo_head_hexsha": "a10c2516e5953722a5f5b52aec7944bec22492c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-10-11T04:37:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T02:34:03.000Z", "max_issues_repo_path": "examples/laser_lock_box_basic.py", "max_issues_repo_name": "liquidinstruments/pymoku", "max_issues_repo_head_hexsha": "a10c2516e5953722a5f5b52aec7944bec22492c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2017-06-02T18:19:49.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-07T06:15:53.000Z", "max_forks_repo_path": "examples/laser_lock_box_basic.py", "max_forks_repo_name": "liquidinstruments/pymoku", "max_forks_repo_head_hexsha": "a10c2516e5953722a5f5b52aec7944bec22492c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-07-12T04:18:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T10:10:48.000Z", "avg_line_length": 30.8035714286, "max_line_length": 77, "alphanum_fraction": 0.6776811594, "include": true, "reason": "from scipy", "num_tokens": 477}
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from pyiron_base._tests import PyironTestCase
from pyiron_continuum.schroedinger.potentials import SquareWell, Sinusoidal
from pyiron_continuum.mesh import RectMesh
import numpy as np
class _PotentialTest(PyironTestCase):
def setUp(self) -> None:
super().setUp()
self.mesh = RectMesh([1, 1], 6)
class TestSquareWell(_PotentialTest):
def test_input(self):
potl = SquareWell(width=1/3, depth=2)
self.assertTrue(np.allclose(
[
6*[2],
6*[2],
[2, 2, 0, 0, 2, 2],
[2, 2, 0, 0, 2, 2],
6 * [2],
6 * [2],
],
potl(self.mesh)
))
def test_call(self):
not_a_mesh = np.random.rand(*self.mesh.shape)
potl = SquareWell()
with self.assertRaises(AttributeError):
potl(not_a_mesh) # Relies on mesh attributes
class TestSinusoidal(_PotentialTest):
def test_input(self):
potl = Sinusoidal(n_waves=2, amplitude=2)
self.assertTrue(np.allclose(
[
[0, 0, 0, 0, 0, 0],
[0, 3, -3, 0, 3, -3],
[0, -3, 3, 0, -3, 3],
[0, 0, 0, 0, 0, 0],
[0, 3, -3, 0, 3, -3],
[0, -3, 3, 0, -3, 3]
],
potl(self.mesh)
))
def test_call(self):
not_a_mesh = np.random.rand(*self.mesh.shape)
potl = Sinusoidal()
with self.assertRaises(AttributeError):
potl(not_a_mesh) # Relies on mesh attributes
|
{"hexsha": "42226f3c8b989dc454029f4c991cdae35b6e001b", "size": 1790, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/schroedinger/test_potentials.py", "max_stars_repo_name": "yangbai90/pyiron_continuum", "max_stars_repo_head_hexsha": "98c1161441cf6f66ab428f35f3c5c37aa8c15736", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/unit/schroedinger/test_potentials.py", "max_issues_repo_name": "yangbai90/pyiron_continuum", "max_issues_repo_head_hexsha": "98c1161441cf6f66ab428f35f3c5c37aa8c15736", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 50, "max_issues_repo_issues_event_min_datetime": "2021-02-19T19:33:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T14:26:04.000Z", "max_forks_repo_path": "tests/unit/schroedinger/test_potentials.py", "max_forks_repo_name": "yangbai90/pyiron_continuum", "max_forks_repo_head_hexsha": "98c1161441cf6f66ab428f35f3c5c37aa8c15736", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-11T09:25:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-11T09:25:19.000Z", "avg_line_length": 30.3389830508, "max_line_length": 108, "alphanum_fraction": 0.5312849162, "include": true, "reason": "import numpy", "num_tokens": 524}
|
# -*- coding: utf-8 -*-
"""
Make a simplified graph of Copenhagen (and Frederiksberg) by removing
all unnecessary interstitial nodes and distinguishing roads with
protected bicycle infrastructure (or safe places) from others, based on
the criterion of bikewgrowth.
"""
import nerds_osmnx.simplification as simplification
import nerds_osmnx.utils as utils
import osmnx as ox
import shapely
import networkx as nx
import geopandas as gpd
if __name__ == "__main__":
    # First add every necessary tag to tag_list so we can filter on them
tag_list = ["cycleway", "bicycle", "cycleway:right",
"cycleway:left", "cyclestreet", "bicycle_road"]
for tag_name in tag_list:
if tag_name not in ox.settings.useful_tags_way:
ox.settings.useful_tags_way += [tag_name]
# # Get the polygon of Copenhagen and Frederiksberg
# #TODO: Find a geocode for Copenhagen, old one doesn't work anymore
# cop = ox.geocode_to_gdf("Copenhagen Municipality")
# fre = ox.geocode_to_gdf("Frederiksberg Municipality")
cop = gpd.read_file("polygon_copenhagen.geojson")
fre = gpd.read_file("polygon_frederiksberg.geojson")
polygon = shapely.ops.unary_union([cop['geometry'][0], fre['geometry'][0]])
# Get the non-simplified graph with the extended list of attributes
G = ox.graph_from_polygon(polygon, simplify=False)
# REGION_COORD = [55.716, 55.555, 12.489, 12.681]
# G = ox.graph_from_bbox(*REGION_COORD, simplify=False)
G_sim = simplification.simplify_graph(G)
G_com = ox.simplify_graph(G)
    # Used to get a look at the edge attributes we get with this query
# ignore_attr = ['length', 'width', 'osmid', 'ref', 'name']
# edge_attr = utils.get_every_edge_attributes(G, ignore_key_list=ignore_attr)
    # Make a dictionary of the tags that mark protected bicycle infrastructure
    protected_dict = {
        "cycleway": "track",
        "cycleway:right": "track",
        "cycleway:left": "track",
        "bicycle_road": "yes",
        "bicycle": "designated",
        "highway": "cycleway",
    }
# Create new attribute to simplify it
H = utils.add_edge_attribute(G, protected_dict, 'protected_bicycling')
H_sim = simplification.simplify_graph(H, attributes='protected_bicycling')
H_fin = simplification.multidigraph_to_graph(
H_sim, attributes='protected_bicycling', verbose=True
)
H_mul = H_fin.copy()
# Count the number of protected edges and change bool into binary int
count_protected = 0
for edge in H_mul.edges:
if H_mul.edges[edge]['protected_bicycling'] is True:
H_mul.edges[edge]['protected_bicycling'] = 1
count_protected += 1
else:
H_mul.edges[edge]['protected_bicycling'] = 0
num_edges = len(list(H_mul.edges))
ratio = 1 - (num_edges - count_protected) / num_edges
print("{}% of protected edges".format(round((ratio * 100), 2)))
# Basic statistics
print("""
{} nodes and {} edges in original graph G \n
{} nodes and {} edges in traditional simplified graph G_sim \n
{} nodes and {} edges in OSMnx simplified graph G_com \n
{} nodes and {} edges in multilayer simplified graph H_sim \n
{} nodes and {} edges in final graph H_fin
""".format(len(list(G.nodes())), len(list(G.edges())),
len(list(G_sim.nodes())), len(list(G_sim.edges())),
len(list(G_com.nodes())), len(list(G_com.edges())),
len(list(H_sim.nodes())), len(list(H_sim.edges())),
len(list(H_fin.nodes())), len(list(H_fin.edges()))))
# Use binary int for visualization
ec = ox.plot.get_edge_colors_by_attr(H_mul, 'protected_bicycling',
cmap='bwr')
H_mul = nx.MultiGraph(H_mul)
# Red is protected, blue unprotected
ox.plot_graph(H_mul, figsize = (12, 8), bgcolor='w',
node_color='black', node_size=5,
edge_color=ec, edge_linewidth=2)
# nx.write_gpickle(H_fin, "copenhagen_graph_simplified.gpickle")
|
{"hexsha": "e3309e7eac21df8f3067ae827fa5f0aba010d7ee", "size": 4174, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/protected_bicycling_copenhagen_simplified.py", "max_stars_repo_name": "anerv/NERDS_osmnx", "max_stars_repo_head_hexsha": "c243317b3ac518269c63d2fcec0e51f9a7af8327", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-02T15:56:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T16:15:37.000Z", "max_issues_repo_path": "scripts/protected_bicycling_copenhagen_simplified.py", "max_issues_repo_name": "anerv/NERDS_osmnx", "max_issues_repo_head_hexsha": "c243317b3ac518269c63d2fcec0e51f9a7af8327", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/protected_bicycling_copenhagen_simplified.py", "max_forks_repo_name": "anerv/NERDS_osmnx", "max_forks_repo_head_hexsha": "c243317b3ac518269c63d2fcec0e51f9a7af8327", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5918367347, "max_line_length": 81, "alphanum_fraction": 0.6629132726, "include": true, "reason": "import networkx", "num_tokens": 1036}
|
\section{CapBAC (Capability-Based Access Control)}
\label{subsec:capbacsystem}
CapBAC is an access control framework designed for the Internet of Things~\cite{hernandez2013distributed}. The primary idea is to accommodate the seamless integration of devices into the Internet by facilitating a distributed approach in which the devices themselves can make authorization decisions.
One major aspect that CapBAC focuses on is basing access control decisions on contextual information related to the end device itself (covering various IoT use cases, such as emergency-response procedures). Almost all proposed IoT access control architectures can be classified into three types: centralized, centralized and contextual, and distributed. In a purely centralized approach, the access control logic is located in an entity without resource constraints, but that entity becomes a bottleneck, and the context of the end device cannot be taken into consideration. The centralized and contextual type allows contextual information to be passed to the centralized policy decision point (PDP), thereby letting that information participate in the decision process.\footnote{In time-critical application domains, there can be a slight mismatch between the actual context and the context used in the decision-making process.} Alternatively, in distributed architectures, the access control logic is embedded into the end devices themselves. While the advantages of this mechanism are numerous, the key drawback is that the end devices must be extended to support the access control logic and the required cryptography.
As shown in Fig.~\ref{fig:capbac}, the architecture of CapBAC is intuitive and straightforward. It considers three entities in an interaction: an issuer, a subject, and an end device to be accessed. The issuer is in charge of granting a capability token to the subject for the access requests that the subject makes to the end device. The capability token itself is a JSON file (a hypothetical example is sketched after the list), which contains:
\begin{itemize}
\item Capability Identifier (\textbf{ID}) - a random identifier generated for the capability token
\item Issuer (\textbf{IS}) - the entity that issued the token
\item Subject (\textbf{SU}) - the public key of the subject to which the rights from the token are granted
\item Device (\textbf{DE}) - a URI identifying the device to which the token applies
\item Other fields accounting for the permissions to be granted, the duration or validity of the token, and the context conditions of the end device
\item Signature (\textbf{SI}) - the signature of the file, generated by the issuer
\end{itemize}
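For illustration only, such a token could look as follows; the field names and values here are hypothetical and merely mirror the fields listed above:
\begin{verbatim}
{
  "id": "b5f2c8d1",
  "is": "issuer.example.org",
  "su": "<subject ECDSA public key>",
  "de": "coap://device.example.org/sensor",
  "permissions": [{"action": "GET", "resource": "sensor"}],
  "not-before": 1500000000,
  "not-after": 1500003600,
  "si": "<issuer ECDSA signature over the other fields>"
}
\end{verbatim}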
\begin{figure}[h]
\centering
\includegraphics[scale = 0.2]{img/capBac}
\caption{CapBAC Architecture }
\label{fig:capbac}
\end{figure}
CapBAC uses ECDSA (the Elliptic Curve Digital Signature Algorithm) to authenticate capability tokens and end-device requests, for which every issuer and subject is set up with an ECDSA key pair. A subject sends a request for access to a resource on an end device to an issuer; the issuer validates the request, produces a capability token for the subject as a JSON file, and signs it with the private key of its ECDSA key pair. The subject can now sign the token with its own private key and forward the token together with its signature to the end device. The end device first verifies this signature against the public key of the subject (\textbf{SU}) contained in the token, thus validating that the request genuinely came from the source to which the capability was granted, and then verifies that the token itself originates from a valid issuer by checking the signature (\textbf{SI}) in the token against the public key of the issuer (\textbf{IS}).
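As a concrete illustration of this flow, the following minimal sketch uses the Python \texttt{cryptography} package; the token layout and all names are hypothetical, and error handling is omitted:
\begin{verbatim}
# A minimal sketch of the CapBAC signing/verification flow.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
issuer_key = ec.generate_private_key(ec.SECP256R1())
subject_key = ec.generate_private_key(ec.SECP256R1())
# Hypothetical token contents (see the field list above).
token = b'{"id": "b5f2c8d1", "su": "<subject pub key>", ...}'
# Issuer signs the token; the signature becomes the SI field.
si = issuer_key.sign(token, ec.ECDSA(hashes.SHA256()))
# Subject signs the (token, SI) pair when contacting the end device.
req_sig = subject_key.sign(token + si, ec.ECDSA(hashes.SHA256()))
# End device: verify the request came from SU, then that SI is valid
# (verify() raises InvalidSignature on failure).
subject_key.public_key().verify(req_sig, token + si,
                                ec.ECDSA(hashes.SHA256()))
issuer_key.public_key().verify(si, token, ec.ECDSA(hashes.SHA256()))
\end{verbatim}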
CapBAC clearly uses the ``tag with bits'' type of capability system, and offers an intuitive and efficient mechanism for access control in the Internet of Things setting; however, it would have been useful if CapBAC had provided performance measurements. Being a generic framework for introducing capabilities, a performance evaluation of it could have validated its performance for general applications. In the original work, however, the authors merely report the time taken for a single resource access.
|
{"hexsha": "6a30ec760ffd2e6b116a1d7a718b92758137b62b", "size": 4206, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/capbacsystem.tex", "max_stars_repo_name": "jtracey/cuddly-fiesta", "max_stars_repo_head_hexsha": "7725f567f6eb85f7c0940c531d21d6dbd50a8767", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paper/capbacsystem.tex", "max_issues_repo_name": "jtracey/cuddly-fiesta", "max_issues_repo_head_hexsha": "7725f567f6eb85f7c0940c531d21d6dbd50a8767", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/capbacsystem.tex", "max_forks_repo_name": "jtracey/cuddly-fiesta", "max_forks_repo_head_hexsha": "7725f567f6eb85f7c0940c531d21d6dbd50a8767", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 140.2, "max_line_length": 1254, "alphanum_fraction": 0.8102710414, "num_tokens": 860}
|
\section{File System Isolation}
Isolation is an important property in modern systems.
Various isolation techniques are proposed for different parts of the
system. Typical examples include virtual
machines~\cite{bugnion97disco,DragovicEtAl03-Xen}, Linux Containers~\cite{linux-container},
isolation kernel~\cite{WhitakerEtAl02-Denali}, BSD {\tt
jail}~\cite{Poul+00-Jail} and Solaris zones~\cite{Solaris11-Zones}.
They are used to create an independent environment for the target
applications or even operating systems to run in a shared platform.
For the file system component, these frameworks only provide namespace
isolation for different clients by constraining clients to only a
subset of the shared file system. However, as shown in this thesis,
the failure, recovery and journaling performance are not isolated in
a shared file system. Our work is to provide a file system level
container which can be used together with the above techniques to
provide full isolation for applications.
IceFS has derived inspiration from a number of projects for improving
file system recovery and repair, and for tolerating system crashes.
Many existing systems have improved the reliability of file systems
with better recovery techniques. Fast checking of the Solaris UFS~\cite{PeacockEtAl98-SolarisFsck} has been proposed by only
checking the working-set portion of the file system when a failure
occurs. Changing the I/O pattern of the file system checker to reduce
random requests has been suggested~\cite{BinaEmrath89-FasterFsck,Ma+13-ffsck}.
A background fsck in BSD~\cite{McKusick02-BackgroundFsck} checks a file system
snapshot to avoid conflicts with the foreground
workload. WAFL~\cite{HitzEtAl94-WAFL} employs Wafliron~\cite{WAFL-Iron}, an
online file system checker, to perform online checking on a volume but the
volume being checked cannot be accessed by users.
% AD removed for space -- return it if you want it
Our recovery idea is based on the cube abstraction which provides isolated
failure, recovery and journaling. Under this model, we only check the
faulty part of the file system without scanning the whole file system.
The above techniques can be utilized in one cube to further speedup
the recovery process.
Several repair-driven file systems also exist.
Chunkfs~\cite{HensonEtAl06-Chunkfs} does a partial check of Ext2 by
partitioning the file system into multiple chunks; however, files and
directories can still span multiple chunks, reducing the independence of
chunks. Windows ReFS~\cite{Windows-ReFS} can automatically recover
corrupted data from mirrored storage devices when it detects checksum
mismatch. Our earlier work~\cite{LuEtAl13-HS} proposes a high-level
design to isolate file system structures for fault and recovery
isolation. Here, we extend that work by addressing both reliability
and performance issues with a real prototype and demonstrations for
various applications.
% AD removed for space
Compared with these file systems, IceFS disentangles the file
system into both logically and physically isolated parts. In this
systematic manner, fault detection and recovery can be made more
independent and localized.
Many ideas for tolerating system crashes have been introduced at
different levels. Microrebooting~\cite{CandeaEtAl04-Reboot}
partitions a large application into rebootable and stateless
components; to recover a failed component, the data state of each
component is persisted in a separate store outside of the
application. Nooks~\cite{SwiftEtAl03-Nooks} isolates failures of
device drivers from the rest of the kernel with separated address
spaces for each target
driver. Membrane~\cite{SundararamanEtAl10-Membrane} handles file
system crashes transparently by tracking resource usage and
the requests at runtime; after a crash, the file system is restarted
by releasing the in-use resources and replaying the failed requests.
The Rio file cache~\cite{Chen96-Rio} protects the memory state of the
file system across a system crash, and conducts a warm reboot to
recover lost updates.
Inspired by these ideas, IceFS localizes a file system crash by
microisolating the file system structures and microrebooting a cube
with a simple and light-weight design.
Address-space isolation techniques could be used within cubes for
better memory-fault isolation.
In addition to reliability isolation, performance isolation within
file systems and SSDs has also been proposed. When running multiple
workloads on a machine with fast storage devices and many cores, the
contention of in-memory locks from different threads may introduce a
large overhead. Similar to cubes in IceFS,
Multi-lane~\cite{Kang14-multilane} and SpanFS~\cite{spanfs}
propose to isolate I/O stacks (both in-memory and on-disk structures)
for different domains; in this manner, domains will not compete for
shared locks, avoiding lock scalability bottlenecks when running
multiple workloads in a many core machine.
Similar to metadata entanglement in file systems, data with different
lifetimes could be stored together in the same flash page in modern
SSDs, leading to excessive garbage collection traffic.
Multi-streamed SSDs~\cite{multi-stream} propose streams for
applications, mapping data with different lifetimes to SSD streams.
The multi-streamed SSD ensures that the data in a stream are not only
written together to a physically related flash space, but also
separated from data in other streams; thus, the SSD throughput and
latency QoS can be significantly improved. Mapping cubes to different
streams may improve the performance of IceFS when running on such
multi-streamed SSDs.
|
{"hexsha": "9403bf595d2e9f68c3c33c16fc6f19d9a7141389", "size": 5644, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "lanyue_thesis/related/icefs.tex", "max_stars_repo_name": "keqhe/phd_thesis", "max_stars_repo_head_hexsha": "770fc637f9b7d908f349bbbfa112cbc17d898be3", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-08-27T08:03:16.000Z", "max_stars_repo_stars_event_max_datetime": "2017-10-20T14:28:43.000Z", "max_issues_repo_path": "lanyue_thesis/related/icefs.tex", "max_issues_repo_name": "keqhe/phd_thesis", "max_issues_repo_head_hexsha": "770fc637f9b7d908f349bbbfa112cbc17d898be3", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lanyue_thesis/related/icefs.tex", "max_forks_repo_name": "keqhe/phd_thesis", "max_forks_repo_head_hexsha": "770fc637f9b7d908f349bbbfa112cbc17d898be3", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.3333333333, "max_line_length": 125, "alphanum_fraction": 0.8169737775, "num_tokens": 1280}
|
import numpy as np
import tvm
from tvm import te
# The sizes of inputs and filters
# batch = 256
# in_channel = 256
# out_channel = 512
# in_size = 14
# kernel = 3
# pad = 1
# stride = 1
batch = 4
in_channel = 3
out_channel = 64
in_size = 16
kernel = 3
pad = 0
stride = 1
# Algorithm
A = te.placeholder((in_size, in_size, in_channel, batch), name="A")
W = te.placeholder((kernel, kernel, in_channel, out_channel), name="W")
out_size = (in_size - kernel + 2 * pad) // stride + 1
# Pad input
Apad = te.compute(
(in_size + 2 * pad, in_size + 2 * pad, in_channel, batch),
lambda yy, xx, cc, nn: tvm.tir.if_then_else(
tvm.tir.all(yy >= pad, yy - pad < in_size, xx >= pad, xx - pad < in_size),
A[yy - pad, xx - pad, cc, nn],
tvm.tir.const(0.0, "float32"),
),
name="Apad",
)
# Create reduction variables (i.e., axes along which a summation is performed)
rc = te.reduce_axis((0, in_channel), name="rc")
ry = te.reduce_axis((0, kernel), name="ry")
rx = te.reduce_axis((0, kernel), name="rx")
# Compute the convolution
B = te.compute(
(out_size, out_size, out_channel, batch),
lambda yy, xx, ff, nn: te.sum(
Apad[yy * stride + ry, xx * stride + rx, rc, nn] * W[ry, rx, rc, ff], axis=[ry, rx, rc]
),
name="B",
)
# Designate the memory hierarchy
s = te.create_schedule(B.op)
s[Apad].compute_inline() # compute Apad inline
AA = s.cache_read(Apad, "shared", [B])  # stage the padded input in shared memory
WW = s.cache_read(W, "shared", [B])
AL = s.cache_read(AA, "local", [B])  # stage tiles in per-thread local memory
WL = s.cache_read(WW, "local", [B])
BL = s.cache_write(B, "local")
# tile consts
tile = 8
num_thread = 8
block_factor = tile * num_thread
step = 8
vthread = 2
# Get the GPU thread indices
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
thread_xz = te.thread_axis((0, vthread), "vthread", name="vx")
thread_yz = te.thread_axis((0, vthread), "vthread", name="vy")
# Split the workloads
hi, wi, fi, ni = s[B].op.axis
bz = s[B].fuse(hi, wi)
by, fi = s[B].split(fi, factor=block_factor)
bx, ni = s[B].split(ni, factor=block_factor)
# Bind the iteration variables to GPU thread indices
s[B].bind(bz, block_z)
s[B].bind(by, block_y)
s[B].bind(bx, block_x)
tyz, fi = s[B].split(fi, nparts=vthread) # virtual thread split
txz, ni = s[B].split(ni, nparts=vthread) # virtual thread split
ty, fi = s[B].split(fi, nparts=num_thread)
tx, ni = s[B].split(ni, nparts=num_thread)
s[B].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
s[B].bind(tyz, thread_yz)
s[B].bind(txz, thread_xz)
s[B].bind(ty, thread_y)
s[B].bind(tx, thread_x)
# Schedule BL local write
s[BL].compute_at(s[B], tx)
yi, xi, fi, ni = s[BL].op.axis
ry, rx, rc = s[BL].op.reduce_axis
rco, rci = s[BL].split(rc, factor=step)
s[BL].reorder(rco, ry, rx, rci, fi, ni)
# Attach computation to iteration variables
s[AA].compute_at(s[BL], rx)
s[WW].compute_at(s[BL], rx)
s[AL].compute_at(s[BL], rci)
s[WL].compute_at(s[BL], rci)
# Schedule for A's shared memory load
yi, xi, ci, ni = s[AA].op.axis
ty, ci = s[AA].split(ci, nparts=num_thread)
tx, ni = s[AA].split(ni, nparts=num_thread)
_, ni = s[AA].split(ni, factor=4)
s[AA].reorder(ty, tx, yi, xi, ci, ni)
s[AA].bind(ty, thread_y)
s[AA].bind(tx, thread_x)
s[AA].vectorize(ni) # vectorize memory load
# Schedule for W's shared memory load
yi, xi, ci, fi = s[WW].op.axis
ty, ci = s[WW].split(ci, nparts=num_thread)
tx, fi = s[WW].split(fi, nparts=num_thread)
_, fi = s[WW].split(fi, factor=4)
s[WW].reorder(ty, tx, yi, xi, ci, fi)
s[WW].bind(ty, thread_y)
s[WW].bind(tx, thread_x)
s[WW].vectorize(fi) # vectorize memory load
func = tvm.build(s, [A, W, B], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(in_size, in_size, in_channel, batch)).astype(A.dtype)
w_np = np.random.uniform(size=(kernel, kernel, in_channel, out_channel)).astype(W.dtype)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros((out_size, out_size, out_channel, batch), dtype=B.dtype), dev)
func(a, w, b)
evaluator = func.time_evaluator(func.entry_name, dev, number=1)
print("Convolution: %f ms" % (evaluator(a, w, b).mean * 1e3))
print(func.imported_modules[0].get_source())
|
{"hexsha": "d9a146d56c89be28e68954acef2ecc47fd5b922b", "size": 4276, "ext": "py", "lang": "Python", "max_stars_repo_path": "5.tvm/tvm-conv.py", "max_stars_repo_name": "DbettKK/AmpereSparseMatmul", "max_stars_repo_head_hexsha": "25a3a053059cd86a87a278757e2de6af9e889063", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "5.tvm/tvm-conv.py", "max_issues_repo_name": "DbettKK/AmpereSparseMatmul", "max_issues_repo_head_hexsha": "25a3a053059cd86a87a278757e2de6af9e889063", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "5.tvm/tvm-conv.py", "max_forks_repo_name": "DbettKK/AmpereSparseMatmul", "max_forks_repo_head_hexsha": "25a3a053059cd86a87a278757e2de6af9e889063", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5428571429, "max_line_length": 95, "alphanum_fraction": 0.6615996258, "include": true, "reason": "import numpy", "num_tokens": 1510}
|
import numpy as np
def ns(x):
    """Forward difference along axis 0: z[i, :] = x[i+1, :] - x[i, :], last row zeroed."""
    y = np.roll(x, 1, axis=0)
    y[0, :] = 0
    z = np.roll(x - y, -1, axis=0)
    z[-1, :] = 0
    return z
def sn(x):
    """Backward difference along axis 0: z[i, :] = x[i, :] - x[i-1, :], first row zeroed."""
    y = np.roll(x, -1, axis=0)
    y[-1, :] = 0
    z = np.roll(y - x, 1, axis=0)
    z[0, :] = 0
    return z
def we(x):
    """Forward difference along axis 1: z[:, j] = x[:, j+1] - x[:, j], last column zeroed."""
    y = np.roll(x, 1, axis=1)
    y[:, 0] = 0
    z = np.roll(x - y, -1, axis=1)
    z[:, -1] = 0
    return z
def ew(x):
    """Backward difference along axis 1: z[:, j] = x[:, j] - x[:, j-1], first column zeroed."""
    y = np.roll(x, -1, axis=1)
    y[:, -1] = 0
    z = np.roll(y - x, 1, axis=1)
    z[:, 0] = 0
    return z
if __name__ == '__main__':
x = np.array([[1, 2, 3, 4], [11, 22, 33, 44], [111, 222, 333, 222], [1, 2, 3, 4], ])
print(x)
print('ns')
print(ns(x))
print('we')
print(we(x))
print('sn')
print(sn(x))
print('ew')
print(ew(x))
|
{"hexsha": "49e65dcbe838e85062f99f70a4ff3d0f2642aeb4", "size": 782, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/analogues/gradient.py", "max_stars_repo_name": "b8raoult/analogues", "max_stars_repo_head_hexsha": "447c1b098298a93f45e754d95b6068df50a8ed1d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/analogues/gradient.py", "max_issues_repo_name": "b8raoult/analogues", "max_issues_repo_head_hexsha": "447c1b098298a93f45e754d95b6068df50a8ed1d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/analogues/gradient.py", "max_forks_repo_name": "b8raoult/analogues", "max_forks_repo_head_hexsha": "447c1b098298a93f45e754d95b6068df50a8ed1d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-19T01:34:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-19T01:34:31.000Z", "avg_line_length": 15.3333333333, "max_line_length": 88, "alphanum_fraction": 0.4181585678, "include": true, "reason": "import numpy", "num_tokens": 345}
|
\documentclass[11pt,letterpaper]{article}
\usepackage[hmargin=0.7in,vmargin=1in,landscape]{geometry}
\usepackage[T1]{fontenc}
\usepackage{url}
\usepackage{tabularx,array,varwidth}
\setlength{\parindent}{0pt}
\begin{document}
{\LARGE Big-O Cheat Sheet} \\
Generated \today. \\
Brandon Amos <\url{http://bamos.github.io}>
\newcolumntype{M}{>{\begin{varwidth}{4.5cm}}l<{\end{varwidth}}}
\section{Searching}
\begin{tabularx}{\textwidth}{ MXXXXX }
Algorithm&Data Structure&\multicolumn{2}{l}{Time Complexity}&Space Complexity\\
\hline
&&Average&\multicolumn{2}{l}{Worst}\\
\hline
Depth First Search (DFS) & Graph of $|V|$ vertices and $|E|$ edges & - & $O\left(|E| + |V|\right)$ & $O\left(|V|\right)$\\
Breadth First Search (BFS) & Graph of $|V|$ vertices and $|E|$ edges & - & $O\left(|E| + |V|\right)$ & $O\left(|V|\right)$\\
Binary search & Sorted array of n elements & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(1\right)$\\
Linear (Brute Force) & Array & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(1\right)$\\
Shortest path by Dijkstra, using a Min-heap as priority queue & Graph with $|V|$ vertices and $|E|$ edges & $O\left((|V| + |E|) \log |V|\right)$ & $O\left((|V| + |E|) \log |V|\right)$ & $O\left(|V|\right)$\\
Shortest path by Dijkstra, using an unsorted array as priority queue & Graph with $|V|$ vertices and $|E|$ edges & $O\left(|V|^2\right)$ & $O\left(|V|^2\right)$ & $O\left(|V|\right)$\\
Shortest path by Bellman-Ford & Graph with $|V|$ vertices and $|E|$ edges & $O\left(|V||E|\right)$ & $O\left(|V||E|\right)$ & $O\left(|V|\right)$\\
\end{tabularx}
\section{Sorting}
\begin{tabularx}{\textwidth}{ MXXXXXX }
Algorithm&Data Structure&\multicolumn{3}{l}{Time Complexity}&Worst Case Auxiliary Space Complexity\\
\hline
&&Best&Average&\multicolumn{2}{l}{Worst}\\
\hline
Quicksort & Array & $O\left(n \log(n)\right)$ & $O\left(n \log(n)\right)$ & $O\left(n^2\right)$ & $O\left(n\right)$\\
Mergesort & Array & $O\left(n \log(n)\right)$ & $O\left(n \log(n)\right)$ & $O\left(n \log(n)\right)$ & $O\left(n\right)$\\
Heapsort & Array & $O\left(n \log(n)\right)$ & $O\left(n \log(n)\right)$ & $O\left(n \log(n)\right)$ & $O\left(1\right)$\\
Bubble Sort & Array & $O\left(n\right)$ & $O\left(n^2\right)$ & $O\left(n^2\right)$ & $O\left(1\right)$\\
Insertion Sort & Array & $O\left(n\right)$ & $O\left(n^2\right)$ & $O\left(n^2\right)$ & $O\left(1\right)$\\
Selection Sort & Array & $O\left(n^2\right)$ & $O\left(n^2\right)$ & $O\left(n^2\right)$ & $O\left(1\right)$\\
Bucket Sort & Array & $O\left(n+k\right)$ & $O\left(n+k\right)$ & $O\left(n^2\right)$ & $O\left(nk\right)$\\
Radix Sort & Array & $O\left(nk\right)$ & $O\left(nk\right)$ & $O\left(nk\right)$ & $O\left(n+k\right)$\\
\end{tabularx}
\section{Data Structures}
\begin{tabularx}{\textwidth}{ MXXXXXXXXXX }
Data Structure&\multicolumn{8}{l}{Time Complexity}&Space Complexity\\
\hline
&\multicolumn{4}{l}{Average}&\multicolumn{5}{l}{Worst}\\
\hline
&Indexing&Search&Insertion&Deletion&Indexing&Search&Insertion&Deletion&\\
\hline
Basic Array & $O\left(1\right)$ & $O\left(n\right)$ & - & - & $O\left(1\right)$ & $O\left(n\right)$ & - & - & $O\left(n\right)$\\
Dynamic Array & $O\left(1\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(1\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$\\
Singly-Linked List & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(1\right)$ & $O\left(1\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(1\right)$ & $O\left(1\right)$ & $O\left(n\right)$\\
Doubly-Linked List & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(1\right)$ & $O\left(1\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(1\right)$ & $O\left(1\right)$ & $O\left(n\right)$\\
Skip List & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n \log(n)\right)$\\
Hash Table & - & $O\left(1\right)$ & $O\left(1\right)$ & $O\left(1\right)$ & - & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$\\
Binary Search Tree & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$\\
Cartesian Tree & - & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & - & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$ & $O\left(n\right)$\\
B-Tree & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(n\right)$\\
Red-Black Tree & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(n\right)$\\
Splay Tree & - & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & - & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(n\right)$\\
AVL Tree & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(\log(n)\right)$ & $O\left(n\right)$\\
\end{tabularx}
\end{document}
|
{"hexsha": "4946747a38992d5b775f7861bb5012cd9252bdc1", "size": 5491, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "data/2013-12-10/data.tex", "max_stars_repo_name": "dineshresearch/amaradineshkumar.github.io", "max_stars_repo_head_hexsha": "33f9506fdc3f58ee87524c3630c2d40157f685f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2015-10-14T15:03:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T14:27:28.000Z", "max_issues_repo_path": "data/2013-12-10/data.tex", "max_issues_repo_name": "dineshresearch/amaradineshkumar.github.io", "max_issues_repo_head_hexsha": "33f9506fdc3f58ee87524c3630c2d40157f685f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2015-02-01T17:02:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-03T15:35:31.000Z", "max_forks_repo_path": "data/2013-12-10/data.tex", "max_forks_repo_name": "dineshresearch/amaradineshkumar.github.io", "max_forks_repo_head_hexsha": "33f9506fdc3f58ee87524c3630c2d40157f685f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 80, "max_forks_repo_forks_event_min_datetime": "2016-01-20T08:23:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T06:05:53.000Z", "avg_line_length": 77.338028169, "max_line_length": 244, "alphanum_fraction": 0.6024403569, "num_tokens": 2243}
|
SUBROUTINE CLATM4( ITYPE, N, NZ1, NZ2, RSIGN, AMAGN, RCOND,
$ TRIANG, IDIST, ISEED, A, LDA )
*
* -- LAPACK auxiliary test routine (version 3.0) --
* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
* Courant Institute, Argonne National Lab, and Rice University
* September 30, 1994
*
* .. Scalar Arguments ..
LOGICAL RSIGN
INTEGER IDIST, ITYPE, LDA, N, NZ1, NZ2
REAL AMAGN, RCOND, TRIANG
* ..
* .. Array Arguments ..
INTEGER ISEED( 4 )
COMPLEX A( LDA, * )
* ..
*
* Purpose
* =======
*
* CLATM4 generates basic square matrices, which may later be
* multiplied by others in order to produce test matrices. It is
* intended mainly to be used to test the generalized eigenvalue
* routines.
*
* It first generates the diagonal and (possibly) subdiagonal,
* according to the value of ITYPE, NZ1, NZ2, RSIGN, AMAGN, and RCOND.
* It then fills in the upper triangle with random numbers, if TRIANG is
* non-zero.
*
* Arguments
* =========
*
* ITYPE (input) INTEGER
* The "type" of matrix on the diagonal and sub-diagonal.
* If ITYPE < 0, then type abs(ITYPE) is generated and then
* swapped end for end (A(I,J) := A'(N-J,N-I).) See also
* the description of AMAGN and RSIGN.
*
* Special types:
* = 0: the zero matrix.
* = 1: the identity.
* = 2: a transposed Jordan block.
* = 3: If N is odd, then a k+1 x k+1 transposed Jordan block
* followed by a k x k identity block, where k=(N-1)/2.
* If N is even, then k=(N-2)/2, and a zero diagonal entry
* is tacked onto the end.
*
* Diagonal types. The diagonal consists of NZ1 zeros, then
* k=N-NZ1-NZ2 nonzeros. The subdiagonal is zero. ITYPE
* specifies the nonzero diagonal entries as follows:
* = 4: 1, ..., k
* = 5: 1, RCOND, ..., RCOND
* = 6: 1, ..., 1, RCOND
* = 7: 1, a, a^2, ..., a^(k-1)=RCOND
* = 8: 1, 1-d, 1-2*d, ..., 1-(k-1)*d=RCOND
* = 9: random numbers chosen from (RCOND,1)
* = 10: random numbers with distribution IDIST (see CLARND.)
*
* N (input) INTEGER
* The order of the matrix.
*
* NZ1 (input) INTEGER
* If abs(ITYPE) > 3, then the first NZ1 diagonal entries will
* be zero.
*
* NZ2 (input) INTEGER
* If abs(ITYPE) > 3, then the last NZ2 diagonal entries will
* be zero.
*
* RSIGN (input) LOGICAL
* = .TRUE.: The diagonal and subdiagonal entries will be
* multiplied by random numbers of magnitude 1.
* = .FALSE.: The diagonal and subdiagonal entries will be
* left as they are (usually non-negative real.)
*
* AMAGN (input) REAL
* The diagonal and subdiagonal entries will be multiplied by
* AMAGN.
*
* RCOND (input) REAL
* If abs(ITYPE) > 4, then the smallest diagonal entry will be
* RCOND. RCOND must be between 0 and 1.
*
* TRIANG (input) REAL
* The entries above the diagonal will be random numbers with
* magnitude bounded by TRIANG (i.e., random numbers multiplied
* by TRIANG.)
*
* IDIST (input) INTEGER
*          On entry, IDIST specifies the type of distribution to be
*          used to generate the random matrix.
* = 1: real and imaginary parts each UNIFORM( 0, 1 )
* = 2: real and imaginary parts each UNIFORM( -1, 1 )
* = 3: real and imaginary parts each NORMAL( 0, 1 )
* = 4: complex number uniform in DISK( 0, 1 )
*
* ISEED (input/output) INTEGER array, dimension (4)
* On entry ISEED specifies the seed of the random number
* generator. The values of ISEED are changed on exit, and can
* be used in the next call to CLATM4 to continue the same
* random number sequence.
* Note: ISEED(4) should be odd, for the random number generator
* used at present.
*
* A (output) COMPLEX array, dimension (LDA, N)
* Array to be computed.
*
* LDA (input) INTEGER
* Leading dimension of A. Must be at least 1 and at least N.
*
* =====================================================================
*
* .. Parameters ..
REAL ZERO, ONE
PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 )
COMPLEX CZERO, CONE
PARAMETER ( CZERO = ( 0.0E+0, 0.0E+0 ),
$ CONE = ( 1.0E+0, 0.0E+0 ) )
* ..
* .. Local Scalars ..
INTEGER I, ISDB, ISDE, JC, JD, JR, K, KBEG, KEND, KLEN
REAL ALPHA
COMPLEX CTEMP
* ..
* .. External Functions ..
REAL SLARAN
COMPLEX CLARND
EXTERNAL SLARAN, CLARND
* ..
* .. External Subroutines ..
EXTERNAL CLASET
* ..
* .. Intrinsic Functions ..
INTRINSIC ABS, CMPLX, EXP, LOG, MAX, MIN, MOD, REAL
* ..
* .. Executable Statements ..
*
IF( N.LE.0 )
$ RETURN
CALL CLASET( 'Full', N, N, CZERO, CZERO, A, LDA )
*
*     Ensure a correct ISEED
*
IF( MOD( ISEED( 4 ), 2 ).NE.1 )
$ ISEED( 4 ) = ISEED( 4 ) + 1
*
* Compute diagonal and subdiagonal according to ITYPE, NZ1, NZ2,
* and RCOND
*
IF( ITYPE.NE.0 ) THEN
IF( ABS( ITYPE ).GE.4 ) THEN
KBEG = MAX( 1, MIN( N, NZ1+1 ) )
KEND = MAX( KBEG, MIN( N, N-NZ2 ) )
KLEN = KEND + 1 - KBEG
ELSE
KBEG = 1
KEND = N
KLEN = N
END IF
ISDB = 1
ISDE = 0
GO TO ( 10, 30, 50, 80, 100, 120, 140, 160,
$ 180, 200 )ABS( ITYPE )
*
* |ITYPE| = 1: Identity
*
10 CONTINUE
DO 20 JD = 1, N
A( JD, JD ) = CONE
20 CONTINUE
GO TO 220
*
* |ITYPE| = 2: Transposed Jordan block
*
30 CONTINUE
DO 40 JD = 1, N - 1
A( JD+1, JD ) = CONE
40 CONTINUE
ISDB = 1
ISDE = N - 1
GO TO 220
*
* |ITYPE| = 3: Transposed Jordan block, followed by the identity.
*
50 CONTINUE
K = ( N-1 ) / 2
DO 60 JD = 1, K
A( JD+1, JD ) = CONE
60 CONTINUE
ISDB = 1
ISDE = K
DO 70 JD = K + 2, 2*K + 1
A( JD, JD ) = CONE
70 CONTINUE
GO TO 220
*
* |ITYPE| = 4: 1,...,k
*
80 CONTINUE
DO 90 JD = KBEG, KEND
A( JD, JD ) = CMPLX( JD-NZ1 )
90 CONTINUE
GO TO 220
*
* |ITYPE| = 5: One large D value:
*
100 CONTINUE
DO 110 JD = KBEG + 1, KEND
A( JD, JD ) = CMPLX( RCOND )
110 CONTINUE
A( KBEG, KBEG ) = CONE
GO TO 220
*
* |ITYPE| = 6: One small D value:
*
120 CONTINUE
DO 130 JD = KBEG, KEND - 1
A( JD, JD ) = CONE
130 CONTINUE
A( KEND, KEND ) = CMPLX( RCOND )
GO TO 220
*
* |ITYPE| = 7: Exponentially distributed D values:
*
140 CONTINUE
A( KBEG, KBEG ) = CONE
IF( KLEN.GT.1 ) THEN
ALPHA = RCOND**( ONE / REAL( KLEN-1 ) )
DO 150 I = 2, KLEN
A( NZ1+I, NZ1+I ) = CMPLX( ALPHA**( I-1 ) )
150 CONTINUE
END IF
GO TO 220
*
* |ITYPE| = 8: Arithmetically distributed D values:
*
160 CONTINUE
A( KBEG, KBEG ) = CONE
IF( KLEN.GT.1 ) THEN
ALPHA = ( ONE-RCOND ) / REAL( KLEN-1 )
DO 170 I = 2, KLEN
A( NZ1+I, NZ1+I ) = CMPLX( REAL( KLEN-I )*ALPHA+RCOND )
170 CONTINUE
END IF
GO TO 220
*
* |ITYPE| = 9: Randomly distributed D values on ( RCOND, 1):
*
180 CONTINUE
ALPHA = LOG( RCOND )
DO 190 JD = KBEG, KEND
A( JD, JD ) = EXP( ALPHA*SLARAN( ISEED ) )
190 CONTINUE
GO TO 220
*
* |ITYPE| = 10: Randomly distributed D values from DIST
*
200 CONTINUE
DO 210 JD = KBEG, KEND
A( JD, JD ) = CLARND( IDIST, ISEED )
210 CONTINUE
*
220 CONTINUE
*
* Scale by AMAGN
*
DO 230 JD = KBEG, KEND
A( JD, JD ) = AMAGN*REAL( A( JD, JD ) )
230 CONTINUE
DO 240 JD = ISDB, ISDE
A( JD+1, JD ) = AMAGN*REAL( A( JD+1, JD ) )
240 CONTINUE
*
* If RSIGN = .TRUE., assign random signs to diagonal and
* subdiagonal
*
IF( RSIGN ) THEN
DO 250 JD = KBEG, KEND
IF( REAL( A( JD, JD ) ).NE.ZERO ) THEN
CTEMP = CLARND( 3, ISEED )
CTEMP = CTEMP / ABS( CTEMP )
A( JD, JD ) = CTEMP*REAL( A( JD, JD ) )
END IF
250 CONTINUE
DO 260 JD = ISDB, ISDE
IF( REAL( A( JD+1, JD ) ).NE.ZERO ) THEN
CTEMP = CLARND( 3, ISEED )
CTEMP = CTEMP / ABS( CTEMP )
A( JD+1, JD ) = CTEMP*REAL( A( JD+1, JD ) )
END IF
260 CONTINUE
END IF
*
* Reverse if ITYPE < 0
*
IF( ITYPE.LT.0 ) THEN
DO 270 JD = KBEG, ( KBEG+KEND-1 ) / 2
CTEMP = A( JD, JD )
A( JD, JD ) = A( KBEG+KEND-JD, KBEG+KEND-JD )
A( KBEG+KEND-JD, KBEG+KEND-JD ) = CTEMP
270 CONTINUE
DO 280 JD = 1, ( N-1 ) / 2
CTEMP = A( JD+1, JD )
A( JD+1, JD ) = A( N+1-JD, N-JD )
A( N+1-JD, N-JD ) = CTEMP
280 CONTINUE
END IF
*
END IF
*
* Fill in upper triangle
*
IF( TRIANG.NE.ZERO ) THEN
DO 300 JC = 2, N
DO 290 JR = 1, JC - 1
A( JR, JC ) = TRIANG*CLARND( IDIST, ISEED )
290 CONTINUE
300 CONTINUE
END IF
*
RETURN
*
* End of CLATM4
*
END
|
{"hexsha": "c539a32479fac976edebe0646be61d28b1a9c952", "size": 10042, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "old/lapack-test/lapack-timing/EIG/clatm4.f", "max_stars_repo_name": "haampie/libflame", "max_stars_repo_head_hexsha": "a6b27af9b7ef91ec2724b52c7c09b681379a3470", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 199, "max_stars_repo_stars_event_min_datetime": "2015-02-06T06:05:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T05:20:33.000Z", "max_issues_repo_path": "old/lapack-test/lapack-timing/EIG/clatm4.f", "max_issues_repo_name": "haampie/libflame", "max_issues_repo_head_hexsha": "a6b27af9b7ef91ec2724b52c7c09b681379a3470", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2015-05-10T18:14:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T08:22:10.000Z", "max_forks_repo_path": "old/lapack-test/lapack-timing/EIG/clatm4.f", "max_forks_repo_name": "haampie/libflame", "max_forks_repo_head_hexsha": "a6b27af9b7ef91ec2724b52c7c09b681379a3470", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 70, "max_forks_repo_forks_event_min_datetime": "2015-02-07T04:53:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T05:20:36.000Z", "avg_line_length": 30.6158536585, "max_line_length": 72, "alphanum_fraction": 0.4862577176, "num_tokens": 3299}
|
import tensorflow as tf
import numpy as np
import json
import time
import logging
import pickle as pkl
from accuracy_score import model_evaluation
def HAN_model_1(session, config, logger, restore=False):
"""Hierarhical Attention Network"""
try:
from tensorflow.contrib.rnn import GRUCell, MultiRNNCell, DropoutWrapper
except ImportError:
MultiRNNCell = tf.nn.rnn_cell.MultiRNNCell
GRUCell = tf.nn.rnn_cell.GRUCell
from bn_lstm import BNLSTMCell
from han_model import HANClassifierModel
is_training = tf.placeholder(dtype=tf.bool, name='is_training')
cell = GRUCell(50)
if config["cell"] == 0:
cell = GRUCell(50)
logger.info("Using GRU")
elif config["cell"] == 1:
cell = BNLSTMCell(80, is_training) # h-h batchnorm LSTMCell
logger.info("Using batch Normalized LSTM")
else:
logger.info("Using GRU")
# elif config["cell"] == 2:
# cell = MultiRNNCell([cell] * 5)
# logger.info("Using multi RNN cells")
model = HANClassifierModel(
vocab_size=config["vocab_size"],
embedding_size=config["embedding_size"],
go_size=config["go_size"],
go_embedding_size=config["go_embedding_size"],
classes=config["classes"],
word_cell=cell,
sentence_cell=cell,
word_output_size=config["word_output_size"],
sentence_output_size=config["sentence_output_size"],
max_grad_norm=config["max_grad_norm"],
dropout_keep_proba=config["dropout_keep_prob"],
is_training=is_training
)
saver = tf.train.Saver(tf.global_variables())
checkpoint = tf.train.get_checkpoint_state(config["checkpoint_dir"])
print(checkpoint)
if checkpoint and not config["is_training"]:
print("Reading model parameters from %s" % checkpoint.model_checkpoint_path)
saver.restore(session, checkpoint.model_checkpoint_path)
else:
print("Created model with fresh parameters")
session.run(tf.global_variables_initializer())
return model, saver
model_fn = HAN_model_1
def batch_iterator(dataset, batch_size, max_epochs):
for i in range(max_epochs):
xb = []
yb = []
for ex in dataset:
x, y = ex
xb.append(x)
yb.append(y)
if len(xb) == batch_size:
yield xb, yb
xb, yb = [], []
def train_test(configuration):
tf.reset_default_graph()
logger = logging.getLogger("HAN")
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as s:
model, saver = model_fn(s, configuration, logger)
########
# Written only for sanity check of the model
# fd = {
# model.is_training: True,
# model.inputs: [[
# [5, 4, 1, 0],
# [3, 3, 6, 7],
# [6, 7, 0, 0]
# ],
# [
# [2, 2, 1, 0],
# [3, 3, 6, 7],
# [0, 0, 0, 0]
# ]],
# model.word_lengths: [
# [3, 4, 2],
# [3, 4, 0],
# ],
# model.sentence_lengths: [3, 2],
# model.labels: [[0, 1, 0], [1, 1, 1]],
# model.aspect:[[0,1,0],[1,0,0]],
# model.go_inputs:[1,2]
# }
########
if configuration["is_training"]:
# LOAD WORD EMBEDDINGS
with open(configuration["w_emb_path"], 'rb') as f:
w_emb = pkl.load(f)
s.run(model.word_emb_init, feed_dict={model.word_emb_placeholder: w_emb})
summary_writer = tf.summary.FileWriter(configuration["tflog_dir"], graph=tf.get_default_graph())
# Loading train data
with open(configuration["train_data_path"], 'rb')as f:
data = pkl.load(f)
logger.info("Loaded Data")
full_batch = configuration["full_batch"]
for i in range(1, configuration["epochs"] + 1):
fd, _ = model.get_feed_data(data, full_batch=full_batch)
t0 = time.clock()
step, summaries, loss, _ = s.run([
model.global_step,
model.summary_op,
model.loss,
model.train_op,
], fd)
td = time.clock() - t0
logger.info("STEP: %7d | Loss: %.8f | Time: %f" % (step, loss, td))
print("STEP: %7d | Loss: %.8f | Time: %f" % (step, loss, td))
if configuration["dump_log"]:
summary_writer.add_summary(summaries, global_step=step)
if step % configuration["dump_after_every_x_epochs"] == 0:
logger.info('checkpoint & graph meta')
saver.save(s, configuration["checkpoint_dir"], global_step=step)
logger.info('checkpoint done')
else:
# Load test data:
with open(configuration["test_data_path"], 'rb') as f:
data = pkl.load(f)
logger.info("Loaded Test Data")
test_data = model.get_feed_data_for_test(data, max_batchsize=1000)
print("CALCUALTED TRUE")
curr_true = None
curr_pred = None
for i in range(len(test_data)):
logger.info("Loaded Test Data")
fd, y_true = test_data[i][0], test_data[i][1]
                # create the feed dict for this batch
                feed_d = {model.inputs: fd["abstract"],
                          model.sentence_lengths: fd['doc_len'],
                          model.word_lengths: fd["sent_len"],
                          model.go_inputs: fd["go_inputs"],
                          model.aspect: fd["aspect"],
                          model.is_training: False}
sigmoids = s.run(model.prediction, feed_d)
print("CALCUALTED PRED")
predictions = sigmoids > 0.5
y_pred = predictions.astype(int)
if curr_true is None:
curr_true = y_true
else:
curr_true = np.vstack((curr_true,y_true))
if curr_pred is None:
curr_pred = y_pred
else:
curr_pred = np.vstack((curr_pred,y_pred))
            # dump the true and predicted labels
if configuration["dump_eval"]:
with open("predict_data.pkl",'wb') as f:
pkl.dump([curr_true,curr_pred],f)
evaluator = model_evaluation(curr_true)
acc = evaluator.compute_accuracy_score(curr_true, curr_pred)
print("ACCURACY OF THE MODEL IS %f",acc)
logger.info("ACCURACY OF THE MODEL IS %f",acc)
f1_mac, f1_mic, f1_weighted, precision, recall = evaluator.binary_class_model(curr_true, curr_pred)
print("f1_macro : ",f1_mac)
print("###############################")
print("f1_micro :", f1_mic)
print("###############################")
print("f1_weighted :", f1_weighted)
print("###############################")
print("precision :", precision)
print("###############################")
print("recall :", recall)
print("###############################")
logger.info("f1_macro : "+ str(f1_mac))
logger.info("###############################")
logger.info("f1_micro :"+ str(f1_mic))
logger.info("###############################")
logger.info("f1_weighted :"+ str(f1_weighted))
logger.info("###############################")
logger.info("precision :"+ str(precision))
logger.info("###############################")
logger.info("recall :"+ str(recall))
logger.info("###############################")
# print(y_pred, y_true)
# calculate precision recall f1
def main():
with open("config.json") as f:
config = json.load(f)
if config["is_training"]:
try:
logfile = config["train_log_filename"]
with open(logfile, 'w') as f:
pass
except:
logfile = "log_train.txt"
logging.basicConfig(filename=logfile,
filemode='a',
level=logging.DEBUG)
logging.info("Starting training of Attention Model")
else:
try:
logfile = config["test_log_filename"]
with open(logfile, 'w') as f:
pass
except:
logfile = "log_test.txt"
logging.basicConfig(filename=logfile,
filemode='a',
level=logging.DEBUG)
logging.info("Starting testing of Attention Model")
train_test(config)
if __name__ == '__main__':
main()
|
{"hexsha": "7c30a19653e9698fcc4336c7f3a8ad7081529284", "size": 9176, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn_model/worker.py", "max_stars_repo_name": "heisenbugfix/GO_Evidence_Classification", "max_stars_repo_head_hexsha": "06beb1d0ec56a983394f2f07aa3e70c33e28fdb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nn_model/worker.py", "max_issues_repo_name": "heisenbugfix/GO_Evidence_Classification", "max_issues_repo_head_hexsha": "06beb1d0ec56a983394f2f07aa3e70c33e28fdb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn_model/worker.py", "max_forks_repo_name": "heisenbugfix/GO_Evidence_Classification", "max_forks_repo_head_hexsha": "06beb1d0ec56a983394f2f07aa3e70c33e28fdb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2333333333, "max_line_length": 111, "alphanum_fraction": 0.5025065388, "include": true, "reason": "import numpy", "num_tokens": 1990}
|
! -----------------------------------------------------------------------------
!
! Copyright (c) 2017 Sam Cox, Roberto Sommariva
!
! This file is part of the AtChem2 software package.
!
! This file is covered by the MIT license which can be found in the file
! LICENSE.md at the top level of the AtChem2 distribution.
!
! -----------------------------------------------------------------------------
module solar_test
use fruit
use types_mod
use solar_functions_mod
implicit none
contains
subroutine test_calcTheta
use date_mod, only : currentYear, currentDayOfYear
real(kind=DP) :: theta, pi, threshold
threshold = 1.0e-8_DP
pi = 4.0_DP * atan( 1.0_DP )
currentYear = 2000_DI
currentDayOfYear = 0_DI
theta = calcTheta()
call assert_true( theta == 0.0_DP, "calcTheta(), first day of 2000" )
currentDayOfYear = 365_DI
theta = calcTheta()
call assert_true( theta == 2.0_DP*pi*365_DI/366_DI, "calcTheta(), last day of 2000" )
currentYear = 2001_DI
currentDayOfYear = 0_DI
theta = calcTheta()
call assert_true( theta == 0.0_DP, "calcTheta(), first day of 2001" )
currentDayOfYear = 364_DI
theta = calcTheta()
call assert_true( abs(theta - 2.0_DP*pi*364_DI/365_DI) <= threshold, "calcTheta(), last day of 2001" )
currentYear = 2004_DI
currentDayOfYear = 0_DI
theta = calcTheta()
call assert_true( theta == 0.0_DP, "calcTheta(), first day of 2004" )
currentDayOfYear = 365_DI
theta = calcTheta()
call assert_true( theta == 2.0_DP*pi*365_DI/366_DI, "calcTheta(), last day of 2004" )
end subroutine test_calcTheta
subroutine test_decFromTheta
real(kind=DP) :: dec, pi, theta
pi = 4.0_DP * atan( 1.0_DP )
theta = 0.0_DP
dec = decFromTheta( theta )
call assert_true( dec == -0.402449_DP, "decFromTheta( theta ), theta = 0" )
theta = pi
dec = decFromTheta( theta )
call assert_true( dec == 0.40276899999999999_DP, "decFromTheta( theta ), theta = pi" )
theta = pi/2.0_DP
dec = decFromTheta( theta )
call assert_true( dec == 8.2452999999999985E-002_DP, "decFromTheta( theta ), theta = pi/2" )
theta = pi*3.0_DP/2.0_DP
dec = decFromTheta( theta )
call assert_true( dec == -5.5100999999999921E-002_DP, "decFromTheta( theta ), theta = 3pi/2" )
end subroutine test_decFromTheta
subroutine test_calcDec
use types_mod
use date_mod, only : currentYear, currentDayOfYear
real(kind=DP) :: dec0, dec364over365, dec365over366, threshold
threshold = 1.0e-6_DP
dec0 = 0.006918_DP - 0.399912_DP - 0.006758_DP - 0.002697_DP
dec364over365 = -0.40369912461219781
dec365over366 = -0.40369589165596537
currentYear = 2000_DI
currentDayOfYear = 0_DI
call assert_true( calcDec() == dec0, "calcDec(), first day of 2000" )
currentDayOfYear = 365_DI
call assert_true( abs( calcDec() - dec365over366 ) < threshold, "calcDec(), last day of 2000" )
currentYear = 2001_DI
currentDayOfYear = 0_DI
call assert_true( calcDec() == dec0, "calcDec(), first day of 2001" )
currentDayOfYear = 364_DI
call assert_true( abs( calcDec() - dec364over365 ) < threshold, "calcDec(), last day of 2001" )
currentYear = 2004_DI
currentDayOfYear = 0_DI
call assert_true( calcDec() == dec0, "calcDec(), first day of 2004" )
currentDayOfYear = 365_DI
call assert_true( abs( calcDec() - dec365over366 ) < threshold, "calcDec(), last day of 2004" )
end subroutine test_calcDec
subroutine test_calcEQT
use types_mod
use date_mod, only : currentYear, currentDayOfYear
real(kind=DP) :: eqt0, threshold
threshold = 1.0e-8_DP
eqt0 = 0.000075_DP + 0.001868_DP - 0.014615_DP
currentYear = 2000_DI
currentDayOfYear = 0_DI
call assert_true( abs( calcEQT() - eqt0 ) < threshold, "calcEQT(), first day of 2000" )
currentDayOfYear = 365_DI
call assert_true( abs( calcEQT() + 1.0710769160677822E-002 ) < threshold, "calcEQT(), last day of 2000" )
currentYear = 2001_DI
currentDayOfYear = 0_DI
call assert_true( abs( calcEQT() - eqt0 ) < threshold, "calcEQT(), first day of 2001" )
currentDayOfYear = 364_DI
call assert_true( abs( calcEQT() + 1.0705374687579837E-002 ) < threshold, "calcEQT(), last day of 2001" )
currentYear = 2004_DI
currentDayOfYear = 0_DI
call assert_true( abs( calcEQT() - eqt0 ) < threshold, "calcEQT(), first day of 2004" )
currentDayOfYear = 365_DI
call assert_true( abs( calcEQT() + 1.0710769160677822E-002 ) < threshold, "calcEQT(), last day of 2004" )
end subroutine test_calcEQT
! TODO: subroutine test_calcZenith
end module solar_test
|
{"hexsha": "44aedb4f281b81391c5c5ff888597992f4241bc0", "size": 4663, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/unit_tests/solar_test.f90", "max_stars_repo_name": "AlfredMayhew/AtChem2", "max_stars_repo_head_hexsha": "afc389685ac8cce7028bd52dc99984d9959b044a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2018-01-15T09:49:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T16:48:08.000Z", "max_issues_repo_path": "tests/unit_tests/solar_test.f90", "max_issues_repo_name": "AlfredMayhew/AtChem2", "max_issues_repo_head_hexsha": "afc389685ac8cce7028bd52dc99984d9959b044a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 211, "max_issues_repo_issues_event_min_datetime": "2017-07-07T17:05:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:21:16.000Z", "max_forks_repo_path": "tests/unit_tests/solar_test.f90", "max_forks_repo_name": "AlfredMayhew/AtChem2", "max_forks_repo_head_hexsha": "afc389685ac8cce7028bd52dc99984d9959b044a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-07-04T05:52:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T19:36:58.000Z", "avg_line_length": 35.0601503759, "max_line_length": 109, "alphanum_fraction": 0.6598756166, "num_tokens": 1481}
|
import re
import numpy as np
import pandas as pd
import spacy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from tqdm import tqdm
from tea import setup_logger, NEGATIVE_WORDS, POSITIVE_WORDS, CONTRACTION_MAP
from tea.text_mining import tokenize_text
from tea.word_embedding import WordEmbedding
SPACY_NLP = spacy.load('en', parse=False, tag=False, entity=False)
logger = setup_logger(__name__)
class ModelTransformer(BaseEstimator, TransformerMixin):
def __init__(self, model):
self.model = model
def fit(self, *args, **kwargs):
self.model.fit(*args, **kwargs)
return self
def transform(self, X, **transform_params):
return pd.DataFrame(self.model.predict(X))
class ColumnExtractor(BaseEstimator, TransformerMixin):
"""Takes in dataframe, extracts a number of columns and return these columns"""
def __init__(self, columns):
"""
:param columns:
"""
self.columns = columns
def transform(self, X, y=None):
if set(self.columns).issubset(set(X.columns.tolist())):
return X[self.columns].values
else:
raise Exception('Columns declared, not in dataframe')
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class TextColumnExtractor(BaseEstimator, TransformerMixin):
"""Takes in dataframe, extracts the column with the text"""
def __init__(self, column):
"""
:param column:
"""
self.column = column
def transform(self, X, y=None):
if {self.column}.issubset(set(X.columns.tolist())):
return X[self.column]
else:
raise Exception('Columns declared, not in dataframe')
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class DenseTransformer(BaseEstimator, TransformerMixin):
def transform(self, X, y=None, **fit_params):
return X.todense()
def fit_transform(self, X, y=None, **fit_params):
self.fit(X, y, **fit_params)
return self.transform(X)
def fit(self, X, y=None, **fit_params):
return self
class SingleColumnDimensionReshaper(BaseEstimator, TransformerMixin):
def __init__(self):
"""
"""
pass
def transform(self, X, y=None):
return X.values.reshape(-1, 1)
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class WordLengthMetricsExtractor(BaseEstimator, TransformerMixin):
"""Takes in dataframe, extracts text column, splits text in tokens and outputs average word length"""
def __init__(self,
col_name,
split_type='simple',
metric='avg',
reshape=True):
"""
:param col_name:
:param split_type:
:param metric:
:param reshape:
"""
assert metric in ['avg', 'std']
self.split_type = split_type
self.col_name = col_name
self.metric = metric
self.reshape = reshape
def calculate_metric(self, words):
"""
Helper code to compute average word length of a name
:param words:
:return:
"""
if words:
if self.metric == 'avg':
return np.mean([len(word) for word in words])
elif self.metric == 'std':
return np.std([len(word) for word in words])
else:
return 0
def transform(self, X, y=None):
        if self.col_name is None:
x = X.apply(lambda s: tokenize_text(text=s, split_type=self.split_type))
else:
logger.info('Calculating {} for "{}" Column'.format(self.metric, self.col_name))
x = X[self.col_name].apply(lambda s: tokenize_text(text=s, split_type=self.split_type))
out = x.apply(self.calculate_metric)
if self.reshape:
return out.values.reshape(-1, 1)
return out
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class TextLengthExtractor(BaseEstimator, TransformerMixin):
"""Takes in dataframe, extracts text column, returns sentence's length"""
def __init__(self, col_name, reshape=True):
"""
:param col_name:
:param reshape:
"""
self.col_name = col_name
self.reshape = reshape
    def transform(self, X, y=None):
        if self.col_name is None:
            logger.info('Calculating text length for pandas series')
            out = X.apply(len)
        else:
            logger.info('Calculating text length for "{}" Column'.format(self.col_name))
            out = X[self.col_name].apply(len)
if self.reshape:
return out.values.reshape(-1, 1)
return out
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class ContainsSpecialCharactersExtractor(BaseEstimator, TransformerMixin):
def __init__(self, col_name, reshape=True):
"""
This class checks whether there are some given special characters in a text.
:param col_name:
"""
self.col_name = col_name
self.SPECIAL_CHARACTERS = set("!@#$%^&*()_+-=")
self.reshape = reshape
def transform(self, X, y=None):
logger.info('Checking whether text contains special characters for "{}" Column'.format(self.col_name))
        if self.col_name is None:
            out = X.apply(lambda s: bool(set(s) & self.SPECIAL_CHARACTERS))
        else:
            out = X[self.col_name].apply(lambda s: bool(set(s) & self.SPECIAL_CHARACTERS))
if self.reshape:
return out.values.reshape(-1, 1)
return out
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class ContainsSequentialChars(BaseEstimator, TransformerMixin):
"""
    Checks if a given character pattern appears in the sentence.
"""
def __init__(self, col_name, pattern="...", reshape=True):
"""
        This class checks whether a given character pattern appears in a text.
:param col_name:
"""
self.col_name = col_name
self.pattern = pattern
self.reshape = reshape
    def transform(self, X, y=None):
        logger.info('Checking whether text contains the "{}" pattern for "{}" Column'.format(self.pattern, self.col_name))
        if self.col_name is None:
            out = X.apply(lambda s: self.pattern in s)
        else:
            out = X[self.col_name].apply(lambda s: self.pattern in s)
if self.reshape:
return out.values.reshape(-1, 1)
return out
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class ContainsUppercaseWords(BaseEstimator, TransformerMixin):
"""Takes in data-frame, extracts number of tokens in text"""
def __init__(self, col_name=None, how='bool', reshape=True):
"""
:param col_name:
:param how:
"""
assert how in ['bool', 'count']
self.col_name = col_name
self.how = how
self.reshape = reshape
def calculate_uppercase_words_in_tokens(self, sentence):
"""
        This method checks whether a sentence contains words written in uppercase characters.
        :param sentence:
:return:
"""
tokens = tokenize_text(text=sentence, split_type='simple')
if self.how == 'bool':
for t in tokens:
if t.isupper():
return True
return False
else:
return sum([1 for token in tokens if token.isupper()])
def transform(self, X, y=None):
if self.col_name is None:
logger.info('Checking if text contains uppercase words for pandas series')
out = X.apply(self.calculate_uppercase_words_in_tokens)
else:
logger.info('Checking if text contains uppercase words for "{}" Column'.format(self.col_name))
out = X[self.col_name].apply(self.calculate_uppercase_words_in_tokens)
if self.reshape:
return out.values.reshape(-1, 1)
return out
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class NumberOfTokensCalculator(BaseEstimator, TransformerMixin):
"""Takes in dataframe, extracts number of tokens in text"""
def __init__(self, col_name, reshape=True):
"""
:param col_name:
"""
self.col_name = col_name
self.reshape = reshape
def transform(self, X, y=None):
logger.info('Counting number of tokens for "{}" Column'.format(self.col_name))
        if self.col_name is None:
            out = X.apply(lambda x: len(tokenize_text(x, split_type='thorough')))
        else:
            out = X[self.col_name].apply(lambda x: len(tokenize_text(x, split_type='thorough')))
if self.reshape:
return out.values.reshape(-1, 1)
return out
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class HasSentimentWordsExtractor(BaseEstimator, TransformerMixin):
"""Takes in data-frame, extracts number of tokens in text"""
def __init__(self,
col_name,
count_type='boolean',
input_type='text',
sentiment='negative',
reshape=True):
"""
:param col_name:
"""
assert sentiment in ['negative', 'positive']
assert count_type in ['boolean', 'counts']
assert input_type in ['text', 'tokens']
self.col_name = col_name
self.sentiment = sentiment
self.input_type = input_type
self.count_type = count_type
self.reshape = reshape
if self.sentiment == 'positive':
self.words_set = POSITIVE_WORDS
else:
self.words_set = NEGATIVE_WORDS
def calculate_boolean_output(self, inp):
"""
        This method checks whether a sentence contains at least one sentiment-bearing token.
:param inp:
:return:
"""
tokens = inp.split() if self.input_type == 'text' else inp
for token in set(tokens):
if token in self.words_set:
return True
return False
def calculate_counts_output(self, inp):
"""
This method counts the number of tokens that contain sentiment in a text.
:param inp:
:return:
"""
tokens = inp.split() if self.input_type == 'text' else inp
return sum([1 for t in tokens if t in self.words_set])
def transform(self, X, y=None):
"""
:param X:
:param y:
:return:
"""
logger.info('Searching for {} sentiment of tokens for "{}" Column'.format(self.sentiment, self.col_name))
        if self.col_name is None and self.count_type == 'boolean':
            out = X.apply(self.calculate_boolean_output)
        elif self.col_name is None and self.count_type == 'counts':
            out = X.apply(self.calculate_counts_output)
        elif self.count_type == 'boolean':
out = X[self.col_name].apply(self.calculate_boolean_output)
else:
out = X[self.col_name].apply(self.calculate_counts_output)
if self.reshape:
return out.values.reshape(-1, 1)
return out
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
class SentenceEmbeddingExtractor(BaseEstimator, TransformerMixin):
"""Takes in dataframe, the average of sentence's word embeddings"""
def __init__(self,
col_name=None,
embedding_type='tf',
embedding_dimensions=50,
word_embeddings_dict=None):
"""
:param col_name:
:param embedding_type:
        :param embedding_dimensions:
        :param word_embeddings_dict:
"""
assert embedding_type in ['tf', 'tfidf']
assert embedding_dimensions in [50, 100, 200, 300]
self.col_name = col_name
self.embedding_dimensions = embedding_dimensions
self.embedding_type = embedding_type
self.word_embeddings_dict = word_embeddings_dict
def calculate_updated_sentence_embeddings(self, X):
"""
:param X:
:return:
"""
if self.word_embeddings_dict is None:
logger.info('Loading word embeddings for {} dimensions'.format(self.embedding_dimensions))
word_embeddings = WordEmbedding.get_word_embeddings(dimension=self.embedding_dimensions)
else:
logger.info('Loading pre loaded word embeddings for {} dimensions'.format(self.embedding_dimensions))
word_embeddings = self.word_embeddings_dict.get(self.embedding_dimensions)
if self.embedding_type == 'tf':
vectorizer = CountVectorizer(strip_accents='unicode',
analyzer='word',
ngram_range=(1, 1),
stop_words=None,
lowercase=True,
binary=False)
elif self.embedding_type == 'tfidf':
vectorizer = TfidfVectorizer(strip_accents='unicode',
analyzer='word',
ngram_range=(1, 1),
stop_words=None,
lowercase=True,
binary=False,
norm='l2',
use_idf=True,
smooth_idf=True)
else:
raise NotImplementedError()
X_transformed = vectorizer.fit_transform(X)
analyser = vectorizer.build_analyzer()
vocabulary_indices = vectorizer.vocabulary_
embedded_vectors_updated = list()
for index_row, doc in enumerate(tqdm(X, unit=' Document')):
doc_vector = np.zeros(self.embedding_dimensions, dtype=float)
sum_of_tf_or_idfs = 0
            # Break the text into tokens.
doc_tokens = analyser(doc)
# We keep only the unique ones in order to get the tf-idf values from the stored matrix X_transformed.
for token in set(doc_tokens):
# get column index from the vocabulary in order to find the exact spot in the X_transformed matrix
index_col = vocabulary_indices[token]
# Getting the tf or idf value for the given word from the transformed matrix
token_tf_or_idf_value = X_transformed[index_row, index_col]
                # Search for the embedding vector of the given token. If it is not found,
                # fall back to a zero vector of the same shape.
token_embedding_vector = word_embeddings.get(token,
np.zeros(self.embedding_dimensions, dtype=float))
                # Element-wise product of the tf/tf-idf weight and the embedding vector.
token_embedding_vector = np.multiply(token_embedding_vector, token_tf_or_idf_value)
sum_of_tf_or_idfs += token_tf_or_idf_value
doc_vector += token_embedding_vector
            # Guard against documents whose analyzer produced no tokens (sum of weights == 0).
            doc_final_vector = np.divide(doc_vector, sum_of_tf_or_idfs) if sum_of_tf_or_idfs else doc_vector
embedded_vectors_updated.append(doc_final_vector)
return np.vstack(embedded_vectors_updated)
def transform(self, X, y=None):
if self.col_name is None:
logger.info('Calculating word embeddings of sentences for pandas series')
return self.calculate_updated_sentence_embeddings(X=X)
logger.info('Calculating word embeddings of sentences for "{}" Column'.format(self.col_name))
return self.calculate_updated_sentence_embeddings(X=X[self.col_name])
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
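# Worked toy example of the weighting in `calculate_updated_sentence_embeddings`
# (illustrative numbers only): if a document's analyzer yields the tokens
# "good" (tf-idf weight 0.8) and "movie" (tf-idf weight 0.6) with embeddings
# e_good and e_movie, the document vector is
#     (0.8 * e_good + 0.6 * e_movie) / (0.8 + 0.6)
# i.e. a tf-idf-weighted average of the word embeddings.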
class ContractionsExpander(BaseEstimator, TransformerMixin):
"""Takes in data-frame, the average of sentence's word embeddings"""
def __init__(self,
col_name=None,
contractions_mapper=CONTRACTION_MAP):
"""
:param col_name:
:param contractions_mapper:
"""
self.col_name = col_name
self.contractions_mapper = contractions_mapper
def expand_contractions(self, text):
"""
        This function expands contractions for the English language. For example, "I've" becomes "I have".
        :param text:
:return:
"""
contractions_pattern = re.compile('({})'.format('|'.join(self.contractions_mapper.keys())),
flags=re.IGNORECASE | re.DOTALL)
def expand_match(contraction):
"""
            This helper expands a single matched contraction.
:param contraction:
:return:
"""
match = contraction.group(0)
first_char = match[0]
expanded_contr = self.contractions_mapper.get(
match) if self.contractions_mapper.get(match) else self.contractions_mapper.get(match.lower())
expanded_contr = first_char + expanded_contr[1:]
return expanded_contr
expanded_text = contractions_pattern.sub(expand_match, text)
expanded_text = re.sub("'", "", expanded_text)
return expanded_text
def transform(self, X, y=None):
        if self.col_name is None:
            logger.info('Expanding contractions for pandas series')
            return X.apply(self.expand_contractions)
        logger.info('Expanding contractions for "{}" Column'.format(self.col_name))
        return X[self.col_name].apply(self.expand_contractions)
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
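# Illustrative example (hypothetical output; the exact expansions depend on
# the entries in CONTRACTION_MAP):
#     ContractionsExpander().transform(pd.Series(["I've seen it, you'll see"]))
#     # -> pd.Series(["I have seen it, you will see"])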
class LemmaExtractor(BaseEstimator, TransformerMixin):
"""Takes in data-frame, gets lemmatized words"""
def __init__(self,
col_name=None,
spacy_nlp=SPACY_NLP,
contractions_mapper=CONTRACTION_MAP):
"""
:param col_name:
:param spacy_nlp:
:param contractions_mapper:
"""
self.col_name = col_name
self.contractions_mapper = contractions_mapper
self.spacy_nlp = spacy_nlp
def lemmatize_text(self, text):
"""
This method lemmatizes text
:param text:
:return:
"""
text = self.spacy_nlp(text)
text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])
return text
def transform(self, X, y=None):
        if self.col_name is None:
            logger.info('Extracting lemmatized words for pandas series')
            return X.apply(self.lemmatize_text)
        logger.info('Extracting lemmatized words for "{}" Column'.format(self.col_name))
        return X[self.col_name].apply(self.lemmatize_text)
def fit(self, X, y=None):
"""Returns `self` unless something different happens in train and test"""
return self
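if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the 'text' column, the toy data
    # and the classifier below are hypothetical; any estimator with fit/predict
    # would work in place of LogisticRegression.
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import FeatureUnion, Pipeline
    df = pd.DataFrame({'text': ['GREAT product !!!', 'bad, would not buy...'],
                       'label': [1, 0]})
    features = FeatureUnion([
        ('length', TextLengthExtractor(col_name='text')),
        ('avg_word_len', WordLengthMetricsExtractor(col_name='text', metric='avg')),
        ('has_special', ContainsSpecialCharactersExtractor(col_name='text')),
    ])
    clf = Pipeline([('features', features), ('model', LogisticRegression())])
    clf.fit(df, df['label'])
    print(clf.predict(df))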
|
{"hexsha": "ebe1da8523e71169d12ec9774e06b6eba363d769", "size": 20112, "ext": "py", "lang": "Python", "max_stars_repo_path": "tea/features.py", "max_stars_repo_name": "gperakis/reviews-classifier", "max_stars_repo_head_hexsha": "18ded10d52e018136b56b934d091a39abd28a220", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tea/features.py", "max_issues_repo_name": "gperakis/reviews-classifier", "max_issues_repo_head_hexsha": "18ded10d52e018136b56b934d091a39abd28a220", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tea/features.py", "max_forks_repo_name": "gperakis/reviews-classifier", "max_forks_repo_head_hexsha": "18ded10d52e018136b56b934d091a39abd28a220", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1813953488, "max_line_length": 115, "alphanum_fraction": 0.598597852, "include": true, "reason": "import numpy", "num_tokens": 4109}
|
# Copyright 2021 ETH Zurich, Media Technology Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pandas as pd
if __name__ == "__main__":
    num_articles = 99  # must not exceed 99
    num_users = 10
    num_user_item_entries = 1000
folder = os.getenv('DATA_FOLDER','processed')
if not os.path.exists(folder):
os.makedirs(folder)
    user_item = pd.DataFrame([np.random.randint(0, num_users, size=num_user_item_entries),
                              np.random.randint(0, num_articles, size=num_user_item_entries)]).T
    user_item = user_item.reset_index()
    user_item.columns = ['ts', 'user_ix', 'article_id']
    text = pd.read_csv('blindtext', sep=';').iloc[:num_articles, :].reset_index()
    text.columns = ['resource_id', 'text']
    # Save the dummy article metadata and the user-item matrix into the data folder.
    text.to_csv(f'{folder}/meta.csv')
    user_item.to_parquet(f"{folder}/user_item_matrix_vertical.pq")
|
{"hexsha": "ad21715d67879dd29ef8fcbe87e014305b20fe6c", "size": 1469, "ext": "py", "lang": "Python", "max_stars_repo_path": "example_scripts/create_dummy_data.py", "max_stars_repo_name": "MTC-ETH/RecommenderSystems", "max_stars_repo_head_hexsha": "ede5aa961740348a68210f271397e1924c5f7cf6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example_scripts/create_dummy_data.py", "max_issues_repo_name": "MTC-ETH/RecommenderSystems", "max_issues_repo_head_hexsha": "ede5aa961740348a68210f271397e1924c5f7cf6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-11-12T09:12:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-12T09:16:46.000Z", "max_forks_repo_path": "example_scripts/create_dummy_data.py", "max_forks_repo_name": "MTC-ETH/RecommenderSystems", "max_forks_repo_head_hexsha": "ede5aa961740348a68210f271397e1924c5f7cf6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9761904762, "max_line_length": 161, "alphanum_fraction": 0.7113682777, "include": true, "reason": "import numpy", "num_tokens": 357}
|
import tensorflow as tf
import numpy as np
import argparse
import os
import time
import json
from sklearn import metrics
from tqdm import tqdm
os.environ.pop('http_proxy', None)  # default avoids a KeyError when the proxies are unset
os.environ.pop('https_proxy', None)
def train_criteo(model, cluster, task_id, nrank, args):
def get_current_shard(data):
part_size = data.shape[0] // nrank
start = part_size * task_id
end = start + part_size if task_id != nrank - 1 else data.shape[0]
return data[start:end]
if args.all:
from models.load_data import process_all_criteo_data
dense, sparse, all_labels = process_all_criteo_data()
dense_feature = get_current_shard(dense[0])
sparse_feature = get_current_shard(sparse[0])
labels = get_current_shard(all_labels[0])
val_dense = get_current_shard(dense[1])
val_sparse = get_current_shard(sparse[1])
val_labels = get_current_shard(all_labels[1])
else:
from models.load_data import process_sampled_criteo_data
dense_feature, sparse_feature, labels = process_sampled_criteo_data()
dense_feature = get_current_shard(dense_feature)
sparse_feature = get_current_shard(sparse_feature)
labels = get_current_shard(labels)
batch_size = 128
worker_device = "/job:worker/task:%d/gpu:0" % (task_id)
with tf.device(worker_device):
dense_input = tf.placeholder(tf.float32, [batch_size, 13])
sparse_input = tf.placeholder(tf.int32, [batch_size, 26])
        y_ = tf.placeholder(tf.float32, [batch_size, 1])
loss, y, train_op = model(dense_input, sparse_input, y_, cluster, task_id)
server = tf.train.Server(
cluster, job_name="worker", task_index=task_id)
init = tf.global_variables_initializer()
sv = tf.train.Supervisor(
is_chief=(task_id == 0),
init_op=init,
recovery_wait_secs=1)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
device_filters=["/job:ps",
"/job:worker/task:%d" % task_id])
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
sess.run(init)
if task_id == 0:
writer = tf.summary.FileWriter('logs/board', sess.graph)
my_feed_dict = {
dense_input: np.empty(shape = (batch_size, 13)),
sparse_input: np.empty(shape = (batch_size, 26)),
y_: np.empty(shape = (batch_size, 1)),
}
if args.all:
raw_log_file = './logs/tf_dist_%s_%d.log' % (args.model, task_id)
print('Processing all data, log to', raw_log_file)
log_file = open(raw_log_file, 'w')
iterations = dense_feature.shape[0] // batch_size
total_epoch = 400
start_index = 0
for ep in range(total_epoch):
# print("iters: %d" % (lp * 1000))
print("epoch %d" % ep)
st_time = time.time()
train_loss, train_acc, train_auc = [], [], []
for it in range(iterations // 10 + (ep % 10 == 9) * (iterations % 10)):
my_feed_dict[dense_input][:] = dense_feature[start_index: start_index + batch_size]
my_feed_dict[sparse_input][:] = sparse_feature[start_index : start_index + batch_size]
my_feed_dict[y_][:] = labels[start_index : start_index+batch_size]
start_index += batch_size
if start_index + batch_size > dense_feature.shape[0]:
start_index = 0
loss_val = sess.run([loss, y, y_, train_op], feed_dict=my_feed_dict)
pred_val = loss_val[1]
true_val = loss_val[2]
acc_val = np.equal(
true_val,
pred_val > 0.5)
train_loss.append(loss_val[0])
train_acc.append(acc_val)
train_auc.append(metrics.roc_auc_score(true_val, pred_val))
tra_accuracy = np.mean(train_acc)
tra_loss = np.mean(train_loss)
tra_auc = np.mean(train_auc)
en_time = time.time()
train_time = en_time - st_time
if args.val:
val_loss, val_acc, val_auc = [], [], []
for it in range(val_dense.shape[0] // batch_size):
local_st = it * batch_size
my_feed_dict[dense_input][:] = val_dense[local_st: local_st + batch_size]
my_feed_dict[sparse_input][:] = val_sparse[local_st : local_st + batch_size]
my_feed_dict[y_][:] = val_labels[local_st : local_st+batch_size]
loss_val = sess.run([loss, y, y_], feed_dict=my_feed_dict)
pred_val = loss_val[1]
true_val = loss_val[2]
acc_val = np.equal(
true_val,
pred_val > 0.5)
val_loss.append(loss_val[0])
val_acc.append(acc_val)
val_auc.append(metrics.roc_auc_score(true_val, pred_val))
v_accuracy = np.mean(val_acc)
v_loss = np.mean(val_loss)
v_auc = np.mean(val_auc)
printstr = "train_loss: %.4f, train_acc: %.4f, train_auc: %.4f, test_loss: %.4f, test_acc: %.4f, test_auc: %.4f, train_time: %.4f"\
% (tra_loss, tra_accuracy, tra_auc, v_loss, v_accuracy, v_auc, train_time)
else:
printstr = "train_loss: %.4f, train_acc: %.4f, train_auc: %.4f, train_time: %.4f"\
% (tra_loss, tra_accuracy, tra_auc, train_time)
print(printstr)
log_file.write(printstr + '\n')
log_file.flush()
else:
        # no validation pass in this branch
iteration = dense_feature.shape[0] // batch_size
epoch = 10
for ep in range(epoch):
print('epoch', ep)
if ep == 5:
start = time.time()
ep_st = time.time()
train_loss = []
train_acc = []
for idx in range(iteration):
start_index = idx * batch_size
my_feed_dict[dense_input][:] = dense_feature[start_index: start_index + batch_size]
my_feed_dict[sparse_input][:] = sparse_feature[start_index : start_index + batch_size]
my_feed_dict[y_][:] = labels[start_index : start_index+batch_size]
loss_val = sess.run([loss, y, y_, train_op], feed_dict=my_feed_dict)
pred_val = loss_val[1]
true_val = loss_val[2]
if pred_val.shape[1] == 1: # for criteo case
acc_val = np.equal(
true_val,
pred_val > 0.5)
else:
                    acc_val = np.equal(
                        np.argmax(pred_val, 1),
                        np.argmax(true_val, 1)).astype(float)
train_loss.append(loss_val[0])
train_acc.append(acc_val)
tra_accuracy = np.mean(train_acc)
tra_loss = np.mean(train_loss)
ep_en = time.time()
print("train_loss: %.4f, train_acc: %.4f, train_time: %.4f"
% (tra_loss, tra_accuracy, ep_en - ep_st))
print("tensorflow: ", (time.time() - start))
def train_adult(model, cluster, task_id, nrank):
from models.load_data import load_adult_data
x_train_deep, x_train_wide, y_train = load_adult_data(return_val=False)
part_size = len(x_train_deep) // nrank
start = part_size * task_id
end = start + part_size if task_id != nrank - 1 else len(x_train_deep)
x_train_deep = x_train_deep[start:end]
x_train_wide = x_train_wide[start:end]
y_train = y_train[start:end]
batch_size = 128
total_epoch = 50
dim_wide = 809
worker_device = "/job:worker/task:%d/gpu:0" % (task_id)
with tf.device(worker_device):
X_deep = []
for i in range(8):
X_deep.append(tf.placeholder(tf.int32, [batch_size, 1]))
for i in range(4):
X_deep.append(tf.placeholder(tf.float32, [batch_size, 1]))
X_wide = tf.placeholder(tf.float32, [batch_size, dim_wide])
y_ = tf.placeholder(tf.float32, [batch_size, 2])
loss, y, train_op, global_step = model(X_deep, X_wide, y_, cluster, task_id)
with tf.device(
tf.train.replica_device_setter(
worker_device=worker_device,
cluster=cluster)):
server = tf.train.Server(
cluster, job_name="worker", task_index=task_id)
init = tf.global_variables_initializer()
sv = tf.train.Supervisor(
is_chief=(task_id == 0),
init_op=init,
recovery_wait_secs=1,
global_step=global_step)
sess_config = tf.ConfigProto(
# allow_soft_placement=True,
log_device_placement=False,
device_filters=["/job:ps",
"/job:worker/task:%d" % task_id])
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
sess.run(init)
iterations = x_train_deep.shape[0] // batch_size
for ep in range(total_epoch):
print('epoch', ep)
if ep == 5:
start = time.time()
ep_st = time.time()
train_loss = []
train_acc = []
pre_index = 0
for it in range(iterations):
            batch_x_deep = x_train_deep[pre_index:pre_index + batch_size]
            batch_x_wide = x_train_wide[pre_index:pre_index + batch_size]
            batch_y = y_train[pre_index:pre_index + batch_size]
pre_index += batch_size
my_feed_dict = dict()
            for i in range(12):
                # Feed each deep-feature column into its matching placeholder.
                my_feed_dict[X_deep[i]] = np.array(batch_x_deep[:, i]).reshape(-1, 1)
my_feed_dict[X_wide] = np.array(batch_x_wide)
my_feed_dict[y_] = batch_y
loss_val = sess.run([loss, y, y_, train_op], feed_dict=my_feed_dict)
            acc_val = np.equal(
                np.argmax(loss_val[1], 1),
                np.argmax(loss_val[2], 1)).astype(float)
train_loss.append(loss_val[0])
train_acc.append(acc_val)
tra_accuracy = np.mean(train_acc)
tra_loss = np.mean(train_loss)
ep_en = time.time()
print("train_loss: %.4f, train_acc: %.4f, train_time: %.4f"
% (tra_loss, tra_accuracy, ep_en - ep_st))
print("tensorflow: ", (time.time() - start))
def test_bandwidth(cluster, task_id):
print('test bandwidth')
iters = 1000
params_size = 128 * 9
ps_device = "/job:ps/task:0/cpu:0"
worker_device = "/job:worker/task:%d/cpu:0" % (task_id)
with tf.device(ps_device):
        dtype = tf.int32
params = tf.get_variable("params", shape=[params_size], dtype=dtype,
initializer=tf.zeros_initializer())
with tf.device(tf.train.replica_device_setter(
worker_device=worker_device,
cluster=cluster)):
update = tf.get_variable("update", shape=[params_size], dtype=dtype,
initializer=tf.ones_initializer())
add_op = params.assign(update)
server = tf.train.Server(
cluster, job_name="worker", task_index=task_id)
init = tf.global_variables_initializer()
sv = tf.train.Supervisor(
is_chief=(task_id == 0),
init_op=init,
recovery_wait_secs=1)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
device_filters=["/job:ps",
"/job:worker/task:%d" % task_id])
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
sess.run(init)
# warm up
for i in range(5):
sess.run(add_op.op)
start_time = time.time()
for i in range(iters):
sess.run(add_op.op)
elapsed_time = time.time() - start_time
    ans = float(iters) * (params_size * 4 / 1024 / 1024) / elapsed_time  # int32 = 4 bytes per element
    print("transfer rate: %f MB/s" % (ans))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True, help="model to be tested")
parser.add_argument("--rank", type=int, required=True, help="rank of process")
parser.add_argument("--config", type=str, default='./settings/tf_dist_s1_w2.json', help="config file path")
parser.add_argument("--val", action="store_true", help="whether to use validation")
parser.add_argument("--all", action="store_true", help="whether to use all data")
args = parser.parse_args()
raw_model = args.model
task_id = int(args.rank)
raw_config = args.config
config = json.load(open(raw_config))
cluster = tf.train.ClusterSpec(config)
if raw_model != 'band':
import tf_models
model = eval('tf_models.' + raw_model)
dataset = raw_model.split('_')[-1]
print('Model:', raw_model)
if dataset == 'criteo':
train_criteo(model, cluster, task_id, len(config['worker']), args)
elif dataset == 'adult':
        # --val and --all are not supported for the adult dataset
train_adult(model, cluster, task_id, len(config['worker']))
else:
raise NotImplementedError
else:
test_bandwidth(cluster, task_id)
if __name__ == '__main__':
main()
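# Illustrative --config file (hypothetical hosts and ports): tf.train.ClusterSpec
# expects a mapping from job names to address lists, and `main` reads the number
# of workers from the "worker" entry, e.g.:
#     {
#         "ps": ["localhost:27777"],
#         "worker": ["localhost:27778", "localhost:27779"]
#     }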
|
{"hexsha": "fba943aeacda86e4b35267971ecead4777194123", "size": 13586, "ext": "py", "lang": "Python", "max_stars_repo_path": "pstests/tf_launch_worker.py", "max_stars_repo_name": "sj1104/Het", "max_stars_repo_head_hexsha": "81b7e9f0f593108db969fc46a1af3df74b825230", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-05T07:11:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T07:53:48.000Z", "max_issues_repo_path": "pstests/tf_launch_worker.py", "max_issues_repo_name": "sj1104/Het", "max_issues_repo_head_hexsha": "81b7e9f0f593108db969fc46a1af3df74b825230", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pstests/tf_launch_worker.py", "max_forks_repo_name": "sj1104/Het", "max_forks_repo_head_hexsha": "81b7e9f0f593108db969fc46a1af3df74b825230", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-04-01T22:39:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-21T11:51:57.000Z", "avg_line_length": 41.2948328267, "max_line_length": 147, "alphanum_fraction": 0.5768438098, "include": true, "reason": "import numpy", "num_tokens": 3191}
|
import torch
import megengine as mge
import megengine.functional as F
import os
import numpy as np
from meg_networks import FullyConnectedLayer
in_channels = 512
w_dim = 512
# activation = 'linear'
# activation = 'lrelu'
# activation = 'relu'
# activation = 'tanh'
activation = 'sigmoid'
# activation = 'elu'
# activation = 'selu'
# activation = 'softplus'
# activation = 'swish'
model = FullyConnectedLayer(w_dim, in_channels, activation=activation, bias_init=1)
model.train()
def copy(name, w, std):
    # Overwrite a parameter in the state dict while keeping the original
    # tensor's dtype and device: `value * 0 + value2` copies the new values in.
    value2 = w
    value = std[name]
    value = value * 0 + value2
    std[name] = value
fullyConnectedLayer_std = model.state_dict()
ckpt_file = 'pytorch_fullyConnectedLayer.pth'
save_name = 'pytorch_fullyConnectedLayer.pkl'
state_dict = torch.load(ckpt_file, map_location=torch.device('cpu'))
fullyConnectedLayer_dic = {}
for key, value in state_dict.items():
fullyConnectedLayer_dic[key] = value.data.numpy()
for key in fullyConnectedLayer_dic.keys():
name2 = key
w = fullyConnectedLayer_dic[key]
if '.linear.weight' in key:
        w = w.transpose(1, 0)  # the weight of PyTorch's nn.Linear() must be transposed before assigning it to Paddle's nn.Linear()
if '.noise_strength' in key:
w = np.reshape(w, [1, ])
print(key)
copy(name2, w, fullyConnectedLayer_std)
model.load_state_dict(fullyConnectedLayer_std)
mge.save(fullyConnectedLayer_std, save_name)
|
{"hexsha": "674fd29609b8cabbf3a4497d4adfe35e6738211b", "size": 1381, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_grad/test2_01_FullyConnectedLayer_grad_2mge.py", "max_stars_repo_name": "miemie2013/ppgan", "max_stars_repo_head_hexsha": "48008d85ec6c5fa2e1469acf8507b2614fa550cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_grad/test2_01_FullyConnectedLayer_grad_2mge.py", "max_issues_repo_name": "miemie2013/ppgan", "max_issues_repo_head_hexsha": "48008d85ec6c5fa2e1469acf8507b2614fa550cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_grad/test2_01_FullyConnectedLayer_grad_2mge.py", "max_forks_repo_name": "miemie2013/ppgan", "max_forks_repo_head_hexsha": "48008d85ec6c5fa2e1469acf8507b2614fa550cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-19T03:01:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T03:01:13.000Z", "avg_line_length": 24.2280701754, "max_line_length": 87, "alphanum_fraction": 0.7212165098, "include": true, "reason": "import numpy", "num_tokens": 375}
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation of a model for a computer/user."""
import injector
import typing
import numpy
import scipy.optimize
from simulation.configuration import Configuration
from simulation.distribution import EmpiricalDistribution
from simulation.static import user_satisfaction
from simulation.static import weighted_user_satisfaction
class Model(object):
"""Represents the model for a given hour.
A model will store all the needed distributions and will offer basic
functionality like timeout threshold calculation.
"""
@injector.inject
@injector.noninjectable('xmax', 'xmin', 'inactivity', 'activity',
'off_duration', 'off_fraction')
def __init__(self, config: Configuration, xmax: float,
xmin: float, inactivity: typing.List = None,
activity: typing.List = None,
off_duration: typing.List = None,
off_fraction: typing.List = None):
super(Model, self).__init__()
self.__inactivity = EmpiricalDistribution(inactivity)
self.__activity = EmpiricalDistribution(activity)
self.__off_duration = EmpiricalDistribution(off_duration)
self.__off_fraction = off_fraction or []
self.__optimal_timeout = None
self.__satisfaction_threshold = config.get_config_int(
'satisfaction_threshold')
self.__target_satisfaction = config.get_config_int(
'target_satisfaction')
self.__xmax = xmax
self.__xmin = xmin
self.__tested = None
@property
def is_complete(self) -> bool:
"""Indicates if the model has all the minimum distributions."""
return (len(self.__inactivity.data) > 0)
@property
def inactivity(self) -> EmpiricalDistribution:
"""Inactivity distribution."""
return self.__inactivity
@property
def activity(self) -> EmpiricalDistribution:
"""Activity distribution."""
return self.__activity
@property
def off_duration(self) -> EmpiricalDistribution:
"""Off intervals distribution."""
return self.__off_duration
@property
def off_fraction(self) -> typing.List:
"""Off proportions."""
return self.__off_fraction
def test_timeout(
self, timeout: float,
retest: bool = False) -> typing.Tuple[float, float, float, float]:
"""Calculate analytically the US and RI for a given timeout."""
if self.__tested is None or retest:
wus = (numpy.sum(weighted_user_satisfaction(
self.inactivity.data, timeout, self.__satisfaction_threshold))
/ len(self.inactivity.data)) * 100
us = (numpy.sum(user_satisfaction(self.inactivity.data, timeout))
/ len(self.inactivity.data)) * 100
ri = numpy.sum(numpy.where(self.inactivity.data > timeout,
self.inactivity.data - timeout, 0.0))
ti = numpy.sum(self.inactivity.data)
self.__tested = (wus, us, ri, ti)
return self.__tested
def resolve_key(self, key: str) -> EmpiricalDistribution:
"""Matches histograms and keys."""
if key == 'ACTIVITY_TIME':
return self.activity
elif key == 'INACTIVITY_TIME':
return self.inactivity
elif key == 'USER_SHUTDOWN_TIME':
return self.off_duration
elif key == 'AUTO_SHUTDOWN_TIME':
return EmpiricalDistribution()
raise KeyError('Invalid key for histogram.')
def extend(self, others: typing.List['Model']) -> None:
"""Appends the data from another model to this one."""
self.__inactivity.extend([i.inactivity for i in others])
self.__activity.extend([i.activity for i in others])
self.__off_duration.extend([i.off_duration for i in others])
self.__off_fraction.extend(i.off_fraction for i in others)
self.__optimal_timeout = None
self.__tested = None
def optimal_idle_timeout(self) -> float:
"""Does the search for the optimal timeout for this model."""
if self.__optimal_timeout is None:
self.__optimal_timeout = min(self.__optimal_timeout_search(),
self.__satisfaction_threshold)
return self.__optimal_timeout
def __optimal_timeout_search(self) -> float:
"""Uses the bisection method to find the timeout for the target."""
def f(x):
"""Trasposed function to optimize via root finding."""
return (numpy.mean(weighted_user_satisfaction(
self.inactivity.data, x, self.__satisfaction_threshold))
* 100 - self.__target_satisfaction)
try:
return scipy.optimize.brentq(f, self.__xmin, self.__xmax, xtol=1)
except ValueError:
# If the function has no root, means that we cannot achieve the
# satisfaction target, therefore, if we provide the max value, we
# ensure to, at least, be as close as possible.
if f(self.__xmax) > f(self.__xmin):
return self.__satisfaction_threshold
return self.__xmin
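if __name__ == '__main__':
    # Self-contained sketch of the root finding above, on synthetic data and
    # with a simplified, unweighted satisfaction function (illustrative only;
    # the real model uses weighted_user_satisfaction).
    rng = numpy.random.default_rng(0)
    inactivity = rng.exponential(600.0, size=10000)  # idle periods in seconds

    def satisfaction_gap(timeout, target=90.0):
        """Percentage of idle periods shorter than the timeout, minus target."""
        return float(numpy.mean(inactivity < timeout)) * 100.0 - target

    # brentq finds the timeout where satisfaction crosses the target.
    print(scipy.optimize.brentq(satisfaction_gap, 1.0, 7200.0, xtol=1))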
|
{"hexsha": "ad96e15cd7b3ec5fe7a7638fc6e8d986ede15ac8", "size": 5808, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulation/model.py", "max_stars_repo_name": "asi-uniovi/power-simulation", "max_stars_repo_head_hexsha": "75262c57bfc2500999d0ee1eda4150b9a64817d6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-09T13:12:01.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-09T13:12:01.000Z", "max_issues_repo_path": "simulation/model.py", "max_issues_repo_name": "asi-uniovi/power-simulation", "max_issues_repo_head_hexsha": "75262c57bfc2500999d0ee1eda4150b9a64817d6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2019-05-27T20:36:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-10T06:20:43.000Z", "max_forks_repo_path": "simulation/model.py", "max_forks_repo_name": "asi-uniovi/power-simulation", "max_forks_repo_head_hexsha": "75262c57bfc2500999d0ee1eda4150b9a64817d6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-03-09T13:14:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-09T13:14:56.000Z", "avg_line_length": 40.6153846154, "max_line_length": 78, "alphanum_fraction": 0.647899449, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1198}
|
"""
------------------------------------
mcts_basic: Monte Carlo Tree Search.
------------------------------------
"""
import numpy as np
from controller.game_ai import GameAI
from config import Config
from view.log import log
from view.graph import Graph
class Node():
    def __init__(self, state, action, parent=None):
        self.state = state
        self.action = action
        self.parent = parent
        # Instance attributes (kept off the class to avoid a shared mutable dict).
        self.children = {}
        self.visits = 0
        self.value = 0
        self.mean_value = 0
def pretty_desc(self):
return "Node: a: {}, n: {}, v: {}, m: {}".format(
self.action, self.visits, self.value, "%.3f" % self.mean_value)
def __str__(self):
children = ", ".join([str(c) for c in self.children.values()])
return ("[Node: turn={}, visits={}, value={},\nchildren=\n [{}]]").format(
self.state.player, self.visits, self.value, children.replace("\n", "\n "))
class MCTS_Basic(GameAI):
"""
Implementation of MCTS. This is implemented in terms
of the four stages of the algorithm: Selection, Expansion,
Simulation and Backpropagation.
"""
EXPLORE_PARAM = 2 # Used when choosing which node to explore or exploit.
ITERATIONS = 100 # Number of times to run MCTS, per action taken in game.
def __init__(self, game, playouts=Config.MCTS_ITERATIONS):
super().__init__(game)
if playouts is not None:
self.ITERATIONS = playouts
self.MAX_MOVES = 5000
elif self.game.size > 3:
playout_options = [800, 200, 35, 20, 10, 5, 5]
max_moves = [400, 1200, 1600, 2400, 5000, 5000, 5000]
self.ITERATIONS = playout_options[self.game.size-4]
self.MAX_MOVES = max_moves[self.game.size-4]
log("MCTS is using {} playouts and {} max moves.".format(self.ITERATIONS, self.MAX_MOVES))
def select(self, node):
"""
Select a node to run simulations from.
Nodes are chosen according to how they maximize
the UCB formula = v(i) + C * sqrt (ln(N(i)) / n(i))
Where
- v(i) = mean value of node (node value / node visits).
- C = exploration constant, 2 usually.
- N(i) = number of visits of the parent of current node.
- n(i) = times current node was visited.
This assures a balance between exploring new nodes,
and exploiting nodes, that are known to result in good outcomes.
"""
if node.children == {}: # Node is a leaf.
return node
parent_log = np.log(node.visits)
best_node = None
best_value = -1
for child in node.children.values():
if child.visits == 0:
# Node has not been visited. It is chosen immediately.
best_node = child
break
            else:
                # UCB formula (including the square root from the docstring).
                val = child.mean_value + self.EXPLORE_PARAM * np.sqrt(parent_log / child.visits)
if val > best_value:
best_value = val
best_node = child
return self.select(best_node)
def expand(self, node, actions):
"""
Expand the tree with new nodes, corresponding to
taking any possible actions from the current node.
"""
node.children = {action: Node(self.game.result(node.state, action), action, parent=node) for action in actions}
def simulate(self, state, actions):
"""
Simulate a random action from the given state and the given
possible actions. Return the result of the random action.
"""
        chosen_action = actions[int(np.random.uniform(0, len(actions)))]  # Choose a random action.
return self.game.result(state, chosen_action)
def back_propagate(self, node, value):
"""
After a full simulation, propagate result up the tree.
Invert value at every node, to align 'perspective' to
the current player of that node.
"""
node.visits += 1
node.value += value
node.mean_value = node.value / node.visits
if node.parent is None:
return
self.back_propagate(node.parent, -value)
def rollout(self, og_state, node):
"""
Make random simulations until a terminal state
is reached. Then the utility value of this state,
for the current player, is returned.
"""
state = node.state
counter = 0
while not self.game.terminal_test(state) and counter < self.MAX_MOVES:
actions = self.game.actions(state)
state = self.simulate(state, actions)
counter += 1
return self.game.utility(state, og_state.player)
def execute_action(self, state):
super.__doc__
log("MCTS is calculating the best move...")
root_node = Node(state, None)
        # Perform iterations of selection, expansion, simulation, and back propagation.
        # After the iterations are done, the child of the root node with the highest
        # visit count is chosen as the best action.
for _ in range(self.ITERATIONS):
node = self.select(root_node)
if node.visits > 0 and not self.game.terminal_test(node.state):
# Expand tree from available actions. Select first expanded node as
# new current and simulate an action from this nodes possible actions.
actions = self.game.actions(node.state)
self.expand(node, actions)
node = node.children[actions[0]] # Select first child of expanded Node.
# Perform rollout, simulate till end of game and return outcome.
value = self.rollout(root_node.state, node)
self.back_propagate(node, -value if node.state.player == root_node.state.player else value)
for node in root_node.children.values():
log(node.pretty_desc())
best_node = max(root_node.children.values(), key=lambda n: n.visits)
root_node = None
log("MCTS action: {}, likelihood of win: {}%".format(best_node.action, int((best_node.mean_value*50)+50)))
return best_node.state
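if __name__ == '__main__':
    # Toy illustration of the UCB trade-off used in `select` (made-up numbers):
    # a well-explored strong child versus a barely-visited weak one.
    parent_visits = 100
    for mean_value, visits in [(0.4, 50), (0.1, 3)]:
        ucb = mean_value + 2 * np.sqrt(np.log(parent_visits) / visits)
        print("mean=%.1f visits=%d ucb=%.3f" % (mean_value, visits, ucb))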
|
{"hexsha": "a1e22d3672b89a7967b6d06ff5e443d971e92c19", "size": 6282, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/controller/mcts_basic.py", "max_stars_repo_name": "dkfrankandersen/ITU-BSc-Thesis", "max_stars_repo_head_hexsha": "d86dab2050966a65e8b81cd57dfcc0508e280543", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/controller/mcts_basic.py", "max_issues_repo_name": "dkfrankandersen/ITU-BSc-Thesis", "max_issues_repo_head_hexsha": "d86dab2050966a65e8b81cd57dfcc0508e280543", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/controller/mcts_basic.py", "max_forks_repo_name": "dkfrankandersen/ITU-BSc-Thesis", "max_forks_repo_head_hexsha": "d86dab2050966a65e8b81cd57dfcc0508e280543", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3928571429, "max_line_length": 119, "alphanum_fraction": 0.5955109838, "include": true, "reason": "import numpy", "num_tokens": 1411}
|
export HContainer, add_object!
import Base: collect, delete!, in, length, show
"""
`HContainer` is a device for holding a collection of hyperbolic objects.
It is like a set, but we have to do a lot of work before adding a new
element because equal hyperbolic objects might differ a tiny amount and
that would mess up hashing.
+ `C = HContainer()` creates a new container.
+ `C = HContainer(items...)` creates a new container with the items.
"""
struct HContainer
objs::Set{HObject}
function HContainer()
A = Set{HObject}()
new(A)
end
end
function HContainer(args...)
C = HContainer()
add_object!(C,args...)
return C
end
HContainer(HC::HContainer) = HContainer(collect(HC.objs)...) # copy constructor
function in(X::HObject, C::HContainer)::Bool
for Z in C.objs
if Z==X
return true
end
end
return false
end
length(C::HContainer) = length(C.objs)
collect(C::HContainer) = collect(C.objs)
"""
`add_object!(C::HContainer, X::HObject)` adds `X` to the container `C`.
"""
function add_object!(C::HContainer, X::HObject)::Bool
# see if we already have it
if in(X,C)
return false
end
# not here, so OK to add
push!(C.objs, X)
return true
end
function add_object!(C::HContainer, args...)
for X in args
add_object!(C,X)
end
end
"""
`delete!(C::HContainer, X::HObject)` deletes `X` from the
container `C` returning `true` if successful (or `false` if
`X` was not in the container).
"""
function delete!(C::HContainer, X::HObject)::Bool
# see if we already have it; if so, delete
for Z in C.objs
if Z==X
delete!(C.objs, X)
return true
end
end
return false # never found
end
function show(io::IO, C::HContainer)
print(io,"HContainer of size $(length(C))")
end
|
{"hexsha": "9f1fbf1b9fc8ae8756933e950e9dd5e92d5f0e1b", "size": 1833, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/container.jl", "max_stars_repo_name": "switzel/HyperbolicPlane.jl", "max_stars_repo_head_hexsha": "89ba26e080b520b4f200e985f2b431b1d61ecb21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-01-19T06:12:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T22:18:48.000Z", "max_issues_repo_path": "src/container.jl", "max_issues_repo_name": "switzel/HyperbolicPlane.jl", "max_issues_repo_head_hexsha": "89ba26e080b520b4f200e985f2b431b1d61ecb21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-01-31T08:33:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T19:44:39.000Z", "max_forks_repo_path": "src/container.jl", "max_forks_repo_name": "switzel/HyperbolicPlane.jl", "max_forks_repo_head_hexsha": "89ba26e080b520b4f200e985f2b431b1d61ecb21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-31T21:10:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T18:49:43.000Z", "avg_line_length": 21.8214285714, "max_line_length": 80, "alphanum_fraction": 0.6399345336, "num_tokens": 515}
|
import hypergraph as hg
import numpy as np
graph1 = hg.Graph()
with graph1.as_default():
hg.mark("abc1") << (hg.dump() << "** abc1 **")
n = hg.mark("abc2") << (hg.dump() << "** abc2 **")
idx = hg.node(lambda _: np.random.randint(0, 2))
hg.output() << (hg.select(idx) << ["abc1", "abc2"])
for _ in range(3):
ctx = hg.ExecutionContext()
with ctx.as_default():
print(graph1())
print("*** end of execution ***")
|
{"hexsha": "a646fc3c636a3a45e1eedb0ff2daafedb186cfd4", "size": 447, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/basic/select1.py", "max_stars_repo_name": "sflinter/hypergraph", "max_stars_repo_head_hexsha": "c3108ee51361d2e4b8ddc7eced1953f1548ce8d8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-17T11:17:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-17T11:17:56.000Z", "max_issues_repo_path": "examples/basic/select1.py", "max_issues_repo_name": "sflinter/hypergraph", "max_issues_repo_head_hexsha": "c3108ee51361d2e4b8ddc7eced1953f1548ce8d8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/basic/select1.py", "max_forks_repo_name": "sflinter/hypergraph", "max_forks_repo_head_hexsha": "c3108ee51361d2e4b8ddc7eced1953f1548ce8d8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2941176471, "max_line_length": 55, "alphanum_fraction": 0.5570469799, "include": true, "reason": "import numpy", "num_tokens": 129}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def grocery(path):
"""Grocery
Grocery store sales
A dataset with 36 observations on the following 5 variables.
`Discount`
Amount of discount: `5.00%`, `10.00%` or `15.00%`
`Store`
Store number (1-12)
`Display`
`Featured End of Aisl`, `Featured Middle of A`, or `Not Featured`
`Sales`
Number sold during one week
`Price`
Wholesale price (in dollars)
These data are not real, though they are simulated to approximate an
  actual study. The data come from John Grego, Director of the Stat Lab at
  the University of South Carolina.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `grocery.csv`.
Returns:
Tuple of np.ndarray `x_train` with 36 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'grocery.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Stat2Data/Grocery.csv'
maybe_download_and_extract(path, url,
save_file_name='grocery.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
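if __name__ == '__main__':
  # Minimal usage sketch; the data directory below is hypothetical.
  x_train, metadata = grocery('~/data')
  print(x_train.shape)        # expected: (36, 5)
  print(metadata['columns'])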
|
{"hexsha": "7a2e1a442c1dde48fb7d5123a884ad382c154621", "size": 1638, "ext": "py", "lang": "Python", "max_stars_repo_path": "observations/r/grocery.py", "max_stars_repo_name": "hajime9652/observations", "max_stars_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 199, "max_stars_repo_stars_event_min_datetime": "2017-07-24T01:34:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T00:50:55.000Z", "max_issues_repo_path": "observations/r/grocery.py", "max_issues_repo_name": "hajime9652/observations", "max_issues_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2017-09-05T19:27:20.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-07T09:47:26.000Z", "max_forks_repo_path": "observations/r/grocery.py", "max_forks_repo_name": "hajime9652/observations", "max_forks_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2017-07-26T00:10:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T20:44:59.000Z", "avg_line_length": 23.4, "max_line_length": 74, "alphanum_fraction": 0.6807081807, "include": true, "reason": "import numpy", "num_tokens": 398}
|
from __future__ import print_function
import numpy as np
from scipy import io
from keras.models import model_from_json
# Construct the CNN model from its architecture JSON
model = model_from_json(open('srcnn_model.json').read())
# Load the trained weights
model.load_weights('srcnn_model_weights.h5')
w = model.get_weights()
for i in range(0, 6, 2):
    w[i] = np.array(w[i])
    w[i+1] = np.array(w[i+1])
    # Integer-divide so the filenames are 'w0', 'w1', 'w2' under Python 3 as well;
    # transpose reorders the kernel axes (0, 1, 2, 3) -> (2, 3, 1, 0) for MATLAB.
    io.savemat('w' + str(i // 2) + '.mat', {'array': w[i].transpose(2, 3, 1, 0)})
    io.savemat('b' + str(i // 2) + '.mat', {'array': w[i+1]})
|
{"hexsha": "e7645c90169a994fe07141868f844ae7f88753f1", "size": 491, "ext": "py", "lang": "Python", "max_stars_repo_path": "SRCNN_train/convert/load_save.py", "max_stars_repo_name": "YapengTian/SRCNN-Keras", "max_stars_repo_head_hexsha": "017b5d3849e8de78879a7e0944a6773d6451cda6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2017-03-23T05:21:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T15:08:08.000Z", "max_issues_repo_path": "SRCNN_train/convert/load_save.py", "max_issues_repo_name": "ehumss/SRCNN-Keras", "max_issues_repo_head_hexsha": "017b5d3849e8de78879a7e0944a6773d6451cda6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-12-19T11:48:58.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-16T05:44:00.000Z", "max_forks_repo_path": "SRCNN_train/convert/load_save.py", "max_forks_repo_name": "ehumss/SRCNN-Keras", "max_forks_repo_head_hexsha": "017b5d3849e8de78879a7e0944a6773d6451cda6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-07-13T07:00:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-25T17:19:09.000Z", "avg_line_length": 30.6875, "max_line_length": 71, "alphanum_fraction": 0.6720977597, "include": true, "reason": "import numpy,from scipy", "num_tokens": 160}
|