# -*- coding: utf-8 -*-
"""
Created on Mon Feb 19 14:21:56 2018
@author: Aditya Vikram
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])  # Gender -> 0/1
# One-hot encode Geography (column 1); the encoded columns are placed first
ct = ColumnTransformer([('geography', OneHotEncoder(), [1])], remainder = 'passthrough')
X = ct.fit_transform(X)
X = X[:, 1:]  # drop one dummy column to avoid the dummy variable trap
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Now it's time to build the ANN
# We start by importing Keras
import keras
from keras.models import Sequential
from keras.layers import Dense
# Sequential initialises the ANN as a sequence of layers, while Dense adds fully connected layers to it
# Let's start by initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
# Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
# Part 3 - Making predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
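# Illustrative addition (not in the original script): summarise test-set
# performance directly from the confusion matrix.
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()
print('Test set accuracy: %.4f' % accuracy)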
# To make a prediction for a new customer, we pass the observation as a NumPy array.
#Geography: France
#Credit Score: 600
#Gender: Male
#Age: 40 years old
#Tenure: 3 years
#Balance: $60000
#Number of Products: 2
#Does this customer have a credit card ? Yes
#Is this customer an Active Member: Yes
#Estimated Salary: $50000
new_pred = np.array([[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])  # France -> (0, 0) dummies; Male -> 1
new_pred = sc.transform(new_pred)
y_new_pred = classifier.predict(new_pred)
y_new_pred = (y_new_pred>0.5)
# Part 4 - Evaluating, Improving and Tuning the ANN
# Evaluating the ANN
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense
def build_classifier():
classifier = Sequential()
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
return classifier
classifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, epochs = 100)
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = -1)
mean = accuracies.mean()
std_dev = accuracies.std()  # note: .std() gives the standard deviation, not the variance
# Improving the ANN
# Dropout Regularization to reduce overfitting if needed
# Tuning the ANN
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
def build_classifier(optimizer):
classifier = Sequential()
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])
return classifier
classifier = KerasClassifier(build_fn = build_classifier)
parameters = {'batch_size': [25, 32],
'epochs': [100, 500],
'optimizer': ['adam', 'rmsprop']}
grid_search = GridSearchCV(estimator = classifier,
param_grid = parameters,
scoring = 'accuracy',
cv = 10)
grid_search = grid_search.fit(X_train, y_train)
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_
[file: churn_modelling_keras.py | repo: vikadia7x/churn_modelling-using-keras | Python | MIT]
#define BOOST_TEST_MODULE "test_contact_gocontact_interaction"
#ifdef BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#else
#include <boost/test/included/unit_test.hpp>
#endif
#include <mjolnir/core/BoundaryCondition.hpp>
#include <mjolnir/core/SimulatorTraits.hpp>
#include <mjolnir/forcefield/local/GoContactInteraction.hpp>
#include <mjolnir/math/constants.hpp>
#include <mjolnir/util/make_unique.hpp>
#include <random>
// a test for specialization of ContactInteraction for GoContactPotential.
BOOST_AUTO_TEST_CASE(ContactGoContactInteraction)
{
mjolnir::LoggerManager::set_default_logger("test_contact_gocontact.log");
using traits_type = mjolnir::SimulatorTraits<double, mjolnir::UnlimitedBoundary>;
using real_type = traits_type::real_type;
using coord_type = traits_type::coordinate_type;
using boundary_type = traits_type::boundary_type;
using system_type = mjolnir::System<traits_type>;
using potential_type = mjolnir::GoContactPotential<real_type>;
using interaction_type = mjolnir::ContactInteraction<traits_type, potential_type>;
constexpr real_type tol = 1e-7;
const coord_type pos1(1.0, 0.0, 0.0);
const coord_type pos2(0.0, 0.0, 0.0);
system_type sys(2, boundary_type{});
sys.at(0).mass = 1.0;
sys.at(1).mass = 1.0;
sys.at(0).rmass = 1.0;
sys.at(1).rmass = 1.0;
sys.at(0).position = pos1;
sys.at(1).position = pos2;
sys.at(0).velocity = coord_type(0,0,0);
sys.at(1).velocity = coord_type(0,0,0);
sys.at(0).force = coord_type(0,0,0);
sys.at(1).force = coord_type(0,0,0);
sys.at(0).name = "X";
sys.at(1).name = "X";
sys.at(0).group = "NONE";
sys.at(1).group = "NONE";
const real_type k(1.0);
const real_type native(1.0);
potential_type potential(k, native);
interaction_type interaction("none", {{ {{0,1}}, potential}});
const int N = 10000;
const real_type dx = 0.001;
for(int i = 1; i < N; ++i)
{
const auto dist = 0.6 + i * dx;
sys.position(0) = coord_type(0.0, 0.0, 0.0);
sys.position(1) = coord_type(dist, 0.0, 0.0);
sys.force(0) = coord_type(0.0, 0.0, 0.0);
sys.force(1) = coord_type(0.0, 0.0, 0.0);
const real_type deriv = potential.derivative(dist);
interaction.initialize(sys);
interaction.calc_force(sys);
const real_type force_strength1 = mjolnir::math::length(sys.force(0));
const real_type force_strength2 = mjolnir::math::length(sys.force(1));
BOOST_TEST(force_strength1 == std::abs(deriv), boost::test_tools::tolerance(tol));
BOOST_TEST(force_strength2 == std::abs(deriv), boost::test_tools::tolerance(tol));
const auto E_pot = potential.potential(dist);
const auto E_int = interaction.calc_energy(sys);
BOOST_TEST(E_pot == E_int, boost::test_tools::tolerance(tol));
}
}
BOOST_AUTO_TEST_CASE(ContactGoContact_numerical_difference)
{
mjolnir::LoggerManager::set_default_logger("test_contact_gocontact.log");
using traits_type = mjolnir::SimulatorTraits<double, mjolnir::UnlimitedBoundary>;
using real_type = traits_type::real_type;
using coord_type = traits_type::coordinate_type;
using boundary_type = traits_type::boundary_type;
using system_type = mjolnir::System<traits_type>;
using potential_type = mjolnir::GoContactPotential<real_type>;
using interaction_type = mjolnir::ContactInteraction<traits_type, potential_type>;
const real_type k(1.0);
const real_type native(std::sqrt(3.0));
potential_type potential(k, native);
interaction_type interaction("none", {{ {{0,1}}, potential}});
std::mt19937 mt(123456789);
std::uniform_real_distribution<real_type> uni(-1.0, 1.0);
for(std::size_t i = 0; i < 1000; ++i)
{
system_type sys(2, boundary_type{});
sys.at(0).mass = 1.0;
sys.at(1).mass = 1.0;
sys.at(0).rmass = 1.0;
sys.at(1).rmass = 1.0;
sys.at(0).position = coord_type( 0.0 + 0.01 * uni(mt), 0.0 + 0.01 * uni(mt), 0.0 + 0.01 * uni(mt));
sys.at(1).position = coord_type( 1.0 + 0.01 * uni(mt), 1.0 + 0.01 * uni(mt), 1.0 + 0.01 * uni(mt));
sys.at(0).velocity = coord_type( 0.0, 0.0, 0.0);
sys.at(1).velocity = coord_type( 0.0, 0.0, 0.0);
sys.at(0).force = coord_type( 0.0, 0.0, 0.0);
sys.at(1).force = coord_type( 0.0, 0.0, 0.0);
sys.at(0).name = "X";
sys.at(1).name = "X";
sys.at(0).group = "TEST";
sys.at(1).group = "TEST";
const auto init = sys;
constexpr real_type tol = 2e-4;
constexpr real_type dr = 1e-5;
for(std::size_t idx=0; idx<2; ++idx)
{
{
// ----------------------------------------------------------------
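            // The analytic force is checked against a numerical derivative:
            // F should equal -dU/dx, approximated by the central difference
            // (U(x+dr) - U(x-dr)) / (2*dr) around the point where the force
            // is evaluated.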
// reset positions
sys = init;
interaction.initialize(sys);
// calc U(x-dx)
const auto E0 = interaction.calc_energy(sys);
mjolnir::math::X(sys.position(idx)) += dr;
// calc F(x)
interaction.calc_force(sys);
mjolnir::math::X(sys.position(idx)) += dr;
// calc U(x+dx)
const auto E1 = interaction.calc_energy(sys);
// central difference
const auto dE = (E1 - E0) * 0.5;
BOOST_TEST(-dE == dr * mjolnir::math::X(sys.force(idx)),
boost::test_tools::tolerance(tol));
}
{
// ----------------------------------------------------------------
// reset positions
sys = init;
interaction.initialize(sys);
// calc U(x-dx)
const auto E0 = interaction.calc_energy(sys);
mjolnir::math::Y(sys.position(idx)) += dr;
// calc F(x)
interaction.calc_force(sys);
mjolnir::math::Y(sys.position(idx)) += dr;
// calc U(x+dx)
const auto E1 = interaction.calc_energy(sys);
// central difference
const auto dE = (E1 - E0) * 0.5;
BOOST_TEST(-dE == dr * mjolnir::math::Y(sys.force(idx)),
boost::test_tools::tolerance(tol));
}
{
// ----------------------------------------------------------------
// reset positions
sys = init;
interaction.initialize(sys);
// calc U(x-dx)
const auto E0 = interaction.calc_energy(sys);
mjolnir::math::Z(sys.position(idx)) += dr;
// calc F(x)
interaction.calc_force(sys);
mjolnir::math::Z(sys.position(idx)) += dr;
// calc U(x+dx)
const auto E1 = interaction.calc_energy(sys);
// central difference
const auto dE = (E1 - E0) * 0.5;
BOOST_TEST(-dE == dr * mjolnir::math::Z(sys.force(idx)),
boost::test_tools::tolerance(tol));
}
}
}
}
BOOST_AUTO_TEST_CASE(ContactGoContact_calc_force_and_energy)
{
mjolnir::LoggerManager::set_default_logger("test_contact_gocontact.log");
using traits_type = mjolnir::SimulatorTraits<double, mjolnir::UnlimitedBoundary>;
using real_type = traits_type::real_type;
using coord_type = traits_type::coordinate_type;
using boundary_type = traits_type::boundary_type;
using system_type = mjolnir::System<traits_type>;
using potential_type = mjolnir::GoContactPotential<real_type>;
using interaction_type = mjolnir::ContactInteraction<traits_type, potential_type>;
const real_type k(1.0);
const real_type native(std::sqrt(3.0));
potential_type potential(k, native);
interaction_type interaction("none", {{ {{0,1}}, potential}});
std::mt19937 mt(123456789);
std::uniform_real_distribution<real_type> uni(-1.0, 1.0);
for(std::size_t i = 0; i < 1000; ++i)
{
system_type sys(2, boundary_type{});
sys.at(0).mass = 1.0;
sys.at(1).mass = 1.0;
sys.at(0).rmass = 1.0;
sys.at(1).rmass = 1.0;
sys.at(0).position = coord_type( 0.0 + 0.01 * uni(mt), 0.0 + 0.01 * uni(mt), 0.0 + 0.01 * uni(mt));
sys.at(1).position = coord_type( 1.0 + 0.01 * uni(mt), 1.0 + 0.01 * uni(mt), 1.0 + 0.01 * uni(mt));
sys.at(0).velocity = coord_type( 0.0, 0.0, 0.0);
sys.at(1).velocity = coord_type( 0.0, 0.0, 0.0);
sys.at(0).force = coord_type( 0.0, 0.0, 0.0);
sys.at(1).force = coord_type( 0.0, 0.0, 0.0);
sys.at(0).name = "X";
sys.at(1).name = "X";
sys.at(0).group = "TEST";
sys.at(1).group = "TEST";
constexpr real_type tol = 1e-4;
auto ref_sys = sys;
const auto energy = interaction.calc_force_and_energy(sys);
const auto ref_energy = interaction.calc_energy(ref_sys);
interaction.calc_force(ref_sys);
BOOST_TEST(ref_energy == energy, boost::test_tools::tolerance(tol));
for(std::size_t idx=0; idx<sys.size(); ++idx)
{
BOOST_TEST(mjolnir::math::X(sys.force(idx)) == mjolnir::math::X(ref_sys.force(idx)), boost::test_tools::tolerance(tol));
BOOST_TEST(mjolnir::math::Y(sys.force(idx)) == mjolnir::math::Y(ref_sys.force(idx)), boost::test_tools::tolerance(tol));
BOOST_TEST(mjolnir::math::Z(sys.force(idx)) == mjolnir::math::Z(ref_sys.force(idx)), boost::test_tools::tolerance(tol));
}
}
}
[file: test/core/test_contact_gocontact_interaction.cpp | repo: yutakasi634/Mjolnir | C++ | MIT]
import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
np.random.seed(201909113)
one_hot_size = 7
_num_samples = 47
_samples = [np.random.uniform(-1, one_hot_size+1) for _ in range(_num_samples)]
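# Note: samples are drawn from [-1, one_hot_size+1), so some deliberately fall
# outside the valid index range to check that OneHot produces an all-zero
# vector (and hence a zero metric contribution) for out-of-range values.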
# Sample access functions
def get_sample(index):
return [_samples[index]]
def num_samples():
return _num_samples
def sample_dims():
return (1,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann, weekly):
    """Construct LBANN experiment.
    Args:
        lbann (module): Module for LBANN Python frontend
        weekly (bool): Whether this is a weekly (longer) test run; unused here
    """
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
x_lbann = lbann.Input(data_field='samples')
y_numpy = np.random.normal(size=one_hot_size).astype(np.float32)
y_numpy[:] = 1 ### @todo Remove
y_lbann = lbann.Weights(
initializer=lbann.ValueInitializer(
values=y_numpy))
y_lbann = lbann.WeightsLayer(
weights=y_lbann,
dims=[one_hot_size],
)
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# Compute expected metric values with NumPy
# ------------------------------------------
vals = []
for i in range(num_samples()):
x = int(np.floor(get_sample(i)[0]))
y = y_numpy
z = y[x] if (0 <= x < one_hot_size) else 0
vals.append(z)
val = np.mean(vals, dtype=np.float64)
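    # Allow roughly 8 ulps of float32 rounding error around the expected value.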
tol = np.abs(8 * val * np.finfo(np.float32).eps)
# ------------------------------------------
# Data-parallel layout
# ------------------------------------------
x = x_lbann
y = y_lbann
x_onehot = lbann.OneHot(
x,
size=one_hot_size,
data_layout='data_parallel',
)
z = lbann.MatMul(
lbann.Reshape(x_onehot, dims=[1, -1]),
lbann.Reshape(y, dims=[1, -1]),
transpose_b=True,
)
obj.append(z)
metrics.append(lbann.Metric(z, name='data-parallel layout'))
callbacks.append(
lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test',
)
)
# ------------------------------------------
# Model-parallel layout
# ------------------------------------------
x = x_lbann
y = y_lbann
x_onehot = lbann.OneHot(
x,
size=one_hot_size,
data_layout='model_parallel',
)
z = lbann.MatMul(
lbann.Reshape(x_onehot, dims=[1, -1]),
lbann.Reshape(y, dims=[1, -1]),
transpose_b=True,
)
obj.append(z)
metrics.append(lbann.Metric(z, name='model-parallel layout'))
callbacks.append(
lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test',
)
)
# ------------------------------------------
# Construct model
# ------------------------------------------
num_epochs = 0
return lbann.Model(num_epochs,
layers=x_lbann,
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func
[file: ci_test/unit_tests/test_unit_layer_one_hot.py | repo: LLNL/LBANN | Python | Apache-2.0]
/*****************************************************************************
* {{name}}_test_base.f
*****************************************************************************/
{%set filename = "sim/tests/{{name}}_test_base.f" %}
+UVM_TESTNAME={{name}}_test_base
[file: templates/verif/uvm/subenv/sim/tests/base_test.f | repo: mballance/vte | Apache-2.0]
// test cases for development purposes
#define BOOST_TEST_MODULE TestContext
#include <boost/test/unit_test.hpp>
#ifdef DEV_TESTS
#if defined(OPENCL_ENABLED)
#include "clfft_helper.hpp"
using namespace gearshifft::ClFFT;
struct Fixt {
cl_device_id device = 0;
cl_context ctx = 0;
Fixt() {
cl_platform_id platform;
cl_int err = CL_SUCCESS;
findClDevice(CL_DEVICE_TYPE_CPU, &platform, &device);
ctx = clCreateContext( NULL, 1, &device, NULL, NULL, &err );
CHECK_CL(err);
}
~Fixt() {
CHECK_CL(clReleaseContext( ctx ));
CHECK_CL(clReleaseDevice(device));
}
};
/**
 * if it segfaults at the end, check whether you are on a login node
 * instead of a compute node.
*/
BOOST_GLOBAL_FIXTURE(Fixt);
BOOST_AUTO_TEST_CASE( CL_Context_Global_Fixture )
{
BOOST_CHECK( true );
}
#endif
#if defined(CUDA_ENABLED)
#include "cufft_helper.hpp"
BOOST_AUTO_TEST_CASE( CUDA_Large_MemAlloc_Test )
{
float* data1 = nullptr;
float* data2 = nullptr;
size_t size = 1024*512*512*4;
for(size_t k=0; k<4; ++k) {
try {
CHECK_CUDA(cudaMalloc(&data1, size));
if(!data1) throw std::runtime_error("data1 failed.");
CHECK_CUDA(cudaMalloc(&data2, size));
if(!data2) throw std::runtime_error("data2 failed.");
CHECK_CUDA(cudaFree(data1));
CHECK_CUDA(cudaFree(data2));
}catch(const std::runtime_error& e){
std::cerr << "2x "<< size/1048576 << " MiB -- failed" << std::endl;
BOOST_FAIL(e.what());
}
std::cout << "2x " << size/1048576 << " MiB -- ok" << std::endl;
size<<=1;
}
BOOST_CHECK( true );
}
BOOST_AUTO_TEST_CASE( CUDA_KernelLeak_FFT_Test )
{
cufftDoubleComplex* data;
cufftDoubleComplex* data_transform;
cufftHandle plan;
CHECK_CUDA( cudaSetDevice(0) );
std::vector< std::array<size_t, 1> > vec_extents;
std::array<size_t, 1> e0 = {{262144}}; // works
std::array<size_t, 1> e1 = {{67108863}}; // out of memory at kernel call (lmem)
std::array<size_t, 1> e2 = {{262144}}; // still out of memory, even after reset
vec_extents.push_back( e0 );
vec_extents.push_back( e1 );
vec_extents.push_back( e2 );
for( auto extents : vec_extents ) {
size_t data_size = extents[0]*sizeof(cufftDoubleComplex);
size_t data_transform_size = extents[0]*sizeof(cufftDoubleComplex);
size_t s = 0;
try {
CHECK_CUDA( cudaMalloc(&data, data_size));
CHECK_CUDA( cudaMalloc(&data_transform, data_transform_size));
size_t mem_free=0, mem_tot=0;
CHECK_CUDA( cudaMemGetInfo(&mem_free, &mem_tot) );
std::cerr << mem_free/1048576 << " MiB, ";
CHECK_CUDA( cufftPlan1d(&plan, extents[0], CUFFT_Z2Z, 1)); // fails for 2^26-1
CHECK_CUDA( cudaMemGetInfo(&mem_free, &mem_tot) );
std::cerr << mem_free/1048576 << " MiB" << std::endl;
CHECK_CUDA( cufftExecZ2Z(plan, data, data_transform, CUFFT_FORWARD));
CHECK_CUDA( cufftExecZ2Z(plan, data_transform, data, CUFFT_INVERSE));
CHECK_CUDA( cudaFree(data) );
CHECK_CUDA( cudaFree(data_transform) );
CHECK_CUDA( cufftDestroy(plan) );
std::cerr << "Success: nx="<<extents[0]<<std::endl;
}catch(const std::runtime_error& e){
std::cerr << "Error for nx="<<extents[0]<<": "<<e.what() << std::endl;
// cleanup #1
CHECK_CUDA( cudaFree(data) );
CHECK_CUDA( cudaFree(data_transform) );
if(plan) {
CHECK_CUDA( cufftDestroy(plan) );
// cleanup #2
//CHECK_CUDA( cudaDeviceReset() ); // does not help
//CHECK_CUDA( cudaSetDevice(0) );
// something different #1
// < this later leads to CUFFT_SETUP_FAILED at cufftExecZ2Z()
//CHECK_CUDA( cufftPlan2d(&plan, 4, 4, CUFFT_R2C));
//CHECK_CUDA( cufftDestroy(plan) );
// >
// something different #2, also fails to recover cufft context
//CHECK_CUDA( cufftCreate(&plan) );
//CHECK_CUDA( cufftGetSize2d(plan, 4, 4, CUFFT_R2C, &s) );
//CHECK_CUDA( cufftDestroy(plan) );
}
}
}
CHECK_CUDA( cudaDeviceReset() );
}
#endif // cuda
#endif // ifdef DEV_TESTS
[file: test/tests.cpp | repo: psteinb/gearshifft | C++ | Apache-2.0]
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
def __init__(self, body='./body/body_hakka.png', **options):
super().__init__('薄荷', body=body, pantie_position=[0, 0], **options)
self.mask_front = io.imread('./mask/mask_hakka_front.png')
self.mask_back = io.imread('./mask/mask_hakka_back.png')
def convert_front(self, image):
pantie = np.array(image)
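        # Copy the bottom-right patch, flip it in both axes, and paste it
        # onto the left edge before warping the front half.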
patch = np.copy(pantie[-120:-5, 546:, :])
[pr, pc, d] = patch.shape
pantie[125:125 + pr, :pc, :] = patch[::-1, ::-1]
front = pantie[:, :330]
arrx = np.zeros(64)
arrx[20:] += np.sin(np.linspace(0, np.pi / 2, 44)) * 79
arrx[10:20] += np.sin(np.linspace(0, np.pi, 10)) * 8
arrx -= 80
arry = np.zeros(64)
front = affine_transform_by_arr(front, arrx, arry)[:320]
front = np.uint8(resize(front, [1.2, 1.12]) * 255)[:, 8:]
front = np.bitwise_and(front, self.mask_front)
front = np.concatenate([front[:, ::-1], front], axis=1)
return Image.fromarray(front)
def convert_back(self, image):
pantie = np.array(image)
pantie[-120:, 546:, :] = 0
back = np.rot90(pantie[:-15, 330:][:, ::-1])
arrx = np.zeros(36)
arry = np.zeros(36)
arry[5:-5] = np.sin(np.linspace(0, np.pi, 26))**2 * 50
arry -= 30
back = affine_transform_by_arr(back, arrx, arry)
back = np.rot90(back, -1)
arrx = np.zeros(64)
arrx[6:] = np.linspace(0, np.pi / 2, 58)**2 * 43
arrx[45:55] += np.sin(np.linspace(0, np.pi, 10)) * 4
arrx -= 80
arry = np.zeros(64)
back = affine_transform_by_arr(back, arrx, arry)[:320]
back = np.uint8(resize(back, [1.2, 1.12]) * 255)[:, 18:]
back = np.bitwise_and(back, self.mask_back)
back = np.concatenate([back[:, ::-1], back], axis=1)
return Image.fromarray(back)
def patch(self, image, transparent=False):
pantie = self.convert(image)
if transparent:
patched = Image.new("RGBA", self.body_size)
else:
patched = self.body.copy()
patched = self.paste(patched, self.convert_front(image), (2180, 1671))
patched = self.paste(patched, self.convert_back(image), (2907, 1647))
return patched
[file: src/models/hakka.py | repo: HhotateA/quiche_pantie_patch | Python | CC-BY-4.0]
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RND Builder."""
from typing import Callable, Generic, Iterator, List, Optional
from acme import adders
from acme import core
from acme import specs
from acme.agents.jax import builders
from acme.agents.jax.rnd import config as rnd_config
from acme.agents.jax.rnd import learning as rnd_learning
from acme.agents.jax.rnd import networks as rnd_networks
from acme.jax import networks as networks_lib
from acme.jax.types import PolicyNetwork
from acme.utils import counting
from acme.utils import loggers
import jax
import optax
import reverb
class RNDBuilder(Generic[rnd_networks.DirectRLNetworks, PolicyNetwork],
builders.ActorLearnerBuilder[rnd_networks.RNDNetworks,
PolicyNetwork,
reverb.ReplaySample]):
"""RND Builder."""
def __init__(
self,
rl_agent: builders.ActorLearnerBuilder[rnd_networks.DirectRLNetworks,
PolicyNetwork,
reverb.ReplaySample],
config: rnd_config.RNDConfig,
logger_fn: Callable[[], loggers.Logger] = lambda: None,
):
"""Implements a builder for RND using rl_agent as forward RL algorithm.
Args:
rl_agent: The standard RL agent used by RND to optimize the generator.
config: A config with RND HPs.
logger_fn: a logger factory for the rl_agent's learner.
"""
self._rl_agent = rl_agent
self._config = config
self._logger_fn = logger_fn
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: rnd_networks.RNDNetworks[rnd_networks.DirectRLNetworks],
dataset: Iterator[reverb.ReplaySample],
logger: loggers.Logger,
environment_spec: specs.EnvironmentSpec,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
direct_rl_learner_key, rnd_learner_key = jax.random.split(random_key)
counter = counter or counting.Counter()
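    # Nest the counter so the wrapped agent's counts are namespaced under the
    # 'direct_rl' prefix, keeping them separate from the RND learner's counts.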
direct_rl_counter = counting.Counter(counter, 'direct_rl')
def direct_rl_learner_factory(
networks: rnd_networks.DirectRLNetworks,
dataset: Iterator[reverb.ReplaySample]) -> core.Learner:
return self._rl_agent.make_learner(
direct_rl_learner_key,
networks,
dataset,
logger=self._logger_fn(),
environment_spec=environment_spec,
replay_client=replay_client,
counter=direct_rl_counter)
optimizer = optax.adam(learning_rate=self._config.predictor_learning_rate)
return rnd_learning.RNDLearner(
direct_rl_learner_factory=direct_rl_learner_factory,
iterator=dataset,
optimizer=optimizer,
rnd_network=networks,
rng_key=rnd_learner_key,
is_sequence_based=self._config.is_sequence_based,
grad_updates_per_batch=self._config.num_sgd_steps_per_step,
counter=counter,
logger=logger)
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
policy: PolicyNetwork,
) -> List[reverb.Table]:
return self._rl_agent.make_replay_tables(environment_spec, policy)
def make_dataset_iterator(
self,
replay_client: reverb.Client) -> Optional[Iterator[reverb.ReplaySample]]:
return self._rl_agent.make_dataset_iterator(replay_client)
def make_adder(self,
replay_client: reverb.Client) -> Optional[adders.Adder]:
return self._rl_agent.make_adder(replay_client)
def make_actor(
self,
random_key: networks_lib.PRNGKey,
policy: PolicyNetwork,
environment_spec: specs.EnvironmentSpec,
variable_source: Optional[core.VariableSource] = None,
adder: Optional[adders.Adder] = None,
) -> core.Actor:
return self._rl_agent.make_actor(random_key, policy, environment_spec,
variable_source, adder)
[file: acme/agents/jax/rnd/builder.py | repo: contropist/deepmind-acme | Python | Apache-2.0]
import copy
import itertools
import numpy as np
import tensorflow as tf
def dRNN(cell, inputs, rate, scope='default'):
"""
This function constructs a layer of dilated RNN.
Inputs:
cell -- the dilation operations is implemented independent of the RNN cell.
In theory, any valid tensorflow rnn cell should work.
inputs -- the input for the RNN. inputs should be in the form of
              a list of 'n_steps' tensors. Each has shape (batch_size, input_dims)
        rate -- the rate here refers to the 'dilations' in the original WaveNet paper.
scope -- variable scope.
Outputs:
outputs -- the outputs from the RNN.
"""
n_steps = len(inputs)
if rate < 0 or rate >= n_steps:
raise ValueError('The \'rate\' variable needs to be adjusted.')
print "Building layer: %s, input length: %d, dilation rate: %d, input dim: %d." % (
scope, n_steps, rate, inputs[0].get_shape()[1])
# make the length of inputs divide 'rate', by using zero-padding
EVEN = (n_steps % rate) == 0
if not EVEN:
# Create a tensor in shape (batch_size, input_dims), which all elements are zero.
# This is used for zero padding
zero_tensor = tf.zeros_like(inputs[0])
        dilated_n_steps = n_steps // rate + 1
        print("=====> %d time points need to be padded. " % (
            dilated_n_steps * rate - n_steps))
        print("=====> Input length for sub-RNN: %d" % dilated_n_steps)
        for i_pad in range(dilated_n_steps * rate - n_steps):
            inputs.append(zero_tensor)
    else:
        dilated_n_steps = n_steps // rate
        print("=====> Input length for sub-RNN: %d" % dilated_n_steps)
# now the length of 'inputs' divide rate
# reshape it in the format of a list of tensors
# the length of the list is 'dialated_n_steps'
# the shape of each tensor is [batch_size * rate, input_dims]
# by stacking tensors that "colored" the same
# Example:
# n_steps is 5, rate is 2, inputs = [x1, x2, x3, x4, x5]
# zero-padding --> [x1, x2, x3, x4, x5, 0]
# we want to have --> [[x1; x2], [x3; x4], [x_5; 0]]
    # whose length is the ceiling of n_steps/rate
    dilated_inputs = [tf.concat(inputs[i * rate:(i + 1) * rate],
                                axis=0) for i in range(dilated_n_steps)]
    # build the dilated RNN on the reformatted (dilated) inputs
dilated_outputs, _ = tf.contrib.rnn.static_rnn(
cell, dilated_inputs,
dtype=tf.float32, scope=scope)
# reshape output back to the input format as a list of tensors with shape [batch_size, input_dims]
# split each element of the outputs from size [batch_size*rate, input_dims] to
# [[batch_size, input_dims], [batch_size, input_dims], ...] with length = rate
splitted_outputs = [tf.split(output, rate, axis=0)
for output in dilated_outputs]
unrolled_outputs = [output
for sublist in splitted_outputs for output in sublist]
# remove padded zeros
outputs = unrolled_outputs[:n_steps]
return outputs
def multi_dRNN_with_dilations(cells, inputs, dilations):
"""
    This function constructs a multi-layer dilated RNN.
Inputs:
cells -- A list of RNN cells.
inputs -- A list of 'n_steps' tensors, each has shape (batch_size, input_dims).
dilations -- A list of integers with the same length of 'cells' indicates the dilations for each layer.
Outputs:
x -- A list of 'n_steps' tensors, as the outputs for the top layer of the multi-dRNN.
"""
assert (len(cells) == len(dilations))
x = copy.copy(inputs)
for cell, dilation in zip(cells, dilations):
scope_name = "multi_dRNN_dilation_%d" % dilation
x = dRNN(cell, x, dilation, scope=scope_name)
return x
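# Usage sketch (illustrative addition, not part of the original file). It
# assumes a TF1.x environment where tf.contrib is available, matching the code
# above; the layer sizes and dilations are arbitrary.
if __name__ == '__main__':
    n_steps, batch_size, input_dims, n_hidden = 16, 8, 10, 32
    x = tf.placeholder(tf.float32, [n_steps, batch_size, input_dims])
    inputs = tf.unstack(x, axis=0)  # list of 'n_steps' (batch_size, input_dims) tensors
    cells = [tf.contrib.rnn.BasicLSTMCell(n_hidden) for _ in range(3)]
    outputs = multi_dRNN_with_dilations(cells, inputs, dilations=[1, 2, 4])
    print("Top layer returns %d tensors of shape %s" % (len(outputs), outputs[0].get_shape()))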
[file: models/drnn.py | repo: yuemind/DilatedRNN | Python | Apache-2.0]
import time
from typing import List, Tuple
import os
import numpy as np
from src.utils.data_utils import ImdbDataset
from src.utils.config_loader import SentimentAnalysisConfigReader
from src.models.sentiment_analysis_rnn import RNNModel, DataPreprocessor
from src.models.sentiment_analysis_tfidf import DumbModel
def get_training_validation_data(X: List, y: List, data_processor: DataPreprocessor)\
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
    Wrapper method which returns the training and validation datasets
Args:
X: list of texts (features)
y: list of ratings
data_processor: a data handler object
Return:
tuple containing the training data, validation data
"""
preprocessed_input = data_processor.clean_data(X)
preprocessed_input = data_processor.tokenize_text(preprocessed_input)
X_train, X_test, y_train, y_test = data_processor.split_train_test(preprocessed_input, y)
return X_train, X_test, y_train, y_test
def train_model(config: SentimentAnalysisConfigReader) -> None:
"""
    Training function which prints a classification summary as a result
Args:
config: Configuration object containing parsed .json file parameters
Return:
None
"""
X, y = [], []
if config.dataset_name == "imdb":
dataset = ImdbDataset(config.dataset_url)
X, y = dataset.get_set("train")
X_test, y_test = dataset.get_set("test")
X = X + X_test
y = y + y_test
file_prefix = "sentiment_analysis_%s" % time.strftime("%Y%m%d_%H%M%S")
if config.model_name == "rnn":
data_preprocessor = DataPreprocessor(config.max_sequence_length, config.validation_split, config.vocab_size)
if config.experimental_mode:
ind = np.random.randint(0, len(X), 1000)
X = [X[i] for i in ind]
y = [y[i] for i in ind]
X_train, X_test, y_train, y_test = get_training_validation_data(X, y, data_preprocessor)
trained_model = RNNModel(config=config, data_preprocessor=data_preprocessor)
history = trained_model.fit(X_train, y_train, X_test, y_test)
print("===========> saving learning curve under plots/")
trained_model.save_learning_curve(history, file_prefix)
print("===========> saving trained model and preprocessor under models/")
trained_model.save_model(file_prefix)
data_preprocessor.save_preprocessor(file_prefix)
else: # model_name =="tfidf"
trained_model = DumbModel(config.vocab_size)
trained_model.fit(X, y)
print("===========> saving trained model under models")
trained_model.save_model(file_prefix)
def main():
"""main function"""
root_dir = os.environ.get("MARABOU_HOME")
if root_dir is None:
raise ValueError("please make sure to setup the environment variable MARABOU_ROOT to point for the root of the project")
config_file_path = os.path.join(root_dir, "marabou/train/config/config_sentiment_analysis.json")
train_config = SentimentAnalysisConfigReader(config_file_path)
train_model(train_config)
if __name__ == '__main__':
main()
[file: marabou/train/src/scripts/train_sentiment_analysis.py | repo: mmarouen/marabou | Python | MIT]
import numpy as np
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (
ExtraTreesClassifier,
RandomForestClassifier,
GradientBoostingClassifier,
)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import FeatureAgglomeration
from sklearn.preprocessing import (
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PolynomialFeatures,
RobustScaler,
StandardScaler,
Binarizer,
)
from sklearn.kernel_approximation import Nystroem, RBFSampler
from sklearn.decomposition import PCA, FastICA
from sklearn.feature_selection import (
SelectFwe,
SelectPercentile,
f_classif,
VarianceThreshold,
)
# A configuration with limited operators for unit tests.
clf_config = {
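    # Convention: top-level string keys hold shared hyperparameter value sets; an
    # empty list under an estimator (e.g. BernoulliNB's "alpha") refers back to
    # the shared entry with the same name.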
"alpha": [1e-3, 1e-2, 1e-1, 1.0, 10.0, 100.0],
"fit_prior": [True, False],
"min_samples_split": range(2, 21),
"min_samples_leaf": range(1, 21),
# Classifiers
GaussianNB: {},
BernoulliNB: {"alpha": [], "fit_prior": []},
MultinomialNB: {"alpha": [], "fit_prior": []},
DecisionTreeClassifier: {
"criterion": ["gini", "entropy"],
"max_depth": range(1, 11),
"min_samples_split": [],
"min_samples_leaf": [],
},
ExtraTreesClassifier: {
"n_estimators": [100],
"criterion": ["gini", "entropy"],
"max_features": [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"min_samples_split": [],
"min_samples_leaf": [],
"bootstrap": [True, False],
},
RandomForestClassifier: {
"n_estimators": [100],
"criterion": ["gini", "entropy"],
"max_features": [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"min_samples_split": range(2, 21),
"min_samples_leaf": range(1, 21),
"bootstrap": [True, False],
},
GradientBoostingClassifier: {
"n_estimators": [100],
"learning_rate": [1e-3, 1e-2, 1e-1, 0.5, 1.0],
"max_depth": range(1, 11),
"min_samples_split": range(2, 21),
"min_samples_leaf": range(1, 21),
"subsample": [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"max_features": np.arange(0.05, 1.01, 0.05),
},
KNeighborsClassifier: {
"n_neighbors": range(1, 51),
"weights": ["uniform", "distance"],
"p": [1, 2],
},
LinearSVC: {
"penalty": ["l1", "l2"],
"loss": ["hinge", "squared_hinge"],
"dual": [False, True],
"tol": [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
"C": [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1.0, 5.0, 10.0, 15.0, 20.0, 25.0],
"param_check": [
lambda params: (not params["dual"] or params["penalty"] == "l2")
and not (params["penalty"] == "l1" and params["loss"] == "hinge")
and not (
params["penalty"] == "l2"
and params["loss"] == "hinge"
and not params["dual"]
)
],
},
LogisticRegression: {
"penalty": ["l1", "l2"],
"C": [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1.0, 5.0, 10.0, 15.0, 20.0, 25.0],
"dual": [False, True],
"param_check": [lambda params: not params["dual"] or params["penalty"] == "l2"],
},
# Preprocesssors
Binarizer: {"threshold": np.arange(0.0, 1.01, 0.05)},
FastICA: {"tol": np.arange(0.0, 1.01, 0.05)},
FeatureAgglomeration: {
"linkage": ["ward", "complete", "average"],
"affinity": ["euclidean", "l1", "l2", "manhattan", "cosine", "precomputed"],
"param_check": [
lambda params: (not params["linkage"] == "ward")
or params["affinity"] == "euclidean"
],
},
MaxAbsScaler: {},
MinMaxScaler: {},
Normalizer: {"norm": ["l1", "l2", "max"]},
Nystroem: {
"kernel": [
"rbf",
"cosine",
"chi2",
"laplacian",
"polynomial",
"poly",
"linear",
"additive_chi2",
"sigmoid",
],
"gamma": np.arange(0.0, 1.01, 0.05),
"n_components": range(1, 11),
},
PCA: {"svd_solver": ["randomized"], "iterated_power": range(1, 11)},
PolynomialFeatures: {
"degree": [2],
"include_bias": [False],
"interaction_only": [False],
},
RBFSampler: {"gamma": np.arange(0.0, 1.01, 0.05)},
RobustScaler: {},
StandardScaler: {},
# Selectors
SelectFwe: {"alpha": np.arange(0, 0.05, 0.001), "score_func": {f_classif: None}},
SelectPercentile: {"percentile": range(1, 100), "score_func": {f_classif: None}},
VarianceThreshold: {"threshold": np.arange(0.05, 1.01, 0.05)},
}
[file: gama/configuration/testconfiguration.py | repo: learsi1911/GAMA_pygmo_v4 | Python | Apache-2.0]
\chapter{Object-Oriented Analysis \& Design} \label{chapter:chapter4}
Object-Oriented Analysis (OOA) is a key activity in good software design as it facilitates the difficult transition between the problem domain and the solution domain. During this stage in the development process, the designer switches from a user-centric to an object-oriented point of view. Information from the domain model, use cases and requirements is extracted and systematically translated into a conceptual model. While the analysis is concerned with the ``what'', the design focuses on the ``how'' of a system.
The design phase refines the analysis model and describes the objects, their attributes, behaviour, and interactions in detail. Outputs of the design model should, therefore, include all details necessary to implement the product. However, Object-Oriented Analysis and Design activities are best conducted in iterative and incremental steps.
In the following section we will elaborate on the analysis model and on how this output shapes the subsequent design model.
% ==============================================================================
% ANALYSIS DIAGRAM SECTION
% ==============================================================================
\section{Analysis Model}
The goal of the analysis model is to create a draft version of the system. Although it abstracts the definition of the most important objects, the analysis model should exhaustively capture the system's required entities. Therefore, a data-driven design approach to identifying all classes was taken.
\begin{figure}[H]
\centering
\includegraphics[height=9.5cm]{img/analysis_diagram.png}
\caption{Analysis Class Diagram}
\label{analysis_diagram}
\end{figure}
After locking down the requirements and subsequently the use cases, the entity classes in the system were identified, as shown in figure \ref{analysis_diagram}. This was achieved by extracting the main nouns that were being commonly used, such as: \texttt{User}, \texttt{Property}, and \texttt{Booking} \cite{GangOfFour1994}.
The Analysis Class Diagram displays the initial classes in the B\&B system and indicates the multiplicities and associations between them. The model achieves high cohesion by acknowledging the separation of concerns design principle, therefore assigning only a small and well-defined set of responsibilities to each class.
% ==============================================================================
% DESIGN DIAGRAM SECTION
% ==============================================================================
\section{Design Model}
The Design Class diagram shown in figure \ref{design_diagram}, illustrates a more interconnected architecture than that shown in figure \ref{analysis_diagram}. This diagram's main purpose is to show the classes and their relationships, in addition to their attributes and operations. The system's architecture is made up of a mix of the following types of UML classes: Boundary, Entity, Controller, and Enumeration. It is to be noted that many of the parameters are abbreviated terms from our glossary for brevity.
As shown by the legend in figure \ref{design_diagram}, the classes have been colour-coded according to the class group that they belong to. The system has been designed to comply with the \textbf{Model-View-Controller} pattern, which will be discussed in more detail in section \ref{component_section}.
\begin{itemize}
\item \textcolor{RoyalBlue}{\textbf{Models/Entity Classes}}: These classes have a connection to the database. By using Object-relational mapping, their objects can be modelled as database records that include attributes and operations that are mainly CRUD based \cite{Hibernate}. Any additional business logic will also be placed in these classes.
Getter and setter operations such as \texttt{User.getName()}, which returns the name attribute of a given user object, have been left out of the diagram due to space constraints. They are implicitly included in the rest of the sequence diagrams in section \ref{sequence_section}. A short illustrative sketch of such an entity class is given after this list.
\item \textcolor{red}{\textbf{Controller/Control Classes}}: These classes provide endpoints to the users in the form of actions. Every HTTP request made by the browser will be connected to an appropriate operation under these classes.
\item \textcolor{RedViolet}{\textbf{View Classes}}: Following various meetings with the client, it was decided to add these classes, which are responsible for generating the HTML to be displayed by the browser. They receive parameters from the appropriate controllers in the form of objects.
\item \textcolor{LimeGreen}{\textbf{Boundary Classes}}: Within the system, there exists a need to talk to external services such as email service providers and payment gateways. The boundary classes act as the connection between these services and the internal models and controller. The connection is achieved by sending requests and receiving success/failure responses.
\item \textcolor{Goldenrod}{\textbf{Enumeration Classes}}: These classes represent a common type of data as constants.
\end{itemize}
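As an illustrative sketch only (the class and field names mirror the design diagram, while the ORM API shown is hypothetical and not taken from the system's code base), an entity class of the kind described above might be declared as follows:
\begin{verbatim}
class Property(Model):                   # entity class mapped to a database table
    name = StringField()
    location = StringField()

    def owner(self):                     # business logic alongside CRUD operations
        return User.find(self.owner_id)  # read (the 'R' in CRUD)

p = Property(name="Seaview B&B", location="Valletta")
p.save()                                 # create/update the mapped record
\end{verbatim}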
\begin{figure}[H]
\centering
\includegraphics[width=14cm]{img/design_diagram_h.png}
\caption{UML Design Class Diagram}
\label{design_diagram}
\end{figure}
% ==============================================================================
% SEQUENCE DIAGRAM SECTION
% ==============================================================================
\section{Sequence Diagrams} \label{sequence_section}
Sequence diagrams are valuable tools for specifying how the system can implement its use cases in line with the classes, controllers, views, and methods developed in the class diagram. Specifically, they show the explicit order in which messages are exchanged between the interacting parties of a use case. The six sequence diagrams presented in this work are not generic forms that exhaust every possible option of a use case; rather, each depicts one specific scenario or route through it \cite{George2017}.\footnote{Most scenarios chosen were straightforward success routes, but note that Figure \ref{Sequence Diagram: View Property List} (View Property List) portrays two iterations of a Property search loop, with one iteration coming up with an empty result set and hence showing the User a warning notification.}
Following the agile approach, the sequence diagrams have been developed iteratively, with the intention of striking a balance between the level of detail needed for implementation in code and the degree of abstraction needed to emphasize the core concepts (see the more detailed notes on this below).
All the methods and classes listed in the sequence diagrams correspond to the information in the class diagram. Return values that are pure HTML strings, e.g. the details of a property, are denoted as views, such as \texttt{propertyDetailsView}.
Note the following about the presentation here:
\begin{itemize}
\item The class name has been used when referring to the class itself (this usually happens when using the class' static methods), and an object name followed by a colon and a class name when referring to an object (so, for instance \texttt{Property} is the Property class, whereas \texttt{p : Property} is a Property object.)
\item When sending a message to some external actor or system (such as the Payment System in Figure \ref{Sequence Diagram: Make Booking} (Make Booking)), the text associated with the message is not meant to name a method --- it is simply a description of the message.
\item The parameters of the method calls are not in any strict format --- it should be clear from the context what is being sent at each stage. Terms from the glossary are commonly abbreviated, as in the Design Class Diagram. Note also that some of the methods are overloaded (it should always be clear from the context how this works --- the \texttt{validateInput()} method in \texttt{BookingController} can check both Booking Details and Initial Booking Details in the obvious way).
\item Each diagram includes, at the very left, a list of the use case Main Flow steps (for example, "P-C.3.a.") corresponding to each of its stages. Duration lines (the lines with dots at either end) are also attached to the steps to make things even clearer.
\item The diagrams have been simplified: due to space and legibility constraints, we have left out many lower-level method calls necessary to carry out the represented scenarios. A typical example of this is the Property constructor call in Figure \ref{Sequence Diagram: Register Property} (Register Property). In a more detailed depiction, the constructor would loop over all the Rooms in the Property constructing each of them, and then further loop over all Policies in each of the Rooms, constructing each of those as well. The current diagram is much clearer than one involving nested loops would be, and the intended realization in terms of lower-level method calls is obvious. Another example of simplification: the static database-related method calls made to the Model classes (\texttt{filter()}, for instance) are shown as returning objects of the same classes. The group thought it clear that a constructor was being called in such instances, and thought it would be unnecessary to add such details to the diagrams.
\item In a similar vein, it is sometimes implicitly assumed that certain pieces of information can be accessed (in cases where it should be clear where this information comes from). This can be illustrated with the User object \texttt{u} in Figure \ref{Sequence Diagram: Cancel Booking} (Cancel Booking) and the Payment Details in Figure \ref{Sequence Diagram: Make Booking} (Make Booking). In the former, it is assumed that \texttt{u} is already constructed, and it is used to get a set of Payment Details. The object corresponds to \texttt{aGuest}, the main actor in the diagram, and was obtained when \texttt{aGuest} logged in. Its reference is being held by a \texttt{SessionController} object. In Figure \ref{Sequence Diagram: Make Booking}, even the method call to the User object is skipped, and it's assumed that the Payment Details are already available. Assumptions like these helped make the diagrams much cleaner. (Another similar assumption in the same diagrams: the Users' email addresses are retrieved without explicitly fetching them at any point.)
\item Many of the objects used are portrayed as being destroyed when the method that held their reference finishes executing (and hence there are no explicit destroy messages sent). In practice, when the objects are destroyed would depend on the garbage collection implementation of the finished application, but such details are not considered here.
\item Note that in effect all the steps necessary to view a User's Bookings (see B-L-1 (Table \ref{use_case_B-L-1}) for the use case) are repeated in Figures \ref{Sequence Diagram: Make Booking}, \ref{Sequence Diagram: Cancel Booking} and \ref{Sequence Diagram: Give Rating}. This is due to all the portrayed scenarios finishing with a list of User Bookings. Use case inclusion would have made these diagrams simpler at the cost of potentially making our use cases more difficult to understand.
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=16cm]{img/seq_diagrams/sqd_p_c.png} \\[0.5em]
\caption{Sequence Diagram: Register Property}
\label{Sequence Diagram: Register Property}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=13cm]{img/seq_diagrams/sqd_e_u.png} \\[0.5em]
\caption{Sequence Diagram: Edit User}
\label{Sequence Diagram: Edit User}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=13cm]{img/seq_diagrams/sqd_p_l.png} \\[0.5em]
\caption{Sequence Diagram: View Property List}
\label{Sequence Diagram: View Property List}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/seq_diagrams/sqd_b_c.png} \\[0.5em]
\caption{Sequence Diagram: Make Booking}
\label{Sequence Diagram: Make Booking}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=16cm]{img/seq_diagrams/sqd_b_d.png} \\[0.5em]
\caption{Sequence Diagram: Cancel Booking}
\label{Sequence Diagram: Cancel Booking}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=16cm]{img/seq_diagrams/sqd_r_c.png} \\[0.5em]
\caption{Sequence Diagram: Give Rating}
\label{Sequence Diagram: Give Rating}
\end{figure}
% ==============================================================================
% STATE MACHINE DIAGRAM SECTION
% ==============================================================================
\section{State Machine Diagram}\label{statemachine_section}
A state machine diagram is a type of behavioural diagram that shows the states an object can be in and the transitions between them. Based on the input it receives, an object can change its state, which in turn determines the operations it can perform at a given moment. The case depicted in the following figure shows the process by which a Guest makes a Booking. In the "Check property availability" composite state, the system checks the property for availability in a few different sub-states. If the requested time is not available for the room, the process is exited. If the room is available, however, the booking is added. The transition from available to unavailable occurs when a booking for the property is created. The unavailable state is exited when a booking is either deleted or lies in the past.
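A minimal sketch of these availability transitions follows (the names are hypothetical, and the time dimension and composite sub-states are deliberately omitted):
\begin{verbatim}
# Illustrative sketch only: a single-slot availability state machine.
class RoomSlot:
    def __init__(self):
        self.state = "available"

    def create_booking(self):    # available -> unavailable
        if self.state != "available":
            raise RuntimeError("slot is unavailable")
        self.state = "unavailable"

    def delete_booking(self):    # unavailable -> available
        self.state = "available"

    def expire_booking(self):    # booking lies in the past
        self.state = "available"

slot = RoomSlot()
slot.create_booking()
slot.delete_booking()
print(slot.state)                # -> available
\end{verbatim}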
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/state_machine_diagram.pdf}
\caption{State Machine Diagram: Make Booking}
\label{state_machine_diagram}
\end{figure}
% ==============================================================================
% ACTIVITY DIAGRAM SECTION
% ==============================================================================
\section{Activity View}
Following the design flow from the static view of the system (cf. Figure \ref{design_diagram}) to the interaction view in the form of sequence diagrams (cf. section \ref{sequence_section}), this section aims to show the control flow in the system in a more abstract way via the activity view. UML activity diagrams present business processes executed by users interacting with the system.
Figures \ref{activity_diagram_1} -- \ref{activity_diagram_4} describe in more detail how the use cases detailed in chapter \ref{chapter:use cases} translate into the sequences forming the main business processes. An overview of which use cases map to a specific activity is given in Table \ref{activity-use case-mapping}\footnote{The colour code in the \textquotesingle ID\textquotesingle{} column refers to the Actor involved in the activity, rather than the Actor set that is generally eligible for the use case.}. As the final product aims at matching Property hosts with potential Property guests, activities such as a Guest making a Booking, a Host registering a Property or cancelling a Booking are selected as primary examples.
Activity diagrams are useful in identifying and specifying the sequential and concurrent nature of dependencies in the system. Whereas the sequence diagrams show the control flow between objects and hence show the technical implementation of a use case, depicting operations on classes and message interaction, the activity diagrams below give a high-level overview of the execution behaviour of the system and do not show the objects that perform the activities \cite{UMLReference2004}. Note how different types of control nodes specify the sequencing of action nodes, and how the high-level description of validation processes is inherent in the behaviour of decision nodes \cite{UML2017}.
Refer to the User Manual in section \ref{chapter:mockups} on how the control described in this section translates into the user interface.
\begin{table}[H]
\centering
\begin{tabular}{| p{3cm} | p{1.25cm} | p{11cm} |}
\hline
Activity & ID & Use Case \\ \hline \hline
& \cellcolor{V}U-C-1 & \thead{A Visitor registers as a Guest or a Host.} \\ \cline{2-3}
\multirow{-2}{*}{\thead{Host \& Property \\ Registration}}
& \cellcolor{H}P-C & \thead{A Host registers a new Property.} \\
\hline
& \cellcolor{G}F-L & \thead{A Guest views their Favourites.} \\ \cline{2-3}
& \cellcolor{G}P-V & \thead{A Guest, Admin, or a Host views a Property\textquotesingle s Property
Details.} \\ \cline{2-3}
\multirow{-3}{*}{\thead{Guest make \\ Booking}}
& \cellcolor{G}B-C & \thead{The Guest makes a new Booking.} \\ \hline
& \cellcolor{G}P-L & \thead{A Guest or an Admin searches for, filters, orders, \\ and views a List of Properties.} \\ \cline{2-3}
& \cellcolor{G}F-C & \thead{A Guest adds a Property to their Favourites.} \\ \cline{2-3}
\multirow{-3}{*}{\thead{Guest search \\ Properties}}
& \cellcolor{G}F-D & \thead{A Guest removes a Property from their Favourites.} \\ \hline
& \cellcolor{H}B-L-2 & \thead{A Host views their own Property\textquotesingle s Bookings, \\ or an Admin views any Property\textquotesingle s
Bookings.} \\ \cline{2-3}\multirow{-2}{*}{\thead{Host cancel \\ Booking}}
& \cellcolor{H}B-D & \thead{A Guest cancels one of their own Cancellable Bookings,\\ or a Host cancels
a Cancellable Booking on their Property.} \\ \hline
\end{tabular}
\caption{Activity \& Use Case Mapping}
\label{activity-use case-mapping}
\end{table}
\begin{figure}[H]
\centering
\includegraphics[width=17cm]{img/activity/register_property.pdf}
\caption{Activity Diagram: Host \& Property Registration}
\label{activity_diagram_1}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=17cm]{img/activity/make_booking.pdf}
\caption{Activity Diagram: Guest Booking}
\label{activity_diagram_2}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=17cm]{img/activity/search.pdf}
\caption{Activity Diagram: Search Properties\protect\footnotemark}
\label{activity_diagram_3}
\end{figure}
\footnotetext{The action node for selecting a Property implies the view of the Property Details page}
\begin{figure}[H]
\centering
\includegraphics[width=17cm]{img/activity/cancel_booking.pdf}
\caption{Activity Diagram: Host Cancel Booking}
\label{activity_diagram_4}
\end{figure}
% ==============================================================================
% COMPONENT DIAGRAM SECTION
% ==============================================================================
\section{Component Diagram}\label{component_section}
A generalised architectural overview of the entire system is given in figure \ref{component_diagram}. Since components are parts of the system that are encapsulated, reusable, and replaceable \cite{Hamilton2006}, the component diagram shows how the use of the widely adopted Model-View-Controller architectural pattern, usually referred to as MVC \cite{Fowler2006}, takes full advantage of these best practices.
The aforementioned diagram builds on the Design and Analysis diagrams by combining similar components together. Within the system, there are three main component groups: Views, Controllers, and Models. The classes under each group of components are responsible for a similar set of tasks. The View classes have the sole responsibility of generating strings from the inputs sent to them from the controllers. The strings will then be sent to the client browsers and effectively be rendered as HTML. The Controller classes are responsible for acting as a middle layer that talks to the Models and the Views. Controllers do not contain business logic but rather provide endpoints that can be called from the generated views. The Models group are the classes that contain the business logic and are directly connected to the database.
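As a purely illustrative sketch of this request flow (the class and method names below are hypothetical and do not appear in the diagrams):
\begin{verbatim}
# Illustrative MVC sketch only; all names are hypothetical.
class Property:                        # Model: business logic and data
    def __init__(self, name, price):
        self.name, self.price = name, price

class PropertyView:                    # View: objects -> HTML string
    def render(self, prop):
        return "<h1>%s</h1><p>EUR %.2f</p>" % (prop.name, prop.price)

class PropertyController:              # Controller: endpoint only
    def show(self, name):
        prop = Property(name, 100.0)   # would be loaded via the Model
        return PropertyView().render(prop)

print(PropertyController().show("Seaside Flat"))
\end{verbatim}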
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/component_diagram.png}
\caption{Component Diagram}
\label{component_diagram}
\end{figure}
\section{Deployment Diagram}
The deployment of the system to hardware environments is described by the following deployment diagram, which shows the physical nodes and the connections between them.
The system contains four main nodes:
\begin{itemize}
\item \textit{UserClient} - A web browser such as Google Chrome, Safari, or Firefox that renders the HTML assets to the Users.
\item \textit{WebServer} - Apache HTTP Server that will be responsible for dealing with HTTP requests and responses to/from clients.
\item \textit{ApplicationServer} - A server such as Apache Tomcat which is capable of running Java EE specifications and hosts a suitable execution environment.
\item \textit{DatabaseServer} - This server will be responsible for managing and maintaining the MySQL database that is connected to the system.
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=17cm]{img/deployment_diagram.png}
\caption{Deployment Diagram}
\label{deployment_diagram}
\end{figure}
\section{Site Map}
The hierarchical structure of the system is depicted in the following site map. It shows the organisation of the site's content and provides a good overview of the pages of the site.
\begin{figure}[H]
\centering
\includegraphics[width=17cm]{img/site_map.pdf}
\caption{Site Map}
\label{site_map}
\end{figure}
|
{"hexsha": "c37162160c5d706da893f137a159f519fb391ecf", "size": 22029, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/chap4.tex", "max_stars_repo_name": "ChristinaKr/SoftwareEngineering", "max_stars_repo_head_hexsha": "0f8329336d86ba5f86e79eb25ca3ce8718246337", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/chap4.tex", "max_issues_repo_name": "ChristinaKr/SoftwareEngineering", "max_issues_repo_head_hexsha": "0f8329336d86ba5f86e79eb25ca3ce8718246337", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/chap4.tex", "max_forks_repo_name": "ChristinaKr/SoftwareEngineering", "max_forks_repo_head_hexsha": "0f8329336d86ba5f86e79eb25ca3ce8718246337", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 91.0289256198, "max_line_length": 1068, "alphanum_fraction": 0.7324435971, "num_tokens": 4700}
|
[STATEMENT]
lemma [simp]:
shows assert_gpv_eq_Done: "assert_gpv b = Done x \<longleftrightarrow> b"
and Done_eq_assert_gpv: "Done x = assert_gpv b \<longleftrightarrow> b"
and Pause_neq_assert_gpv: "Pause out rpv \<noteq> assert_gpv b"
and assert_gpv_neq_Pause: "assert_gpv b \<noteq> Pause out rpv"
and assert_gpv_eq_Fail: "assert_gpv b = Fail \<longleftrightarrow> \<not> b"
and Fail_eq_assert_gpv: "Fail = assert_gpv b \<longleftrightarrow> \<not> b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((assert_gpv b = Generative_Probabilistic_Value.Done x) = b &&& (Generative_Probabilistic_Value.Done x = assert_gpv b) = b &&& Generative_Probabilistic_Value.Pause out rpv \<noteq> assert_gpv b) &&& assert_gpv b \<noteq> Generative_Probabilistic_Value.Pause out rpv &&& (assert_gpv b = Fail) = (\<not> b) &&& (Fail = assert_gpv b) = (\<not> b)
[PROOF STEP]
by(simp_all add: assert_gpv_def)
|
{"llama_tokens": 386, "file": "CryptHOL_Generative_Probabilistic_Value", "length": 1}
|
/*
 * GridMapMsgHelpers.cpp
*
* Created on: Sep 8, 2014
* Author: Péter Fankhauser
* Institute: ETH Zurich, ANYbotics
*/
#include "grid_map_ros/GridMapMsgHelpers.hpp"
// Boost
#include <boost/assign.hpp>
namespace grid_map {
const int nDimensions()
{
return 2;
}
std::map<StorageIndices, std::string> storageIndexNames = boost::assign::map_list_of
(StorageIndices::Column, "column_index")
(StorageIndices::Row, "row_index");
} /* namespace */
|
{"hexsha": "a92f63564f728a1e7e08ba3b4972dee81a245103", "size": 473, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "grid_map/grid_map_ros/src/GridMapMsgHelpers.cpp", "max_stars_repo_name": "xplorer-robot/xplorer", "max_stars_repo_head_hexsha": "51e43613017455331864a166b1bb830819e823a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-09-11T02:32:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-28T10:59:20.000Z", "max_issues_repo_path": "grid_map_ros/src/GridMapMsgHelpers.cpp", "max_issues_repo_name": "stevechan1993/grid_map", "max_issues_repo_head_hexsha": "70876a356539b6bd630a6264ca179bfc5edc58b9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grid_map_ros/src/GridMapMsgHelpers.cpp", "max_forks_repo_name": "stevechan1993/grid_map", "max_forks_repo_head_hexsha": "70876a356539b6bd630a6264ca179bfc5edc58b9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-07-05T17:48:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-11T07:55:44.000Z", "avg_line_length": 18.1923076923, "max_line_length": 84, "alphanum_fraction": 0.6934460888, "num_tokens": 132}
|
using Test
using Plots, Parameters, OrdinaryDiffEq
# hip extensor parameters, single ramp from 0.5 to 1.0 over 0.1 s
cc_p = CCParameters(838.2, 645.0, 26.0, 7.94, 1.0, 1.0, 1.0, 4.93, 1.64)
sec_p = SECParameters(2854.48)
α_p = ActivationProfile(0.5, ActivationRamp(0.0, 0.1, 1.0))
## TorqueGenerator
include("torque_generator.jl")
θ₀ = π / 2 # initial torque generator angle
ω₀ = 2π # initial torque generator angular velocity
tqgen = TorqueGenerator(θ₀, ω₀, α_p, cc_p, sec_p)
@testset "torque generator" verbose = true begin
# @test abs(tqgen.sec.τ - tqgen.cc.τ * tqgen.α) < 1e-6 # initial CC and SEC matching
    # count starting angles at which the initial CC and SEC torques do not match
    n_mismatch = 0
    for θ₀ = 0:0.001:2π
        tqgen = TorqueGenerator(θ₀, ω₀, α_p, cc_p, sec_p)
        n_mismatch += !(abs(tqgen.sec.τ - tqgen.cc.τ * tqgen.α) < 1e-6)
    end
    @test n_mismatch == 0
θ₀ = rand(0.0:0.001:2π)
tqgen = TorqueGenerator(θ₀, ω₀, α_p, cc_p, sec_p)
@test tqgen.θ == tqgen.sec.θ + tqgen.cc.θ
@test tqgen.ω == tqgen.sec.ω + tqgen.cc.ω
end
# test integration - the initial conditions
θ₀ = 215.89 |> deg2rad
ω₀ = -429.803 |> deg2rad
cc_p = CCParameters(838.2, 645.0, 26.0, 7.94, 1.0, 1.0, 1.0, 4.93, 1.64)
sec_p = SECParameters(2854.48)
α_p = ActivationProfile(0.9994277764, 0.6317965682e-01, 0.2115428829, 0.9106166714, 0.8071698914e-03, 0.1604915461, 0.1002353143)
tqgen = TorqueGenerator(θ₀, ω₀, α_p, cc_p, sec_p)
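# NOTE: the commented-out loop below is a manual explicit-Euler integration
# check of the torque generator, kept for reference; the OrdinaryDiffEq-based
# solve further down supersedes it.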
# θcc = tqgen.cc.θ
# t = 0.000
# dt = 0.001
# while t < 0.01
# println("original values: t=$t, θ=$θ₀, ω=$ω₀, θcc=$θcc, ωcc=$(tqgen.cc.ω) τ=$(-tqgen.τ)")
# τ = -update_torque_generator!(tqgen, t, θ₀, ω₀, θcc)
# θ₀ += ω₀ * dt
# ω₀ += τ * dt
# θcc = tqgen.cc.θ + tqgen.cc.ω * dt
# t += dt
# println("updated values: t=$t, θ=$θ₀, ω=$ω₀, θcc=$θcc, ωcc=$(tqgen.cc.ω), τ=$(-tqgen.τ)")
# end
u₀ = [θ₀, ω₀, tqgen.cc.θ]  # state: joint angle, joint angular velocity, CC angle
f(u, p, t) = begin
    # p is the TorqueGenerator; the derivative is [θ̇ = ω, ω̇ = τ, θ̇cc = ωcc]
    τ = update_torque_generator!(p, t, u...)
    return [u[2], τ, p.cc.ω]
end
prob = ODEProblem(f, u₀, (0.0, 0.111), tqgen)
sol = solve(prob, Tsit5(), saveat = 0.001)
tq = [torque(sol.prob.p, sol(t)[1], sol(t)[3]) for t in sol.t]
## CC
include("cc.jl")
# plot relationships
plot(cc_p)
## activations
t1 = collect(0:0.001:0.2);
t2 = collect(0:0.001:0.1);
t3 = collect(0:0.001:0.2);
r1 = ramp.(t1, 0.0, 0.2, 1.0);
r2 = ramp.(t2, 1.0, 0.1, 0.5);
r3 = ramp.(t3, 0.5, 0.2, 0.0);
t_true = collect(0:0.001:1.0)
a_true = [repeat([0.0], 99); r1; repeat([1.0], 199); r2; r3; repeat([0.0], 199)]
a0 = 0.0
r1 = ActivationRamp(0.1, 0.2, 1.0)
r2 = ActivationRamp(0.2, 0.1, 0.5)
r3 = ActivationRamp(0.0, 0.2, 0.0)
p = ActivationProfile(a0, [r1, r2, r3])
## PEC
include("pec.jl")
# recreate figures from Reiner and Edrich (1999)
ϕh = range(-40, 120, length = 1000)
ϕk = range(-10, 150, length = 1000)
ϕa = range(-30, 60, length = 1000)
# hip
knee_angles = [0, 45, 90]
hip_plt = plot(title = "hip", xticks = [-40:20:120...], yticks = [-80:20:60...], ylims = (-90, 70))
for knee_angle in knee_angles
plot!(ϕh, torqueHip.(ϕh, knee_angle), label = "ϕk = $knee_angle")
end
# knee
hip_angles = [0, 45, 90, 120]
knee_plt = plot(title = "knee", ylims = (-30, 30), xticks = [0:50:150...], yticks = [-20:10:30...])
for hip_angle in hip_angles
plot!(ϕk, torqueKnee.(hip_angle, ϕk, 0), label = "ϕh = $hip_angle")
end
# ankle
knee_angles = [0, 60]
ankle_plt = plot(title = "ankle", ylims = (-20, 30), xticks = [-30:10:60...], yticks = [-20:5:30...])
for knee_angle in knee_angles
plot!(ϕa, torqueAnkle.(knee_angle, ϕa), label = "ϕk = $knee_angle")
end
plot(hip_plt, knee_plt, ankle_plt)
θh = deg2rad.(0:0.1:220) # range(50, 220, length=1000) .|> deg2rad
θk = deg2rad.(0:0.1:220) # range(0, 180, length=1000) .|> deg2rad
θa = deg2rad.(0:0.1:220) # range(50, 180, length=1000) .|> deg2rad
# hip
knee_angles = [45, 90, 180] .|> deg2rad
hip_plt = plot(title = "hip", ylims = (-100, 100), legend = :topleft)
for knee_angle in knee_angles
plot!(rad2deg.(θh), torqueHip.(convertHip.(θh), convertKnee(knee_angle)), label = "θk = $(rad2deg(knee_angle))")
end
hip_plt
# knee
hip_angles = [45, 90, 135, 180] .|> deg2rad
knee_plt = plot(title = "knee", ylims = (-100, 100), legend = :topleft)
for hip_angle in hip_angles
plot!(rad2deg.(θk), torqueKnee.(convertHip(hip_angle), convertKnee.(θk), convertKnee.(0)), label = "θh = $(rad2deg(hip_angle))")
end
# ankle
knee_angles = [45, 135] .|> deg2rad
ankle_plt = plot(title = "ankle", ylims = (-100, 100))
for knee_angle in knee_angles
plot!(rad2deg.(θa), torqueAnkle.(convertKnee(knee_angle), convertAnkle.(θa)), label = "θk = $(rad2deg(knee_angle))")
end
plot(hip_plt, knee_plt, ankle_plt)
|
{"hexsha": "747b686fda99b671397b669dc6d2b205c7e08a08", "size": 4580, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "musclemodel/tests.jl", "max_stars_repo_name": "TomRottier/SprintingModel_julia", "max_stars_repo_head_hexsha": "86321d995872574998371f895627b4bb127c9499", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "musclemodel/tests.jl", "max_issues_repo_name": "TomRottier/SprintingModel_julia", "max_issues_repo_head_hexsha": "86321d995872574998371f895627b4bb127c9499", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "musclemodel/tests.jl", "max_forks_repo_name": "TomRottier/SprintingModel_julia", "max_forks_repo_head_hexsha": "86321d995872574998371f895627b4bb127c9499", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4360902256, "max_line_length": 132, "alphanum_fraction": 0.6296943231, "num_tokens": 2022}
|
module TestUtils
using MLJBase
using Test
import LightGBM
@testset "mlj_to_kwargs removes classifier truncate_booster flag" begin
# Arrange
fixture = LightGBM.MLJInterface.LGBMClassifier()
# Act
output = LightGBM.MLJInterface.mlj_to_kwargs(fixture)
# Assert
@test :truncate_booster ∉ keys(output)
end
@testset "mlj_to_kwargs removes regressor truncate_booster flag" begin
# Arrange
fixture = LightGBM.MLJInterface.LGBMRegressor()
# Act
output = LightGBM.MLJInterface.mlj_to_kwargs(fixture)
# Assert
@test :truncate_booster ∉ keys(output)
end
@testset "mlj_to_kwargs adds classifier num_class" begin
# Arrange
fixture = LightGBM.MLJInterface.LGBMClassifier()
# Act
output = LightGBM.MLJInterface.mlj_to_kwargs(fixture, [0,1])
# Assert
@test :num_class in keys(output)
end
end # Module
|
{"hexsha": "4f04c248697be39a3114da40ee9d394785f75098", "size": 871, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/basic/test_mljinterface.jl", "max_stars_repo_name": "yaxxie/LightGBM.jl", "max_stars_repo_head_hexsha": "1234c5d3d383fb29967de6f047131b8139ba447f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2020-03-24T09:21:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T07:05:14.000Z", "max_issues_repo_path": "test/basic/test_mljinterface.jl", "max_issues_repo_name": "yaxxie/LightGBM.jl", "max_issues_repo_head_hexsha": "1234c5d3d383fb29967de6f047131b8139ba447f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 59, "max_issues_repo_issues_event_min_datetime": "2020-03-17T09:54:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-29T14:40:08.000Z", "max_forks_repo_path": "test/basic/test_mljinterface.jl", "max_forks_repo_name": "yaxxie/LightGBM.jl", "max_forks_repo_head_hexsha": "1234c5d3d383fb29967de6f047131b8139ba447f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-09-16T13:09:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-16T11:34:52.000Z", "avg_line_length": 19.3555555556, "max_line_length": 71, "alphanum_fraction": 0.7278989667, "num_tokens": 241}
|
\documentclass[fleqn,10pt]{wlscirep}
\usepackage[utf8]{inputenc}
\usepackage{lineno}
\usepackage{adjustbox}
\usepackage{setspace}
\usepackage[normalem]{ulem}
\usepackage[T1]{fontenc}
\usepackage{pdfpages}
\usepackage{ulem}
\usepackage{array}
\newcolumntype{L}{>{\centering\arraybackslash}m{2cm}}
\title{Controlling for background genetic effects using polygenic scores improves the power of genome-wide association studies}
\author[1,]{Declan Bennett}
\author[1,]{Donal O'Shea}
\author[1,3]{John Ferguson}
\author[2]{Derek Morris}
\author[1,*]{Cathal Seoighe}
\affil[1]{School of Mathematics, Statistics and Applied Mathematics, National University of Ireland Galway, H91TK33, Ireland}
\affil[2]{Centre for Neuroimaging, Cognition and Genomics, Discipline of Biochemistry, National University of Ireland Galway, H91CF50, Ireland.}
\affil[3]{Biostatistics Unit, Clinical Research Facility, National University of Ireland Galway, H91TK33, Ireland}
\affil[*]{cathal.seoighe@nuigalway.ie}
%\affil[+]{these authors contributed equally to this work}
\keywords{GWAS, PGS, Background genetic effects}
\begin{abstract}
Ongoing increases in the size of human genotype and phenotype collections offer the promise of improved understanding of the genetics of complex diseases. In addition to the biological insights that can be gained from the nature of the variants that contribute to the genetic component of complex trait variability, these data bring forward the prospect of predicting complex traits and the risk of complex genetic diseases from genotype data. Here we show that advances in phenotype prediction can be applied to improve the power of genome-wide association studies. We demonstrate a simple and efficient method to model genetic background effects using polygenic scores derived from SNPs that are not on the same chromosome as the target SNP. Using simulated and real data we found that this can result in a substantial increase in the number of variants passing genome-wide significance thresholds. This increase in power to detect trait-associated variants also translates into an increase in the accuracy with which the resulting polygenic score predicts the phenotype from genotype data. Our results suggest that advances in methods for phenotype prediction can be exploited to improve the control of background genetic effects, leading to more accurate GWAS results and further improvements in phenotype prediction.
\end{abstract}
\begin{document}
\flushbottom
\maketitle
\doublespacing
\linenumbers
\section*{Introduction}
Linear mixed effects models (LMMs) are routinely applied to detect associations between SNPs and phenotypes in genome-wide association studies (GWAS), and many methods have been developed that enable these models to be applied efficiently to the large-scale datasets that are typically now encountered in studies of complex traits \cite{chen2007family, svishcheva2012rapid, jakobsdottir2013mastor, emmax, zhang2010mixed, lippert2011fast, gemma, BOLT, jiang2019resource}. Compared to fixed-effects models for GWAS \cite{purcell2007plink}, LMMs have the advantage of being applicable to samples that include related individuals \cite{yang2014advantages, chen2007family, eu2014comparison}. LMMs for this purpose typically include a random effect with covariance proportional to the kinship matrix that indicates the degree of relatedness between pairs of individuals in the sample \cite{eu2014comparison}. The relatedness of individuals in the sample may be known \textit{a priori} or may be derived from the genotype data by constructing a genetic relationship matrix (GRM), with entries corresponding to the genotypic covariance between pairs of individuals. When the entries of the GRM below a specified threshold are set to zero, the GRM is approximately equivalent to a family kinship matrix, with the degree of relatedness that the matrix captures controlled by this threshold. Thresholding the matrix to capture close family relationships (or cryptic relatedness \cite{devlin1999genomic}) allows specialized computational methods for sparse matrices to be applied, so that model fitting remains tractable for studies that include large numbers of individuals \cite{jiang2019resource}. This is the approach taken by fastGWA \cite{jiang2019resource}, a recently developed tool that has been shown to generate correctly calibrated statistical results efficiently for biobank-scale GWAS.
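For orientation, one common estimator of the GRM entry for individuals $j$ and $k$ (the notation here is ours and is included for illustration only) is
\[
A_{jk} = \frac{1}{M}\sum_{i=1}^{M}\frac{(x_{ij}-2p_i)(x_{ik}-2p_i)}{2p_i(1-p_i)},
\]
where $x_{ij}$ is the allele dosage of SNP $i$ in individual $j$, $p_i$ is the allele frequency of SNP $i$ and $M$ is the number of SNPs used to construct the matrix; thresholding then amounts to setting entries of $A$ below a chosen cut-off to zero.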
In addition to enabling application to samples containing related individuals, LMMs can also account for genetic background effects \cite{listgarten2012improved,yang2014advantages}. When a statistical model is used to test for a relationship between a given SNP (the test SNP) and a phenotype, the contribution of all genetic variants in the genome that are not in linkage disequilibrium with the test SNP is a form of background noise. If the trait of interest is both highly polygenic and highly heritable, this noise may be substantial. Failure to account for sources of variance in the response in a statistical model can reduce the power to detect a relationship of interest \cite{fisher1935, neuhaus1998estimation}. An LMM with a full GRM (i.e. derived from all SNPs in the data and with no threshold applied on the level of genetic correlation between individuals) is equivalent to a model in which all variants are assumed to have a causal effect on the phenotype, with effect sizes consisting of independent samples from a Gaussian distribution \cite{BOLT}. This is typically not a good fit to the true effect size distribution; instead, the software package BOLT-LMM \cite{BOLT} uses a spike-and-slab Gaussian mixture for the effect size distribution, with a component (the spike) close to zero corresponding to weak genome-wide effects and accounting for family relationships, and a component with larger variance (the slab) corresponding to variants with large effects \cite{BOLT}. Fitting this more sophisticated model requires specialist numerical methods that are relatively computationally intensive. Consequently, BOLT-LMM is much more computationally intensive than fastGWA \cite{jiang2019resource}.
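Schematically, and in our notation rather than that of \cite{BOLT}, the effect $\beta_m$ of SNP $m$ is modelled as a draw from a two-component Gaussian mixture,
\[
\beta_m \sim (1-\pi)\,\mathcal{N}(0,\sigma^2_{\mathrm{spike}}) + \pi\,\mathcal{N}(0,\sigma^2_{\mathrm{slab}}),
\qquad \sigma^2_{\mathrm{spike}} \ll \sigma^2_{\mathrm{slab}},
\]
with the spike absorbing weak genome-wide effects and relatedness, and the slab accommodating variants of larger effect.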
The full GRM is an $N \times N$ matrix, where $N$ is the number of individuals in the study. The memory and compute requirements of BOLT-LMM are kept tractable by selecting a subset of SNPs to include in the GRM, because BOLT-LMM performs operations that involve the GRM in a factorized form and requires $O(MN^{1.5})$ compute time, where $M$ is the number of SNPs on which the GRM is based. Various options have been explored for which SNPs to include in the calculation of the GRM \cite{yang2014advantages}. Including SNPs in LD with the target SNP results in loss of power, as the effect of the target SNP is partially accounted for by the random effect through the GRM. This has been referred to as proximal contamination \cite{listgarten2012improved}. On the other hand, including all (or most) SNPs that are not in LD with the target SNP, e.g. using a Leave One Chromosome Out (LOCO) approach, can result in dilution of the extent to which the relevant part of the genetic background is captured by the GRM. In the latter case, SNPs that are not relevant, in that they do not capture direct genetic effects or tag relevant population structure effects, effectively add noise to the GRM \cite{listgarten2012improved}. Alternatively, the GRM can be built from only the SNPs that are found, using a linear model, to be associated with the phenotype. Although this results in an increase in statistical power \cite{fastlmm,yang2014advantages,lippert2013benefits}, it does not fully control for population structure and is not recommended if population structure is of substantial concern \cite{BOLT,yang2014advantages}. Methods have been developed that incorporate principal components into the GRM calculation built from significant SNPs; however, most of these methods are not suited to large biobank-scale data without access to cloud computing or large compute farms \cite{tucker2014improving,canela2018atlas,kadie2019ludicrous}. Background genetic effects can also be included in the statistical model as fixed effects, and this is the recommended approach when there are SNPs with large effect sizes \cite{yang2014advantages}. A model fitting approach to determine the SNPs to include as fixed effects has been developed, and this also results in increased power in GWAS \cite{listgarten2012improved}.
\par
As the genomic architecture of complex diseases is uncovered with the help of large biobanks, there is an advancing prospect of predicting quantitative phenotypes and the risk of complex diseases from genotype data. Recent years have seen substantial success and emerging clinical utility in phenotype prediction from polygenic scores (PGS) \cite{tam2019benefits,torkamani2018personal}. PGS are constructed from weighted sums of allele dosages, with the weights corresponding to the effect sizes of the variants. Risk variants (variants associated with the phenotype) are typically inferred from the largest available GWAS, generally a meta-analysis. The clinical potential of PGS has already been shown in complex diseases such as coronary artery disease (CAD), diabetes and cancer \cite{khera2018genome, torkamani2018personal, yanes2020clinical}. In CAD, the identification of individuals with similar risk to those with rare high-risk monogenic variants has been reported \cite{khera2018genome}. Similarly, in breast cancer, pathogenic variants in BRCA1/2 account for 25\% of familial risk of the disease, with genome-wide variants accounting for a further 18\% of the risk \cite{michailidou2017association,bahcall2013common}. It is likely that in the future specialist machine learning methods will be developed to predict phenotype from genotype \cite{torkamani2018personal}, potentially achieving higher accuracy by incorporating the possibility of non-additive effects.
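Concretely, the polygenic score of individual $j$ takes the form
\[
\mathrm{PGS}_j = \sum_{i\in S}\hat{\beta}_i\,x_{ij},
\]
where $S$ is the set of variants included in the score, $\hat{\beta}_i$ is the estimated effect size of variant $i$ and $x_{ij}$ is the corresponding allele dosage.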
\par
Here, we set out an approach to GWAS that seeks to separate the model fitting at the test locus and estimation of the genetic background effect. After carrying out an initial round of GWAS using an existing method, we derive a PGS for each chromosome, using the summary statistics for SNPs on the remaining chromosomes. This LOCO PGS is then included as a fixed effect in a second round of GWAS. We tested this approach in two ways. Firstly, using simulated data we tested for an improvement in power on the task of recovering known causal variants as a function of study size, number of causal variants and trait heritability. In addition, we applied the method to standing height data from the UK Biobank and determined the number and characteristics of additional variants that were detected. For an objective assessment of performance on real data, where the true associations are unknown, we divided the data into test and training sets and predicted the phenotype in the test set. The improvement in performance on the critical task of complex phenotype prediction illustrates the utility of the PGS as a means of accounting for off target genetic effects. This straightforward, modular approach to accounting for genetic background effects in GWAS has the advantage of leveraging advances in phenotype prediction as they become available. It also offers significant improvements in speed relative to existing methods that correct for genetic background.
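In outline (the notation is ours), for a test SNP $s$ on chromosome $c$ the second round of association testing augments the underlying GWAS model with a single additional fixed-effect covariate,
\[
y = \mu + \beta_s x_s + \gamma\,\mathrm{PGS}^{(-c)} + \mbox{(other fixed and random effects)} + \epsilon,
\qquad
\mathrm{PGS}^{(-c)}_j = \sum_{i \notin c}\hat{\beta}_i x_{ij},
\]
where the weights $\hat{\beta}_i$ are taken from the first-round summary statistics, restricted to SNPs not on chromosome $c$.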
\section*{Results}
We simulated data to evaluate the impact of including the LOCO PGS as a fixed effect in GWAS. The simulations consisted initially of a normally-distributed continuous trait in 100,000 individuals. The trait had a narrow-sense heritability ($h^2$) of 0.5 and there were 1,000 causal SNPs with normally-distributed effects on the trait (see Methods for details). We incorporated the LOCO PGS as a fixed effect in a linear mixed model using GCTA fastGWA \cite{jiang2019resource} (we refer to this as fastGWA-PGS). To check the validity of this approach we performed simulations under the null model of no association between genotype and phenotype and found that the method was well calibrated (Fig. S1). This was in line with our expectations, as the LOCO PGS is approximately uncorrelated with the genotype of the tested SNP (see Supplementary Material for a mathematical justification). The p-values remained well calibrated even when we applied a higher p-value threshold for variants to include in the calculation of the LOCO PGS (Fig. S1), resulting in overfitting of the phenotype to SNPs not on the same chromosome as the test SNP. In 100 simulations we found that including a LOCO PGS resulted in a substantial improvement in power to detect the known causal SNPs (Fig. 1). We considered two alternative methods to select the SNP effects to include in the PGS calculation: pruning and thresholding (P\&T) and LDpred2 \cite{choi2019prsice,LDpred2}. When we included the PGS obtained using P\&T as a fixed effect with fastGWA (which we refer to as fastGWA-PGS-PT) we recovered 82 additional causal variants, on average, below the conventional P-value threshold of $5\times10^{-8}$ compared to fastGWA (corresponding to a relative increase in power of 18.4\%; p = $3.0\times 10^{-32}$ from a paired T-test; Tables S1-S3). The performance was further improved when we used LDpred2 to calculate the LOCO PGS (referred to as fastGWA-PGS-LDpred2). This resulted in the recovery of, on average, 115 more causal variants than fastGWA alone (relative increase of 25.9\%; p = $2.3\times 10^{-36}$). We also simulated case-control data for a binary trait with $h^2$ of 0.5 and 1,000 causal loci, with disease prevalence, k, of 0.1 and 0.3. As with the quantitative trait simulations, inclusion of a fixed effect LOCO PGS always resulted in an increase in the average number of causal loci recovered, with an average of 28 more causal loci recovered for a disease prevalence of k = 0.1 (p = 0.19), while k = 0.3 recovered on average 48 more causal loci (p = 0.03) (Fig. S2 and Tables S4 \& S5).
The contribution to phenotype variance of background SNPs can also be modelled as a random effect in a linear mixed model. This approach is applied by BOLT-LMM, which uses a normal mixture random effect, with a component corresponding to SNPs with large effects. The running time of BOLT-LMM is proportional to $MN^{1.5}$ and the memory requirement is approximately $MN/4$ bytes, where $N$ is the number of individuals in the dataset and $M$ is the number of SNPs included in the GRM \cite{BOLT}. When we ran BOLT-LMM with a subset of 165,683 SNPs (see Methods for how these were selected) we found that including the LOCO PGS as a fixed effect resulted in a substantial gain in power (Fig. 1), likely resulting from the inability of the reduced GRM to account fully for the genetic background. No further improvement was obtained by adding the LOCO PGS to BOLT-LMM with a GRM consisting of all of the 664,393 directly genotyped SNPs from the UKB (Fig. S3); however, the power obtained with the smaller GRM with the PGS fixed effect was close to the power obtained with the larger GRM, but with a much lower memory requirement (Table 1). Recently, a new fast method, REGENIE \cite{mbatchou2020computationally}, has been released that also includes control of the polygenic background effect based on prediction of the phenotype from SNPs that are not on the same chromosome as the test SNP. In our simulations the performance of REGENIE was intermediate between fastGWA and BOLT-LMM, but well behind fastGWA-PGS. REGENIE showed no improvement when the LOCO PGS was added as a fixed effect, suggesting that it accounts
adequately for the genetic background effect. Note that REGENIE was omitted from Table 1, as the simulation is based on a single phenotype and would unfairly disadvantage REGENIE, which is optimized for the task of performing association analyses on multiple phenotypes simultaneously.
\begin{figure}
\centering
\includegraphics[width=120mm]{images/Fig1_combiorev}
\caption{
The proportion of causal variants recovered in 100 simulations. The
boxplot shows the median (center line), upper and lower quartiles (hinges) and the maximum and minimum values not more than 1.5 times the interquartile range from the corresponding hinge (whiskers). The simulations consisted of 100,000 individuals and a continuous trait, with narrow-sense heritability of 0.5 and 1,000 causal variants.}
\label{fig:Recovery of causal variants in fixed simulations.}
\end{figure}
\begin{table}[ht]
\centering
\captionof{table}{Pipeline computation time and memory (N=100,000, M=665k)}
\begin{adjustbox}{width=\columnwidth,center}
\begin{tabular}{c|cccc|c}
\toprule
& \multicolumn{4}{c|}{CPU Time (s)} & \multicolumn{1}{c}{} \\
\hline
Method & GWAS & LOCO PGS & GWAS (22 chr) & Total (CPU Time) & Max Memory (GB) \\
\hline
fastGWA-PGS-LDpred2 & 501.2 & 449.6 & 2,953.3 & 3,904.0 & 0.9 \\
fastGWA-PGS-PT & 501.2 & 245.8 & 2,953.3 & 3,700.2 & 0.7 \\
BOLT-LMM-665 & 119,202.0 & 0.0 & 0.0 & 119,202.0 & 15.5 \\
BOLT-LMM-165-PGS-PT & 92,108.0 & 245.8 & 614,514.4 & 706,868.2 & 3.9
\\
%REGENIE-PGS-PT & & 245.8 & 65,396.5 & & 1.5 \\
\hline
\end{tabular}%
\end{adjustbox}
\end{table}
\par We calculated receiver operating characteristic (ROC) curves to investigate whether the increased number of causal variants recovered when we included the LOCO PGS as a fixed effect reflected a reduction in P-values across the board for the phenotype-associated variants or also an improvement in the ordering of the variants, when the variants are ordered by the evidence of an association with the phenotype. Over 100 simulations we found that the area under the ROC curve (AUC) was always higher for fastGWA-PGS than for fastGWA without the LOCO PGS fixed effect. This was also the case for 99 of the 100 simulations when we added the PGS fixed effect to BOLT-LMM. The difference in sensitivity as a function of specificity (Fig. 2 and Table S6) showed that the sensitivity was consistently higher at a given specificity when the LOCO PGS-LDpred2 was included as a fixed effect, indicating an improvement in the ordering of the SNPs. The increase in mean sensitivity was up to 0.073 in the case of fastGWA-PGS-LDpred2 vs fastGWA, corresponding to a relative increase of 11.6\% (at a specificity of 0.9988) over fastGWA. The addition of the LOCO PGS fixed effect led to a smaller but still consistent increase in sensitivity for BOLT-LMM-165. In this case, the greatest increase in the mean sensitivity was 0.028, corresponding to a 4.2\% relative increase in sensitivity (at a specificity of 0.9991).
In addition to increasing the statistical power to detect causal variants, including the PGS fixed effect also resulted in an improvement in effect size estimates (Fig. S4). We found that when a fixed effect PGS was incorporated into the association study the median squared error (MEDSE) of the effect size estimate was substantially reduced (Fig. S4, Table S7-9). Interestingly, the MEDSE of the effect size estimate was largest across all methods for BOLT-LMM with the reduced GRM (Fig. S4).
%\textbf{calculate relative decrease \& p value from Fisher exact test}.
\begin{figure}
\centering
\includegraphics[width=120mm]{images/fig2_fGWA_LDpred2}
\caption{Difference in sensitivity (between fastGWA-PGS-LDpred2 and fastGWA) as a function of specificity for 100 simulations of a continuous trait with narrow-sense heritability of 0.5 and 1,000 causal variants in 100,000 individuals. The specificity (x-axis) is discretized in bins of size 0.0001. Each grey line shows the results of one simulation. The red line shows the mean difference over all simulations. }
\label{fig:deltaROC curves}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=120mm]{images/variable_param_alt}
\caption{Proportion of causal variants recovered in simulations of a quantitative trait over a range of values of $h^2$ and the number of causal loci. Simulations on the top (A) and bottom (B) panels were based on 100,000 and 430,000 randomly sampled individuals from the UK Biobank, respectively.}
\label{fig:variable param}
\end{figure}
\par
\par
\subsection*{Effects of trait heritability, number of causal variants and sample size}
We simulated data over a range of values of sample size, $h^2$ and of the number of causal SNPs to investigate how these parameters affect the impact of including the LOCO PGS as a fixed effect on GWAS power. For the larger sample size, a small improvement in power was obtained even for the lowest values of $h^2$ (0.1) simulated, with a statistically significant improvement for $h^2 \geq 0.2$ (Fig. 3). The improvement was not statistically significant at this value of $h^2$ when only 100,000 samples were used in the simulation, but even in this case the number of causal variants recovered was always at least as large and typically larger when the PGS fixed effect was included in the model (Tables S10, S11). This was somewhat surprising, given that it is assumed that large sample sizes are required for accurate phenotype prediction from PGS \cite{dudbridge2013power}.
The improvement in power resulting from the inclusion of the PGS fixed effect increased consistently with increasing numbers of causal variants in the case of the larger sample size. This was not the case for the smaller sample size, for which the improvement decreased or was lost altogether when the number of causal variants was large (Fig. 3). This is likely due to the loss of power to detect true causal variants and to estimate their effect sizes accurately when the genetic effect is distributed over too large a number of causal variants, resulting in the inability to correct for the genetic background using the PGS. This suggests that larger sample sizes would be required for highly polygenic traits in order to obtain a benefit from using the LOCO PGS fixed effect. However, the larger sample size simulated is comparable in scale to the UK Biobank and with a sample of this size our simulations suggest that a significant improvement in power can be obtained, even for a trait with 10,000 independent causal loci. For the case-control simulation (N=100,000), a more modest increase in power was observed as heritability increased, whereas the power to recover smaller-effect loci decreased dramatically compared to the quantitative simulation. However, we found that for all except three simulations the inclusion of a fixed effect LOCO PGS improved the power to detect associated loci (Fig. S5, Table S12). \par
\subsection*{Application to UK Biobank phenotypes}
We assessed the performance of fastGWA-PGS on real data using standing height, BMI, and heel bone mineral density (HBMD) in individuals of British ancestry ($N_{height}$=395,133, $N_{BMI}$=395,149 \& $N_{HBMD}$=229,191) from the UK Biobank. The distribution of P-values obtained from fastGWA with the LOCO PGS included was lower than that obtained using fastGWA (Fig. S6-8). At a genome-wide significance level of $5\times10^{-8}$, inclusion of a LOCO PGS always increased the number of independent loci recovered, compared to fastGWA (Table 2). Across height, HBMD, and BMI, BOLT-LMM identified the largest number of independent associated loci. Including the PGS fixed effect resulted in substantial increases in the number of independent associated loci, compared to fastGWA alone, for all phenotypes (Table 2, Table S13).
\begin{table}[ht]
\centering
\captionof{table}{Number of independent significant loci identified and resulting phenotype prediction model fit}
\begin{tabular}{rLLrrr}
\hline
Method & Significant loci & $R^2$ (full) & $R^2$ (PGS) & Spearman's $\rho$ & Phenotype \\
\hline
BOLT-LMM & 1,804 & 0.679 & 0.170 & 0.388 & Height \\
fastGWA & 1,318 & 0.671 & 0.165 & 0.382 &\\
fastGWA-PGS-LDpred2 & 1,717 & 0.678 & 0.176 & 0.395 &\\
fastGWA-PGS-PT & 1,583 & 0.676 & 0.173 & 0.391 & \\
\hline
BOLT-LMM & 583 & 0.146 & 0.134 & 0.356 & BMI \\
fastGWA & 450 & 0.143 & 0.130 & 0.351 &\\
fastGWA-PGS-LDpred2 & 500 & 0.142 & 0.127 & 0.346 &\\
fastGWA-PGS-PT & 493 & 0.144 & 0.130 & 0.351 &\\
\hline
BOLT-LMM & 393 & 0.194 & 0.155 & 0.427 & HBMD \\
fastGWA & 324 & 0.189 & 0.150 & 0.418 &\\
fastGWA-PGS-LDpred2 & 385 & 0.196 & 0.157 & 0.432 &\\
fastGWA-PGS-PT & 365 & 0.194 & 0.155 & 0.427 & \\
\hline
\end{tabular}
\end{table}
One way to determine objectively whether fastGWA-PGS outperforms fastGWA on real data is to apply both methods on the key task of phenotype prediction. We partitioned the UKB data into independent training and testing datasets of European ancestry and applied BOLT-LMM, fastGWA and fastGWA-PGS to the training data only to obtain summary statistics. We then
used these summary statistics to calculate polygenic scores using LDpred2 and PRSice2 (see Methods for details). For two of the three phenotypes (height and HBMD), the PGS fixed effect resulted in an increase in the correlation between the PGS and the phenotype in the test data (Table 2). In both cases the highest correlation with the phenotype was obtained using fastGWA-PGS-LDpred2, which out-performed BOLT-LMM on this task. For the remaining phenotype (BMI), the addition of the PGS fixed effect resulted in no change or a slightly worse correlation with the phenotype in the test data. In this case the highest performance was obtained by BOLT-LMM (but at a substantial computational cost; Table 1). However, even in this case, we found that including only the SNPs with low P-values in the polygenic score (as implemented by the P\&T method) resulted in an improvement over fastGWA (Fig. S9).
\par
\section*{Discussion}
Omitting covariates that are associated with a response and independent of an effect of interest can result in a reduction in the efficiency of the estimation of the effect of interest \cite{fisher1935, neuhaus1998estimation}. Complex traits are associated with the genotype of many loci across the genome, but the effects of genetic variants other than the variant being tested are often not fully modelled by GWAS methods. We evaluated a simple two-stage approach to accounting for this genetic background effect that consists of performing an initial GWAS and using the summary statistics to calculate a polygenic score, and then including the polygenic score, derived from SNPs not on the same chromosome as the target SNP, as a fixed effect in a second round of association testing. Using simulated data, we found that this led to a substantial improvement in the power of fastGWA, an efficient tool for biobank-scale GWAS that does not fully control for genetic background effects. When we included the LOCO polygenic score as a fixed effect with fastGWA (which we refer to as fastGWA-PGS), the power exceeded that of REGENIE \cite{mbatchou2020computationally}, a recent, computationally efficient tool for GWAS that uses ridge regression to control for genetic background effects. When BOLT-LMM \cite{BOLT} was used with a GRM derived from all of the simulated variants, the LOCO PGS fixed effect did not provide any boost in power (Fig. S3); however, the equivalent (or slightly improved) performance of fastGWA-PGS-LDpred2 (Fig. 1) was achieved at a much lower computational cost (Table 1). Furthermore, we note that our simulations were favourable to BOLT-LMM because the LOCO PGS was calculated from the same set of variants that were used in the GRM of BOLT-LMM. In practice, millions of variants could be included in the LOCO PGS calculations, but the number of variants, $M$, that can be included in the GRM of BOLT-LMM is constrained by memory and compute time, both of which scale at least linearly with $M$. A further key advantage of the approach that we propose is that it is modular. Any phenotype prediction method can be used to predict the combined effect of the LOCO genetic variants on the phenotype. As methods for phenotype prediction improve, we anticipate that the performance of this approach will increase.
The increase in power using the PGS fixed effect was largest for simulated phenotypes with high heritability and a large number of causal variants (Fig. 3). In these cases the many background SNPs collectively explain a substantial proportion of the phenotypic variance, and summarizing the contribution of these background SNPs to the phenotype via the LOCO PGS is likely to result in a better estimate of the effect of the target SNP and its standard error. The boost in performance derived from including the LOCO PGS as a fixed effect also depended on study size. For example, when the number of causal variants became large (10,000) there was no substantial boost in performance in the simulation that included 100,000 individuals, presumably because in this case the study size was not sufficient to identify and accurately estimate the effects of the causal variants. Even with this large number of causal variants the larger simulation (with 430,000 individuals) still showed a significant improvement arising from the LOCO PGS fixed effect (Fig. 3). Across all the simulation parameters we investigated, the performance of fastGWA-PGS was never worse than that of fastGWA without the LOCO PGS. We also note that we calculated the pruning and thresholding LOCO PGS using SNPs that were selected based on a fixed P-value threshold. Further increases in power may be possible by optimizing the SNPs that are used to calculate the PGS separately for each omitted chromosome. This optimization was not required for LDpred2, which may help to explain why we achieved significantly better power when the LOCO PGS was calculated using this method rather than pruning and thresholding (Fig. 1).
We also applied the method to real data (standing height, heel bone mineral density (HBMD) and body mass index (BMI) in individuals of British ancestry in the UK Biobank). Consistent with the simulation results, we found more independent trait-associated loci using fastGWA-PGS-LDpred2 than with fastGWA alone for all three traits (30\%, 19\%, \& 11\% more for height, HBMD, and BMI, respectively; Table 2). Although BOLT-LMM recovered the largest number of independent significant loci across all UK Biobank traits, this did not always translate into better correlation between a PGS calculated from the resulting summary statistics and the phenotype in the test dataset. In fact, the highest correlation was obtained by fastGWA-PGS-LDpred2 for two of the three traits. This could be explained by a higher proportion of true positives among the loci detected using the PGS-based methods or a more accurate estimate of the effect sizes by these methods, as suggested by Fig. S4. For BMI, the correlation was in fact lower between the PGS and the phenotype in the test dataset when the LOCO PGS fixed effect was used (Table 2). However, even in this case a larger number of significant variants were recovered than with fastGWA. Interestingly, when only the variants with lower P-values were included for the calculation of the PGS (using the P\&T method), the correlation between the PGS and the phenotype in the test dataset was higher when the PGS LOCO fixed effect was included (Fig. S9). This is consistent with a larger number of true positives (and therefore greater power) and/or better effect size estimates for the SNPs that were significantly associated with the phenotype. \par
The use of polygenic scores for phenotype prediction from genotype is an increasingly important application of the results of GWAS \cite{martin2019predicting}. Polygenic scores can capture a substantial component of the risk of complex diseases \cite{khera2018genome, mars2020polygenic} and guide interventions that can confer health benefits to individuals and reduce the stress on health systems \cite{gibson2019utilization}. Performing GWAS on a subset of samples and predicting on the remainder, we observed an increase in the correlation of the PGS with the phenotype when we included the LOCO PGS as a fixed effect for two of the three traits considered, consistent with improved effect size estimates (Fig. S4). Our results suggest that a modular approach that integrates advances in phenotype prediction with efficient GWAS methods can have a significant impact on the power of GWAS and that this can, in turn, lead to more accurate phenotype prediction. A recent study showed that models that allow unequal a priori contributions of SNPs to trait heritability can lead to substantial improvements in the accuracy of trait prediction \cite{zhang2020improved}. As new efficient methods emerge from these and further insights, they can easily be substituted into the calculation of the LOCO PGS fixed effect. The current fast pace of methodological innovation in phenotype prediction supports the use, at least for the time being, of the simple modular approach to modelling genetic background effects evaluated here.
\section*{Conclusion}
The tasks of detecting trait-associated variants and predicting the trait in a new sample from the summary statistics of these variants are closely intertwined. Improved performance on the trait-association task can yield more associated variants and better estimates of their effect sizes, resulting in improvement on the prediction task. Conversely, improved methods for phenotype prediction can help to control for background genetic effects in methods that identify trait-associated variants and their effects. The method that we have explored here consists of incorporating a LOCO PGS as a fixed-effect covariate to control for these background genetic effects; however, any method for phenotype prediction could play this role, provided that its application is restricted to variants that are not linked to the target SNP. We show here that incorporating the PGS as a fixed-effect covariate results in increased power to detect trait-associated variants in GWAS. The resulting trait-associated variants and effect size estimates can lead to an improvement in the PGS, as illustrated by improved performance in the task of predicting the phenotype in a test dataset.
\section*{Methods}
\subsection*{Simulations}
\subsubsection*{Genotype QC}
The use of the UK Biobank materials falls within UK Biobank's generic Research Tissue Bank (RTB) approval from the NHS North West Research Ethics Committee, UK. The simulated genotype data were based on autosomal genotyped data from the UK Biobank. To limit the effects of population stratification, only individuals reporting white British ancestry (data field 21000; code 1001; N=443,076) were included in these analyses. The genotype data for the simulation analysis were based on directly genotyped variants with minor allele frequency (MAF) greater than 0.05\%. Variants with genotype missingness greater than 2\% or that failed a test for Hardy-Weinberg equilibrium (HWE) at $\alpha = 0.0001$ were excluded, resulting in a total of 664,393 genetic variants. There were 429,359 samples remaining following filtering. The sparse GRM required by fastGWA was created by setting entries corresponding to sample pairs with an estimated relatedness of less than 0.05 to 0. To account for population structure in the association studies, principal component analysis (PCA) was performed on a set of 165,684 variants LD-pruned at an $R^2$ threshold of 0.1 in a sliding window of size 500bp, sliding by 200bp. This set was also used as the basis of the BOLT-LMM analyses with the reduced GRM size (referred to as BOLT-LMM-165 in Results). All genotype QC was implemented in plink2 \cite{chang2015second}.
Based on the above genotype data, we simulated a continuous phenotype using the GCTA software suite \cite{yang2011gcta}. The initial simulation consisted of 100,000 individuals, 1,000 randomly sampled causal variants and $h^2 = 0.5$. This simulation was repeated 100 times with the 664,393 variants remaining after variant filtering for the GRM calculation. Power was calculated as the proportion of the causal variants recovered. To calculate false positive rates, we first removed all SNPs within 1 Mb of the causal SNPs. Further simulations were carried out to investigate the effects of varying the number of causal SNPs, $h^2$, and the sample size on method performance. In each case, all parameters other than the ones being varied were the same as in the initial simulation, and one simulation was performed per set of parameter values. The pROC R package was used to generate receiver operating characteristic (ROC) curves \cite{robin2011proc}. We applied the same simulation strategy to binary traits with two levels of disease prevalence, 0.1 \& 0.3, using 1,000 causal loci with $h^2=0.5$ and 100,000 samples.
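For reference, the additive model underlying these simulations can be written (in our own notation, not GCTA's) as
\begin{equation*}
y_i = \sum_{j \in \mathcal{C}} x_{ij}\beta_j + e_i, \qquad e_i \sim N\!\left(0,\; \sigma_g^2 \,\frac{1-h^2}{h^2}\right),
\end{equation*}
where $\mathcal{C}$ is the set of causal variants, $x_{ij}$ is the standardized genotype of individual $i$ at variant $j$, and $\sigma_g^2$ is the empirical variance of the genetic component $\sum_{j \in \mathcal{C}} x_{ij}\beta_j$, so that the causal variants jointly explain a fraction $h^2$ of the phenotypic variance.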
\subsubsection*{Simulation Association tests}
Association testing was performed using fastGWA, REGENIE and BOLT-LMM. To account for known sources of covariation (technical batch effects, population structure, biological effects), 10 PCs, sex, age, genotype batch and assessment centre were included as fixed-effect covariates in the statistical models. For the PGS method, we first performed GWAS (using fastGWA, REGENIE or BOLT-LMM) and calculated PGS values on a leave-one-chromosome-out (LOCO) basis. This resulted in 22 sets of PGS values (one for each autosomal chromosome, calculated from the summary statistics of variants on all other autosomal chromosomes). Two PGS strategies were used in this study: pruning and thresholding (P+T), denoted by the suffix PGS-PT, and LDpred2, denoted by the suffix PGS-LDpred2. The LOCO PGS-PT were calculated using PRSice2 (version 2.2.12 (2020-02-20)) \cite{choi2019prsice}. To decrease computation time and reduce the likelihood of over-fitting, a P-value threshold of $5\times10^{-5}$ was chosen, a priori, for the LOCO PGS-PT calculation. Association testing was then performed using fastGWA in a chromosome-wise manner, with the corresponding LOCO PGS included as a fixed effect. The bigsnpr R package was used to calculate the LOCO PGS-LDpred2 fixed effects \cite{LDpred2}. To reduce computation time, 22 LOCO genotype objects containing the SNP correlations were precomputed.
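In our notation, the P+T LOCO score of individual $i$ for target chromosome $c$ is
\begin{equation*}
\mathrm{PGS}_{i,-c} = \sum_{j \notin c,\; P_j < 5\times10^{-5}} \hat{\beta}_j\, g_{ij},
\end{equation*}
where the sum runs over the clumped SNPs outside chromosome $c$ that pass the P-value threshold, $\hat{\beta}_j$ is the effect estimate from the initial GWAS and $g_{ij}$ is the allele count; the LDpred2 scores have the same form but replace the hard threshold with posterior mean effects for all SNPs.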
\subsection*{Application to the UK Biobank}
\subsubsection*{UK biobank Association tests}
The genotype selection, quality control and genetic relationship matrix calculation were performed following the QC procedure of \textit{Jiang et al.} \cite{jiang2019resource}. The genetic relationship matrix used with fastGWA and BOLT-LMM was calculated for all European individuals (N=458,686), using a set of 556,516 lightly pruned HAPMAP3 variants ($R^2$ greater than 0.9 in a 100 variant sliding window of size 1,000 \& MAF > 0.01) \cite{jiang2019resource}. Association summary statistics were generated from a set of 1.1 million HAPMAP3 variants (MAF > 0.01, HWE $\alpha = 1\times10^{-6}$ and missingness < 0.05) \cite{jiang2019resource}. Principal components were calculated using a set of 34,775 variants (LD-pruned with $R^2$ = 0.05 in a sliding window of size 1,000bp, sliding by 50bp) \cite{bycroft2018uk}. To identify white British samples with similar genetic backgrounds, we clustered samples based on the first 6 principal components \cite{bycroft2018uk}, resulting in a subset of 406,319 white British samples. Sample pairs with a KING kinship coefficient above 0.05, with one member of the pair in the white British group and the other in the group self-reporting as white European, were removed. This left 399,135 white British and 46,406 other European samples \cite{manichaikul2010robust,bycroft2018uk}. To account for known sources of phenotype and genotype variation, 10 PCs, age, sex, genotype batch and assessment centre were included as fixed-effect covariates in the BOLT-LMM and fastGWA analyses. PRSice2 and LDpred2 were used to calculate the LOCO PGS. Independent loci were identified using the clumping algorithm in plink2 (P-value threshold = $5\times10^{-9}$, window size = 5Mb, and LD $R^2$ threshold = 0.01).
\subsubsection*{UK Biobank phenotype prediction}
To test the performance of fastGWA-PGS on the task of predicting standing height, BMI and HBMD, the UK Biobank data were partitioned into training and test datasets. The training data consisted of the white British individuals with similar genetic backgrounds described above, and the polygenic score predictions were tested on the remaining independent European samples. Summary statistics were generated using fastGWA, fastGWA-PGS-PT, fastGWA-PGS-LDpred2 and BOLT-LMM. We used LDpred2 and PRSice2 to predict the phenotypic values in the test set. LDpred2 requires LD correlation data, and we used a pre-computed set built on the 1.1 million HAPMAP3 variants for this purpose. The model fit was assessed for each method by fitting a linear model to the values of the phenotype in the test set as a function of their predicted values, accounting for known sources of phenotypic variation, i.e., sex, age and PCs. We report both the proportion of variation explained collectively by the PGS, sex, age, the first 4 principal components and assessment centre, as well as the $R^2$ using only the PGS in the regression model.\par
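In our notation, the evaluation model for each method was
\begin{equation*}
y_{\mathrm{test}} = \alpha + \gamma\, \widehat{\mathrm{PGS}} + \mathbf{Z}\boldsymbol{\delta} + \varepsilon,
\end{equation*}
with $\mathbf{Z}$ holding sex, age, the leading principal components and assessment centre; we take the PGS-only $R^2$ to be the coefficient of determination of the corresponding model with $\mathbf{Z}$ omitted.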
\section*{Data Availability}
All genotype and phenotype data analyzed are available, subject to application, from the UK Biobank (application 23739). Code to implement the fastGWA-PGS method described in this work is available under MIT license from https://github.com/declan93/PGS-LMM/.
\par
\bibliography{bibl}
\section*{Acknowledgements}
This research has been conducted using the UK Biobank Resource under Application Number 23739. This publication has emanated from research conducted with the financial support of Science Foundation Ireland under Grant number 16/IA/4612.
\section*{Author contributions statement}
CS conceived and supervised the project and performed analyses. DB implemented the pipeline and performed analyses. DB and CS wrote the manuscript, with input from DM. DM advised on application of the method to human phenotypes. DOS performed analysis. JF provided the mathematical justification of the method.
\section*{Additional information}
\textbf{Competing interests} \par The authors declare that they have no competing interests.
\pagebreak
%\includepdf{nr-research-reporting.pdf}
%\includepdf{nr-editorial-policy-checklist.pdf}
\end{document}
|
{"hexsha": "31b7fa6006177b05c4814139bf23f21492a5aee3", "size": 44558, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/main.tex", "max_stars_repo_name": "declan93/PGS-LMM", "max_stars_repo_head_hexsha": "51fa71c51a93ea85b325bf9dc2d343bbd62b98fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-29T19:52:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T19:52:51.000Z", "max_issues_repo_path": "paper/main.tex", "max_issues_repo_name": "declan93/PGS-LMM", "max_issues_repo_head_hexsha": "51fa71c51a93ea85b325bf9dc2d343bbd62b98fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/main.tex", "max_forks_repo_name": "declan93/PGS-LMM", "max_forks_repo_head_hexsha": "51fa71c51a93ea85b325bf9dc2d343bbd62b98fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-25T22:01:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-25T22:01:32.000Z", "avg_line_length": 182.6147540984, "max_line_length": 2582, "alphanum_fraction": 0.7994523991, "num_tokens": 10486}
|
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
class gpool(Layer):
    def __init__(self, gs=20, param=40, mode="max", **kwargs):
        self.gs = gs          # number of graph nodes pooled over
        self.param = param    # number of features per node
        self.mode = mode      # pooling mode: "max", "mean" or "sum"
        # forward any remaining layer kwargs (e.g. name) to the base class
        kwargs.setdefault("input_shape", (gs, param))
        super(gpool, self).__init__(**kwargs)
def build(self, input_shape):
self.built=True
    def call(self, x):
        # inputs arrive wrapped in a list; pool the first tensor over the
        # node axis (axis 1)
        x = x[0]
        if self.mode == "max":
            return K.max(x, axis=1)
        if self.mode == "mean":
            return K.mean(x, axis=1)
        if self.mode == "sum":
            return K.sum(x, axis=1)
        raise ValueError("unknown pooling mode: %r" % self.mode)
    def compute_output_shape(self, input_shape):
        input_shape = input_shape[0]
        assert len(input_shape) == 3
        assert input_shape[1] == self.gs
        assert input_shape[2] == self.param
        # pooling over the node axis leaves one value per feature
        return tuple([input_shape[0], self.param])
def get_config(self):
mi={"gs":self.gs,"param":self.param,"mode":self.mode}
th=super(gpool,self).get_config()
th.update(mi)
return th
    @classmethod
    def from_config(cls, config):
        return cls(**config)
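# Minimal usage sketch (ours, not part of the original file): the layer
# expects its inputs wrapped in a list and pools a batch of 20-node graphs
# with 40 features per node down to one 40-vector per graph.
if __name__ == "__main__":
    import tensorflow as tf
    layer = gpool(gs=20, param=40, mode="max")
    demo = tf.random.uniform((3, 20, 40))
    print(layer([demo]).shape)  # expected: (3, 40)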
|
{"hexsha": "ca3f7f078f6af4abb079063ac658a06b23d61945", "size": 1285, "ext": "py", "lang": "Python", "max_stars_repo_path": "grapa/layerfiles/gpool.py", "max_stars_repo_name": "psorus/grapa", "max_stars_repo_head_hexsha": "6af343bb35c466c971ded1876e7a9d00e77cef00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grapa/layerfiles/gpool.py", "max_issues_repo_name": "psorus/grapa", "max_issues_repo_head_hexsha": "6af343bb35c466c971ded1876e7a9d00e77cef00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grapa/layerfiles/gpool.py", "max_forks_repo_name": "psorus/grapa", "max_forks_repo_head_hexsha": "6af343bb35c466c971ded1876e7a9d00e77cef00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.8970588235, "max_line_length": 59, "alphanum_fraction": 0.6832684825, "include": true, "reason": "import numpy", "num_tokens": 330}
|
import cv2
import numpy
import math
from enum import Enum
class Pipeline:
"""
An OpenCV pipeline generated by GRIP.
"""
def __init__(self):
"""initializes all values to presets or None if need to be set
"""
self.__hsv_threshold_hue = [40.46762589928058, 108.78787878787878]
self.__hsv_threshold_saturation = [0.0, 255.0]
self.__hsv_threshold_value = [162.81474820143885, 248.56060606060603]
self.hsv_threshold_output = None
self.__find_contours_input = self.hsv_threshold_output
self.__find_contours_external_only = False
self.find_contours_output = None
self.__filter_contours_contours = self.find_contours_output
self.__filter_contours_min_area = 100.0
self.__filter_contours_min_perimeter = 0.0
self.__filter_contours_min_width = 0.0
self.__filter_contours_max_width = 1000.0
self.__filter_contours_min_height = 0.0
self.__filter_contours_max_height = 1000.0
self.__filter_contours_solidity = [13.489208633093524, 29.292929292929294]
self.__filter_contours_max_vertices = 100000.0
self.__filter_contours_min_vertices = 8.0
self.__filter_contours_min_ratio = 2.0
self.__filter_contours_max_ratio = 3.0
self.filter_contours_output = None
def process(self, source0):
"""
Runs the pipeline and sets all outputs to new values.
"""
# Step HSV_Threshold0:
self.__hsv_threshold_input = source0
(self.hsv_threshold_output) = self.__hsv_threshold(self.__hsv_threshold_input, self.__hsv_threshold_hue, self.__hsv_threshold_saturation, self.__hsv_threshold_value)
# Step Find_Contours0:
self.__find_contours_input = self.hsv_threshold_output
(self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only)
# Step Filter_Contours0:
self.__filter_contours_contours = self.find_contours_output
(self.filter_contours_output) = self.__filter_contours(self.__filter_contours_contours, self.__filter_contours_min_area, self.__filter_contours_min_perimeter, self.__filter_contours_min_width, self.__filter_contours_max_width, self.__filter_contours_min_height, self.__filter_contours_max_height, self.__filter_contours_solidity, self.__filter_contours_max_vertices, self.__filter_contours_min_vertices, self.__filter_contours_min_ratio, self.__filter_contours_max_ratio)
@staticmethod
def __hsv_threshold(input, hue, sat, val):
"""Segment an image based on hue, saturation, and value ranges.
Args:
input: A BGR numpy.ndarray.
            hue: A list of two numbers that are the min and max hue.
            sat: A list of two numbers that are the min and max saturation.
            val: A list of two numbers that are the min and max value.
Returns:
A black and white numpy.ndarray.
"""
out = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)
return cv2.inRange(out, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))
@staticmethod
def __find_contours(input, external_only):
"""Sets the values of pixels in a binary image to their distance to the nearest black pixel.
Args:
input: A numpy.ndarray.
external_only: A boolean. If true only external contours are found.
Return:
A list of numpy.ndarray where each one represents a contour.
"""
if(external_only):
mode = cv2.RETR_EXTERNAL
else:
mode = cv2.RETR_LIST
method = cv2.CHAIN_APPROX_SIMPLE
        # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x
        # but (contours, hierarchy) in OpenCV 4.x; [-2] picks the contours in
        # either case.
        contours = cv2.findContours(input, mode=mode, method=method)[-2]
return contours
@staticmethod
def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,
min_height, max_height, solidity, max_vertex_count, min_vertex_count,
min_ratio, max_ratio):
"""Filters out contours that do not meet certain criteria.
Args:
input_contours: Contours as a list of numpy.ndarray.
min_area: The minimum area of a contour that will be kept.
min_perimeter: The minimum perimeter of a contour that will be kept.
min_width: Minimum width of a contour.
            max_width: Maximum width of a contour.
            min_height: Minimum height of a contour.
            max_height: Maximum height of a contour.
solidity: The minimum and maximum solidity of a contour.
min_vertex_count: Minimum vertex Count of the contours.
max_vertex_count: Maximum vertex Count.
min_ratio: Minimum ratio of width to height.
max_ratio: Maximum ratio of width to height.
Returns:
Contours as a list of numpy.ndarray.
"""
output = []
for contour in input_contours:
x,y,w,h = cv2.boundingRect(contour)
if (w < min_width or w > max_width):
continue
if (h < min_height or h > max_height):
continue
area = cv2.contourArea(contour)
if (area < min_area):
continue
if (cv2.arcLength(contour, True) < min_perimeter):
continue
hull = cv2.convexHull(contour)
solid = 100 * area / cv2.contourArea(hull)
if (solid < solidity[0] or solid > solidity[1]):
continue
if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
continue
            ratio = float(w) / h
if (ratio < min_ratio or ratio > max_ratio):
continue
output.append(contour)
return output
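# Minimal usage sketch (ours, not part of the GRIP-generated file); the image
# path below is a hypothetical placeholder.
if __name__ == "__main__":
    pipeline = Pipeline()
    frame = cv2.imread("sample.jpg")
    if frame is not None:
        pipeline.process(frame)
        print("contours kept:", len(pipeline.filter_contours_output))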
|
{"hexsha": "13e9259e8c629af33c67aaa8d7455bcd9d49533a", "size": 5816, "ext": "py", "lang": "Python", "max_stars_repo_path": "grip_high_goal.py", "max_stars_repo_name": "4662FRCRobotics/InfiniteRechargeVision", "max_stars_repo_head_hexsha": "6f24b2e6b1af6b4ac7fe3281e18bcefb2d346f8c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grip_high_goal.py", "max_issues_repo_name": "4662FRCRobotics/InfiniteRechargeVision", "max_issues_repo_head_hexsha": "6f24b2e6b1af6b4ac7fe3281e18bcefb2d346f8c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grip_high_goal.py", "max_forks_repo_name": "4662FRCRobotics/InfiniteRechargeVision", "max_forks_repo_head_hexsha": "6f24b2e6b1af6b4ac7fe3281e18bcefb2d346f8c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4525547445, "max_line_length": 479, "alphanum_fraction": 0.6549174691, "include": true, "reason": "import numpy", "num_tokens": 1363}
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
A collection of useful quantum information functions for operators.
"""
import warnings
import numpy as np
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.pauli import Pauli
from qiskit.quantum_info.operators.channel import SuperOp
def process_fidelity(channel,
target=None,
require_cp=True,
require_tp=False,
require_cptp=False):
r"""Return the process fidelity of a noisy quantum channel.
This process fidelity :math:`F_{\text{pro}}` is given by
.. math::
F_{\text{pro}}(\mathcal{E}, U)
= \frac{Tr[S_U^\dagger S_{\mathcal{E}}]}{d^2}
where :math:`S_{\mathcal{E}}, S_{U}` are the
:class:`~qiskit.quantum_info.SuperOp` matrices for the input quantum
*channel* :math:`\cal{E}` and *target* unitary :math:`U` respectively,
and :math:`d` is the dimension of the *channel*.
Args:
channel (QuantumChannel): noisy quantum channel.
target (Operator or None): target unitary operator.
If `None` target is the identity operator [Default: None].
require_cp (bool): require channel to be completely-positive
[Default: True].
require_tp (bool): require channel to be trace-preserving
[Default: False].
require_cptp (bool): (DEPRECATED) require input channels to be
CPTP [Default: False].
Returns:
float: The process fidelity :math:`F_{\text{pro}}`.
Raises:
QiskitError: if the channel and target do not have the same dimensions,
or have different input and output dimensions.
        QiskitError: if the channel or target are not completely-positive
(with ``require_cp=True``) or not trace-preserving
(with ``require_tp=True``).
"""
# Format inputs
if isinstance(channel, (list, np.ndarray, Operator, Pauli)):
channel = Operator(channel)
else:
channel = SuperOp(channel)
input_dim, output_dim = channel.dim
if input_dim != output_dim:
raise QiskitError(
'Quantum channel must have equal input and output dimensions.')
if target is not None:
        # Multiply channel by adjoint of target
target = Operator(target)
if (input_dim, output_dim) != target.dim:
raise QiskitError(
'Quantum channel and target must have the same dimensions.')
channel = channel @ target.adjoint()
# Validate complete-positivity and trace-preserving
if require_cptp:
# require_cptp kwarg is DEPRECATED
# Remove in future qiskit version
warnings.warn(
"Please use `require_cp=True, require_tp=True` "
"instead of `require_cptp=True`.", DeprecationWarning)
require_cp = True
require_tp = True
if isinstance(channel, Operator) and (require_cp or require_tp):
is_unitary = channel.is_unitary()
# Validate as unitary
if require_cp and not is_unitary:
raise QiskitError('channel is not completely-positive')
if require_tp and not is_unitary:
raise QiskitError('channel is not trace-preserving')
else:
# Validate as QuantumChannel
if require_cp and not channel.is_cp():
raise QiskitError('channel is not completely-positive')
if require_tp and not channel.is_tp():
raise QiskitError('channel is not trace-preserving')
# Compute process fidelity with identity channel
if isinstance(channel, Operator):
# |Tr[U]/dim| ** 2
fid = np.abs(np.trace(channel.data) / input_dim)**2
else:
# Tr[S] / (dim ** 2)
fid = np.trace(channel.data) / (input_dim**2)
return float(np.real(fid))
def average_gate_fidelity(channel,
target=None,
require_cp=True,
require_tp=False):
r"""Return the average gate fidelity of a noisy quantum channel.
The average gate fidelity :math:`F_{\text{ave}}` is given by
.. math::
F_{\text{ave}}(\mathcal{E}, U)
&= \int d\psi \langle\psi|U^\dagger
\mathcal{E}(|\psi\rangle\!\langle\psi|)U|\psi\rangle \\
&= \frac{d F_{\text{pro}}(\mathcal{E}, U) + 1}{d + 1}
where :math:`F_{\text{pro}}(\mathcal{E}, U)` is the
:meth:`~qiskit.quantum_info.process_fidelity` of the input quantum
*channel* :math:`\mathcal{E}` with a *target* unitary :math:`U`, and
:math:`d` is the dimension of the *channel*.
Args:
channel (QuantumChannel): noisy quantum channel.
target (Operator or None): target unitary operator.
If `None` target is the identity operator [Default: None].
require_cp (bool): require channel to be completely-positive
[Default: True].
require_tp (bool): require channel to be trace-preserving
[Default: False].
Returns:
float: The average gate fidelity :math:`F_{\text{ave}}`.
Raises:
QiskitError: if the channel and target do not have the same dimensions,
or have different input and output dimensions.
        QiskitError: if the channel or target are not completely-positive
(with ``require_cp=True``) or not trace-preserving
(with ``require_tp=True``).
"""
if isinstance(channel, (list, np.ndarray, Operator, Pauli)):
channel = Operator(channel)
else:
channel = SuperOp(channel)
dim, _ = channel.dim
f_pro = process_fidelity(channel,
target=target,
require_cp=require_cp,
require_tp=require_tp)
return (dim * f_pro + 1) / (dim + 1)
def gate_error(channel, target=None, require_cp=True, require_tp=False):
r"""Return the gate error of a noisy quantum channel.
The gate error :math:`E` is given by the average gate infidelity
.. math::
E(\mathcal{E}, U) = 1 - F_{\text{ave}}(\mathcal{E}, U)
where :math:`F_{\text{ave}}(\mathcal{E}, U)` is the
:meth:`~qiskit.quantum_info.average_gate_fidelity` of the input
quantum *channel* :math:`\mathcal{E}` with a *target* unitary
:math:`U`.
Args:
channel (QuantumChannel): noisy quantum channel.
target (Operator or None): target unitary operator.
If `None` target is the identity operator [Default: None].
require_cp (bool): require channel to be completely-positive
[Default: True].
require_tp (bool): require channel to be trace-preserving
[Default: False].
Returns:
float: The average gate error :math:`E`.
Raises:
QiskitError: if the channel and target do not have the same dimensions,
or have different input and output dimensions.
        QiskitError: if the channel or target are not completely-positive
(with ``require_cp=True``) or not trace-preserving
(with ``require_tp=True``).
"""
return 1 - average_gate_fidelity(
channel, target=target, require_cp=require_cp, require_tp=require_tp)
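# Minimal usage sketch (ours, not part of the original module): a unitary
# channel compared against itself has process fidelity 1 and gate error 0.
if __name__ == "__main__":
    x_gate = Operator(np.array([[0, 1], [1, 0]]))
    print(process_fidelity(x_gate, target=x_gate))       # expected: 1.0
    print(average_gate_fidelity(x_gate, target=x_gate))  # expected: 1.0
    print(gate_error(x_gate, target=x_gate))             # expected: 0.0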
|
{"hexsha": "9c838c149a0f0f7833c33911271b359c0ab8d7a9", "size": 7885, "ext": "py", "lang": "Python", "max_stars_repo_path": "qiskit/quantum_info/operators/measures.py", "max_stars_repo_name": "qinvador/qiskit-terra", "max_stars_repo_head_hexsha": "4e104de3c113c01688a0ed06b2f2cb1a958fce44", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-05-29T03:58:03.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-29T03:58:03.000Z", "max_issues_repo_path": "qiskit/quantum_info/operators/measures.py", "max_issues_repo_name": "qinvador/qiskit-terra", "max_issues_repo_head_hexsha": "4e104de3c113c01688a0ed06b2f2cb1a958fce44", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-11-13T17:33:37.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-03T09:35:00.000Z", "max_forks_repo_path": "qiskit/quantum_info/operators/measures.py", "max_forks_repo_name": "qinvador/qiskit-terra", "max_forks_repo_head_hexsha": "4e104de3c113c01688a0ed06b2f2cb1a958fce44", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-12-03T15:48:14.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-11T13:08:03.000Z", "avg_line_length": 38.842364532, "max_line_length": 79, "alphanum_fraction": 0.624730501, "include": true, "reason": "import numpy", "num_tokens": 1867}
|
c
c
c ###################################################
c ## COPYRIGHT (C) 1990 by Jay William Ponder ##
c ## All Rights Reserved ##
c ###################################################
c
c ################################################################
c ## ##
c ## subroutine column -- access Hessian elements by column ##
c ## ##
c ################################################################
c
c
c "column" takes the off-diagonal Hessian elements stored
c as sparse rows and sets up indices to allow column access
c
c
subroutine column (nvar,hinit,hstop,hindex,
& cinit,cstop,cindex,cvalue)
implicit none
include 'sizes.i'
integer i,j,k
integer m,nvar
integer hinit(*)
integer hstop(*)
integer cinit(*)
integer cstop(*)
integer hindex(*)
integer cindex(*)
integer cvalue(*)
c
c
c zero out the start and end marker for each column
c
do i = 1, nvar
cinit(i) = 0
cstop(i) = 0
end do
c
c count the number of elements in each column
c
do i = 1, nvar
do j = hinit(i), hstop(i)
k = hindex(j)
cstop(k) = cstop(k) + 1
end do
end do
c
c set each start marker just past last element for its column
c
cinit(1) = cstop(1) + 1
do i = 2, nvar
cinit(i) = cinit(i-1) + cstop(i)
end do
c
c set column index by scanning rows in reverse order
c
do i = nvar, 1, -1
do j = hinit(i), hstop(i)
k = hindex(j)
m = cinit(k) - 1
cinit(k) = m
cindex(m) = i
cvalue(m) = j
end do
end do
c
c convert from number of elements to end marker for column
c
do i = 1, nvar
cstop(i) = cinit(i) + cstop(i) - 1
end do
return
end
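# Illustrative Python re-implementation of the "column" routine above (our
# sketch, not part of the Tinker source). rows[i] lists the column indices of
# the off-diagonal elements stored in sparse row i; the counting pass, the
# prefix sums and the reverse scan mirror the three loops of the Fortran.
def column(nvar, rows):
    count = [0] * nvar                  # elements destined for each column
    for i in range(nvar):
        for k in rows[i]:
            count[k] += 1
    cinit = [0] * nvar                  # will become start offsets
    end = 0
    for k in range(nvar):
        end += count[k]
        cinit[k] = end                  # one past the last slot of column k
    cindex = [0] * end
    for i in reversed(range(nvar)):     # scan rows in reverse order
        for k in rows[i]:
            cinit[k] -= 1
            cindex[cinit[k]] = i        # record a row index for column k
    cstop = [cinit[k] + count[k] for k in range(nvar)]  # exclusive ends
    return cinit, cstop, cindex
# e.g. column(3, [[1], [0, 2], [1]]) returns
# ([0, 1, 3], [1, 3, 4], [1, 0, 2, 1]): column 1 is touched by rows 0 and 2.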
|
{"hexsha": "608325ff9c9070185973d16994fa0eaa7493b63e", "size": 2045, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "HCsbLib/HCsbLib/HTLib2.Bioinfo/External.Tinker/src/tinker-6.2.06/column.f", "max_stars_repo_name": "htna/HCsbLib", "max_stars_repo_head_hexsha": "dae7f4e3e5e2fbc3b6e619f2ea037f661a8ae097", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-01-21T23:45:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-03T16:34:24.000Z", "max_issues_repo_path": "HCsbLib/HCsbLib/HTLib2.Bioinfo/External.Tinker/src/tinker-6.2.06/column.f", "max_issues_repo_name": "htna/HCsbLib", "max_issues_repo_head_hexsha": "dae7f4e3e5e2fbc3b6e619f2ea037f661a8ae097", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HCsbLib/HCsbLib/HTLib2.Bioinfo/External.Tinker/src/tinker-6.2.06/column.f", "max_forks_repo_name": "htna/HCsbLib", "max_forks_repo_head_hexsha": "dae7f4e3e5e2fbc3b6e619f2ea037f661a8ae097", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-03-05T00:26:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-08T23:25:29.000Z", "avg_line_length": 26.9078947368, "max_line_length": 70, "alphanum_fraction": 0.4239608802, "num_tokens": 534}
|
"""
The SIDDType 2.0 definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from typing import Union, Tuple
from collections import OrderedDict
from copy import deepcopy
import numpy
from sarpy.io.xml.base import Serializable
from sarpy.io.xml.descriptors import SerializableDescriptor
from sarpy.geometry import point_projection
from sarpy.io.product.sidd_schema import get_specification_identifier, \
get_urn_details, validate_xml_ns
from .base import DEFAULT_STRICT
from .ProductCreation import ProductCreationType
from .Display import ProductDisplayType
from .GeoData import GeoDataType
from .Measurement import MeasurementType
from .ExploitationFeatures import ExploitationFeaturesType
from .DownstreamReprocessing import DownstreamReprocessingType
from .Compression import CompressionType
from .DigitalElevationData import DigitalElevationDataType
from .ProductProcessing import ProductProcessingType
from .Annotations import AnnotationsType
from ..sidd1_elements.SIDD import SIDDType as SIDDType1
from .blocks import ErrorStatisticsType, RadiometricType, MatchInfoType
logger = logging.getLogger(__name__)
############
# namespace validation and definition of required entries in the namespace dictionary
_SIDD_SPECIFICATION_IDENTIFIER = get_specification_identifier()
_SIDD_URN = 'urn:SIDD:2.0.0'
_sidd_details = get_urn_details(_SIDD_URN)
_SIDD_SPECIFICATION_VERSION = _sidd_details['version']
_SIDD_SPECIFICATION_DATE = _sidd_details['date']
_ISM_URN = _sidd_details['ism_urn']
_SFA_URN = _sidd_details['sfa_urn']
_SICOMMON_URN = _sidd_details['sicommon_urn']
##########
# The SIDD object
class SIDDType(Serializable):
"""
The root element of the SIDD 2.0 document.
"""
_fields = (
'ProductCreation', 'Display', 'GeoData', 'Measurement', 'ExploitationFeatures',
'DownstreamReprocessing', 'ErrorStatistics', 'Radiometric', 'MatchInfo', 'Compression',
'DigitalElevationData', 'ProductProcessing', 'Annotations')
_required = (
'ProductCreation', 'Display', 'GeoData', 'Measurement', 'ExploitationFeatures')
# Descriptor
ProductCreation = SerializableDescriptor(
'ProductCreation', ProductCreationType, _required, strict=DEFAULT_STRICT,
docstring='Information related to processor, classification, and product type.') # type: ProductCreationType
Display = SerializableDescriptor(
'Display', ProductDisplayType, _required, strict=DEFAULT_STRICT,
docstring='Contains information on the parameters needed to display the product in '
'an exploitation tool.') # type: ProductDisplayType
GeoData = SerializableDescriptor(
'GeoData', GeoDataType, _required, strict=DEFAULT_STRICT,
docstring='Contains generic and extensible targeting and geographic region '
'information.') # type: GeoDataType
Measurement = SerializableDescriptor(
'Measurement', MeasurementType, _required, strict=DEFAULT_STRICT,
docstring='Contains the metadata necessary for performing measurements.') # type: MeasurementType
ExploitationFeatures = SerializableDescriptor(
'ExploitationFeatures', ExploitationFeaturesType, _required, strict=DEFAULT_STRICT,
docstring='Computed metadata regarding the input collections and '
'final product.') # type: ExploitationFeaturesType
DownstreamReprocessing = SerializableDescriptor(
'DownstreamReprocessing', DownstreamReprocessingType, _required, strict=DEFAULT_STRICT,
docstring='Metadata describing any downstream processing of the '
'product.') # type: Union[None, DownstreamReprocessingType]
ErrorStatistics = SerializableDescriptor(
'ErrorStatistics', ErrorStatisticsType, _required, strict=DEFAULT_STRICT,
docstring='Error statistics passed through from the SICD metadata.') # type: Union[None, ErrorStatisticsType]
Radiometric = SerializableDescriptor(
'Radiometric', RadiometricType, _required, strict=DEFAULT_STRICT,
docstring='Radiometric information about the product.') # type: Union[None, RadiometricType]
MatchInfo = SerializableDescriptor(
'MatchInfo', MatchInfoType, _required, strict=DEFAULT_STRICT,
docstring='Information about other collections that are matched to the current '
'collection. The current collection is the collection from which this '
'SIDD product was generated.') # type: MatchInfoType
Compression = SerializableDescriptor(
'Compression', CompressionType, _required, strict=DEFAULT_STRICT,
docstring='Contains information regarding any compression that has occurred '
'to the image data.') # type: CompressionType
DigitalElevationData = SerializableDescriptor(
'DigitalElevationData', DigitalElevationDataType, _required, strict=DEFAULT_STRICT,
        docstring='This describes any Digital Elevation Data included with '
'the SIDD product.') # type: DigitalElevationDataType
ProductProcessing = SerializableDescriptor(
'ProductProcessing', ProductProcessingType, _required, strict=DEFAULT_STRICT,
docstring='Contains metadata related to algorithms used during '
'product generation.') # type: ProductProcessingType
Annotations = SerializableDescriptor(
'Annotations', AnnotationsType, _required, strict=DEFAULT_STRICT,
docstring='List of annotations for the imagery.') # type: AnnotationsType
def __init__(self, ProductCreation=None, Display=None, GeoData=None,
Measurement=None, ExploitationFeatures=None, DownstreamReprocessing=None,
ErrorStatistics=None, Radiometric=None, MatchInfo=None, Compression=None,
DigitalElevationData=None, ProductProcessing=None, Annotations=None, **kwargs):
"""
Parameters
----------
ProductCreation : ProductCreationType
Display : ProductDisplayType
GeoData : GeoDataType
Measurement : MeasurementType
ExploitationFeatures : ExploitationFeaturesType
DownstreamReprocessing : None|DownstreamReprocessingType
ErrorStatistics : None|ErrorStatisticsType
Radiometric : None|RadiometricType
MatchInfo : None|MatchInfoType
Compression : None|CompressionType
DigitalElevationData : None|DigitalElevationDataType
ProductProcessing : None|ProductProcessingType
Annotations : None|AnnotationsType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
nitf = kwargs.get('_NITF', {})
if not isinstance(nitf, dict):
raise TypeError('Provided NITF options are required to be in dictionary form.')
self._NITF = nitf
self._coa_projection = None
self.ProductCreation = ProductCreation
self.Display = Display
self.GeoData = GeoData
self.Measurement = Measurement
self.ExploitationFeatures = ExploitationFeatures
self.DownstreamReprocessing = DownstreamReprocessing
self.ErrorStatistics = ErrorStatistics
self.Radiometric = Radiometric
self.MatchInfo = MatchInfo
self.Compression = Compression
self.DigitalElevationData = DigitalElevationData
self.ProductProcessing = ProductProcessing
self.Annotations = Annotations
super(SIDDType, self).__init__(**kwargs)
@property
def coa_projection(self):
"""
The COA Projection object, if previously defined through using :func:`define_coa_projection`.
Returns
-------
None|sarpy.geometry.point_projection.COAProjection
"""
return self._coa_projection
@property
def NITF(self):
"""
Optional dictionary of NITF header information, pertains only to subsequent
SIDD file writing.
Returns
-------
Dict
"""
return self._NITF
def can_project_coordinates(self):
"""
Determines whether the necessary elements are populated to permit projection
between image and physical coordinates. If False, then the (first discovered)
reason why not will be logged at error level.
Returns
-------
bool
"""
if self._coa_projection is not None:
return True
if self.Measurement.ProjectionType != 'PlaneProjection':
logger.error(
'Formulating a projection is only supported for PlaneProjection, '
'got {}.'.format(self.Measurement.ProjectionType))
return False
return True
def define_coa_projection(self, delta_arp=None, delta_varp=None, range_bias=None,
adj_params_frame='ECF', overide=True):
"""
Define the COAProjection object.
Parameters
----------
delta_arp : None|numpy.ndarray|list|tuple
ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
delta_varp : None|numpy.ndarray|list|tuple
VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
range_bias : float|int
Range bias adjustable parameter (m), defaults to 0.
adj_params_frame : str
One of ['ECF', 'RIC_ECF', 'RIC_ECI'], specifying the coordinate frame used for
expressing `delta_arp` and `delta_varp` parameters.
overide : bool
should we redefine, if it is previously defined?
Returns
-------
None
"""
if not self.can_project_coordinates():
logger.error('The COAProjection object cannot be defined.')
return
if self._coa_projection is not None and not overide:
return
self._coa_projection = point_projection.COAProjection.from_sidd(
self, delta_arp=delta_arp, delta_varp=delta_varp, range_bias=range_bias,
adj_params_frame=adj_params_frame)
def project_ground_to_image(self, coords, **kwargs):
"""
Transforms a 3D ECF point to pixel (row/column) coordinates. This is
implemented in accordance with the SICD Image Projections Description Document.
        **Really Scene-To-Image projection.**
Parameters
----------
coords : numpy.ndarray|tuple|list
            ECF coordinates to map to image coordinates, of size `N x 3`.
kwargs
The keyword arguments for the :func:`sarpy.geometry.point_projection.ground_to_image` method.
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following
              the SICD convention, the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
See Also
--------
sarpy.geometry.point_projection.ground_to_image
"""
if 'use_structure_coa' not in kwargs:
kwargs['use_structure_coa'] = True
return point_projection.ground_to_image(coords, self, **kwargs)
def project_ground_to_image_geo(self, coords, ordering='latlong', **kwargs):
"""
Transforms a 3D Lat/Lon/HAE point to pixel (row/column) coordinates. This is
implemented in accordance with the SICD Image Projections Description Document.
        **Really Scene-To-Image projection.**
Parameters
----------
coords : numpy.ndarray|tuple|list
            Lat/Lon/HAE coordinates to map to image coordinates, of size `N x 3`.
ordering : str
If 'longlat', then the input is `[longitude, latitude, hae]`.
Otherwise, the input is `[latitude, longitude, hae]`. Passed through
to :func:`sarpy.geometry.geocoords.geodetic_to_ecf`.
kwargs
The keyword arguments for the :func:`sarpy.geometry.point_projection.ground_to_image_geo` method.
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following
              the SICD convention, the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
See Also
--------
sarpy.geometry.point_projection.ground_to_image_geo
"""
if 'use_structure_coa' not in kwargs:
kwargs['use_structure_coa'] = True
return point_projection.ground_to_image_geo(coords, self, ordering=ordering, **kwargs)
def project_image_to_ground(self, im_points, projection_type='HAE', **kwargs):
"""
Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
the image coordinate array
projection_type : str
One of `['PLANE', 'HAE', 'DEM']`. Type `DEM` is a work in progress.
kwargs
The keyword arguments for the :func:`sarpy.geometry.point_projection.image_to_ground` method.
Returns
-------
numpy.ndarray
Ground Plane Point (in ECF coordinates) corresponding to the input image coordinates.
See Also
--------
sarpy.geometry.point_projection.image_to_ground
"""
if 'use_structure_coa' not in kwargs:
kwargs['use_structure_coa'] = True
return point_projection.image_to_ground(
im_points, self, projection_type=projection_type, **kwargs)
def project_image_to_ground_geo(self, im_points, ordering='latlong', projection_type='HAE', **kwargs):
"""
Transforms image coordinates to ground plane WGS-84 coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
the image coordinate array
projection_type : str
One of `['PLANE', 'HAE', 'DEM']`. Type `DEM` is a work in progress.
ordering : str
Determines whether return is ordered as `[lat, long, hae]` or `[long, lat, hae]`.
Passed through to :func:`sarpy.geometry.geocoords.ecf_to_geodetic`.
kwargs
The keyword arguments for the :func:`sarpy.geometry.point_projection.image_to_ground_geo` method.
Returns
-------
numpy.ndarray
            Ground Plane Point (in geodetic coordinates, as determined by `ordering`) corresponding to the input image coordinates.
See Also
--------
sarpy.geometry.point_projection.image_to_ground_geo
"""
if 'use_structure_coa' not in kwargs:
kwargs['use_structure_coa'] = True
return point_projection.image_to_ground_geo(
im_points, self, ordering=ordering, projection_type=projection_type, **kwargs)
@staticmethod
def get_xmlns_collection():
"""
Gets the correct SIDD 2.0 dictionary of xml namespace details.
Returns
-------
dict
"""
return OrderedDict([
('xmlns', _SIDD_URN), ('xmlns:sicommon', _SICOMMON_URN),
('xmlns:sfa', _SFA_URN), ('xmlns:ism', _ISM_URN)])
@staticmethod
def get_des_details():
"""
Gets the correct SIDD 2.0 DES subheader details.
Returns
-------
dict
"""
return OrderedDict([
('DESSHSI', _SIDD_SPECIFICATION_IDENTIFIER),
('DESSHSV', _SIDD_SPECIFICATION_VERSION),
('DESSHSD', _SIDD_SPECIFICATION_DATE),
('DESSHTN', _SIDD_URN)])
@classmethod
def from_node(cls, node, xml_ns, ns_key='default', kwargs=None):
if ns_key is None:
raise ValueError('ns_key must be defined.')
if ns_key not in xml_ns:
raise ValueError('ns_key {} is not in the xml namespace'.format(ns_key))
if xml_ns[ns_key].startswith('urn:SIDD:1.'):
return SIDDType1.from_node(node, xml_ns, ns_key=ns_key, kwargs=kwargs)
valid_ns = validate_xml_ns(xml_ns, ns_key)
if not xml_ns[ns_key].startswith('urn:SIDD:2.'):
raise ValueError('Cannot use urn {} for SIDD version 2.0'.format(xml_ns[ns_key]))
if not valid_ns:
logger.warning(
'SIDD namespace validation failed,\n\t'
'which may lead to subsequent deserialization failures')
return super(SIDDType, cls).from_node(node, xml_ns, ns_key=ns_key, kwargs=kwargs)
def to_xml_bytes(self, urn=None, tag='SIDD', check_validity=False, strict=DEFAULT_STRICT):
if urn is None:
urn = self.get_xmlns_collection()
return super(SIDDType, self).to_xml_bytes(urn=urn, tag=tag, check_validity=check_validity, strict=strict)
def to_xml_string(self, urn=None, tag='SIDD', check_validity=False, strict=DEFAULT_STRICT):
return self.to_xml_bytes(urn=urn, tag=tag, check_validity=check_validity, strict=strict).decode('utf-8')
def copy(self):
"""
Provides a deep copy.
Returns
-------
SIDDType
"""
out = super(SIDDType, self).copy()
out._NITF = deepcopy(self._NITF)
return out
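# Minimal usage sketch (ours, not part of sarpy): the static helpers expose
# the SIDD 2.0 namespace and DES subheader details without constructing a
# full SIDDType instance.
if __name__ == "__main__":
    print(SIDDType.get_xmlns_collection()["xmlns"])  # urn:SIDD:2.0.0
    print(SIDDType.get_des_details()["DESSHTN"])     # urn:SIDD:2.0.0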
|
{"hexsha": "4a2eb7a028eaeb8a047830affaf26f29c31c4112", "size": 17707, "ext": "py", "lang": "Python", "max_stars_repo_path": "sarpy/io/product/sidd2_elements/SIDD.py", "max_stars_repo_name": "bombaci-vsc/sarpy", "max_stars_repo_head_hexsha": "3e31e9d7fca77612b60f2507f6f7068d1660a3e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 119, "max_stars_repo_stars_event_min_datetime": "2018-07-12T22:08:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T12:11:39.000Z", "max_issues_repo_path": "sarpy/io/product/sidd2_elements/SIDD.py", "max_issues_repo_name": "bombaci-vsc/sarpy", "max_issues_repo_head_hexsha": "3e31e9d7fca77612b60f2507f6f7068d1660a3e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 72, "max_issues_repo_issues_event_min_datetime": "2018-03-29T15:57:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T01:46:21.000Z", "max_forks_repo_path": "sarpy/io/product/sidd2_elements/SIDD.py", "max_forks_repo_name": "bombaci-vsc/sarpy", "max_forks_repo_head_hexsha": "3e31e9d7fca77612b60f2507f6f7068d1660a3e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 54, "max_forks_repo_forks_event_min_datetime": "2018-03-27T19:57:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T20:53:11.000Z", "avg_line_length": 40.4269406393, "max_line_length": 118, "alphanum_fraction": 0.6627887276, "include": true, "reason": "import numpy", "num_tokens": 3838}
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
class QciLinearRegression(object):
def __init__(self, learning_rate=0.01, epoch=50000, patience=10,
train_x=None, train_y=None,
validate_x=None, validate_y=None,
test_x=None, test_y=None):
self.name = 'QciLinearRegression'
self.loss = 'mean_squared_error'
self.learning_rate = learning_rate
self.epoch = epoch
self.patience = patience
self.optimizer = tf.keras.optimizers.Adam(self.learning_rate)
if train_x is not None:
self.train_x = train_x
self.train_y = train_y
self.validate_x = validate_x
self.validate_y = validate_y
self.test_x = test_x
self.test_y = test_y
else:
self.train_x, self.train_y, self.validate_x, \
self.validate_y, self.test_x, \
self.test_y = self.generate_dataset()
def generate_dataset(self):
train_x = np.array([-40, -10, 0, 8, 15, 22, 38, 20, 9, 13], dtype=np.float32)
train_y = np.array([-40, 14, 32, 46, 59, 72, 100, 68, 48.2, 55.4], dtype=np.float32)
validate_x = np.array([], dtype=float)
validate_y = np.array([], dtype=float)
test_x = np.array([], dtype=float)
test_y = np.array([], dtype=float)
return train_x, train_y, validate_x, validate_y, test_x, test_y
def train(self):
model = self.build_model()
model.compile(loss=self.loss, optimizer=self.optimizer)
class PrintDot(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('epoch:{0}...{1}!'.format(epoch, logs))
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=self.patience)
        print('x:{0}; y:{1}'.format(self.train_x.shape, self.train_y.shape))
history = model.fit(self.train_x, self.train_y,
epochs=self.epoch, validation_split = 0.1,
verbose=False, callbacks=[early_stop, PrintDot()])
plt.title('linear regression training process')
plt.xlabel('epochs')
plt.ylabel('error')
plt.plot(history.history['loss'])
#plt.show()
plt.savefig('/content/drive/My Drive/aqp/aqt003_001.png', format='png')
model.save('./work/aqt003_qiclr')
weights = np.array(model.get_weights())
print(weights)
return weights
def build_model(self):
layer1 = tf.keras.layers.Dense(units=1, input_shape=[1])
model = tf.keras.Sequential([layer1])
return model
def predict(self, data):
model = tf.keras.models.load_model('./work/aqt003_qiclr')
rst = model.predict(data)
return rst
if '__main__' == __name__:
lr = QciLinearRegression()
lr.train()
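# Note (ours): the built-in dataset maps degrees Celsius to degrees Fahrenheit
# (y = 1.8 * x + 32), so the learned weights should approach [[1.8], [32.0]]
# and a prediction for 100 C should be close to 212 F, e.g.:
#     print(lr.predict(np.array([100.0], dtype=np.float32)))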
|
{"hexsha": "89faf2a8062f8c390348b1d91cc8469e67461c7b", "size": 2980, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/pqb/qic_linear_regression.py", "max_stars_repo_name": "yt7589/aqp", "max_stars_repo_head_hexsha": "c9c1c79facdea7ace73e2421e8a5868d87fb58dd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app/pqb/qic_linear_regression.py", "max_issues_repo_name": "yt7589/aqp", "max_issues_repo_head_hexsha": "c9c1c79facdea7ace73e2421e8a5868d87fb58dd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/pqb/qic_linear_regression.py", "max_forks_repo_name": "yt7589/aqp", "max_forks_repo_head_hexsha": "c9c1c79facdea7ace73e2421e8a5868d87fb58dd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2702702703, "max_line_length": 97, "alphanum_fraction": 0.5956375839, "include": true, "reason": "import numpy", "num_tokens": 717}
|
# Copyright 2021 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modeling_strategy_descriptor."""
from absl.testing import absltest
from typing import Dict
from typing import Type
import numpy as np
from wfa_planning_evaluation_framework.models.goerg_model import (
GoergModel,
)
from wfa_planning_evaluation_framework.models.reach_curve import (
ReachCurve,
)
from wfa_planning_evaluation_framework.models.reach_surface import (
ReachSurface,
)
from wfa_planning_evaluation_framework.models.pairwise_union_reach_surface import (
PairwiseUnionReachSurface,
)
from wfa_planning_evaluation_framework.simulator.modeling_strategy import (
ModelingStrategy,
)
from wfa_planning_evaluation_framework.driver.modeling_strategy_descriptor import (
MODELING_STRATEGIES,
ModelingStrategyDescriptor,
)
class FakeModelingStrategy(ModelingStrategy):
def __init__(
self,
single_pub_model: Type[ReachCurve],
single_pub_model_kwargs: Dict,
multi_pub_model: Type[ReachSurface],
multi_pub_model_kwargs: Dict,
x: int,
):
self.name = "fake"
self.x = 1
super().__init__(
single_pub_model,
single_pub_model_kwargs,
multi_pub_model,
multi_pub_model_kwargs,
)
class ModelingStrategyDescriptorTest(absltest.TestCase):
def test_modeling_strategy_descriptor(self):
MODELING_STRATEGIES["fake"] = FakeModelingStrategy
desc = ModelingStrategyDescriptor(
"fake", {"x": 1}, "goerg", {}, "pairwise_union", {}
)
strategy = desc.instantiate_strategy()
self.assertEqual(strategy.name, "fake")
self.assertEqual(strategy.x, 1)
self.assertEqual(strategy._single_pub_model, GoergModel)
self.assertEqual(strategy._single_pub_model_kwargs, {})
self.assertEqual(strategy._multi_pub_model, PairwiseUnionReachSurface)
self.assertEqual(strategy._multi_pub_model_kwargs, {})
self.assertEqual(str(desc), "fake(x=1),goerg,pairwise_union")
if __name__ == "__main__":
absltest.main()
|
{"hexsha": "75db64f4924735fb023fae8c8232f4bfa7cb7d31", "size": 2676, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/driver/tests/modeling_strategy_descriptor_test.py", "max_stars_repo_name": "pasin30055/planning-evaluation-framework", "max_stars_repo_head_hexsha": "ba5fc3b553fee0b4f5beb50076ecaa7b634dac23", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/driver/tests/modeling_strategy_descriptor_test.py", "max_issues_repo_name": "pasin30055/planning-evaluation-framework", "max_issues_repo_head_hexsha": "ba5fc3b553fee0b4f5beb50076ecaa7b634dac23", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/driver/tests/modeling_strategy_descriptor_test.py", "max_forks_repo_name": "pasin30055/planning-evaluation-framework", "max_forks_repo_head_hexsha": "ba5fc3b553fee0b4f5beb50076ecaa7b634dac23", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3076923077, "max_line_length": 83, "alphanum_fraction": 0.7328101644, "include": true, "reason": "import numpy", "num_tokens": 563}
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
from past.utils import old_div
import uncertainties
import uncertainties.umath
from .complex import Complex
import functools
import numpy as np
from . import dispatched
dispatched.module_by_type[uncertainties.AffineScalarFunc] = [uncertainties.umath]
#mul_orig = uncertainties.AffineScalarFunc.__mul__
#
#
#def mul_wrap(self, other):
# if isinstance(other, complex):
# other = Complex(other)
# return self * other
# else:
# return mul_orig(self, other)
#
#uncertainties.AffineScalarFunc.__mul__ = mul_wrap
def fix_op(opname, lambda_syntax):
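# Wrap an AffineScalarFunc operator so that complex operands are
# promoted to the local Complex type before the operation is applied.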
op_orig = getattr(uncertainties.AffineScalarFunc, opname)
def op_wrap(self, other):
if isinstance(other, (complex, np.complex64, np.complex128)):  # np.complex (an alias of builtin complex) was removed in NumPy 1.24
other = Complex(other.real, other.imag)
return lambda_syntax(self, other)
else:
return op_orig(self, other)
functools.update_wrapper(op_wrap, op_orig)
setattr(uncertainties.AffineScalarFunc, opname, op_wrap)
return
fix_op('__mul__' , lambda s, o: s * o)
fix_op('__rmul__' , lambda s, o: o * s)
fix_op('__div__' , lambda s, o: old_div(s, o))
fix_op('__rdiv__' , lambda s, o: old_div(o, s))
fix_op('__add__' , lambda s, o: s + o)
fix_op('__radd__' , lambda s, o: o + s)
fix_op('__sub__' , lambda s, o: s - o)
fix_op('__rsub__' , lambda s, o: o - s)
uncertainties.AffineScalarFunc.conjugate = lambda self: self
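# Usage sketch (the module is imported for its side effects; values are
# illustrative):
#     import uncertainties
#     x = uncertainties.ufloat(1.0, 0.1)
#     y = x * (2 + 3j)  # promoted to Complex by the patched __mul__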
|
{"hexsha": "e3d868cbe35a3f2ade7ab853278c6825271d1349", "size": 1535, "ext": "py", "lang": "Python", "max_stars_repo_path": "phasor/math/uncert.py", "max_stars_repo_name": "mccullerlp/OpenLoop", "max_stars_repo_head_hexsha": "fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-02-28T00:43:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-21T11:39:15.000Z", "max_issues_repo_path": "phasor/math/uncert.py", "max_issues_repo_name": "mccullerlp/OpenLoop", "max_issues_repo_head_hexsha": "fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-09-07T23:15:43.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-07T23:15:43.000Z", "max_forks_repo_path": "phasor/math/uncert.py", "max_forks_repo_name": "mccullerlp/OpenLoop", "max_forks_repo_head_hexsha": "fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-21T04:42:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-21T04:42:09.000Z", "avg_line_length": 21.6197183099, "max_line_length": 81, "alphanum_fraction": 0.6872964169, "include": true, "reason": "import numpy", "num_tokens": 430}
|
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: efs
using AWS.Compat
using AWS.UUIDs
"""
create_access_point(client_token, file_system_id)
create_access_point(client_token, file_system_id, params::Dict{String,<:Any})
Creates an EFS access point. An access point is an application-specific view into an EFS
file system that applies an operating system user and group, and a file system path, to any
file system request made through the access point. The operating system user and group
override any identity information provided by the NFS client. The file system path is
exposed as the access point's root directory. Applications using the access point can only
access data in their own directory and below. To learn more, see Mounting a file system using
EFS access points. This operation requires permissions for the
elasticfilesystem:CreateAccessPoint action.
# Arguments
- `client_token`: A string of up to 64 ASCII characters that Amazon EFS uses to ensure
idempotent creation.
- `file_system_id`: The ID of the EFS file system that the access point provides access to.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"PosixUser"`: The operating system user and group applied to all file system requests
made using the access point.
- `"RootDirectory"`: Specifies the directory on the Amazon EFS file system that the access
point exposes as the root directory of your file system to NFS clients using the access
point. The clients using the access point can only access the root directory and below. If
the RootDirectory > Path specified does not exist, EFS creates it and applies the
CreationInfo settings when a client connects to an access point. When specifying a
RootDirectory, you need to provide the Path, and the CreationInfo. Amazon EFS creates a
root directory only if you have provided the CreationInfo: OwnUid, OwnGID, and permissions
for the directory. If you do not provide this information, Amazon EFS does not create the
root directory. If the root directory does not exist, attempts to mount using the access
point will fail.
- `"Tags"`: Creates tags associated with the access point. Each tag is a key-value pair.
"""
create_access_point(ClientToken, FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/access-points", Dict{String, Any}("ClientToken"=>ClientToken, "FileSystemId"=>FileSystemId); aws_config=aws_config)
create_access_point(ClientToken, FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/access-points", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ClientToken"=>ClientToken, "FileSystemId"=>FileSystemId), params)); aws_config=aws_config)
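# Usage sketch (not part of the generated service code; the file system ID
# below is hypothetical):
#     ap = create_access_point(string(uuid4()), "fs-0123456789abcdef0";
#                              aws_config=global_aws_config())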
"""
create_file_system(creation_token)
create_file_system(creation_token, params::Dict{String,<:Any})
Creates a new, empty file system. The operation requires a creation token in the request
that Amazon EFS uses to ensure idempotent creation (calling the operation with same
creation token has no effect). If a file system does not currently exist that is owned by
the caller's AWS account with the specified creation token, this operation does the
following: Creates a new, empty file system. The file system will have an Amazon EFS
assigned ID, and an initial lifecycle state creating. Returns with the description of the
created file system. Otherwise, this operation returns a FileSystemAlreadyExists error
with the ID of the existing file system. For basic use cases, you can use a randomly
generated UUID for the creation token. The idempotent operation allows you to retry a
CreateFileSystem call without risk of creating an extra file system. This can happen when
an initial call fails in a way that leaves it uncertain whether or not a file system was
actually created. An example might be that a transport level timeout occurred or your
connection was reset. As long as you use the same creation token, if the initial call had
succeeded in creating a file system, the client can learn of its existence from the
FileSystemAlreadyExists error. For more information, see Creating a file system in the
Amazon EFS User Guide. The CreateFileSystem call returns while the file system's lifecycle
state is still creating. You can check the file system creation status by calling the
DescribeFileSystems operation, which among other things returns the file system state.
This operation accepts an optional PerformanceMode parameter that you choose for your file
system. We recommend generalPurpose performance mode for most file systems. File systems
using the maxIO performance mode can scale to higher levels of aggregate throughput and
operations per second with a tradeoff of slightly higher latencies for most file
operations. The performance mode can't be changed after the file system has been created.
For more information, see Amazon EFS performance modes. You can set the throughput mode for
the file system using the ThroughputMode parameter. After the file system is fully created,
Amazon EFS sets its lifecycle state to available, at which point you can create one or more
mount targets for the file system in your VPC. For more information, see CreateMountTarget.
You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount
target. For more information, see Amazon EFS: How it Works. This operation requires
permissions for the elasticfilesystem:CreateFileSystem action.
# Arguments
- `creation_token`: A string of up to 64 ASCII characters. Amazon EFS uses this to ensure
idempotent creation.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"AvailabilityZoneName"`: Used to create a file system that uses One Zone storage
classes. It specifies the AWS Availability Zone in which to create the file system. Use the
format us-east-1a to specify the Availability Zone. For more information about One Zone
storage classes, see Using EFS storage classes in the Amazon EFS User Guide. One Zone
storage classes are not available in all Availability Zones in AWS Regions where Amazon EFS
is available.
- `"Backup"`: Specifies whether automatic backups are enabled on the file system that you
are creating. Set the value to true to enable automatic backups. If you are creating a file
system that uses One Zone storage classes, automatic backups are enabled by default. For
more information, see Automatic backups in the Amazon EFS User Guide. Default is false.
However, if you specify an AvailabilityZoneName, the default is true. AWS Backup is not
available in all AWS Regions where Amazon EFS is available.
- `"Encrypted"`: A Boolean value that, if true, creates an encrypted file system. When
creating an encrypted file system, you have the option of specifying
CreateFileSystemRequestKmsKeyId for an existing AWS Key Management Service (AWS KMS)
customer master key (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS,
/aws/elasticfilesystem, is used to protect the encrypted file system.
- `"KmsKeyId"`: The ID of the AWS KMS CMK that you want to use to protect the encrypted
file system. This parameter is only required if you want to use a non-default KMS key. If
this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in
one of the following formats: Key ID - A unique identifier of the key, for example
1234abcd-12ab-34cd-56ef-1234567890ab. ARN - An Amazon Resource Name (ARN) for the key,
for example arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
Key alias - A previously created display name for a key, for example alias/projectKey1.
Key alias ARN - An ARN for a key alias, for example
arn:aws:kms:us-west-2:444455556666:alias/projectKey1. If KmsKeyId is specified, the
CreateFileSystemRequestEncrypted parameter must be set to true. EFS accepts only symmetric
KMS keys. You cannot use asymmetric KMS keys with EFS file systems.
- `"PerformanceMode"`: The performance mode of the file system. We recommend generalPurpose
performance mode for most file systems. File systems using the maxIO performance mode can
scale to higher levels of aggregate throughput and operations per second with a tradeoff of
slightly higher latencies for most file operations. The performance mode can't be changed
after the file system has been created. The maxIO mode is not supported on file systems
using One Zone storage classes.
- `"ProvisionedThroughputInMibps"`: The throughput, measured in MiB/s, that you want to
provision for a file system that you're creating. Valid values are 1-1024. Required if
ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. To
increase this limit, contact AWS Support. For more information, see Amazon EFS quotas that
you can increase in the Amazon EFS User Guide.
- `"Tags"`: A value that specifies to create one or more tags associated with the file
system. Each tag is a user-defined key-value pair. Name your file system on creation by
including a \"Key\":\"Name\",\"Value\":\"{value}\" key-value pair.
- `"ThroughputMode"`: Specifies the throughput mode for the file system, either bursting or
provisioned. If you set ThroughputMode to provisioned, you must also set a value for
ProvisionedThroughputInMibps. After you create the file system, you can decrease your file
system's throughput in Provisioned Throughput mode or change between the throughput modes,
as long as it’s been more than 24 hours since the last decrease or throughput mode
change. For more information, see Specifying throughput with provisioned mode in the Amazon
EFS User Guide. Default is bursting.
"""
create_file_system(CreationToken; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/file-systems", Dict{String, Any}("CreationToken"=>CreationToken); aws_config=aws_config)
create_file_system(CreationToken, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/file-systems", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("CreationToken"=>CreationToken), params)); aws_config=aws_config)
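# Usage sketch (the creation token and tag values are illustrative):
#     fs = create_file_system("my-creation-token",
#         Dict("PerformanceMode" => "generalPurpose",
#              "Tags" => [Dict("Key" => "Name", "Value" => "demo-fs")]))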
"""
create_mount_target(file_system_id, subnet_id)
create_mount_target(file_system_id, subnet_id, params::Dict{String,<:Any})
Creates a mount target for a file system. You can then mount the file system on EC2
instances by using the mount target. You can create one mount target in each Availability
Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a
single mount target for a given file system. If you have multiple subnets in an
Availability Zone, you create a mount target in one of the subnets. EC2 instances do not
need to be in the same subnet as the mount target in order to access their file system. You
can create only one mount target for an EFS file system using One Zone storage classes. You
must create that mount target in the same Availability Zone in which the file system is
located. Use the AvailabilityZoneName and AvailabilityZoneId properties in the
DescribeFileSystems response object to get this information. Use the subnetId associated
with the file system's Availability Zone when creating the mount target. For more
information, see Amazon EFS: How it Works. To create a mount target for a file system, the
file system's lifecycle state must be available. For more information, see
DescribeFileSystems. In the request, provide the following: The file system ID for which
you are creating the mount target. A subnet ID, which determines the following: The VPC
in which Amazon EFS creates the mount target The Availability Zone in which Amazon EFS
creates the mount target The IP address range from which Amazon EFS selects the IP
address of the mount target (if you don't specify an IP address in the request) After
creating the mount target, Amazon EFS returns a response that includes a MountTargetId and
an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You
can also use the mount target's DNS name when mounting the file system. The EC2 instance on
which you mount the file system by using the mount target can resolve the mount target's
DNS name to its IP address. For more information, see How it Works: Implementation
Overview. Note that you can create mount targets for a file system in only one VPC, and
there can be only one mount target per Availability Zone. That is, if the file system
already has one or more mount targets created for it, the subnet specified in the request
to add another mount target must meet the following requirements: Must belong to the same
VPC as the subnets of the existing mount targets Must not be in the same Availability
Zone as any of the subnets of the existing mount targets If the request satisfies the
requirements, Amazon EFS does the following: Creates a new mount target in the specified
subnet. Also creates a new network interface in the subnet as follows: If the request
provides an IpAddress, Amazon EFS assigns that IP address to the network interface.
Otherwise, Amazon EFS assigns a free address in the subnet (in the same way that the Amazon
EC2 CreateNetworkInterface call does when a request does not specify a primary private IP
address). If the request provides SecurityGroups, this network interface is associated
with those security groups. Otherwise, it belongs to the default security group for the
subnet's VPC. Assigns the description Mount target fsmt-id for file system fs-id where
fsmt-id is the mount target ID, and fs-id is the FileSystemId. Sets the
requesterManaged property of the network interface to true, and the requesterId value to
EFS. Each Amazon EFS mount target has one corresponding requester-managed EC2 network
interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId
field in the mount target's description to the network interface ID, and the IpAddress
field to its address. If network interface creation fails, the entire CreateMountTarget
operation fails. The CreateMountTarget call returns only after creating the network
interface, but while the mount target state is still creating, you can check the mount
target creation status by calling the DescribeMountTargets operation, which among other
things returns the mount target state. We recommend that you create a mount target in each
of the Availability Zones. There are cost considerations for using a file system in an
Availability Zone through a mount target created in another Availability Zone. For more
information, see Amazon EFS. In addition, by always using a mount target local to the
instance's Availability Zone, you eliminate a partial failure scenario. If the Availability
Zone in which your mount target is created goes down, then you can't access your file
system through that mount target. This operation requires permissions for the following
action on the file system: elasticfilesystem:CreateMountTarget This operation also
requires permissions for the following Amazon EC2 actions: ec2:DescribeSubnets
ec2:DescribeNetworkInterfaces ec2:CreateNetworkInterface
# Arguments
- `file_system_id`: The ID of the file system for which to create the mount target.
- `subnet_id`: The ID of the subnet to add the mount target in. For file systems that use
One Zone storage classes, use the subnet that is associated with the file system's
Availability Zone.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"IpAddress"`: Valid IPv4 address within the address range of the specified subnet.
- `"SecurityGroups"`: Up to five VPC security group IDs, of the form sg-xxxxxxxx. These
must be for the same VPC as the subnet specified.
"""
create_mount_target(FileSystemId, SubnetId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/mount-targets", Dict{String, Any}("FileSystemId"=>FileSystemId, "SubnetId"=>SubnetId); aws_config=aws_config)
create_mount_target(FileSystemId, SubnetId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/mount-targets", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("FileSystemId"=>FileSystemId, "SubnetId"=>SubnetId), params)); aws_config=aws_config)
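# Usage sketch (all IDs are hypothetical; SecurityGroups is optional):
#     mt = create_mount_target("fs-0123456789abcdef0", "subnet-0abc1234",
#         Dict("SecurityGroups" => ["sg-0123456789abcdef0"]))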
"""
create_tags(file_system_id, tags)
create_tags(file_system_id, tags, params::Dict{String,<:Any})
DEPRECATED - CreateTags is deprecated and not maintained. Please use the TagResource API
action to create tags for EFS resources. Creates or overwrites tags associated with a file system.
Each tag is a key-value pair. If a tag key specified in the request already exists on the
file system, this operation overwrites its value with the value provided in the request. If
you add the Name tag to your file system, Amazon EFS returns it in the response to the
DescribeFileSystems operation. This operation requires permission for the
elasticfilesystem:CreateTags action.
# Arguments
- `file_system_id`: The ID of the file system whose tags you want to modify (String). This
operation modifies the tags only, not the file system.
- `tags`: An array of Tag objects to add. Each Tag object is a key-value pair.
"""
create_tags(FileSystemId, Tags; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/create-tags/$(FileSystemId)", Dict{String, Any}("Tags"=>Tags); aws_config=aws_config)
create_tags(FileSystemId, Tags, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/create-tags/$(FileSystemId)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Tags"=>Tags), params)); aws_config=aws_config)
"""
delete_access_point(access_point_id)
delete_access_point(access_point_id, params::Dict{String,<:Any})
Deletes the specified access point. After deletion is complete, new clients can no longer
connect to the access point. Clients connected to the access point at the time of deletion
will continue to function until they terminate their connection. This operation requires
permissions for the elasticfilesystem:DeleteAccessPoint action.
# Arguments
- `access_point_id`: The ID of the access point that you want to delete.
"""
delete_access_point(AccessPointId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/access-points/$(AccessPointId)"; aws_config=aws_config)
delete_access_point(AccessPointId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/access-points/$(AccessPointId)", params; aws_config=aws_config)
"""
delete_file_system(file_system_id)
delete_file_system(file_system_id, params::Dict{String,<:Any})
Deletes a file system, permanently severing access to its contents. Upon return, the file
system no longer exists and you can't access any contents of the deleted file system. You
can't delete a file system that is in use. That is, if the file system has any mount
targets, you must first delete them. For more information, see DescribeMountTargets and
DeleteMountTarget. The DeleteFileSystem call returns while the file system state is still
deleting. You can check the file system deletion status by calling the DescribeFileSystems
operation, which returns a list of file systems in your account. If you pass the file
system ID or creation token for the deleted file system, DescribeFileSystems returns a 404
FileSystemNotFound error. This operation requires permissions for the
elasticfilesystem:DeleteFileSystem action.
# Arguments
- `file_system_id`: The ID of the file system you want to delete.
"""
delete_file_system(FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/file-systems/$(FileSystemId)"; aws_config=aws_config)
delete_file_system(FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/file-systems/$(FileSystemId)", params; aws_config=aws_config)
"""
delete_file_system_policy(file_system_id)
delete_file_system_policy(file_system_id, params::Dict{String,<:Any})
Deletes the FileSystemPolicy for the specified file system. The default FileSystemPolicy
goes into effect once the existing policy is deleted. For more information about the
default file system policy, see Using Resource-based Policies with EFS. This operation
requires permissions for the elasticfilesystem:DeleteFileSystemPolicy action.
# Arguments
- `file_system_id`: Specifies the EFS file system for which to delete the FileSystemPolicy.
"""
delete_file_system_policy(FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/file-systems/$(FileSystemId)/policy"; aws_config=aws_config)
delete_file_system_policy(FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/file-systems/$(FileSystemId)/policy", params; aws_config=aws_config)
"""
delete_mount_target(mount_target_id)
delete_mount_target(mount_target_id, params::Dict{String,<:Any})
Deletes the specified mount target. This operation forcibly breaks any mounts of the file
system by using the mount target that is being deleted, which might disrupt instances or
applications using those mounts. To avoid applications getting cut off abruptly, you might
consider unmounting any mounts of the mount target, if feasible. The operation also deletes
the associated network interface. Uncommitted writes might be lost, but breaking a mount
target using this operation does not corrupt the file system itself. The file system you
created remains, and you can mount it on EC2 instances in your VPC by using another mount target.
This operation requires permissions for the following action on the file system:
elasticfilesystem:DeleteMountTarget The DeleteMountTarget call returns while the mount
target state is still deleting. You can check the mount target deletion by calling the
DescribeMountTargets operation, which returns a list of mount target descriptions for the
given file system. The operation also requires permissions for the following Amazon EC2
action on the mount target's network interface: ec2:DeleteNetworkInterface
# Arguments
- `mount_target_id`: The ID of the mount target to delete (String).
"""
delete_mount_target(MountTargetId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/mount-targets/$(MountTargetId)"; aws_config=aws_config)
delete_mount_target(MountTargetId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/mount-targets/$(MountTargetId)", params; aws_config=aws_config)
"""
delete_tags(file_system_id, tag_keys)
delete_tags(file_system_id, tag_keys, params::Dict{String,<:Any})
DEPRECATED - DeleteTags is deprecated and not maintained. Please use the UntagResource API
action to remove tags from EFS resources. Deletes the specified tags from a file system. If the
DeleteTags request includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't
cause an error. For more information about tags and related restrictions, see Tag
Restrictions in the AWS Billing and Cost Management User Guide. This operation requires
permissions for the elasticfilesystem:DeleteTags action.
# Arguments
- `file_system_id`: The ID of the file system whose tags you want to delete (String).
- `tag_keys`: A list of tag keys to delete.
"""
delete_tags(FileSystemId, TagKeys; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/delete-tags/$(FileSystemId)", Dict{String, Any}("TagKeys"=>TagKeys); aws_config=aws_config)
delete_tags(FileSystemId, TagKeys, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/delete-tags/$(FileSystemId)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("TagKeys"=>TagKeys), params)); aws_config=aws_config)
"""
describe_access_points()
describe_access_points(params::Dict{String,<:Any})
Returns the description of a specific Amazon EFS access point if the AccessPointId is
provided. If you provide an EFS FileSystemId, it returns descriptions of all access points
for that file system. You can provide either an AccessPointId or a FileSystemId in the
request, but not both. This operation requires permissions for the
elasticfilesystem:DescribeAccessPoints action.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"AccessPointId"`: (Optional) Specifies an EFS access point to describe in the response;
mutually exclusive with FileSystemId.
- `"FileSystemId"`: (Optional) If you provide a FileSystemId, EFS returns all access points
for that file system; mutually exclusive with AccessPointId.
- `"MaxResults"`: (Optional) When retrieving all access points for a file system, you can
optionally specify the MaxResults parameter to limit the number of objects returned in a
response. The default value is 100.
- `"NextToken"`: NextToken is present if the response is paginated. You can use NextMarker
in the subsequent request to fetch the next page of access point descriptions.
"""
describe_access_points(; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/access-points"; aws_config=aws_config)
describe_access_points(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/access-points", params; aws_config=aws_config)
"""
describe_account_preferences()
describe_account_preferences(params::Dict{String,<:Any})
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"MaxResults"`:
- `"NextToken"`:
"""
describe_account_preferences(; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/account-preferences"; aws_config=aws_config)
describe_account_preferences(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/account-preferences", params; aws_config=aws_config)
"""
describe_backup_policy(file_system_id)
describe_backup_policy(file_system_id, params::Dict{String,<:Any})
Returns the backup policy for the specified EFS file system.
# Arguments
- `file_system_id`: Specifies which EFS file system to retrieve the BackupPolicy for.
"""
describe_backup_policy(FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/file-systems/$(FileSystemId)/backup-policy"; aws_config=aws_config)
describe_backup_policy(FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/file-systems/$(FileSystemId)/backup-policy", params; aws_config=aws_config)
"""
describe_file_system_policy(file_system_id)
describe_file_system_policy(file_system_id, params::Dict{String,<:Any})
Returns the FileSystemPolicy for the specified EFS file system. This operation requires
permissions for the elasticfilesystem:DescribeFileSystemPolicy action.
# Arguments
- `file_system_id`: Specifies which EFS file system to retrieve the FileSystemPolicy for.
"""
describe_file_system_policy(FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/file-systems/$(FileSystemId)/policy"; aws_config=aws_config)
describe_file_system_policy(FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/file-systems/$(FileSystemId)/policy", params; aws_config=aws_config)
"""
describe_file_systems()
describe_file_systems(params::Dict{String,<:Any})
Returns the description of a specific Amazon EFS file system if either the file system
CreationToken or the FileSystemId is provided. Otherwise, it returns descriptions of all
file systems owned by the caller's AWS account in the AWS Region of the endpoint that
you're calling. When retrieving all file system descriptions, you can optionally specify
the MaxItems parameter to limit the number of descriptions in a response. Currently, this
number is automatically set to 10. If more file system descriptions remain, Amazon EFS
returns a NextMarker, an opaque token, in the response. In this case, you should send a
subsequent request with the Marker request parameter set to the value of NextMarker. To
retrieve a list of your file system descriptions, this operation is used in an iterative
process, where DescribeFileSystems is called first without the Marker and then the
operation continues to call it with the Marker parameter set to the value of the NextMarker
from the previous response until the response has no NextMarker. The order of file
systems returned in the response of one DescribeFileSystems call and the order of file
systems returned across the responses of a multi-call iteration is unspecified. This
operation requires permissions for the elasticfilesystem:DescribeFileSystems action.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"CreationToken"`: (Optional) Restricts the list to the file system with this creation
token (String). You specify a creation token when you create an Amazon EFS file system.
- `"FileSystemId"`: (Optional) ID of the file system whose description you want to retrieve
(String).
- `"Marker"`: (Optional) Opaque pagination token returned from a previous
DescribeFileSystems operation (String). If present, specifies to continue the list from
where the returning call had left off.
- `"MaxItems"`: (Optional) Specifies the maximum number of file systems to return in the
response (integer). This number is automatically set to 100. The response is paginated at
100 per page if you have more than 100 file systems.
"""
describe_file_systems(; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/file-systems"; aws_config=aws_config)
describe_file_systems(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/file-systems", params; aws_config=aws_config)
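# Pagination sketch (illustrative): iterate with Marker/NextMarker as
# described above until no NextMarker is returned.
#     resp = describe_file_systems(Dict("MaxItems" => 10))
#     while haskey(resp, "NextMarker")
#         resp = describe_file_systems(
#             Dict("MaxItems" => 10, "Marker" => resp["NextMarker"]))
#     end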
"""
describe_lifecycle_configuration(file_system_id)
describe_lifecycle_configuration(file_system_id, params::Dict{String,<:Any})
Returns the current LifecycleConfiguration object for the specified Amazon EFS file system.
EFS lifecycle management uses the LifecycleConfiguration object to identify which files to
move to the EFS Infrequent Access (IA) storage class. For a file system without a
LifecycleConfiguration object, the call returns an empty array in the response. This
operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration
operation.
# Arguments
- `file_system_id`: The ID of the file system whose LifecycleConfiguration object you want
to retrieve (String).
"""
describe_lifecycle_configuration(FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/file-systems/$(FileSystemId)/lifecycle-configuration"; aws_config=aws_config)
describe_lifecycle_configuration(FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/file-systems/$(FileSystemId)/lifecycle-configuration", params; aws_config=aws_config)
"""
describe_mount_target_security_groups(mount_target_id)
describe_mount_target_security_groups(mount_target_id, params::Dict{String,<:Any})
Returns the security groups currently in effect for a mount target. This operation requires
that the network interface of the mount target has been created and the lifecycle state of
the mount target is not deleted. This operation requires permissions for the following
actions: elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount
target's file system. ec2:DescribeNetworkInterfaceAttribute action on the mount
target's network interface.
# Arguments
- `mount_target_id`: The ID of the mount target whose security groups you want to retrieve.
"""
describe_mount_target_security_groups(MountTargetId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/mount-targets/$(MountTargetId)/security-groups"; aws_config=aws_config)
describe_mount_target_security_groups(MountTargetId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/mount-targets/$(MountTargetId)/security-groups", params; aws_config=aws_config)
"""
describe_mount_targets()
describe_mount_targets(params::Dict{String,<:Any})
Returns the descriptions of all the current mount targets, or a specific mount target, for
a file system. When requesting all of the current mount targets, the order of mount targets
returned in the response is unspecified. This operation requires permissions for the
elasticfilesystem:DescribeMountTargets action, on either the file system ID that you
specify in FileSystemId, or on the file system of the mount target that you specify in
MountTargetId.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"AccessPointId"`: (Optional) The ID of the access point whose mount targets that you
want to list. It must be included in your request if a FileSystemId or MountTargetId is not
included in your request. Accepts either an access point ID or ARN as input.
- `"FileSystemId"`: (Optional) ID of the file system whose mount targets you want to list
(String). It must be included in your request if an AccessPointId or MountTargetId is not
included. Accepts either a file system ID or ARN as input.
- `"Marker"`: (Optional) Opaque pagination token returned from a previous
DescribeMountTargets operation (String). If present, it specifies to continue the list from
where the previous returning call left off.
- `"MaxItems"`: (Optional) Maximum number of mount targets to return in the response.
Currently, this number is automatically set to 10, and other values are ignored. The
response is paginated at 100 per page if you have more than 100 mount targets.
- `"MountTargetId"`: (Optional) ID of the mount target that you want to have described
(String). It must be included in your request if FileSystemId is not included. Accepts
either a mount target ID or ARN as input.
"""
describe_mount_targets(; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/mount-targets"; aws_config=aws_config)
describe_mount_targets(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/mount-targets", params; aws_config=aws_config)
"""
describe_tags(file_system_id)
describe_tags(file_system_id, params::Dict{String,<:Any})
DEPRECATED - The DescribeTags action is deprecated and not maintained. Please use the
ListTagsForResource API action to view tags for EFS resources. Returns the tags associated with a file system.
The order of tags returned in the response of one DescribeTags call and the order of tags
returned across the responses of a multiple-call iteration (when using pagination) is
unspecified. This operation requires permissions for the elasticfilesystem:DescribeTags
action.
# Arguments
- `file_system_id`: The ID of the file system whose tag set you want to retrieve.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"Marker"`: (Optional) An opaque pagination token returned from a previous DescribeTags
operation (String). If present, it specifies to continue the list from where the previous
call left off.
- `"MaxItems"`: (Optional) The maximum number of file system tags to return in the
response. Currently, this number is automatically set to 100, and other values are ignored.
The response is paginated at 100 per page if you have more than 100 tags.
"""
describe_tags(FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/tags/$(FileSystemId)/"; aws_config=aws_config)
describe_tags(FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/tags/$(FileSystemId)/", params; aws_config=aws_config)
"""
list_tags_for_resource(resource_id)
list_tags_for_resource(resource_id, params::Dict{String,<:Any})
Lists all tags for a top-level EFS resource. You must provide the ID of the resource that
you want to retrieve the tags for. This operation requires permissions for the
elasticfilesystem:DescribeAccessPoints action.
# Arguments
- `resource_id`: Specifies the EFS resource you want to retrieve tags for. You can retrieve
tags for EFS file systems and access points using this API endpoint.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"MaxResults"`: (Optional) Specifies the maximum number of tag objects to return in the
response. The default value is 100.
- `"NextToken"`: (Optional) You can use NextToken in a subsequent request to fetch the next
page of access point descriptions if the response payload was paginated.
"""
list_tags_for_resource(ResourceId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/resource-tags/$(ResourceId)"; aws_config=aws_config)
list_tags_for_resource(ResourceId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("GET", "/2015-02-01/resource-tags/$(ResourceId)", params; aws_config=aws_config)
"""
modify_mount_target_security_groups(mount_target_id)
modify_mount_target_security_groups(mount_target_id, params::Dict{String,<:Any})
Modifies the set of security groups in effect for a mount target. When you create a mount
target, Amazon EFS also creates a new network interface. For more information, see
CreateMountTarget. This operation replaces the security groups in effect for the network
interface associated with a mount target, with the SecurityGroups provided in the request.
This operation requires that the network interface of the mount target has been created and
the lifecycle state of the mount target is not deleted. The operation requires permissions
for the following actions: elasticfilesystem:ModifyMountTargetSecurityGroups action on
the mount target's file system. ec2:ModifyNetworkInterfaceAttribute action on the mount
target's network interface.
# Arguments
- `mount_target_id`: The ID of the mount target whose security groups you want to modify.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"SecurityGroups"`: An array of up to five VPC security group IDs.
"""
modify_mount_target_security_groups(MountTargetId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/mount-targets/$(MountTargetId)/security-groups"; aws_config=aws_config)
modify_mount_target_security_groups(MountTargetId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/mount-targets/$(MountTargetId)/security-groups", params; aws_config=aws_config)
"""
put_account_preferences(resource_id_type)
put_account_preferences(resource_id_type, params::Dict{String,<:Any})
# Arguments
- `resource_id_type`:
"""
put_account_preferences(ResourceIdType; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/account-preferences", Dict{String, Any}("ResourceIdType"=>ResourceIdType); aws_config=aws_config)
put_account_preferences(ResourceIdType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/account-preferences", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("ResourceIdType"=>ResourceIdType), params)); aws_config=aws_config)
"""
put_backup_policy(backup_policy, file_system_id)
put_backup_policy(backup_policy, file_system_id, params::Dict{String,<:Any})
Updates the file system's backup policy. Use this action to start or stop automatic backups
of the file system.
# Arguments
- `backup_policy`: The backup policy included in the PutBackupPolicy request.
- `file_system_id`: Specifies which EFS file system to update the backup policy for.
"""
put_backup_policy(BackupPolicy, FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/file-systems/$(FileSystemId)/backup-policy", Dict{String, Any}("BackupPolicy"=>BackupPolicy); aws_config=aws_config)
put_backup_policy(BackupPolicy, FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/file-systems/$(FileSystemId)/backup-policy", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("BackupPolicy"=>BackupPolicy), params)); aws_config=aws_config)
"""
put_file_system_policy(file_system_id, policy)
put_file_system_policy(file_system_id, policy, params::Dict{String,<:Any})
Applies an Amazon EFS FileSystemPolicy to an Amazon EFS file system. A file system policy
is an IAM resource-based policy and can contain multiple policy statements. A file system
always has exactly one file system policy, which can be the default policy or an explicit
policy set or updated using this API operation. EFS file system policies have a 20,000
character limit. When an explicit policy is set, it overrides the default policy. For more
information about the default file system policy, see Default EFS File System Policy. This
operation requires permissions for the elasticfilesystem:PutFileSystemPolicy action.
# Arguments
- `file_system_id`: The ID of the EFS file system that you want to create or update the
FileSystemPolicy for.
- `policy`: The FileSystemPolicy that you're creating. Accepts a JSON formatted policy
definition. EFS file system policies have a 20,000 character limit. To find out more about
the elements that make up a file system policy, see EFS Resource-based Policies.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"BypassPolicyLockoutSafetyCheck"`: (Optional) A flag to indicate whether to bypass the
FileSystemPolicy lockout safety check. The policy lockout safety check determines whether
the policy in the request will lock the principal making the request out of future
PutFileSystemPolicy requests on the file system. Set
BypassPolicyLockoutSafetyCheck to True only when you intend to prevent the principal that
is making the request from making a subsequent PutFileSystemPolicy request on the file
system. The default value is False.
"""
put_file_system_policy(FileSystemId, Policy; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/file-systems/$(FileSystemId)/policy", Dict{String, Any}("Policy"=>Policy); aws_config=aws_config)
put_file_system_policy(FileSystemId, Policy, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/file-systems/$(FileSystemId)/policy", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Policy"=>Policy), params)); aws_config=aws_config)
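# Usage sketch (hypothetical file system ID; the policy JSON is a minimal
# illustrative document, not a recommended policy):
#     policy = """{"Version": "2012-10-17", "Statement": [{"Effect": "Allow",
#         "Principal": {"AWS": "*"}, "Action": "elasticfilesystem:ClientMount",
#         "Resource": "*"}]}"""
#     put_file_system_policy("fs-0123456789abcdef0", policy)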
"""
put_lifecycle_configuration(file_system_id, lifecycle_policies)
put_lifecycle_configuration(file_system_id, lifecycle_policies, params::Dict{String,<:Any})
Enables lifecycle management by creating a new LifecycleConfiguration object. A
LifecycleConfiguration object defines when files in an Amazon EFS file system are
automatically transitioned to the lower-cost EFS Infrequent Access (IA) storage class. A
LifecycleConfiguration applies to all files in a file system, and each file system supports
only one lifecycle configuration. If a
LifecycleConfiguration object already exists for the specified file system, a
PutLifecycleConfiguration call modifies the existing configuration. A
PutLifecycleConfiguration call with an empty LifecyclePolicies array in the request body
deletes any existing LifecycleConfiguration and disables lifecycle management. In the
request, specify the following: The ID for the file system for which you are enabling,
disabling, or modifying lifecycle management. A LifecyclePolicies array of
LifecyclePolicy objects that define when files are moved to the IA storage class. The array
can contain only one LifecyclePolicy item. This operation requires permissions for the
elasticfilesystem:PutLifecycleConfiguration operation. To apply a LifecycleConfiguration
object to an encrypted file system, you need the same AWS Key Management Service (AWS KMS)
permissions as when you created the encrypted file system.
# Arguments
- `file_system_id`: The ID of the file system for which you are creating the
LifecycleConfiguration object (String).
- `lifecycle_policies`: An array of LifecyclePolicy objects that define the file system's
LifecycleConfiguration object. A LifecycleConfiguration object tells lifecycle management
when to transition files from the Standard storage class to the Infrequent Access storage
class.
"""
put_lifecycle_configuration(FileSystemId, LifecyclePolicies; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/file-systems/$(FileSystemId)/lifecycle-configuration", Dict{String, Any}("LifecyclePolicies"=>LifecyclePolicies); aws_config=aws_config)
put_lifecycle_configuration(FileSystemId, LifecyclePolicies, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/file-systems/$(FileSystemId)/lifecycle-configuration", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("LifecyclePolicies"=>LifecyclePolicies), params)); aws_config=aws_config)
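# Usage sketch (hypothetical ID): transition files to the IA storage class
# after 30 days without access, using the LifecyclePolicy shape described above.
#     put_lifecycle_configuration("fs-0123456789abcdef0",
#         [Dict("TransitionToIA" => "AFTER_30_DAYS")])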
"""
tag_resource(resource_id, tags)
tag_resource(resource_id, tags, params::Dict{String,<:Any})
Creates a tag for an EFS resource. You can create tags for EFS file systems and access
points using this API operation. This operation requires permissions for the
elasticfilesystem:TagResource action.
# Arguments
- `resource_id`: The ID specifying the EFS resource that you want to create a tag for.
- `tags`: An array of Tag objects to add. Each Tag object is a key-value pair.
"""
tag_resource(ResourceId, Tags; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/resource-tags/$(ResourceId)", Dict{String, Any}("Tags"=>Tags); aws_config=aws_config)
tag_resource(ResourceId, Tags, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("POST", "/2015-02-01/resource-tags/$(ResourceId)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("Tags"=>Tags), params)); aws_config=aws_config)
"""
untag_resource(resource_id, tag_keys)
untag_resource(resource_id, tag_keys, params::Dict{String,<:Any})
Removes tags from an EFS resource. You can remove tags from EFS file systems and access
points using this API operation. This operation requires permissions for the
elasticfilesystem:UntagResource action.
# Arguments
- `resource_id`: Specifies the EFS resource that you want to remove tags from.
- `tag_keys`: The keys of the key-value tag pairs that you want to remove from the
specified EFS resource.
"""
untag_resource(ResourceId, tagKeys; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/resource-tags/$(ResourceId)", Dict{String, Any}("tagKeys"=>tagKeys); aws_config=aws_config)
untag_resource(ResourceId, tagKeys, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("DELETE", "/2015-02-01/resource-tags/$(ResourceId)", Dict{String, Any}(mergewith(_merge, Dict{String, Any}("tagKeys"=>tagKeys), params)); aws_config=aws_config)
"""
update_file_system(file_system_id)
update_file_system(file_system_id, params::Dict{String,<:Any})
Updates the throughput mode or the amount of provisioned throughput of an existing file
system.
# Arguments
- `file_system_id`: The ID of the file system that you want to update.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"ProvisionedThroughputInMibps"`: (Optional) Sets the amount of provisioned throughput,
in MiB/s, for the file system. Valid values are 1-1024. If you are changing the throughput
mode to provisioned, you must also provide the amount of provisioned throughput. Required
if ThroughputMode is changed to provisioned on update.
- `"ThroughputMode"`: (Optional) Updates the file system's throughput mode. If you're not
updating your throughput mode, you don't need to provide this value in your request. If you
are changing the ThroughputMode to provisioned, you must also set a value for
ProvisionedThroughputInMibps.
"""
update_file_system(FileSystemId; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/file-systems/$(FileSystemId)"; aws_config=aws_config)
update_file_system(FileSystemId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()) = efs("PUT", "/2015-02-01/file-systems/$(FileSystemId)", params; aws_config=aws_config)
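# Usage sketch (hypothetical ID): switch to provisioned throughput, which
# requires ProvisionedThroughputInMibps as noted above.
#     update_file_system("fs-0123456789abcdef0",
#         Dict("ThroughputMode" => "provisioned",
#              "ProvisionedThroughputInMibps" => 64))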
|
{"hexsha": "b2b686619e0761efa2641803a6da09a8c6753078", "size": 49347, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/services/efs.jl", "max_stars_repo_name": "wytbella/AWS.jl", "max_stars_repo_head_hexsha": "786e82ccd4d91ff0c9f51d7635045febb64d02f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-17T17:36:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-17T17:36:23.000Z", "max_issues_repo_path": "src/services/efs.jl", "max_issues_repo_name": "mattBrzezinski/AWS.jl", "max_issues_repo_head_hexsha": "eab2948025e09a42af88d06a8ddf32e9dcd186a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/services/efs.jl", "max_forks_repo_name": "mattBrzezinski/AWS.jl", "max_forks_repo_head_hexsha": "eab2948025e09a42af88d06a8ddf32e9dcd186a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 67.9710743802, "max_line_length": 350, "alphanum_fraction": 0.7942732081, "num_tokens": 11399}
|
import json
import re
import string
import pandas as pd
import numpy as np
from .console import console, print_paper
def decode_line(line):
paper = json.loads(line)
return paper
def preprocess_title(text):
text = text.lower().replace("-", " ").replace("\n", "").replace(" ", " ").strip()
return text
def preprocess_abstract(text):
text = text.lower().replace("-", " ").replace("\n", " ").strip()
text = re.sub(r"[{}]".format(string.punctuation), " ", text)
return text
def line_iterator(file):
"""
Iterate over the lines of a category's metadata file, yielding one raw line at a time.
"""
with open(file, "r") as f:
for line in f:
yield line
def paper_iterator(file):
for line in line_iterator(file):
paper = decode_line(line)
if (
"copyright infringement" in paper["abstract"]
or "this article was withdrawn" in paper["abstract"]
):
continue
yield paper
def paper_to_dict(paper):
paper = {
"id": str(paper["id"]),
"title": paper["title"],
"abstract": paper["abstract"],
"authors": paper["authors_parsed"],
"update_date": paper["update_date"],
}
return paper
def query_papers_from_keywords(kws, file):
collected_papers = []
metadata = paper_iterator(file)
for ind, paper in enumerate(metadata):
if any(kw in preprocess_title(paper["title"]) for kw in kws) or any(
kw in preprocess_abstract(paper["abstract"]) for kw in kws
):
paper = paper_to_dict(paper)
collected_papers.append(paper)
df = pd.DataFrame(data=collected_papers)
console.print(f"Found {len(df)} papers with keywords.", style="info")
return df
def dataframe_from_file(file):
collected_papers = []
metadata = paper_iterator(file)
for ind, paper in enumerate(metadata):
paper = paper_to_dict(paper)
collected_papers.append(paper)
df = pd.DataFrame(data=collected_papers)
return df
def build_corpus(file):
# create the corpus of abstract
corpus = []
metadata = paper_iterator(file)
for ind, paper in enumerate(metadata):
corpus.append(paper["abstract"])
return corpus
class TextQuery:
def __init__(self, df) -> None:
self.df = df
def __getitem__(self, item):
row = self.df.iloc[item].to_dict("records")[0]
print_paper(row)
user_input = None
while not (user_input in ["y", "n", "quit"]):
user_input = console.input("[red]Select the paper?[/] (y/n/quit): ")
if user_input == "quit":
raise KeyboardInterrupt
val = 1 if user_input == "y" else 0
return np.array([val])
def get_paper(self, item):
return self.df.iloc[item].to_dict("records")[0]
def drop(self, idx):
self.df = self.df.drop(idx, axis=0)
def build_training_set(df_selected, file, vectorizer=None):
# collect possible candidates (i.e., papers that are not selected yet)
collected_papers = []
metadata = paper_iterator(file)
for ind, paper in enumerate(metadata):
if not (paper["id"] in df_selected["id"].values):
paper = paper_to_dict(paper)
collected_papers.append(paper)
df_pool = pd.DataFrame(data=collected_papers)
X_training = df_selected["abstract"].values
y_training = df_selected["selected"].values.reshape(-1)
X = df_pool["abstract"].values
y = TextQuery(df_pool)
if vectorizer:
X_training = vectorizer.transform(X_training)
X = vectorizer.transform(X)
return X_training, y_training, X, y, df_pool
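# Usage sketch (the metadata path, keywords and labels are hypothetical):
#     df = query_papers_from_keywords(["reach curve"], "arxiv-metadata.jsonl")
#     df["selected"] = 1  # mark the keyword hits as positive examples
#     X_tr, y_tr, X, y, pool = build_training_set(df, "arxiv-metadata.jsonl")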
|
{"hexsha": "e25ac435c49a8055673aef726944ce891638277d", "size": 3743, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ALreview/text.py", "max_stars_repo_name": "Deathn0t/ALReview", "max_stars_repo_head_hexsha": "816e9b04cde04816ae38a23271dd8e5cabf93f08", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ALreview/text.py", "max_issues_repo_name": "Deathn0t/ALReview", "max_issues_repo_head_hexsha": "816e9b04cde04816ae38a23271dd8e5cabf93f08", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ALreview/text.py", "max_forks_repo_name": "Deathn0t/ALReview", "max_forks_repo_head_hexsha": "816e9b04cde04816ae38a23271dd8e5cabf93f08", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4640522876, "max_line_length": 86, "alphanum_fraction": 0.6104728827, "include": true, "reason": "import numpy", "num_tokens": 861}
|
"""Functions for generating synthetic networks.
2021, Xavier R. Hoffmann <xrhoffmann@gmail.com>
"""
import copy
import random
from typing import List, Sequence, Tuple, Dict
from scipy import special as sp_special # type: ignore
def configuration_model(
*, degrees: Sequence[int], max_trials: int = 10, max_fails: int = 1000
) -> List[Tuple[int, int]]:
"""Configuration model from degree list.
Generates undirected simple graph: no self-loops nor multiedges.
Returns empty list if not feasible.
Args:
degrees: Degree list.
max_trials: Max number of trials with this degree sequence.
max_fails: Max number of fails (not added pair) in a trial.
Returns:
adjacency: Adjacency list with tuples of pairs (n1, n2), with
n1 < n2.
Raises:
ValueError: If the sum of degrees is uneven.
"""
# check if sum of stubs is even
if sum(degrees) % 2 != 0:
err = f"Sum of degrees ({sum(degrees)}) must be even."
raise ValueError(err)
# backup stubs and edges
stubs_bu = []
edges_bu: Dict[int, List[int]] = {}
for i, el in enumerate(degrees):
aux = [i] * el
stubs_bu += aux[:]
edges_bu[i] = []
trials = 0
while trials < max_trials:
stubs = copy.copy(stubs_bu)
edges = copy.deepcopy(edges_bu)
fails = 0
while stubs:
n1 = random.choice(stubs)
aux = stubs[:]
aux.remove(n1)
n2 = random.choice(aux)
if n1 != n2 and n2 not in edges[n1]:
edges[n1].append(n2)
edges[n2].append(n1)
stubs.remove(n1)
stubs.remove(n2)
else:
fails += 1
if fails > max_fails:
trials += 1
break
        if not stubs:  # only if every stub was paired (no max_fails break)
            adjacency = [(i, j) for i in edges for j in edges[i] if i < j]
            return adjacency
return []
def sample_powerlaw_with_natural_cutoff(
*, gamma: float, nodes: int, k_min: int = 2
) -> List[int]:
"""Sample degrees from a powerlaw with natural cutoff.
Args:
gamma: Powerlaw exponent (larger than 2).
nodes: Total number of nodes.
k_min: Minimum degree.
Returns:
degrees: Degree sequence.
Raises:
        ValueError: If the exponent is smaller than or equal to 2.
ValueError: If k_min is smaller than 1.
"""
if gamma <= 2:
err = f"Exponent ({gamma}) should be larger than 2."
raise ValueError(err)
    if k_min < 1:
        err = f"Minimum degree ({k_min}) should be larger than 0."
        raise ValueError(err)
k0 = k_min
x0 = float(k0)
# compute natural cut-off
k_cut = int(x0 * nodes ** (1.0 / (gamma - 1.0)))
# compute normalization constant
norm_discrete = sp_special.zeta(gamma, k0) - sp_special.zeta(gamma, k_cut + 1)
norm_continuous = (gamma - 1.0) * x0 ** (gamma - 1.0)
def discrete(k):
return k ** (-gamma) / norm_discrete
def continuous(x):
return norm_continuous * x ** (-gamma)
degrees = []
count = 0
coef = discrete(k0) / continuous(x0 + 1)
while count < nodes:
u = random.random()
x = x0 * u ** (1.0 / (1.0 - gamma))
k = int(x)
if k <= k_cut and random.random() * coef * continuous(x) <= discrete(k):
degrees.append(k)
count += 1
return degrees
def degree_random_regular_network(*, nodes, k, **kwargs) -> List[Tuple[int, int]]:
"""Generate adjacency list for random degree-regular network.
Generates undirected simple graph: no self-loops nor multiedges.
Returns empty list if not feasible.
Args:
nodes: Number of nodes.
k: Fixed degree.
**kwargs: Keyword arguments for function configuration_model.
Returns:
Adjacency list.
"""
degrees = [k] * nodes
return configuration_model(degrees=degrees, **kwargs)
def scale_free_network(
*, nodes, gamma, k_min, max_random: int = 10, **kwargs
) -> List[Tuple[int, int]]:
"""Generate adjacency list for scale-free network.
Generates undirected simple graph: no self-loops nor multiedges.
Returns empty list if not feasible.
Args:
nodes: Number of nodes.
gamma: Powerlaw exponent.
k_min: Minimum degree.
max_random: Maximum randomizations of degree sequence.
**kwargs: Keyword arguments for function configuration_model.
Returns:
Adjacency list.
"""
randomization = 0
while randomization < max_random:
degrees = sample_powerlaw_with_natural_cutoff(
gamma=gamma, nodes=nodes, k_min=k_min
)
adjacency = configuration_model(degrees=degrees, **kwargs)
if adjacency:
return adjacency
else:
randomization += 1
return []
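# Illustrative usage sketch (an addition for this write-up, not part of the
# original module); the seed and parameter values are arbitrary assumptions.
def _example_generation():
    random.seed(42)  # reproducibility for the sketch only
    # a 100-node random 4-regular graph (sum of degrees is even)
    regular = degree_random_regular_network(nodes=100, k=4)
    # a 100-node scale-free graph with exponent 2.5 and minimum degree 2
    scale_free = scale_free_network(nodes=100, gamma=2.5, k_min=2)
    return regular, scale_free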
|
{"hexsha": "78ab557091b34e6c5018cb1ed0027b8b20e76c22", "size": 4858, "ext": "py", "lang": "Python", "max_stars_repo_path": "networks/generation.py", "max_stars_repo_name": "xhoffmann/networks", "max_stars_repo_head_hexsha": "7fc699925af4053544c82f8c528a0b750d3336f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "networks/generation.py", "max_issues_repo_name": "xhoffmann/networks", "max_issues_repo_head_hexsha": "7fc699925af4053544c82f8c528a0b750d3336f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "networks/generation.py", "max_forks_repo_name": "xhoffmann/networks", "max_forks_repo_head_hexsha": "7fc699925af4053544c82f8c528a0b750d3336f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7455621302, "max_line_length": 82, "alphanum_fraction": 0.5926307122, "include": true, "reason": "from scipy", "num_tokens": 1216}
|
module RegistryUtils
using Base: thispatch, thisminor, nextpatch, nextminor
import Base: convert
import LibGit2
import UUIDs
import LinearAlgebra: checksquare
import Pkg
using Pkg.Operations
using Pkg.Types
using Pkg.Types: uuid_package, uuid_registry, uuid5, VersionSpec, VersionRange, VersionBound
import Pkg: TOML
import Pkg.Pkg2.Reqs: Reqs, Requirement
import Pkg.Pkg2.Pkg2Types: VersionInterval, VersionSet
export
generate!
include("loadmeta.jl")
include("loadregistry.jl")
include("utils.jl")
include("gitmeta.jl")
include("genstdlib.jl")
include("generate.jl")
include("update.jl")
end # module
|
{"hexsha": "16a446514286efc70ea87131130eccfd6c2982d3", "size": 610, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/RegistryUtils.jl", "max_stars_repo_name": "galenlynch/RegistryUtils.jl", "max_stars_repo_head_hexsha": "6944f98b3bc04d1a2cff5d4bf95b62634a25553b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-08-15T18:14:33.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-11T00:15:39.000Z", "max_issues_repo_path": "src/RegistryUtils.jl", "max_issues_repo_name": "galenlynch/RegistryUtils.jl", "max_issues_repo_head_hexsha": "6944f98b3bc04d1a2cff5d4bf95b62634a25553b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/RegistryUtils.jl", "max_forks_repo_name": "galenlynch/RegistryUtils.jl", "max_forks_repo_head_hexsha": "6944f98b3bc04d1a2cff5d4bf95b62634a25553b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7857142857, "max_line_length": 92, "alphanum_fraction": 0.7967213115, "num_tokens": 172}
|
# Example of Naive Bayes implemented from Scratch in Python
import csv
import random
import math
def loadCsv(filename):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
for i in range(len(dataset)):
dataset[i] = [float(x) for x in dataset[i]]
return dataset
def loadDataset_ckd(filename, trainingSet=[]):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
#print(len(dataset),range(len(dataset)))
for x in range(len(dataset)):
for y in range(15):
dataset[x][y] = float(dataset[x][y])
trainingSet.append(dataset[x])
def loadDataset_ckd1(filename, testSet=[]):
lines1 = csv.reader(open(filename, "r"))
dataset1 = list(lines1)
#print(len(dataset1),range(len(dataset1)))
for x in range(len(dataset1)):
for y in range(15):
dataset1[x][y] = float(dataset1[x][y])
testSet.append(dataset1[x])
def loadDataset_ml(filename, trainingSet=[]):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
for x in range(len(dataset)):
for y in range(9):
dataset[x][y] = float(dataset[x][y])
trainingSet.append(dataset[x])
def loadDataset_hd(filename, trainingSet=[]):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
for x in range(len(dataset)):
for y in range(12):
dataset[x][y] = float(dataset[x][y])
trainingSet.append(dataset[x])
def loadDataset_ml1(filename, testSet=[]):
lines1 = csv.reader(open(filename, "r"))
dataset1 = list(lines1)
#print(len(dataset1),range(len(dataset1)))
for x in range(len(dataset1)):
for y in range(9):
dataset1[x][y] = float(dataset1[x][y])
testSet.append(dataset1[x])
def loadDataset_hd1(filename, testSet=[]):
lines1 = csv.reader(open(filename, "r"))
dataset1 = list(lines1)
#print(len(dataset1),range(len(dataset1)))
for x in range(len(dataset1)):
for y in range(12):
dataset1[x][y] = float(dataset1[x][y])
testSet.append(dataset1[x])
def splitDataset(dataset, splitRatio):
trainSize = int(len(dataset) * splitRatio)
trainSet = []
copy = list(dataset)
while len(trainSet) < trainSize:
index = random.randrange(len(copy))
trainSet.append(copy.pop(index))
return [trainSet, copy]
def separateByClass(dataset):
separated = {}
for i in range(len(dataset)):
vector = dataset[i]
if (vector[-1] not in separated):
separated[vector[-1]] = []
separated[vector[-1]].append(vector)
return separated
def mean(numbers):
return sum(numbers)/float(len(numbers))
def stdev(numbers):
avg = mean(numbers)
variance = sum([pow(x-avg,2) for x in numbers])/float(len(numbers)-1)
return math.sqrt(variance)
def summarize(dataset):
summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]
del summaries[-1]
return summaries
def summarizeByClass(dataset):
separated = separateByClass(dataset)
summaries = {}
#print(separated)
for classValue, instances in separated.items():
#print(instances)
summaries[classValue] = summarize(instances)
return summaries
def calculateProbability(x, mean, stdev):
#print(x,mean,stdev)
if(x==0 and mean==0 and stdev==0):
x = 1
mean = 1
stdev = 1
#print(x,mean,stdev)
part2 = (2*math.pow(stdev,2))
if(part2==0) :
part2 = 0.1
#print(part2)
exponent = math.exp(-(math.pow(x-mean,2)/part2))
part3 = (math.sqrt(2*math.pi) * stdev)
if(part3==0) :
part3 = 0.1
fin = (1 / part3) * exponent
return fin
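# Sanity-check sketch (added for illustration, not in the original script):
# calculateProbability implements the Gaussian density
# f(x) = (1 / (sqrt(2*pi)*stdev)) * exp(-(x-mean)^2 / (2*stdev^2)),
# so evaluated at x == mean it must reduce to 1 / (sqrt(2*pi)*stdev).
def _check_gaussian_density(mean=71.5, stdev=6.2):
    at_mean = calculateProbability(mean, mean, stdev)
    expected = 1 / (math.sqrt(2 * math.pi) * stdev)
    assert abs(at_mean - expected) < 1e-12
    return at_mean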
def calculateClassProbabilities(summaries, inputVector):
probabilities = {}
for classValue, classSummaries in summaries.items():
probabilities[classValue] = 1
for i in range(len(classSummaries)):
mean, stdev = classSummaries[i]
x = inputVector[i]
probabilities[classValue] *= calculateProbability(x, mean, stdev)
return probabilities
def predict(summaries, inputVector):
probabilities = calculateClassProbabilities(summaries, inputVector)
bestLabel, bestProb = None, -1
for classValue, probability in probabilities.items():
if bestLabel is None or probability > bestProb:
bestProb = probability
bestLabel = classValue
return bestLabel
def getPredictions(summaries, testSet):
predictions = []
for i in range(len(testSet)):
result = predict(summaries, testSet[i])
predictions.append(result)
return predictions
def getAccuracy(testSet, predictions):
correct = 0
for i in range(len(testSet)):
if testSet[i][-1] == predictions[i]:
correct += 1
return (correct/float(len(testSet))) * 100.0
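# Usage sketch (added for illustration; the split ratio and toy data are
# hypothetical): splitDataset is defined above but never exercised by main(),
# so this shows the intended random train/test partitioning.
def _example_split():
    toy = [[float(i), float(i % 2)] for i in range(10)]
    train, test = splitDataset(toy, 0.67)
    assert len(train) == 6 and len(test) == 4
    return train, test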
def main():
    print('\n~~~~~~~~~~~')
    # checking for presence of ckd disease
    # prepare data
    matched_count = 0
    total_datas = 0
    trainingSet = []
    testSet = []
loadDataset_ckd('dataset_ckd_train.csv', trainingSet)
    total_datas = total_datas + len(trainingSet)
loadDataset_ckd1('dataset_ckd_test.csv', testSet)
print ('Train set of ckd: ',repr(len(trainingSet)))
#print ('Train set: ', trainingSet)
#print ('Test set: ', repr(len(testSet)))
print ('Input for CKD disease related parameters :\n ',testSet)
summaries = summarizeByClass(trainingSet)
    matched_count = matched_count + len(summaries)
    print('matches: ', len(summaries))
# test model
predictions = getPredictions(summaries, testSet)
#print('> predicted=' , predictions)
print('> disease presence =' , predictions )
accuracy = getAccuracy(testSet, predictions)
#print('Accuracy: {0}%').format(accuracy)
#print('Accuracy: ',accuracy)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n~~~~~~~~~~~')
#checking of presence of diabetes disease
trainingSet=[]
testSet=[]
loadDataset_ml('dataset_diabetes_train.csv', trainingSet)
    total_datas = total_datas + len(trainingSet)
loadDataset_ml1('dataset_diabetes_test.csv', testSet)
print ('Train set of diabetes: ',repr(len(trainingSet)))
print ('Input for Diabetes disease related parameters :\n ',testSet)
#print(trainingSet)
#print(testSet)
# prepare model
summaries = summarizeByClass(trainingSet)
#print(summaries)
    matched_count = matched_count + len(summaries)
    print('matches: ', len(summaries))
# test model
predictions = getPredictions(summaries, testSet)
#print('> predicted=' , predictions)
print('> disease presence =' , predictions )
accuracy = getAccuracy(testSet, predictions)
#print('Accuracy: {0}%').format(accuracy)
#print('Accuracy: ',accuracy)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n~~~~~~~~~~~')
#checking of presence of heart disease
trainingSet=[]
testSet=[]
loadDataset_hd('dataset_heartdisease_train.csv', trainingSet)
    total_datas = total_datas + len(trainingSet)
loadDataset_hd1('dataset_heartdisease_test.csv', testSet)
print ('Train set of heart disease: ',repr(len(trainingSet)))
print ('Input for heart disease related parameters :\n ',testSet)
summaries = summarizeByClass(trainingSet)
#print(summaries)
    matched_count = matched_count + len(summaries)
    print('matches: ', len(summaries))
# test model
predictions = getPredictions(summaries, testSet)
#print('> predicted=' , predictions)
print('> disease presence =' , predictions )
accuracy = getAccuracy(testSet, predictions)
#print('Accuracy: {0}%').format(accuracy)
#print('Accuracy: ',accuracy)
    print('Total data:', total_datas, 'Matched count:', matched_count)
main()
|
{"hexsha": "d7b9607848de2585415aac4ac43c5891e9257879", "size": 7928, "ext": "py", "lang": "Python", "max_stars_repo_path": "Final/prediction/nb1.py", "max_stars_repo_name": "NaguGowda/machin-learning-", "max_stars_repo_head_hexsha": "dd04f44a06d6f83e58ed6eb4d69db09620040e49", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Final/prediction/nb1.py", "max_issues_repo_name": "NaguGowda/machin-learning-", "max_issues_repo_head_hexsha": "dd04f44a06d6f83e58ed6eb4d69db09620040e49", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Final/prediction/nb1.py", "max_forks_repo_name": "NaguGowda/machin-learning-", "max_forks_repo_head_hexsha": "dd04f44a06d6f83e58ed6eb4d69db09620040e49", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8393574297, "max_line_length": 101, "alphanum_fraction": 0.6456861756, "include": true, "reason": "import numpy", "num_tokens": 1922}
|
import numpy as np
import igraph as ig
import copy
import pickle
import os
def initialize_world(config):
g = ig.Graph()
hub_cities = config.get('hub_cities', [str(i) for i in range(5)])
for i, hub_city in enumerate(hub_cities):
hub_members = int(
config.get('hub_starting_members_avg', 20)
+ config.get('hub_starting_members_std', 0)*np.random.randn())
indexes = range(len(g.vs), len(g.vs) + hub_members)
g.add_vertices(hub_members)
g.vs(indexes)['city'] = hub_city
g.vs(indexes)['previous_city'] = None
g.vs(indexes)['age'] = (
config.get('mean_starting_age', 24)
+ config.get('starting_age_std', 2)*np.random.randn(len(indexes)))
    g.vs['ambassador'] = None  # ensure the attribute exists before promote_ambassadors runs
    g.vs['former_ambassador'] = None
g.es["interactions"] = 0
return g
def promote_ambassadors(g, config):
for i, hub_city in enumerate(list(set(g.vs['city']))):
hub_members = g.vs.select(city=hub_city)
indexes = [hub_member.index for hub_member in hub_members]
if all(v == 0 for v in hub_members.degree()):
new_ambassador_index = np.random.choice(indexes)
else:
            old_ambassador_index = [
                hub_member.index for hub_member in hub_members
                if hub_member['ambassador']]
            if len(old_ambassador_index):
                if not config.get('promote_new_ambassador_yearly', 0):
                    continue
                g.vs[old_ambassador_index]['ambassador'] = None
                g.vs[old_ambassador_index]['former_ambassador'] = True
            # elect an ambassador with probability proportional to
            # each member's degree raised to degree_count_power
degrees = np.array(hub_members.degree())
new_ambassador_index = np.random.choice(
indexes,
p=(
degrees**config.get('degree_count_power', 1.0)
/ np.sum(degrees**config.get('degree_count_power', 1.0))))
g.vs(new_ambassador_index)['ambassador'] = True
return g
def adding_edge(g, link):
# adding edge to the graph if it does not exists,
# or doing other things with it
try:
new_link = 0
es_id = g.get_eid(link[0], link[1])
    except ig.InternalError:
new_link = 1
# no such edge: adding one
g.add_edges([link])
es_id = len(g.es)-1
if new_link:
g.es[es_id]['interactions'] = 1
g.es[es_id]['yearly_interactions'] = 1
else:
g.es[es_id]['interactions'] += 1
g.es[es_id]['yearly_interactions'] += 1
return g
def new_members(g, config):
for i, hub_city in enumerate(list(set(g.vs['city']))):
hub_members = g.vs.select(city=hub_city)
num_new_members = int(
np.ceil(config.get('new_member_ratio', 0)*len(hub_members)))
        new_members_ids = list(len(g.vs) + np.array(range(num_new_members)))
        g.add_vertices(num_new_members)  # new vertices take the next consecutive indices
g.vs(new_members_ids)['city'] = hub_city
rand_n = np.random.randn(len(new_members_ids))
g.vs(new_members_ids)['age'] = (
config.get('mean_starting_age', 24)
+ config.get('starting_age_std', 2)*rand_n)
return g
def churn(g, config):
degrees = np.array(g.vs.degree())
# chance of leaving organization
chance_of_leaving = (
config.get('churn_no_degree_rate', 0.0)
/ (degrees+1.)**config.get('degree_count_power', 1.0)
+ config.get('base_churn', 0.0))
# some members will leave because they know nobody
# print chance_of_leaving
if np.sum(chance_of_leaving) != 0.:
members_leaving = int(np.sum(chance_of_leaving))
# Normalize so that all weights sum to 1
chance_of_leaving = chance_of_leaving/np.sum(chance_of_leaving)
members_leaving = np.random.choice(
range(len(g.vs)),
size=members_leaving,
p=chance_of_leaving)
g.delete_vertices(members_leaving)
# Members leave as they get too old for the network
old_members = g.vs.select(age_gt=config.get('max_age', 100))
g.delete_vertices([
old_member.index for old_member in old_members])
return g
def city_hopping(g, config):
# people who move:
people_who_move = np.random.choice(
range(len(g.vs)),
int(len(g.vs)*config.get('city_hopping_propability', 0.0)),
replace=False)
cities = list(set(g.vs['city']))
for person_index in people_who_move:
g.vs[person_index]['previous_city'] = g.vs[person_index]['city']
g.vs[person_index]['city'] = np.random.choice(cities)
    if config.get('verbose', 0) > 1:
        if config.get('city_hopping_propability') is None:
            print('warning', 'city_hopping_propability', 'is not defined')
return g
def global_retreat(g, config):
    all_indexes = list(range(len(g.vs)))
    ambassadors = g.vs.select(ambassador=True)
    index_ambassadors = [ambassador.index for ambassador in ambassadors]
    not_ambassadors = list(all_indexes)
    for i in index_ambassadors:
        not_ambassadors.remove(i)
number_of_not_ambassadors_goers = (
config.get('global_retreat_goers', int(len(g.vs)/5.))-len(ambassadors))
if number_of_not_ambassadors_goers > 0:
not_ambassadors_going_to_retreat = list(np.random.choice(
not_ambassadors, number_of_not_ambassadors_goers))
going_to_retreat = not_ambassadors_going_to_retreat + index_ambassadors
else:
going_to_retreat = index_ambassadors
# make links among participants
new_links = np.random.choice(
going_to_retreat,
(
config.get('global_retreat_link_multiplier', 10)
* len(going_to_retreat), 2),
replace=True)
for link in new_links:
if link[0] != link[1]:
g = adding_edge(g, link)
return g
def local_event(g, config):
for hub_city in list(set(g.vs['city'])):
hub_members = g.vs.select(city=hub_city)
ambassador_index = [
hub_member.index for hub_member in hub_members
if hub_member['ambassador']][0]
hub_member_indices = [
hub_member.index for hub_member in hub_members
if not hub_member['ambassador']]
event_participants = [ambassador_index]+list(np.random.choice(
hub_member_indices,
config.get('local_event_participants', len(hub_members))-1,
replace=False))
new_links = np.random.choice(
event_participants,
(
config.get('local_event_avg_new_link_per_participant', 10)
* len(event_participants), 2),
replace=True)
for link in new_links:
if link[0] != link[1]:
g = adding_edge(g, link)
        if config.get('verbose', 0) > 1:
            print(len(g.es))
return g
def get_connectivity(g):
g.delete_vertices(
[i for i, degree in enumerate(g.degree()) if degree == 0])
shortest_paths = g.shortest_paths_dijkstra(mode='all')
return np.mean(shortest_paths)
def run_simulation(config, g_initialize=None):
    if config.get('verbose'):
        print('Initializing network')
    if not g_initialize:
        g = initialize_world(config)
    else:
        g = g_initialize
    g_states = []
    avg_age = []
    num_members = []
    for j in range(config.get('simulation_years', 1)):
        g.es['yearly_interactions'] = 0
        if config.get('verbose', 0) > 0:
            print('year', str(j), 'members', len(g.vs),
                  'avg_age: %.1f' % np.mean(g.vs['age']))
# Saving state -- inefficient as there are no immutable datastructures
# https://github.com/igraph/igraph/wiki/Temporal-graphs
if config.get('save_states', 1):
g_states.append(copy.copy(g))
avg_age.append(np.mean(g.vs['age']))
num_members.append(len(g.vs))
g = city_hopping(g, config)
g = promote_ambassadors(g, config)
g = new_members(g, config)
for i in range(config.get('yearly_local_events', 0)):
g = local_event(g, config)
for i in range(config.get('yearly_global_retreats', 0)):
g = global_retreat(g, config)
g = churn(g, config)
        if config.get('verbose', 0) > 1:
            print('members after churn', str(len(g.vs)))
        g.vs['age'] = list(np.array(g.vs['age']) + 1.)
        if config.get('verbose', 0) > 1:
            print('yearly interactions', int(np.sum(g.es['yearly_interactions'])))
# post processing
g.es['weights'] = g.es['interactions']
if config.get('delete_zero_connection_at_end', 0):
# Removing all members without any connections
g.delete_vertices(
[i for i, degree in enumerate(g.degree()) if degree == 0])
    if config.get('verbose', 0) > 0:
        print('end ', 'members', len(g.vs),
              'avg_age: %.1f' % np.mean(g.vs['age']))
# saving last iteration
if config.get('save_states', 1):
g_states.append(copy.copy(g))
return g, g_states
def run_simulation_cached(
config,
name,
redo=False,
simu_folder='simu_archieve'):
filepath = os.path.join(simu_folder, name+'.pckl')
if os.path.exists(filepath) and not redo:
g_states = pickle.load(open(filepath, 'rb'))
else:
        if len(simu_folder) and not os.path.exists(simu_folder):
            os.mkdir(simu_folder)
_, g_states = run_simulation(config)
pickle.dump(g_states, open(filepath, 'wb'))
return g_states
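# Illustrative configuration sketch (an assumption for this write-up; the
# original file does not ship a canonical config). Only keys actually read via
# config.get(...) above are set; everything else falls back to its default.
EXAMPLE_CONFIG = {
    'hub_cities': ['berlin', 'boston', 'nairobi'],
    'hub_starting_members_avg': 20,
    'simulation_years': 5,
    'yearly_local_events': 1,
    'yearly_global_retreats': 1,
    'new_member_ratio': 0.1,
    'base_churn': 0.05,
    'city_hopping_propability': 0.05,
    'verbose': 1,
}

def _example_run():
    # run five simulated years and inspect the mean shortest-path length
    g, g_states = run_simulation(EXAMPLE_CONFIG)
    return get_connectivity(g)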
|
{"hexsha": "1e56642bd0fb96df32c3339cd041ce5f451c2943", "size": 9614, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulation_code.py", "max_stars_repo_name": "keldLundgaard/ThousandNetwork_simulation", "max_stars_repo_head_hexsha": "d09cec2b2e56bb9945f513ec1b814da81a3ed90e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simulation_code.py", "max_issues_repo_name": "keldLundgaard/ThousandNetwork_simulation", "max_issues_repo_head_hexsha": "d09cec2b2e56bb9945f513ec1b814da81a3ed90e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simulation_code.py", "max_forks_repo_name": "keldLundgaard/ThousandNetwork_simulation", "max_forks_repo_head_hexsha": "d09cec2b2e56bb9945f513ec1b814da81a3ed90e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7156549521, "max_line_length": 79, "alphanum_fraction": 0.609423757, "include": true, "reason": "import numpy", "num_tokens": 2377}
|
#ifndef CANARD_NET_OFP_V13_HELLO_ELEMENTS_VERSIONBITMAP_HPP
#define CANARD_NET_OFP_V13_HELLO_ELEMENTS_VERSIONBITMAP_HPP
#include <cstddef>
#include <cstdint>
#include <algorithm>
#include <iterator>
#include <limits>
#include <stdexcept>
#include <utility>
#include <boost/algorithm/cxx11/all_of.hpp>
#include <boost/container/vector.hpp>
#include <boost/endian/conversion.hpp>
#include <boost/range/adaptor/reversed.hpp>
#include <boost/range/algorithm/find_if.hpp>
#include <boost/range/algorithm/for_each.hpp>
#include <boost/range/algorithm/mismatch.hpp>
#include <canard/net/ofp/detail/basic_protocol_type.hpp>
#include <canard/net/ofp/detail/decode.hpp>
#include <canard/net/ofp/detail/encode.hpp>
#include <canard/net/ofp/detail/memcmp.hpp>
#include <canard/net/ofp/v13/detail/byteorder.hpp>
#include <canard/net/ofp/v13/openflow.hpp>
namespace canard {
namespace net {
namespace ofp {
namespace v13 {
namespace hello_elements {
class versionbitmap
: public detail::basic_protocol_type<versionbitmap>
{
public:
using ofp_type = protocol::ofp_hello_elem_versionbitmap;
using ofp_header_type = protocol::ofp_hello_elem_header;
using bitmaps_type = boost::container::vector<std::uint32_t>;
private:
using bitmap_type = bitmaps_type::value_type;
static constexpr std::size_t bitmap_bits
= std::numeric_limits<bitmap_type>::digits;
public:
static constexpr protocol::ofp_hello_elem_type hello_element_type
= protocol::OFPHET_VERSIONBITMAP;
explicit versionbitmap(bitmaps_type bitmaps)
: versionbitmap_{
hello_element_type
, calc_length(bitmaps)
}
, bitmaps_{std::move(bitmaps)}
{
}
versionbitmap(versionbitmap const&) = default;
versionbitmap(versionbitmap&& other)
: versionbitmap_(other.versionbitmap_)
, bitmaps_(other.extract_bitmaps())
{
}
auto operator=(versionbitmap const& other)
-> versionbitmap&
{
return operator=(versionbitmap{other});
}
auto operator=(versionbitmap&& other)
-> versionbitmap&
{
auto tmp = std::move(other);
std::swap(versionbitmap_, tmp.versionbitmap_);
bitmaps_.swap(tmp.bitmaps_);
return *this;
}
static constexpr auto type() noexcept
-> protocol::ofp_hello_elem_type
{
return hello_element_type;
}
auto length() const noexcept
-> std::uint16_t
{
return versionbitmap_.length;
}
auto bitmaps() const noexcept
-> bitmaps_type const&
{
return bitmaps_;
}
auto extract_bitmaps()
-> bitmaps_type
{
auto bitmaps = bitmaps_type{};
bitmaps.swap(bitmaps_);
versionbitmap_.length = sizeof(ofp_type);
return bitmaps;
}
auto support(std::uint8_t const version) const noexcept
-> bool
{
auto const index = std::size_t(version / bitmap_bits);
auto const shift = std::size_t(version % bitmap_bits);
if (bitmaps_.size() <= index) {
return false;
}
return (bitmap_type{1} << shift) & bitmaps_[index];
}
auto max_support_version() const
-> std::uint8_t
{
auto const rbitmaps = bitmaps_ | boost::adaptors::reversed;
auto const rit
= boost::find_if(rbitmaps, [](bitmap_type b) { return b != 0; });
if (rit == rbitmaps.end()) {
throw std::runtime_error{"no valid bitmaps"};
}
auto const it = std::prev(rit.base());
auto bitmap = *it;
auto shift = std::size_t{0};
while (bitmap != 1) {
bitmap >>= 1;
++shift;
}
return shift + std::distance(bitmaps_.begin(), it) * bitmap_bits;
}
static auto validate_header(ofp_header_type const& header) noexcept
-> char const*
{
if (header.type != type()) {
return "invalid hello element type";
}
if (!is_valid_hello_element_length(header)) {
return "invalid hello element length";
}
return nullptr;
}
static constexpr auto is_valid_hello_element_length(
ofp_header_type const& header) noexcept
-> bool
{
return header.length >= min_length()
&& (header.length - min_length()) % sizeof(bitmap_type) == 0;
}
private:
versionbitmap(ofp_type const& versionbitmap, bitmaps_type&& bitmaps)
: versionbitmap_(versionbitmap)
, bitmaps_(std::move(bitmaps))
{
}
friend basic_protocol_type;
friend constexpr auto exclude_padding(
detail::basic_protocol_type_tag<versionbitmap>) noexcept
-> bool
{
return true;
}
template <class Container>
void encode_impl(Container& container) const
{
detail::encode(container, versionbitmap_);
boost::for_each(bitmaps_, [&](bitmap_type bitmap) {
detail::encode(container, bitmap);
});
}
template <class Iterator>
static auto decode_impl(Iterator& first, Iterator last)
-> versionbitmap
{
auto const vbitmap = detail::decode<ofp_type>(first, last);
auto const bitmaps_length = vbitmap.length - sizeof(ofp_type);
auto bitmaps = bitmaps_type(
bitmaps_length / sizeof(bitmap_type), boost::container::default_init);
std::copy_n(
first, bitmaps_length
, reinterpret_cast<unsigned char*>(&bitmaps[0]));
std::advance(first, bitmaps_length);
boost::for_each(bitmaps, [](bitmap_type& bitmap) {
boost::endian::big_to_native_inplace(bitmap);
});
return versionbitmap{vbitmap, std::move(bitmaps)};
}
template <class Validator>
void validate_impl(Validator) const
{
if (bitmaps_.empty()) {
throw std::runtime_error{"bitmaps is never empty"};
}
if (boost::algorithm::all_of(
bitmaps_, [](bitmap_type b) { return b == 0; })) {
throw std::runtime_error{
"bitmaps must include at least one non zero bitmap"
};
}
}
auto equal_impl(versionbitmap const& rhs) const noexcept
-> bool
{
return detail::memcmp(versionbitmap_, rhs.versionbitmap_)
&& bitmaps_ == rhs.bitmaps_;
}
auto equivalent_impl(versionbitmap const& rhs) const noexcept
-> bool
{
using bt = versionbitmap::bitmaps_type;
auto const compare = [](bt const& larger, bt const& smaller) -> bool {
auto const result = boost::mismatch(smaller, larger);
if (result.first != smaller.end()) {
return false;
}
return std::all_of(
result.second, larger.end()
, [](bt::value_type const b) { return b == 0; });
};
return (bitmaps_.size() >= rhs.bitmaps_.size())
? compare(bitmaps_, rhs.bitmaps_)
: compare(rhs.bitmaps_, bitmaps_);
}
static auto calc_length(bitmaps_type const& bitmaps)
-> std::uint16_t
{
constexpr auto max_bitmaps_size
= (std::numeric_limits<std::uint16_t>::max() - sizeof(ofp_type))
/ sizeof(bitmap_type);
if (bitmaps.size() > max_bitmaps_size) {
throw std::runtime_error{"invalid bitmaps size"};
}
return sizeof(ofp_type) + bitmaps.size() * sizeof(bitmap_type);
}
private:
ofp_type versionbitmap_;
bitmaps_type bitmaps_;
};
} // namespace hello_elements
} // namespace v13
} // namespace ofp
} // namespace net
} // namespace canard
#endif // CANARD_NET_OFP_V13_HELLO_ELEMENTS_VERSIONBITMAP_HPP
|
{"hexsha": "14ca51f26a62c0a86d3d6ee53101eeea96bcc698", "size": 7459, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/canard/net/ofp/v13/hello_element/versionbitmap.hpp", "max_stars_repo_name": "amedama41/bulb", "max_stars_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/canard/net/ofp/v13/hello_element/versionbitmap.hpp", "max_issues_repo_name": "amedama41/bulb", "max_issues_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2016-07-21T11:29:13.000Z", "max_issues_repo_issues_event_max_datetime": "2016-12-03T05:16:42.000Z", "max_forks_repo_path": "include/canard/net/ofp/v13/hello_element/versionbitmap.hpp", "max_forks_repo_name": "amedama41/bulb", "max_forks_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0413533835, "max_line_length": 80, "alphanum_fraction": 0.6428475667, "num_tokens": 1781}
|
[STATEMENT]
lemma aboveS_decr:
assumes TRANS: "trans r" and ANTISYM: "antisym r" and
REL: "(a,b) \<in> r"
shows "aboveS r b \<le> aboveS r a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. aboveS r b \<subseteq> aboveS r a
[PROOF STEP]
proof(unfold aboveS_def, auto)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>a \<noteq> b; (b, a) \<in> r\<rbrakk> \<Longrightarrow> False
2. \<And>x. \<lbrakk>x \<noteq> b; (b, x) \<in> r\<rbrakk> \<Longrightarrow> (a, x) \<in> r
[PROOF STEP]
assume *: "a \<noteq> b" and **: "(b,a) \<in> r"
[PROOF STATE]
proof (state)
this:
a \<noteq> b
(b, a) \<in> r
goal (2 subgoals):
1. \<lbrakk>a \<noteq> b; (b, a) \<in> r\<rbrakk> \<Longrightarrow> False
2. \<And>x. \<lbrakk>x \<noteq> b; (b, x) \<in> r\<rbrakk> \<Longrightarrow> (a, x) \<in> r
[PROOF STEP]
with ANTISYM antisym_def[of r] REL
[PROOF STATE]
proof (chain)
picking this:
antisym r
antisym r = (\<forall>x y. (x, y) \<in> r \<longrightarrow> (y, x) \<in> r \<longrightarrow> x = y)
(a, b) \<in> r
a \<noteq> b
(b, a) \<in> r
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
antisym r
antisym r = (\<forall>x y. (x, y) \<in> r \<longrightarrow> (y, x) \<in> r \<longrightarrow> x = y)
(a, b) \<in> r
a \<noteq> b
(b, a) \<in> r
goal (1 subgoal):
1. False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
False
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<noteq> b; (b, x) \<in> r\<rbrakk> \<Longrightarrow> (a, x) \<in> r
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<noteq> b; (b, x) \<in> r\<rbrakk> \<Longrightarrow> (a, x) \<in> r
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<noteq> b; (b, x) \<in> r\<rbrakk> \<Longrightarrow> (a, x) \<in> r
[PROOF STEP]
assume "x \<noteq> b" "(b,x) \<in> r"
[PROOF STATE]
proof (state)
this:
x \<noteq> b
(b, x) \<in> r
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<noteq> b; (b, x) \<in> r\<rbrakk> \<Longrightarrow> (a, x) \<in> r
[PROOF STEP]
with REL TRANS trans_def[of r]
[PROOF STATE]
proof (chain)
picking this:
(a, b) \<in> r
trans r
trans r = (\<forall>x y z. (x, y) \<in> r \<longrightarrow> (y, z) \<in> r \<longrightarrow> (x, z) \<in> r)
x \<noteq> b
(b, x) \<in> r
[PROOF STEP]
show "(a,x) \<in> r"
[PROOF STATE]
proof (prove)
using this:
(a, b) \<in> r
trans r
trans r = (\<forall>x y z. (x, y) \<in> r \<longrightarrow> (y, z) \<in> r \<longrightarrow> (x, z) \<in> r)
x \<noteq> b
(b, x) \<in> r
goal (1 subgoal):
1. (a, x) \<in> r
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
(a, x) \<in> r
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1265, "file": null, "length": 12}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class MultinomialTest(tf.test.TestCase):
def testSimpleShapes(self):
with self.test_session():
p = [.1, .3, .6]
dist = tf.contrib.distributions.Multinomial(n=1., p=p)
self.assertEqual(3, dist.event_shape().eval())
self.assertAllEqual([], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([3]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([]), dist.get_batch_shape())
def testComplexShapes(self):
with self.test_session():
p = 0.5 * np.ones([3, 2, 2], dtype=np.float32)
n = [[3., 2], [4, 5], [6, 7]]
dist = tf.contrib.distributions.Multinomial(n=n, p=p)
self.assertEqual(2, dist.event_shape().eval())
self.assertAllEqual([3, 2], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([2]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([3, 2]), dist.get_batch_shape())
def testNProperty(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.test_session():
dist = tf.contrib.distributions.Multinomial(n=n, p=p)
self.assertEqual((2, 1), dist.n.get_shape())
self.assertAllClose(n, dist.n.eval())
def testPProperty(self):
p = [[0.1, 0.2, 0.7]]
with self.test_session():
dist = tf.contrib.distributions.Multinomial(n=3., p=p)
self.assertEqual((1, 3), dist.p.get_shape())
self.assertEqual((1, 3), dist.logits.get_shape())
self.assertAllClose(p, dist.p.eval())
def testLogitsProperty(self):
logits = [[0., 9., -0.5]]
with self.test_session():
multinom = tf.contrib.distributions.Multinomial(n=3., logits=logits)
self.assertEqual((1, 3), multinom.p.get_shape())
self.assertEqual((1, 3), multinom.logits.get_shape())
self.assertAllClose(logits, multinom.logits.eval())
def testPmfNandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.test_session():
dist = tf.contrib.distributions.Multinomial(
n=n, p=p, validate_args=True)
dist.pmf([2., 3, 0]).eval()
dist.pmf([3., 0, 2]).eval()
with self.assertRaisesOpError("Condition x >= 0.*"):
dist.pmf([-1., 4, 2]).eval()
with self.assertRaisesOpError("counts do not sum to n"):
dist.pmf([3., 3, 0]).eval()
def testPmfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.test_session():
# No errors with integer n.
multinom = tf.contrib.distributions.Multinomial(
n=n, p=p, validate_args=True)
multinom.pmf([2., 1, 2]).eval()
multinom.pmf([3., 0, 2]).eval()
# Counts don't sum to n.
with self.assertRaisesOpError("counts do not sum to n"):
multinom.pmf([2., 3, 2]).eval()
# Counts are non-integers.
with self.assertRaisesOpError("Condition x == y.*"):
multinom.pmf([1.0, 2.5, 1.5]).eval()
multinom = tf.contrib.distributions.Multinomial(
n=n, p=p, validate_args=False)
multinom.pmf([1., 2., 2.]).eval()
# Non-integer arguments work.
multinom.pmf([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
with self.test_session():
# Both zero-batches. No broadcast
p = [0.5, 0.5]
counts = [1., 0]
pmf = tf.contrib.distributions.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
with self.test_session():
# Both zero-batches. No broadcast
p = [0.1, 0.9]
counts = [3., 2]
dist = tf.contrib.distributions.Multinomial(n=5., p=p)
pmf = dist.pmf(counts)
# 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000.
self.assertAllClose(81./10000, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenSameRank(self):
with self.test_session():
p = [[0.1, 0.9]]
counts = [[1., 0], [0, 1]]
pmf = tf.contrib.distributions.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
      self.assertEqual((2,), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
p = [0.1, 0.9]
counts = [[1., 0], [0, 1]]
pmf = tf.contrib.distributions.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
      self.assertEqual((2,), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
with self.test_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [[1., 0]]
pmf = tf.contrib.distributions.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
      self.assertEqual((2,), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [1., 0]
pmf = tf.contrib.distributions.Multinomial(n=1., p=p).pmf(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
      self.assertEqual(pmf.get_shape(), (2,))
def testPmfShapeCountsStretchedN(self):
with self.test_session():
# [2, 2, 2]
p = [[[0.1, 0.9], [0.1, 0.9]], [[0.7, 0.3], [0.7, 0.3]]]
# [2, 2]
n = [[3., 3], [3, 3]]
# [2]
counts = [2., 1]
pmf = tf.contrib.distributions.Multinomial(n=n, p=p).pmf(counts)
pmf.eval()
self.assertEqual(pmf.get_shape(), (2, 2))
def testPmfShapeCountsPStretchedN(self):
with self.test_session():
p = [0.1, 0.9]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
pmf = tf.contrib.distributions.Multinomial(n=n, p=p).pmf(counts)
pmf.eval()
self.assertEqual((4, 3), pmf.get_shape())
def testMultinomialMean(self):
with self.test_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = tf.contrib.distributions.Multinomial(n=n, p=p)
expected_means = 5 * np.array(p, dtype=np.float32)
self.assertEqual((3,), dist.mean().get_shape())
self.assertAllClose(expected_means, dist.mean().eval())
def testMultinomialVariance(self):
with self.test_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = tf.contrib.distributions.Multinomial(n=n, p=p)
expected_variances = [
[9./20, -1/10, -7/20], [-1/10, 4/5, -7/10], [-7/20, -7/10, 21/20]]
self.assertEqual((3, 3), dist.variance().get_shape())
self.assertAllClose(expected_variances, dist.variance().eval())
def testMultinomialVarianceBatch(self):
with self.test_session():
# Shape [2]
n = [5.] * 2
# Shape [4, 1, 2]
p = [[[0.1, 0.9]], [[0.1, 0.9]]] * 2
dist = tf.contrib.distributions.Multinomial(n=n, p=p)
# Shape [2, 2]
inner_var = [[9./20, -9/20], [-9/20, 9/20]]
# Shape [4, 2, 2, 2]
expected_variances = [[inner_var, inner_var]] * 4
self.assertEqual((4, 2, 2, 2), dist.variance().get_shape())
self.assertAllClose(expected_variances, dist.variance().eval())
def testVarianceMultidimensional(self):
# Shape [3, 5, 4]
p = np.random.dirichlet([.25, .25, .25, .25], [3, 5]).astype(np.float32)
# Shape [6, 3, 3]
p2 = np.random.dirichlet([.3, .3, .4], [6, 3]).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1]).astype(np.float32)
with self.test_session():
dist = tf.contrib.distributions.Multinomial(ns, p)
dist2 = tf.contrib.distributions.Multinomial(ns2, p2)
variance = dist.variance()
variance2 = dist2.variance()
self.assertEqual((3, 5, 4, 4), variance.get_shape())
self.assertEqual((6, 3, 3, 3), variance2.get_shape())
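# Cross-check sketch (added for illustration; plain Python, independent of
# TensorFlow): reproduces the closed-form value 81/10000 asserted in
# testPmfBothZeroBatchesNontrivialN above.
def _multinomial_pmf_by_hand(counts=(3, 2), p=(0.1, 0.9)):
  import math
  coeff = math.factorial(sum(counts))
  for c in counts:
    coeff //= math.factorial(c)
  prob = 1.0
  for c, p_i in zip(counts, p):
    prob *= p_i ** c
  return coeff * prob  # 10 * 0.1**3 * 0.9**2 == 0.0081 == 81/10000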
if __name__ == "__main__":
tf.test.main()
|
{"hexsha": "31aa5e72e7def46e861ed65cd986fc1961302c0f", "size": 8594, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/contrib/distributions/python/kernel_tests/multinomial_test.py", "max_stars_repo_name": "steven0820/tensorflow", "max_stars_repo_head_hexsha": "36ebbf1ddc3ed820b7a5572ff4ed8e9bc707b8e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-10-09T06:15:19.000Z", "max_stars_repo_stars_event_max_datetime": "2016-10-09T06:15:19.000Z", "max_issues_repo_path": "tensorflow/contrib/distributions/python/kernel_tests/multinomial_test.py", "max_issues_repo_name": "srivatsan-ramesh/tensorflow", "max_issues_repo_head_hexsha": "36ebbf1ddc3ed820b7a5572ff4ed8e9bc707b8e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow/contrib/distributions/python/kernel_tests/multinomial_test.py", "max_forks_repo_name": "srivatsan-ramesh/tensorflow", "max_forks_repo_head_hexsha": "36ebbf1ddc3ed820b7a5572ff4ed8e9bc707b8e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-02-27T00:34:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T16:38:08.000Z", "avg_line_length": 37.5283842795, "max_line_length": 80, "alphanum_fraction": 0.607865953, "include": true, "reason": "import numpy", "num_tokens": 2676}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .encoder import GaussianEncoderBase
def he_init(m):
s = np.sqrt(2. / m.in_features)
m.weight.data.normal_(0, s)
class MaskedConv2d(nn.Conv2d):
def __init__(self, include_center=False, *args, **kwargs):
super(MaskedConv2d, self).__init__(*args, **kwargs)
self.register_buffer('mask', self.weight.data.clone())
_, _, kH, kW = self.weight.size()
self.mask.fill_(1)
        self.mask[:, :, kH // 2, kW // 2 + int(include_center):] = 0
self.mask[:, :, kH // 2 + 1:] = 0
def forward(self, x):
        # the mask is a registered buffer, so it already follows the module's device
        self.weight.data *= self.mask
return super(MaskedConv2d, self).forward(x)
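# Quick sketch (added for illustration, not part of the original module):
# shows which taps a 3x3 MaskedConv2d keeps. With include_center=False the
# center pixel and everything after it in raster order is zeroed, so the
# convolution only sees "past" pixels, PixelCNN-style.
def _show_mask():
    conv = MaskedConv2d(False, 1, 1, kernel_size=3, padding=1)
    # expected pattern per (out, in) channel pair:
    # [[1., 1., 1.],
    #  [1., 0., 0.],
    #  [0., 0., 0.]]
    return conv.mask[0, 0]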
class ResidualBlock(nn.Module):
def __init__(self, in_dim, out_dim=None, with_residual=True, with_batchnorm=True, mask=None,
kernel_size=3, padding=1):
if out_dim is None:
out_dim = in_dim
super(ResidualBlock, self).__init__()
if mask is None:
self.conv1 = nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, padding=padding)
self.conv2 = nn.Conv2d(out_dim, out_dim, kernel_size=kernel_size, padding=padding)
else:
self.conv1 = MaskedConv2d(mask, in_dim, out_dim, kernel_size=kernel_size, padding=padding)
self.conv2 = MaskedConv2d(mask, out_dim, out_dim, kernel_size=kernel_size, padding=padding)
self.with_batchnorm = with_batchnorm
if with_batchnorm:
self.bn1 = nn.BatchNorm2d(out_dim)
self.bn2 = nn.BatchNorm2d(out_dim)
self.with_residual = with_residual
if in_dim == out_dim or not with_residual:
self.proj = None
else:
self.proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
def forward(self, x):
if self.with_batchnorm:
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
else:
out = self.conv2(F.relu(self.conv1(x)))
res = x if self.proj is None else self.proj(x)
if self.with_residual:
out = F.relu(res + out)
else:
out = F.relu(out)
return out
class ResNetEncoder(GaussianEncoderBase):
"""docstring for ResNetEncoder"""
def __init__(self, args):
super(ResNetEncoder, self).__init__(args)
enc_modules = []
img_h = args.img_size[1]
img_w = args.img_size[2]
for i in range(len(args.enc_layers)):
if i == 0:
input_dim = args.img_size[0]
else:
input_dim = args.enc_layers[i-1]
enc_modules.append(ResidualBlock(input_dim, args.enc_layers[i]))
enc_modules.append(nn.Conv2d(args.enc_layers[i], args.enc_layers[i], kernel_size=2, stride=2))
img_h //= 2
img_w //= 2
latent_in_dim = img_h*img_w*args.enc_layers[-1]
self.enc_cnn = nn.Sequential(*enc_modules)
self.latent_linear_mean = nn.Linear(latent_in_dim, args.nz)
self.latent_linear_logvar = nn.Linear(latent_in_dim, args.nz)
self.reset_parameters()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Linear):
he_init(m)
def forward(self, img):
img_code = self.enc_cnn(img)
img_code = img_code.view(img.size(0), -1)
self.img_code = img_code
mean = self.latent_linear_mean(img_code)
logvar = self.latent_linear_logvar(img_code)
return mean, logvar
|
{"hexsha": "2272d1c5c6c00f8a1a077276ac7a0faf94b7ecfa", "size": 3589, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules/encoders/enc_resnet.py", "max_stars_repo_name": "tom-pelsmaeker/vae-lagging-encoder", "max_stars_repo_head_hexsha": "b190239019a94c85858d188a0853886eb48ce4be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/encoders/enc_resnet.py", "max_issues_repo_name": "tom-pelsmaeker/vae-lagging-encoder", "max_issues_repo_head_hexsha": "b190239019a94c85858d188a0853886eb48ce4be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/encoders/enc_resnet.py", "max_forks_repo_name": "tom-pelsmaeker/vae-lagging-encoder", "max_forks_repo_head_hexsha": "b190239019a94c85858d188a0853886eb48ce4be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8446601942, "max_line_length": 106, "alphanum_fraction": 0.6090833101, "include": true, "reason": "import numpy", "num_tokens": 907}
|
/-
Copyright (c) 2020 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.algebra.big_operators.pi
import Mathlib.data.finsupp.default
import Mathlib.PostPort
universes u_1 u_2 u_4 u_3 u_5 u_6
namespace Mathlib
/-!
# Big operators for finsupps
This file contains theorems relevant to big operators in finitely supported functions.
-/
theorem finset.sum_apply' {α : Type u_1} {ι : Type u_2} {A : Type u_4} [add_comm_monoid A] {s : finset α} {f : α → ι →₀ A} (i : ι) : coe_fn (finset.sum s fun (k : α) => f k) i = finset.sum s fun (k : α) => coe_fn (f k) i :=
Eq.symm (finset.sum_hom s ⇑(finsupp.apply_add_hom i))
theorem finsupp.sum_apply' {ι : Type u_2} {γ : Type u_3} {A : Type u_4} {B : Type u_5} [add_comm_monoid A] [add_comm_monoid B] (g : ι →₀ A) (k : ι → A → γ → B) (x : γ) : finsupp.sum g k x = finsupp.sum g fun (i : ι) (b : A) => k i b x :=
finset.sum_apply x (finsupp.support g) fun (a : ι) => k a (coe_fn g a)
theorem finsupp.sum_sum_index' {α : Type u_1} {ι : Type u_2} {A : Type u_4} {C : Type u_6} [add_comm_monoid A] [add_comm_monoid C] {t : ι → A → C} (h0 : ∀ (i : ι), t i 0 = 0) (h1 : ∀ (i : ι) (x y : A), t i (x + y) = t i x + t i y) {s : finset α} {f : α → ι →₀ A} : finsupp.sum (finset.sum s fun (x : α) => f x) t = finset.sum s fun (x : α) => finsupp.sum (f x) t := sorry
|
{"author": "AurelienSaue", "repo": "Mathlib4_auto", "sha": "590df64109b08190abe22358fabc3eae000943f2", "save_path": "github-repos/lean/AurelienSaue-Mathlib4_auto", "path": "github-repos/lean/AurelienSaue-Mathlib4_auto/Mathlib4_auto-590df64109b08190abe22358fabc3eae000943f2/Mathlib/algebra/big_operators/finsupp.lean"}
|
import os
import warnings
import numpy as np
def nonzeros(m, row):
"""returns the non zeroes of a row in csr_matrix"""
for index in range(m.indptr[row], m.indptr[row + 1]):
yield m.indices[index], m.data[index]
_checked_blas_config = False
def check_blas_config():
"""checks to see if using OpenBlas/Intel MKL. If so, warn if the number of threads isn't set
to 1 (causes severe perf issues when training - can be 10x slower)"""
# don't warn repeatedly
global _checked_blas_config # pylint: disable=global-statement
if _checked_blas_config:
return
_checked_blas_config = True
if np.__config__.get_info("openblas_info") and os.environ.get("OPENBLAS_NUM_THREADS") != "1":
warnings.warn(
"OpenBLAS detected. Its highly recommend to set the environment variable "
"'export OPENBLAS_NUM_THREADS=1' to disable its internal multithreading"
)
if np.__config__.get_info("blas_mkl_info") and os.environ.get("MKL_NUM_THREADS") != "1":
warnings.warn(
"Intel MKL BLAS detected. Its highly recommend to set the environment "
"variable 'export MKL_NUM_THREADS=1' to disable its internal "
"multithreading"
)
def check_random_state(random_state):
"""Validate the random state.
Check a random seed or existing numpy RandomState
and get back an initialized RandomState.
Parameters
----------
random_state : int, None or RandomState
The existing RandomState. If None, or an int, will be used
to seed a new numpy RandomState.
"""
# if it's an existing random state, pass through
if isinstance(random_state, np.random.RandomState):
return random_state
# otherwise try to initialize a new one, and let it fail through
# on the numpy side if it doesn't work
return np.random.RandomState(random_state)
def augment_inner_product_matrix(factors):
"""This function transforms a factor matrix such that an angular nearest neighbours search
will return top related items of the inner product.
This involves transforming each row by adding one extra dimension as suggested in the paper:
"Speeding Up the Xbox Recommender System Using a Euclidean Transformation for Inner-Product
Spaces" https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/XboxInnerProduct.pdf
Basically this involves transforming each feature vector so that they have the same norm, which
means the cosine of this transformed vector is proportional to the dot product (if the other
vector in the cosine has a 0 in the extra dimension)."""
norms = np.linalg.norm(factors, axis=1)
max_norm = norms.max()
# add an extra dimension so that the norm of each row is the same
# (max_norm)
extra_dimension = np.sqrt(max_norm ** 2 - norms ** 2)
return max_norm, np.append(factors, extra_dimension.reshape(norms.shape[0], 1), axis=1)
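# Worked sketch (added for illustration): after the augmentation, the dot
# product between a zero-padded query and the transformed rows equals the
# original inner product, so angular/cosine ranking matches inner-product
# ranking. The factor values below are arbitrary.
def _example_augment():
    factors = np.array([[1.0, 2.0], [3.0, 0.5]])
    max_norm, augmented = augment_inner_product_matrix(factors)
    query = np.append(np.array([0.2, -0.1]), 0)  # pad query with a 0
    # identical up to floating point error:
    return augmented.dot(query), factors.dot(np.array([0.2, -0.1]))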
def _batch_call(func, ids, *args, N=10, **kwargs):
# we're running in batch mode, just loop over each item and call the scalar version of the
# function
output_ids = np.zeros((len(ids), N), dtype=np.int32)
output_scores = np.zeros((len(ids), N), dtype=np.float32)
for i, idx in enumerate(ids):
batch_ids, batch_scores = func(idx, *args, N=N, **kwargs)
# pad out to N items if we're returned fewer
missing_items = N - len(batch_ids)
if missing_items > 0:
batch_ids = np.append(batch_ids, np.full(missing_items, -1))
batch_scores = np.append(
batch_scores, np.full(missing_items, -np.finfo(np.float32).max)
)
output_ids[i] = batch_ids[:N]
output_scores[i] = batch_scores[:N]
return output_ids, output_scores
def _filter_items_from_results(queryid, ids, scores, filter_items, N):
if np.isscalar(queryid):
mask = np.in1d(ids, filter_items, invert=True)
ids, scores = ids[mask][:N], scores[mask][:N]
else:
rows = len(queryid)
filtered_scores = np.zeros((rows, N), dtype=scores.dtype)
filtered_ids = np.zeros((rows, N), dtype=ids.dtype)
for row in range(rows):
mask = np.in1d(ids[row], filter_items, invert=True)
filtered_ids[row] = ids[row][mask][:N]
filtered_scores[row] = scores[row][mask][:N]
ids, scores = filtered_ids, filtered_scores
return ids, scores
|
{"hexsha": "d19ce9747e1994d85754dde995b30b45c658d1af", "size": 4459, "ext": "py", "lang": "Python", "max_stars_repo_path": "implicit/utils.py", "max_stars_repo_name": "bingxu01/implicit", "max_stars_repo_head_hexsha": "91215f1c278dd8ed455dd0dcff707af86f59371f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "implicit/utils.py", "max_issues_repo_name": "bingxu01/implicit", "max_issues_repo_head_hexsha": "91215f1c278dd8ed455dd0dcff707af86f59371f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "implicit/utils.py", "max_forks_repo_name": "bingxu01/implicit", "max_forks_repo_head_hexsha": "91215f1c278dd8ed455dd0dcff707af86f59371f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7739130435, "max_line_length": 100, "alphanum_fraction": 0.6763848397, "include": true, "reason": "import numpy", "num_tokens": 1067}
|
import os
import sys
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
import numpy as np
import torch
from pytorch_resnet import ResNet43
from e2cnn import gspaces
import torch.nn.functional as F
import e2cnn.nn as enn
import kornia as K
import torchvision
from matplotlib import pyplot as plt
class Transport:
    '''Equivariant Transport module.'''
    # TODO by Haojie: try ResNet_ns + ResNet_ns
    # or ResNet + ResNet_ns
    def __init__(self, in_shape, n_rotations, crop_size, preprocess, device):
# TODO BY HAOJIE: add lite model
self.device = device
self.preprocess = preprocess
self.n_rotations = n_rotations
self.iters = 0
self.crop_size_2 = crop_size # crop size must be N*16 (e.g. 96)
self.crop_size_1 = 96
        # Padding the image to get same-size output after the cross-correlation
self.pad_size_2 = int(self.crop_size_2 / 2)
self.padding_2 = np.zeros((3, 2), dtype=int)
self.padding_2[:2, :] = self.pad_size_2
# Padding the image to get 96*96 crop centered at pick location
self.pad_size_1 = int(self.crop_size_1 / 2)
self.padding_1 = np.zeros((3, 2), dtype=int)
self.padding_1[:2, :] = self.pad_size_1
in_shape = np.array(in_shape)
in_shape[0:2] += self.pad_size_2 * 2
in_shape = tuple(in_shape)
if not hasattr(self, 'output_dim'):
self.output_dim = 3
if not hasattr(self, 'kernel_dim'):
self.kernel_dim = 3
self.in_type = in_shape[-1]
self.model_map = ResNet43(self.in_type, outdim=self.output_dim).to(self.device)
self.model_kernel = ResNet43(self.in_type, outdim=self.kernel_dim).to(self.device)
self.parameter = list(self.model_map.parameters()) + list(self.model_kernel.parameters())
self.optim = torch.optim.Adam(self.parameter, lr=1e-5)
def forward(self,in_img,p,softmax=True,train=True):
# The entire image
img_unprocessed = np.pad(in_img, self.padding_2, mode='constant')
input_data = self.preprocess(img_unprocessed.copy())
in_shape = (1,) + input_data.shape
input_data = input_data.reshape(in_shape).transpose(0, 3, 1, 2)
input_tensor = torch.from_numpy(input_data).to(self.device)
#print('input map',input_tensor.shape)
# The crop
#print('before padding',in_img.shape)
crop = np.pad(in_img, self.padding_1, mode='constant')
crop = self.preprocess(crop)
in_shape = (1,) + crop.shape
crop = crop.reshape(in_shape).transpose(0, 3, 1, 2)
crop = torch.from_numpy(crop).to(self.device)
crop = crop.repeat(self.n_rotations,1,1,1)
#print('before rotate',crop.shape)
#self.imshow(crop,size=(36,36))
pivot = np.array([p[1], p[0]]) + self.pad_size_1
pivot = torch.from_numpy(pivot).to(self.device).repeat(self.n_rotations,1).to(torch.float32)
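        # Rotate the stack of repeated crops by n_rotations evenly spaced angles
        # (in degrees, per kornia's convention), all pivoting about the pick location.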
crop = K.geometry.rotate(crop,torch.from_numpy(np.linspace(0., 360., self.n_rotations,
endpoint=False,dtype=np.float32)).to(self.device),
mode='nearest',center=pivot)
#print('after rotate', crop.shape)
#self.imshow(crop, size=(36, 36))
crop_input = crop[:,:,p[0]:(p[0] + self.crop_size_1),p[1]:(p[1] + self.crop_size_1)]
#print('after crop', crop_input.shape)
#self.imshow(crop_input, size=(36, 36))
#self.imshow(crop_input)
#print('after crop',crop.shape)
# pass the entire image and crop to the network
if not train:
self.model_map.eval()
self.model_kernel.eval()
with torch.no_grad():
logits = self.model_map(input_tensor)
kernel_raw = self.model_kernel(crop_input)
else:
logits = self.model_map(input_tensor)
kernel_raw = self.model_kernel(crop_input)
#print('after model',kernel_raw.shape)
pivot = int(self.crop_size_1 / 2)
assert pivot == int(kernel_raw.shape[-1] / 2)
# print('pivot',pivot)
half_length = self.pad_size_2
l, r = pivot - half_length, pivot + half_length+1
b, u = pivot - half_length, pivot + half_length+1
kernel_raw = kernel_raw[:,:,l:r,b:u]
#print('crop')
#np.save('crop.npy',kernel_raw.cpu().detach().numpy())
#print('after model kernel', kernel_raw.shape)
#self.imshow(kernel_raw, size=(36, 36))
#p2d = (0, 1, 0, 1)
#kernel_raw = F.pad(kernel_raw,p2d)
#print('after pad',kernel_raw.size())
#self.imshow(kernel_raw, size=(36, 36))
# correlation step
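        # conv2d treats kernel_raw (n_rotations, C, k, k) as a bank of filters and
        # cross-correlates each rotated crop with the scene features logits (1, C, H, W),
        # giving one placement heatmap per rotation; assuming the backbone preserves
        # spatial resolution, the padding above makes each heatmap match the original
        # image size.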
output = F.conv2d(input=logits,weight=kernel_raw)
#print('output shape',output.shape)
if softmax:
output_shape = output.shape
output = output.reshape(-1)
output = F.softmax(output,dim=-1)
output = output.reshape(output_shape[1:]).detach().cpu().numpy()
output = output.transpose(1,2,0)
return output
def train(self, in_img, p, q, theta, backprop=True):
"""Transport pixel p to pixel q.
Args:
in_img: input image.
p: pixel (y, x)
q: pixel (y, x)
theta: rotation label in radians.
backprop: True if backpropagating gradients.
Returns:
loss: training loss.
"""
self.model_map.train()
self.model_kernel.train()
output = self.forward(in_img,p,softmax=False)
output = output.reshape(1,-1)
# Get one-hot pixel label map.
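        # Quantize the continuous rotation label to the nearest of the n_rotations
        # discrete bins, each spanning 2*pi / n_rotations radians.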
itheta = theta / (2 * np.pi / self.n_rotations)
itheta = np.int32(np.round(itheta)) % self.n_rotations
label_size = (self.n_rotations,) + in_img.shape[:2]
label = torch.zeros(label_size, dtype=torch.long,device=self.device)
        label[itheta, q[0], q[1]] = 1
label = label.reshape(-1)
label = torch.argmax(label).unsqueeze(dim=0)
# Get loss
loss = F.cross_entropy(input=output, target=label)
if backprop:
self.optim.zero_grad()
loss.backward()
self.optim.step()
self.iters +=1
return np.float32(loss.item())
def load(self,path1,path2):
# safe operation for e2cnn
self.model_map.eval()
self.model_kernel.eval()
self.model_map.load_state_dict(torch.load(path1,map_location=self.device))
self.model_kernel.load_state_dict(torch.load(path2,map_location=self.device))
def save(self,filename1,filename2):
# safe operation for e2cnn
self.model_map.eval()
self.model_kernel.eval()
torch.save(self.model_map.state_dict(), filename1)
torch.save(self.model_kernel.state_dict(), filename2)
    def imshow(self, input: torch.Tensor, size: tuple = None, center: bool = False):
input_ = input[:,0:3,:,:]
if center:
center_x = int(input_.shape[-2]/2)
center_y = int(input_.shape[-1]/2)
#input_[:,:,center_x,center_y]=[0,1,0]
out = torchvision.utils.make_grid(input_, nrow=6, padding=5)
out_np: np.ndarray = K.utils.tensor_to_image(out)
plt.figure(figsize=size)
plt.imshow(out_np)
plt.axis('off')
plt.show()
|
{"hexsha": "bb91a2e3bd0f64e39d109dbb944c7375b0384b4c", "size": 7448, "ext": "py", "lang": "Python", "max_stars_repo_path": "networks/non_equi_transport.py", "max_stars_repo_name": "HaojHuang/Equivariant-Transporter-Net", "max_stars_repo_head_hexsha": "f3bd4bb0d669b54be9385a3246355a6f68a6bfea", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-05T20:44:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T02:19:05.000Z", "max_issues_repo_path": "networks/non_equi_transport.py", "max_issues_repo_name": "HaojHuang/Equivariant-Transporter-Net", "max_issues_repo_head_hexsha": "f3bd4bb0d669b54be9385a3246355a6f68a6bfea", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-25T05:04:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T05:04:15.000Z", "max_forks_repo_path": "networks/non_equi_transport.py", "max_forks_repo_name": "HaojHuang/Equivariant-Transporter-Net", "max_forks_repo_head_hexsha": "f3bd4bb0d669b54be9385a3246355a6f68a6bfea", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3917525773, "max_line_length": 118, "alphanum_fraction": 0.6037862513, "include": true, "reason": "import numpy", "num_tokens": 1870}
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pathlib
import os.path
import random
import sys
import numpy as np
from sten import Sten
from tqdm.auto import tqdm, trange
# -
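# Encode five set1/set2 image pairs and decode them back, saving the stego and
# recovered data as .npy arrays (the argument to Sten presumably selects the bit
# plane used for embedding).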
def run():
for x in trange(5):
set1File = x+11
set2File = x+6
name = str(set1File) + '_' + str(set2File)
encImg = st.encode("./data/set1/{0}.jpg".format(set1File), "./data/set2/{0}.jpg".format(set2File), "./encodedArray/bit_7/{0}.npy".format(name))
decImg = st.decode("./encodedArray/bit_7/{0}.npy".format(name), "./decodedArray/bit_7/{0}.npy".format(name))
pathlib.Path("./encodedArray/bit_7").mkdir(parents=True, exist_ok=True)
pathlib.Path("./decodedArray/bit_7").mkdir(parents=True, exist_ok=True)
st = Sten(7)
run()
|
{"hexsha": "0269168038f06a32fce1e7ced409f022c7318c2f", "size": 976, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/preprocess_mini.py", "max_stars_repo_name": "CSCI4850/S20-team6-project", "max_stars_repo_head_hexsha": "b7968357518ec21a676460594d3912575c5d0606", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demo/preprocess_mini.py", "max_issues_repo_name": "CSCI4850/S20-team6-project", "max_issues_repo_head_hexsha": "b7968357518ec21a676460594d3912575c5d0606", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/preprocess_mini.py", "max_forks_repo_name": "CSCI4850/S20-team6-project", "max_forks_repo_head_hexsha": "b7968357518ec21a676460594d3912575c5d0606", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-08T23:13:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-09T14:44:46.000Z", "avg_line_length": 23.2380952381, "max_line_length": 151, "alphanum_fraction": 0.6342213115, "include": true, "reason": "import numpy", "num_tokens": 302}
|
from recurrentshop import *
from keras.layers import *
from keras.models import *
import numpy as np
import time
import sys
# Script for comparing performance of native keras and recurrentshop stacked RNN implementations
# We observe 20-30% speed ups on GPU
sys.setrecursionlimit(10000000)
# Params
rnn, rnn_cell = LSTM, LSTMCell
depth = 3
input_length = 1000
dim = 10
nb_epoch = 5
unroll = K.backend() == 'tensorflow'
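# Unrolling replaces the symbolic loop with input_length copies of the step graph,
# which was generally faster on the TensorFlow backend at the time; presumably this
# is why it is enabled only when the backend is tensorflow.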
# Random data
x = np.random.random((10, input_length, dim))
y = np.random.random((10, dim))
# Native keras model
model = Sequential()
for i in range(depth):
model.add(rnn(dim, return_sequences=i != depth-1, input_shape=(input_length, dim), unroll=unroll, consume_less='gpu')) # We set consume_less = 'gpu' so that both models use the same LSTM implementation.
model.compile(loss='mse', optimizer='sgd')
print('Compiling...')
model.train_on_batch(x[:1], y[:1]) # force compile
start_time = time.time()
model.fit(x, y, nb_epoch=nb_epoch)
end_time = time.time()
keras_time_taken = end_time - start_time
# recurrentshop model
rc = RecurrentContainer(input_length=input_length, unroll=unroll)
for _ in range(depth):
rc.add(rnn_cell(dim, input_dim=dim))
model = Sequential()
model.add(rc)
model.compile(loss='mse', optimizer='sgd')
print('Compiling...')
model.train_on_batch(x[:1], y[:1]) # force compile
start_time = time.time()
model.fit(x, y, nb_epoch=nb_epoch)
end_time = time.time()
recurrentshop_time_taken = end_time - start_time
speed_up = keras_time_taken / recurrentshop_time_taken
print('Time taken by native keras model: ' + str(int(keras_time_taken)) + ' seconds.')
print('Time taken by recurrentshop model: ' + str(int(recurrentshop_time_taken)) + ' seconds.')
print('Speed up: ' + str(speed_up) + 'X')
|
{"hexsha": "cef90ab877e59d13b3086387fb568000b6835246", "size": 1755, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/speed_test.py", "max_stars_repo_name": "arpitgogia/recurrentshop", "max_stars_repo_head_hexsha": "5cc58af3020c19ec9597944fdd4d33dfb29e9bec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2016-09-22T00:00:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T21:39:19.000Z", "max_issues_repo_path": "examples/speed_test.py", "max_issues_repo_name": "arpitgogia/recurrentshop", "max_issues_repo_head_hexsha": "5cc58af3020c19ec9597944fdd4d33dfb29e9bec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 66, "max_issues_repo_issues_event_min_datetime": "2016-09-22T13:54:44.000Z", "max_issues_repo_issues_event_max_datetime": "2017-06-14T06:30:16.000Z", "max_forks_repo_path": "examples/speed_test.py", "max_forks_repo_name": "arpitgogia/recurrentshop", "max_forks_repo_head_hexsha": "5cc58af3020c19ec9597944fdd4d33dfb29e9bec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 150, "max_forks_repo_forks_event_min_datetime": "2017-07-12T22:32:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T13:57:17.000Z", "avg_line_length": 24.375, "max_line_length": 204, "alphanum_fraction": 0.7356125356, "include": true, "reason": "import numpy", "num_tokens": 462}
|
import unittest
import itertools
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numpy as np
import openmdao.api as om
from openmdao.utils.mpi import MPI
try:
from parameterized import parameterized
except ImportError:
from openmdao.utils.assert_utils import SkipParameterized as parameterized
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.logger_utils import TestLogger
from openmdao.error_checking.check_config import _default_checks
from openmdao.core.tests.test_distrib_derivs import DistribExecComp
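# Builds a readable test-case name from the parameterized arguments, e.g.
# test_dup_dup expands to test_dup_dup_fwd and test_dup_dup_rev.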
def _test_func_name(func, num, param):
args = []
for p in param.args:
if isinstance(p, str):
p = {p}
elif not isinstance(p, Iterable):
p = {p}
for item in p:
try:
arg = item.__name__
            except AttributeError:
arg = str(item)
args.append(arg)
return func.__name__ + '_' + '_'.join(args)
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class TestParallelGroups(unittest.TestCase):
N_PROCS = 2
@parameterized.expand(itertools.product(['fwd', 'rev']),
name_func=_test_func_name)
def test_dup_dup(self, mode):
# duplicated vars on both ends
prob = om.Problem()
model = prob.model
model.add_subsystem('indep', om.IndepVarComp('x', 1.0))
model.add_subsystem('C1', om.ExecComp('y = 2.5 * x'))
model.connect('indep.x', 'C1.x')
of=['C1.y']
wrt=['indep.x']
prob.model.linear_solver = om.LinearRunOnce()
prob.setup(check=False, mode=mode)
prob.set_solver_print(level=0)
prob.run_model()
J = prob.compute_totals(of=of, wrt=wrt)
print(model.comm.rank, "val:", J['C1.y', 'indep.x'][0][0])
assert_near_equal(J['C1.y', 'indep.x'][0][0], 2.5, 1e-6)
assert_near_equal(prob['C1.y'], 2.5, 1e-6)
@parameterized.expand(itertools.product(['fwd', 'rev']),
name_func=_test_func_name)
def test_dup_par(self, mode):
# duplicated output, parallel input
prob = om.Problem()
model = prob.model
model.add_subsystem('indep', om.IndepVarComp('x', 1.0))
par = model.add_subsystem('par', om.ParallelGroup())
par.add_subsystem('C1', om.ExecComp('y = 2.5 * x'))
par.add_subsystem('C2', om.ExecComp('y = 7 * x'))
model.connect('indep.x', 'par.C1.x')
model.connect('indep.x', 'par.C2.x')
of=['par.C1.y', 'par.C2.y']
wrt=['indep.x']
prob.model.linear_solver = om.LinearRunOnce()
prob.setup(check=False, mode=mode)
prob.set_solver_print(level=0)
prob.run_model()
assert_near_equal(prob.get_val('par.C1.y', get_remote=True), 2.5, 1e-6)
assert_near_equal(prob.get_val('par.C2.y', get_remote=True), 7., 1e-6)
J = prob.compute_totals(of=of, wrt=wrt)
assert_near_equal(J['par.C1.y', 'indep.x'][0][0], 2.5, 1e-6)
assert_near_equal(prob.get_val('par.C1.y', get_remote=True), 2.5, 1e-6)
assert_near_equal(J['par.C2.y', 'indep.x'][0][0], 7., 1e-6)
assert_near_equal(prob.get_val('par.C2.y', get_remote=True), 7., 1e-6)
@parameterized.expand(itertools.product(['fwd', 'rev']),
name_func=_test_func_name)
def test_dup_dup_and_par(self, mode):
# duplicated and parallel outputs, dup input
prob = om.Problem()
model = prob.model
model.add_subsystem('indep', om.IndepVarComp('x', 1.0))
model.add_subsystem('dup', om.ExecComp('y = 1.5 * x'))
par = model.add_subsystem('par', om.ParallelGroup())
par.add_subsystem('C1', om.ExecComp('y = 2.5 * x'))
par.add_subsystem('C2', om.ExecComp('y = 7 * x'))
model.connect('indep.x', 'par.C1.x')
model.connect('indep.x', 'par.C2.x')
model.connect('indep.x', 'dup.x')
of=['par.C1.y', 'par.C2.y', 'dup.y']
wrt=['indep.x']
prob.model.linear_solver = om.LinearRunOnce()
prob.setup(check=False, mode=mode)
prob.set_solver_print(level=0)
prob.run_model()
assert_near_equal(prob.get_val('par.C1.y', get_remote=True), 2.5, 1e-6)
assert_near_equal(prob.get_val('par.C2.y', get_remote=True), 7., 1e-6)
assert_near_equal(prob.get_val('dup.y', get_remote=True), 1.5, 1e-6)
J = prob.compute_totals(of=of, wrt=wrt)
assert_near_equal(J['par.C1.y', 'indep.x'][0][0], 2.5, 1e-6)
assert_near_equal(prob.get_val('par.C1.y', get_remote=True), 2.5, 1e-6)
assert_near_equal(J['par.C2.y', 'indep.x'][0][0], 7., 1e-6)
assert_near_equal(prob.get_val('par.C2.y', get_remote=True), 7., 1e-6)
assert_near_equal(J['dup.y', 'indep.x'][0][0], 1.5, 1e-6)
assert_near_equal(prob.get_val('dup.y', get_remote=True), 1.5, 1e-6)
def test_dup_par_par_derivs(self):
# duplicated output, parallel input
prob = om.Problem()
model = prob.model
model.add_subsystem('indep', om.IndepVarComp('x', 1.0))
par = model.add_subsystem('par', om.ParallelGroup())
par.add_subsystem('C1', om.ExecComp('y = 2.5 * x'))
par.add_subsystem('C2', om.ExecComp('y = 7 * x'))
model.connect('indep.x', 'par.C1.x')
model.connect('indep.x', 'par.C2.x')
model.add_design_var('indep.x')
model.add_constraint('par.C1.y', upper=0.0, parallel_deriv_color='parc')
model.add_constraint('par.C2.y', upper=0.0, parallel_deriv_color='parc')
# of=['par.C1.y', 'par.C2.y']
# wrt=['indep.x']
prob.model.linear_solver = om.LinearBlockGS()
#import wingdbstub
prob.setup(check=False, mode='rev')
prob.set_solver_print(level=0)
prob.run_model()
assert_near_equal(prob.get_val('par.C1.y', get_remote=True), 2.5, 1e-6)
assert_near_equal(prob.get_val('par.C2.y', get_remote=True), 7., 1e-6)
J = prob.driver._compute_totals()
assert_near_equal(J['par.C1.y', 'indep.x'][0][0], 2.5, 1e-6)
assert_near_equal(prob.get_val('par.C1.y', get_remote=True), 2.5, 1e-6)
assert_near_equal(J['par.C2.y', 'indep.x'][0][0], 7., 1e-6)
assert_near_equal(prob.get_val('par.C2.y', get_remote=True), 7., 1e-6)
@parameterized.expand(itertools.product(['fwd', 'rev']),
name_func=_test_func_name)
def test_dup_dist(self, mode):
        # duplicated output, distributed input
prob = om.Problem()
model = prob.model
size = 3
sizes = [2, 1]
rank = prob.comm.rank
model.add_subsystem('indep', om.IndepVarComp('x', np.ones(size)))
model.add_subsystem('C1', DistribExecComp(['y=2.5*x', 'y=3.5*x'], arr_size=size))
model.connect('indep.x', 'C1.x')
of=['C1.y']
wrt=['indep.x']
prob.model.linear_solver = om.LinearRunOnce()
prob.setup(check=False, mode=mode)
prob.set_solver_print(level=0)
prob.run_model()
assert_near_equal(prob.get_val('C1.y', get_remote=True),
np.array([2.5,2.5,3.5], dtype=float), 1e-6)
J = prob.compute_totals(of=of, wrt=wrt)
expected = np.array([[2.5, 0, 0], [0, 2.5, 0], [0,0,3.5]], dtype=float)
assert_near_equal(J['C1.y', 'indep.x'], expected, 1e-6)
assert_near_equal(prob.get_val('C1.y', get_remote=True),
np.array([2.5,2.5,3.5], dtype=float), 1e-6)
@parameterized.expand(itertools.product(['fwd', 'rev']),
name_func=_test_func_name)
def test_par_dup(self, mode):
        # parallel outputs, duplicated input
prob = om.Problem()
model = prob.model
par = model.add_subsystem('par', om.ParallelGroup())
par.add_subsystem('indep1', om.IndepVarComp('x', 1.0))
par.add_subsystem('indep2', om.IndepVarComp('x', 1.0))
model.add_subsystem('C1', om.ExecComp('y = 2.5 * x1 + 3.5 * x2'))
model.connect('par.indep1.x', 'C1.x1')
model.connect('par.indep2.x', 'C1.x2')
of=['C1.y']
wrt=['par.indep1.x', 'par.indep2.x']
prob.model.linear_solver = om.LinearRunOnce()
# import wingdbstub
prob.setup(check=False, mode=mode)
prob.set_solver_print(level=0)
prob.run_model()
assert_near_equal(prob['C1.y'], 6., 1e-6)
J = prob.compute_totals(of=of, wrt=wrt)
assert_near_equal(J['C1.y', 'par.indep1.x'][0][0], 2.5, 1e-6)
assert_near_equal(J['C1.y', 'par.indep2.x'][0][0], 3.5, 1e-6)
assert_near_equal(prob['C1.y'], 6., 1e-6)
@parameterized.expand(itertools.product(['fwd', 'rev']),
name_func=_test_func_name)
def test_dist_dup(self, mode):
        # distributed output, duplicated input
prob = om.Problem()
model = prob.model
size = 3
rank = prob.comm.rank
model.add_subsystem('indep', om.IndepVarComp('x', np.ones(size)))
model.add_subsystem('C1', DistribExecComp(['y=2.5*x', 'y=3.5*x'], arr_size=size))
model.add_subsystem('sink', om.ExecComp('y=-1.5 * x', x=np.zeros(size), y=np.zeros(size)))
model.connect('indep.x', 'C1.x')
model.connect('C1.y', 'sink.x')
of=['sink.y']
wrt=['indep.x']
prob.model.linear_solver = om.LinearRunOnce()
#import wingdbstub
prob.setup(check=False, mode=mode)
prob.set_solver_print(level=0)
prob.run_model()
assert_near_equal(prob.get_val('sink.y', get_remote=True),
np.array([-3.75,-3.75,-5.25], dtype=float), 1e-6)
J = prob.compute_totals(of=of, wrt=wrt)
expected = np.array([[-3.75, 0, 0], [0, -3.75, 0], [0,0,-5.25]], dtype=float)
assert_near_equal(J['sink.y', 'indep.x'], expected, 1e-6)
assert_near_equal(prob.get_val('sink.y', get_remote=True),
np.array([-3.75,-3.75,-5.25], dtype=float), 1e-6)
@parameterized.expand(itertools.product(['fwd', 'rev']),
name_func=_test_func_name)
def test_par_dist(self, mode):
        # parallel outputs, distributed input
prob = om.Problem()
model = prob.model
size = 3
sizes = [2, 1]
rank = prob.comm.rank
model.add_subsystem('indep', om.IndepVarComp('x', np.ones(size)))
par = model.add_subsystem('par', om.ParallelGroup())
par.add_subsystem('C1', om.ExecComp('y = 3 * x', x=np.zeros(size), y=np.zeros(size)))
par.add_subsystem('C2', om.ExecComp('y = 5 * x', x=np.zeros(size), y=np.zeros(size)))
model.add_subsystem('C3', DistribExecComp(['y=1.5*x1+2.5*x2', 'y=2.5*x1-.5*x2'], arr_size=size))
model.connect('indep.x', 'par.C1.x')
model.connect('indep.x', 'par.C2.x')
model.connect('par.C1.y', 'C3.x1')
model.connect('par.C2.y', 'C3.x2')
of=['C3.y']
wrt=['indep.x']
prob.model.linear_solver = om.LinearRunOnce()
prob.setup(check=False, mode=mode)
prob.set_solver_print(level=0)
prob.run_model()
assert_near_equal(prob.get_val('C3.y', get_remote=True),
np.array([17,17,5], dtype=float), 1e-6)
J = prob.compute_totals(of=of, wrt=wrt)
expected = np.array([[17, 0, 0], [0, 17, 0], [0,0,5]], dtype=float)
assert_near_equal(J['C3.y', 'indep.x'], expected, 1e-6)
assert_near_equal(prob.get_val('C3.y', get_remote=True),
np.array([17,17,5], dtype=float), 1e-6)
@parameterized.expand(itertools.product(['fwd', 'rev']),
name_func=_test_func_name)
def test_crossover(self, mode):
# multiple crossovers in fwd and rev
prob = om.Problem()
model = prob.model
model.add_subsystem('ivc', om.IndepVarComp('x'))
par1 = model.add_subsystem('par1', om.ParallelGroup())
par1.add_subsystem('C1', om.ExecComp('y = 1.5 * x'))
par1.add_subsystem('C2', om.ExecComp('y = 2.5 * x'))
model.add_subsystem('C3', om.ExecComp('y = 3.5 * x1 - .5 * x2'))
par2 = model.add_subsystem('par2', om.ParallelGroup())
par2.add_subsystem('C4', om.ExecComp('y = 4.5 * x'))
par2.add_subsystem('C5', om.ExecComp('y = 5.5 * x'))
model.add_subsystem('C6', om.ExecComp('y = 6.5 * x1 + 1.1 * x2'))
model.connect('ivc.x', 'par1.C1.x')
model.connect('ivc.x', 'par1.C2.x')
model.connect('par1.C1.y', 'C3.x1')
model.connect('par1.C2.y', 'C3.x2')
model.connect('C3.y', 'par2.C4.x')
model.connect('C3.y', 'par2.C5.x')
model.connect('par2.C4.y', 'C6.x1')
model.connect('par2.C5.y', 'C6.x2')
of = ['C6.y']
wrt = ['ivc.x']
#import wingdbstub
prob.setup(check=False, mode=mode)
prob.set_solver_print(level=0)
prob.run_model()
np.testing.assert_allclose(prob.get_val('C6.y', get_remote=True),
141.2)
J = prob.compute_totals(of=of, wrt=wrt)
print(J)
np.testing.assert_allclose(J['C6.y', 'ivc.x'][0][0], 141.2)
np.testing.assert_allclose(prob.get_val('C6.y', get_remote=True),
141.2)
|
{"hexsha": "40086bb957c508246da13e83db39904bcf258178", "size": 13619, "ext": "py", "lang": "Python", "max_stars_repo_path": "openmdao/core/tests/test_deriv_transfers.py", "max_stars_repo_name": "gjkennedy/OpenMDAO", "max_stars_repo_head_hexsha": "06897b584403cce34bc106dd2840aa07eea69e96", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openmdao/core/tests/test_deriv_transfers.py", "max_issues_repo_name": "gjkennedy/OpenMDAO", "max_issues_repo_head_hexsha": "06897b584403cce34bc106dd2840aa07eea69e96", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openmdao/core/tests/test_deriv_transfers.py", "max_forks_repo_name": "gjkennedy/OpenMDAO", "max_forks_repo_head_hexsha": "06897b584403cce34bc106dd2840aa07eea69e96", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5587467363, "max_line_length": 104, "alphanum_fraction": 0.5824950437, "include": true, "reason": "import numpy", "num_tokens": 4013}
|
! This program calculates the elements of the positronium-hydrogen Hamiltonian and
! overlap matrices (similar to equation 2.15 of the Armour and Humberston article).
! Specifically, we are calculating elements of the form (phi_i, L phi_j) as in equation (3.22).
! This does use OpenMP to speed up computation on multicore processors, so some familiarity
! with parallel programming is required to understand the !$omp lines.
! A future speed-up could be done with Open MPI (as is done for the energy code), but this
! computation is usually done in a few hours.
! TODO: Make MaxIter and the 4 come from the configuration file.
module WLimits
implicit none
integer lmin, lmax
integer mmin, mmax
integer nmin, nmax
end module WLimits
program PsHMain
use WLimits
implicit none
interface
integer(c_int) function optimizewavefn(MaxIter, Omega, NumTerms, M12max, M23max, M31max, pmax, IsTriplet, Ordering, Method, EigenRoutine, alpha, beta, gamma) bind(c, name='optimizewavefn')
use iso_c_binding
integer(c_int), value :: MaxIter, Omega, NumTerms, M12max, M23max, M31max, pmax, IsTriplet, Ordering, Method, EigenRoutine
real(c_double), value :: alpha, beta, gamma
end function optimizewavefn
end interface
    real*8 omp_get_wtime ! omp_get_wtime returns a double precision value
integer Omega ! This sets the limits on the terms.
integer NumTerms ! Number of terms in our Hylleraas expansion
integer iread, iwrite ! Our input and output files
integer CalcPowerTableSize
real*16, allocatable, dimension(:,:) :: PhiPhi, Phi2HPhi
real*8, allocatable, dimension(:,:) :: PhiPhi8, Phi2HPhi8
real*8, allocatable, dimension(:) :: Energies, Workspace
real*16 Alpha, Beta, Gamma, Prev
integer, allocatable, dimension(:) :: UsedTerms
integer NumUsed, i, j, k, n
real*8 StartTime, EndTime
integer iargc, IsTriplet, Method, LowerEigen, UpperEigen, Info, AllEnergies, EigenRoutine
character *100 IOBuffer
integer Optimize, EigenNum, Ordering, Iter, MaxIter, MaxIterOuter, M12max, M23max, M31max, pmax, StartEnergy, LValue
real*16 Tol, Err, h, EDeriv, fa, fh, y, Divisor, ederivwrapper
real*16, dimension(3) :: Del, Delta, f1, f0, x1, x0, x, C31, C13
real*16, dimension(3,3) :: B, BTemp, C33, Jacob
real*16 ThreeJSymbol, HylleraasIntegralGeneral
! real*16, allocatable, dimension(:,:,:,:) :: WMatrix
! call GenFactorial(400)
! lmin = 1; lmax = 1
! mmin = 1; mmax = 1
! nmin = 1; nmax = 1
! allocate(WMatrix(lmin:lmax, mmin:mmax, nmin:nmax, 6))
! write (*,*) HylleraasIntegralGeneral(.true., .false., 2, 2, 0, -1, 1, 1, 1.0q0, 1.2q0, 2.2q0, &
! 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 10, 150, 5, 51, 0, 0, 0, WMatrix, 1)
! deallocate(WMatrix)
iread = 9
iwrite = 10
! This allows the possibility of using different input and output files than the defaults.
! This mainly lets me run this program on multiple computers at the same time without
! one overwriting the results from another. If the program is called without any
! command-line arguments, it uses the default filenames.
if (iargc() == 2) then
call getarg(1, IOBuffer)
open(iread, FILE=IOBuffer)
call getarg(2, IOBuffer)
open(iwrite, FILE=IOBuffer)
else
open(iread, FILE='input.txt')
open(iwrite, FILE='output.txt')
endif
! Read the input parameters.
call ReadParamFile(iread, Omega, LValue, Alpha, Beta, Gamma, M12max, M23max, M31max, pmax, Method, IsTriplet, LowerEigen, UpperEigen, &
AllEnergies, Optimize, EigenNum, MaxIter, Ordering, EigenRoutine)
if (IsTriplet /= 0 .and. IsTriplet /= 1) then
write (*,*) 'Must choose between singlet and triplet calculation - exiting.'
stop
endif
if (Optimize == 1) write (iwrite,*) 'Optimizing eigenvalue', EigenNum
Tol = 1e-3
Err = 1 ! Just has to be larger than tol.
h = 1e-5
! Get the start time to determine the duration of the program.
!call cpu_time(StartTime)
StartTime = omp_get_wtime()
NumTerms = CalcPowerTableSize(Omega) * 2 ! First and second symmetries
call WriteHeader(iwrite, LValue, IsTriplet, Ordering, Method, EigenRoutine, Omega, NumTerms, Alpha, Beta, Gamma, M12max, M23max, M31max, pmax)
write (*,*) 'NumTerms:', NumTerms
allocate(PhiPhi(NumTerms,NumTerms))
allocate(Phi2HPhi(NumTerms,NumTerms))
allocate(PhiPhi8(NumTerms,NumTerms))
allocate(Phi2HPhi8(NumTerms,NumTerms))
allocate(Energies(NumTerms))
allocate(Workspace(3*NumTerms-1))
if (Omega == -1) then
goto 200 ! No short-range terms, so no need to do these calculations (just a header)
end if
if (Optimize == 1) then
! TODO: Add these back in.
!fa = optimizewavefn(MaxIter, Omega, NumTerms, M12max, M23max, M31max, pmax, IsTriplet, Ordering, Method, EigenRoutine, Alpha, Beta, Gamma)
!fa = EDeriv(Omega, NumTerms, Alpha, Beta, Gamma, EigenNum, PhiPhi, Phi2HPhi, M12max, pmax, 0, &
! IsTriplet, Ordering, Method)
else
! If we are not optimizing eigenvalues, we can still call the same function, which exits early with PhiPhi and Phi2HPhi filled.
fa = EDeriv(Omega, NumTerms, Alpha, Beta, Gamma, EigenNum, PhiPhi, Phi2HPhi, M12max, M23max, M31max, pmax, 0, &
IsTriplet, Ordering, Method, EigenRoutine, LValue)
write (*,*) fa
endif
if (LValue == 0) then ! S-wave only has one symmetry
NumTerms = NumTerms / 2
end if
do i = 1, NumTerms, 1
!do j = 1, i, 1 ! Use this instead if only the lower triangle is required.
do j = 1, NumTerms, 1
write (iwrite,"(i6,i6,d38.30,d38.30)") i, j, PhiPhi(i,j), Phi2HPhi(i,j)
enddo
enddo
! Divide every entry in Phi2HPhi by 2, since we calculated <phi_i|2H|phi_j> in equation (3.22).
Phi2HPhi = Phi2HPhi / 2.0_16
write (iwrite,*)
write (iwrite,*) 'Energies:'
if (AllEnergies == 0) then
StartEnergy = NumTerms
else
StartEnergy = 1
end if
if (LValue == 0) then
NumTerms = NumTerms * 2
end if
do j = StartEnergy, NumTerms, 1
if (LValue == 0 .and. j > NumTerms / 2) then ! S-wave only has one symmetry
exit
end if
! Copies our real*16 matrices to real*8 matrices so that LAPACK can operate on them.
Phi2HPhi8 = Phi2HPhi
PhiPhi8 = PhiPhi
! This calculates the energy eigenvalues of the generalized eigenvalue problem generated from
! the Rayleigh-Ritz variational method:
! det(<phi_i|H|phi_j> - E <phi_i|phi_j>) = 0
! We calculated the lower and upper triangles of the PhiPhi and Phi2HPhi matrices above, though
! they are symmetric. LAPACK does not require us to fill in both halves; we can just
! specify 'L' in the third parameter to denote that the lower triangle (and diagonal) is filled.
! Explanation of choices for the parameters are found at the Netlib site:
! http://www.netlib.org/lapack/double/dsygv.f
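        ! Here ITYPE=1 selects the A*x = lambda*B*x form, JOBZ='N' requests eigenvalues
        ! only, and passing j as the matrix order solves the problem for the leading
        ! j x j submatrices, so the loop tracks how the energies converge as the
        ! basis grows.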
call dsygv(1, 'N', 'L', j, Phi2HPhi8, NumTerms, PhiPhi8, NumTerms, Energies, Workspace, 3*NumTerms-1, Info)
!call Newdsygv(1, 'N', 'L', j, Phi2HPhi, NumTerms, PhiPhi, NumTerms, Energies, Workspace, 3*NumTerms-1, Info)
!call nag_sym_gen_eig_all('L', Phi2HPhi8, PhiPhi8, Energies) ! NAG equivalent
if (Info /= 0) then
write (*,*)
write (*,*) 'Energy eigenvalues could not be determined!'
write (*,*) 'dsygv error code:', Info
write (iwrite,*)
write (iwrite,*) 'Energy eigenvalues could not be determined!'
write (iwrite,*) 'dsygv error code:', Info
exit ! Exit the loop, because every successive j will give the same error, since we have a troublesome term.
        else ! dsygv successfully completed.
! Writes the eigenvalues.
! This output is formatted properly for inclusion into Excel as a comma-separated value (.csv) file.
write (iwrite,"(i4)",advance='no') j
do i = LowerEigen, UpperEigen, 1
write (iwrite,"(a,d21.14)",advance='no') ', ', Energies(i)
enddo
write (iwrite,*) ' ' ! Finish the line
write (*,"(i4)",advance='no') j
do i = LowerEigen, UpperEigen, 1
write (*,"(a,d21.14)",advance='no') ', ', Energies(i)
enddo
write (*,*) ' ' ! Finish the line
endif
enddo
! Get the end time to find the duration of the program.
!call cpu_time(EndTime)
200 EndTime = omp_get_wtime()
write (*,*) 'Time taken (s):', EndTime - StartTime
write (iwrite,*)
write (iwrite,*) 'Time taken (s):', EndTime - StartTime, ' (min):', (EndTime - StartTime) / 60.0
! Clean up memory before exiting
deallocate(Workspace)
deallocate(Energies)
deallocate(PhiPhi8)
deallocate(Phi2HPhi8)
deallocate(PhiPhi)
deallocate(Phi2HPhi)
! Close file handles
close(iwrite)
stop
end
subroutine ReadParamFile(iread, Omega, LValue, Alpha, Beta, Gamma, M12max, M23max, M31max, pmax, Method, IsTriplet, LowerEigen, UpperEigen, &
AllEnergies, Optimize, EigenNum, MaxIter, Ordering, EigenRoutine)
implicit none
integer iread, LValue, Omega, Method, M12max, M23max, M31max, pmax, IsTriplet, LowerEigen, UpperEigen, AllEnergies
integer Optimize, EigenNum, MaxIter, Ordering, EigenRoutine
real*16 Alpha, Beta, Gamma
read (iread,*) ! Description line
read (iread,*) Omega
read (iread,*) ! Description line
read (iread,*) LValue
read (iread,*)
read (iread,*) Alpha
read (iread,*) Beta
read (iread,*) Gamma
read (iread,*)
read (iread,*) Method
read (iread,*)
read (iread,*) M12max, M23max, M31max
read (iread,*)
read (iread,*) pmax
read (iread,*)
read (iread,*) IsTriplet
read (iread,*)
read (iread,*) LowerEigen, UpperEigen
read (iread,*)
read (iread,*) AllEnergies
read (iread,*)
read (iread,*) Optimize
read (iread,*)
read (iread,*) EigenNum
read (iread,*)
read (iread,*) MaxIter
read (iread,*)
read (iread,*) Ordering
read (iread,*)
read (iread,*) EigenRoutine
close (iread)
return
end
subroutine WriteHeader(iwrite, LValue, IsTriplet, Ordering, Method, EigenRoutine, Omega, NumTerms, Alpha, Beta, Gamma, M12max, M23max, M31max, pmax)
implicit none
integer iwrite, LValue, IsTriplet, Ordering, Method, EigenRoutine, Omega, NumTerms, M12max, M23max, M31max, pmax
real*16 Alpha, Beta, Gamma
select case (LValue)
case (0) ! S-Wave
if (IsTriplet == 0) then
write (*,*) "S-Wave Singlet Ps-H"
write (iwrite,*) "S-Wave Singlet Ps-H"
else
write (*,*) "S-Wave Triplet Ps-H"
write (iwrite,*) "S-Wave Triplet Ps-H"
endif
case (1)
if (IsTriplet == 0) then
write (*,*) "P-Wave Singlet Ps-H: 1st formalism"
write (iwrite,*) "P-Wave Singlet Ps-H: 1st formalism"
else
write (*,*) "P-Wave Triplet Ps-H: 1st formalism"
write (iwrite,*) "P-Wave Triplet Ps-H: 1st formalism"
endif
case (2)
if (IsTriplet == 0) then
write (*,*) "D-Wave Singlet Ps-H: 1st formalism"
write (iwrite,*) "D-Wave Singlet Ps-H: 1st formalism"
else
write (*,*) "D-Wave Triplet Ps-H: 1st formalism"
write (iwrite,*) "D-Wave Triplet Ps-H: 1st formalism"
endif
case (3) ! F-Wave
if (IsTriplet == 0) then
write (*,*) "F-Wave Singlet Ps-H"
write (iwrite,*) "F-Wave Singlet Ps-H"
else
write (*,*) "F-Wave Triplet Ps-H"
write (iwrite,*) "F-Wave Triplet Ps-H"
endif
case (4) ! G-Wave
if (IsTriplet == 0) then
write (*,*) "G-Wave Singlet Ps-H"
write (iwrite,*) "G-Wave Singlet Ps-H"
else
write (*,*) "G-Wave Triplet Ps-H"
write (iwrite,*) "G-Wave Triplet Ps-H"
endif
case (5) ! H-Wave
if (IsTriplet == 0) then
write (*,*) "H-Wave Singlet Ps-H"
write (iwrite,*) "H-Wave Singlet Ps-H"
else
write (*,*) "H-Wave Triplet Ps-H"
write (iwrite,*) "H-Wave Triplet Ps-H"
endif
case (6) ! I-Wave
if (IsTriplet == 0) then
write (*,*) "I-Wave Singlet Ps-H"
write (iwrite,*) "I-Wave Singlet Ps-H"
else
write (*,*) "I-Wave Triplet Ps-H"
write (iwrite,*) "I-Wave Triplet Ps-H"
endif
case (7) ! K-Wave
if (IsTriplet == 0) then
write (*,*) "K-Wave Singlet Ps-H"
write (iwrite,*) "K-Wave Singlet Ps-H"
else
write (*,*) "K-Wave Triplet Ps-H"
write (iwrite,*) "K-Wave Triplet Ps-H"
endif
case (8) ! L-Wave
if (IsTriplet == 0) then
write (*,*) "L-Wave Singlet Ps-H"
write (iwrite,*) "L-Wave Singlet Ps-H"
else
write (*,*) "L-Wave Triplet Ps-H"
write (iwrite,*) "L-Wave Triplet Ps-H"
endif
case default
write (*,*) "Higher partial waves are not supported yet...exiting."
stop
end select
if (Ordering == 1) then
write (*,*) "Using Peter Van Reeth's ordering"
write (iwrite,*) "Using Peter Van Reeth's ordering"
else
write (*,*) "Using my ordering"
write (iwrite,*) "Using my ordering"
endif
if (Method == 0) then
write (*,*) "Integration Technique: Direct Summation"
write (iwrite,*) "Integration Technique: Direct Summation"
else if (Method == 1) then
write (*,*) "Integration Technique: Asymptotic Expansion"
write (iwrite,*) "Integration Technique: Asymptotic Expansion"
else if (Method == 2) then
write (*,*) "Integration Technique: Recursion Relations"
write (iwrite,*) "Integration Technique: Recursion Relations"
else
write (*,*) "Method parameter in input file must be 0, 1 or 2."
stop
end if
if (EigenRoutine == 1) then
write (*,*) "Eigenvalue routine: dsygv from LAPACK"
write (iwrite,*) "Eigenvalue routine: dsygv from LAPACK"
else if (EigenRoutine == 2) then
write (*,*) "Eigenvalue routine: Pachucki eigenproblem solver"
write (iwrite,*) "Eigenvalue routine: Pachucki eigenproblem solver"
else
write (*,*) "Eigenvalue routine parameter in input file must be 1 or 2."
stop
end if
write (iwrite,*) 'Omega =', Omega
write (iwrite,*) 'Alpha =', Alpha
write (iwrite,*) 'Beta =', Beta
write (iwrite,*) 'Gamma =', Gamma
if (LValue == 0) NumTerms = NumTerms / 2
write (iwrite,*) 'Number of terms:', NumTerms
write (*,*) 'Number of terms:', NumTerms
write (*,*) 'Omega =', Omega
if (LValue == 0) NumTerms = NumTerms * 2
write (iwrite,*) 'M12max, M23max, M31max, pmax =', M12max, M23max, M31max, pmax
write (iwrite,*)
return
end
! This subroutine calculates the number of terms for the function GenOmegaPowerTable (below). This
! is needed to determine the size of the dynamically allocated table before it is produced.
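! Without the commented-out restrictions in the loops below, this count is simply the
! number of non-negative integer solutions of k+l+m+n+p+q <= Omega, which has the
! closed form binomial(Omega+6, 6) by a stars-and-bars argument; the explicit loops
! are kept so that those restrictions can be re-enabled.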
integer function CalcPowerTableSize(Omega)
implicit none
integer Omega ! This sets the limits on the terms.
integer NumTerms ! The total number of terms
integer om, ki, li, mi, ni, pi, qi ! These are the exponents we are determining.
if (Omega == -1) then ! Special case where we don't want any short-range terms
CalcPowerTableSize = 0
return
end if
NumTerms = 0
do om = 0, Omega, 1
do ki = 0, Omega, 1
do li = 0, Omega, 1
do mi = 0, Omega, 1
do ni = 0, Omega, 1
do pi = 0, Omega, 1
do qi = 0, Omega, 1
if (ki + li + mi + ni + pi + qi == om) then
!if (ki>li) cycle
!if (ki==li .and. qi>pi) cycle
! if (om > 3 .and. qi > 0) cycle ! Restrict r23 to max of 1
! if (om > 3 .and. pi > 0) cycle ! Restrict r13 to max of 3
! if (om > 2 .and. ni > 0) cycle ! Restrict r3 to max of 0
! if (ki >= li .and. qi > pi) cycle ! Check if r1>r2 and r23 > r13 (powers)
NumTerms = NumTerms + 1
endif
enddo
enddo
enddo
enddo
enddo
enddo
enddo
CalcPowerTableSize = NumTerms
return
end
! This subroutine calculates values of k_i, l_i, m_i, n_i, p_i and q_i of
! (3.14). The summation of these is given by equation (3.15), but we do not have
! the restriction on q of being even.
! This is written to always have the terms in order of increasing omega, i.e. the terms
! for omega = 0 are first, etc.
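! For example, Omega = 1 with the default ordering produces the seven terms
! (k,l,m,n,p,q) = (0,0,0,0,0,0), (0,0,0,0,0,1), (0,0,0,0,1,0), (0,0,0,1,0,0),
! (0,0,1,0,0,0), (0,1,0,0,0,0), (1,0,0,0,0,0).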
! TODO: Use logical for Ordering
subroutine GenOmegaPowerTable(Omega, PowerTable, ArraySize, Ordering, l1, l2)
!implicit none
integer Omega ! This sets the limits on the terms.
integer ArraySize
integer, dimension(ArraySize,6) :: PowerTable
integer NumTerm ! The number of the current term
integer om, ki, li, mi, ni, pi, qi ! These are the exponents we are determining.
integer Ordering ! Whether to use Peter's ordering or mine
integer l1, l2, l3 ! l1 = 0 for S-wave, 1 for P-wave, etc.
if (Ordering == 0) then ! Use my ordering
NumTerm = 0
do om = 0, Omega, 1
do ki = 0, Omega, 1
do li = 0, Omega, 1
do mi = 0, Omega, 1
do ni = 0, Omega, 1
do pi = 0, Omega, 1
do qi = 0, Omega, 1
if (ki + li + mi + ni + pi + qi == om) then
!if (ki + li + mi + ni + pi + qi <= Omega) then
!if (ki>li) cycle
!if (ki==li .and. qi>pi) cycle
! if (om > 3 .and. qi > 0) cycle ! Restrict r23 to max of 1
! if (om > 3 .and. pi > 0) cycle ! Restrict r13 to max of 3
! if (om > 2 .and. ni > 0) cycle ! Restrict r3 to max of 0
! if (ki >= li .and. qi > pi) cycle ! Check if r1>r2 and r23 > r13 (powers)
NumTerm = NumTerm + 1
PowerTable(NumTerm,1) = ki
PowerTable(NumTerm,2) = li
PowerTable(NumTerm,3) = mi
PowerTable(NumTerm,4) = ni
PowerTable(NumTerm,5) = pi
PowerTable(NumTerm,6) = qi
write (*,*) ki, li, mi, ni, pi, qi!, ki + li + mi + ni + pi + qi
endif
enddo
enddo
enddo
enddo
enddo
enddo
enddo
else ! Use Peter Van Reeth's ordering instead.
! I pulled this (modified) snippet from Peter Van Reeth's code so that
! my indices match up with his to debug mine easier.
MEGA=OMEGA
IHDPP1 = MEGA +1
INX=0
DO 65 I=1,IHDPP1
DO 66 I23P1=1,I,1
I23=I23P1 -1
I12P1M = I-I23
DO 67 I12P1=1,I12P1M
I12=I12P1-1
I2P1M =I12P1M -I12
DO 68 I2P1 =1,I2P1M
I2 = I2P1-1
I13P1M = I2P1M-I2
DO 69 I13P1=1,I13P1M
I13 = I13P1 -1
I3P1M = I13P1M -I13
DO 70 I3P1 = 1, I3P1M
I3=I3P1-1
I1 = I3P1M -I3P1
!if (I1>I2) cycle
!if (I1==I2 .and. I23>I13) cycle
INX =INX +1
PowerTable(INX,1) = I1
PowerTable(INX,2) = I2
PowerTable(INX,3) = I12
PowerTable(INX,4) = I3
PowerTable(INX,5) = I13
PowerTable(INX,6) = I23
!write (*,*) I1, I2, I12, I3, I13, I23
70 CONTINUE
69 CONTINUE
68 CONTINUE
67 CONTINUE
66 CONTINUE
65 CONTINUE
NumTerm = INX
endif
! REMOVE THIS !
if (Ordering == 2) then ! Use my ordering
NumTerm = 0
do om = 0, Omega, 1
do qi = 0, Omega, 1
do pi = 0, Omega, 1
do ni = 0, Omega, 1
do mi = 0, Omega, 1
do li = 0, Omega, 1
do ki = 0, Omega, 1
if (ki + li + mi + ni + pi + qi == om) then
!if (ki + li + mi + ni + pi + qi <= Omega) then
!if (ki>li) cycle
!if (ki==li .and. qi>pi) cycle
! if (om > 3 .and. qi > 0) cycle ! Restrict r23 to max of 1
! if (om > 3 .and. pi > 0) cycle ! Restrict r13 to max of 3
! if (om > 2 .and. ni > 0) cycle ! Restrict r3 to max of 0
! if (ki >= li .and. qi > pi) cycle ! Check if r1>r2 and r23 > r13 (powers)
NumTerm = NumTerm + 1
PowerTable(NumTerm,1) = ki
PowerTable(NumTerm,2) = li
PowerTable(NumTerm,3) = ni
PowerTable(NumTerm,4) = mi
PowerTable(NumTerm,5) = qi
PowerTable(NumTerm,6) = pi
write (*,*) ki, li, ni, mi, qi, pi!, ki + li + mi + ni + pi + qi
endif
enddo
enddo
enddo
enddo
enddo
enddo
enddo
endif
do i = 1, NumTerm, 1
! Increase r1 by l1 and r2 by l2/l3
PowerTable(i,1) = PowerTable(i,1) + l1
PowerTable(i,2) = PowerTable(i,2) + l2
write (*,'(i3,a,6i2)') i, ":", PowerTable(i,1), PowerTable(i,2), PowerTable(i,3), PowerTable(i,4), PowerTable(i,5), PowerTable(i,6)
end do
return
end
subroutine GenCoeffTable(CoeffTable, PowerTablei, i, PowerTablej, j, NumTerms, Alphai, Alphaj, Betai, Betaj, &
Gammai, Gammaj, l1r, l2r, l3r)
implicit none
real*16, dimension(34) :: CoeffTable
integer i, j, NumTerms
integer, dimension(NumTerms,6) :: PowerTablei
integer, dimension(NumTerms,6) :: PowerTablej
real*16 Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj
integer kj, lj, mj, nj, pj, qj, l1r, l2r, l3r
! We do not really have to do this, but it makes reading the assignments later
! easier to follow instead of assigning index numbers to each of k, l, m, etc.
! In the power tables, we have the following indices:
! k = 1, l = 2, m = 3, n = 4, p = 5, q = 6
kj = PowerTablej(j,1)
lj = PowerTablej(j,2)
mj = PowerTablej(j,3)
nj = PowerTablej(j,4)
pj = PowerTablej(j,5)
qj = PowerTablej(j,6)
CoeffTable(1) = -Alphaj*Alphaj - Betaj*Betaj - Gammaj*Gammaj
CoeffTable(2) = -kj - kj*kj - kj*mj - kj*pj + l1r + l1r*l1r
CoeffTable(3) = -2.0q0*mj - kj*mj - 2.0q0*mj*mj - mj*lj - mj*qj - mj*pj
CoeffTable(4) = -2.0q0
CoeffTable(5) = -lj - mj*lj - lj*lj - lj*qj + l2r + l2r*l2r
CoeffTable(6) = mj*lj
CoeffTable(7) = kj*mj
CoeffTable(8) = -2.0q0*qj - mj*qj - lj*qj - 2.0q0*qj*qj - qj*nj - qj*pj
CoeffTable(9) = 2.0q0
CoeffTable(10) = -nj - qj*nj - nj*nj - nj*pj + l3r + l3r*l3r
CoeffTable(11) = qj*nj
CoeffTable(12) = lj*qj
CoeffTable(13) = -2.0q0*pj - kj*pj - mj*pj - qj*pj - nj*pj - 2.0q0*pj*pj
CoeffTable(14) = qj*pj
CoeffTable(15) = mj*pj
CoeffTable(16) = nj*pj
CoeffTable(17) = kj*pj
CoeffTable(18) = -2.0q0
CoeffTable(19) = mj*qj
CoeffTable(20) = mj*Alphaj
CoeffTable(21) = -mj*Alphaj
CoeffTable(22) = pj*Alphaj
CoeffTable(23) = -pj*Alphaj
CoeffTable(24) = 2.0q0 + 2.0q0*Alphaj + 2.0q0*kj*Alphaj + mj*Alphaj + pj*Alphaj
CoeffTable(25) = -mj*Betaj
CoeffTable(26) = mj*Betaj
CoeffTable(27) = qj*Betaj
CoeffTable(28) = -qj*Betaj
CoeffTable(29) = -2.0q0 + 2.0q0*Betaj + mj*Betaj + 2.0q0*lj*Betaj + qj*Betaj
CoeffTable(30) = -qj*Gammaj
CoeffTable(31) = qj*Gammaj
CoeffTable(32) = -pj*Gammaj
CoeffTable(33) = pj*Gammaj
CoeffTable(34) = -2.0q0 + 2.0q0*Gammaj + qj*Gammaj + 2.0q0*nj*Gammaj + pj*Gammaj
return
end
! Note that the naming is a little different here. I used rPowerTable here as the parameter name but
! use rPowers elsewhere.
subroutine GenRPowerTable(rPowerTable, PowerTablei, i, PowerTablej, j, NumTerms)
integer, dimension(34,6) :: rPowerTable
integer, dimension(NumTerms,6) :: PowerTablei
integer, dimension(NumTerms,6) :: PowerTablej
integer i, j, NumTerms
integer ki, li, mi, ni, pi, qi, kj, lj, mj, nj, pj, qj
integer, dimension(34,6) :: rPowers
integer n
! This is a 2-dimensional array expressed in a contiguous 1-D array. I see no way to assign a 2-D
! array in Fortran (at least in G95). I found several examples that would not compile with G95.
! These are taken directly from the table in rpowers.pdf. Each power is covered by 2 lines, since
! there are 34 terms to take care of. These can be read as going down the columns of rpowers.pdf.
data rPowers / 0,-2, 0, 0, 0, 2,-2, 0, 0, 0, 0, 0, 0, 0, 0, 2,-2, 0, 0, 1,-1, 1,-1,-1, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, & ! r1
0, 0, 0, 0,-2,-2, 2, 0, 0, 0, 2,-2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0,-1, 1, 1,-1,-1, 2, 0, 0, 0, 0, & ! r2
0, 0,-2,-1, 0,-2,-2, 0, 0, 0, 0, 0, 0, 2,-2, 0, 0, 0,-2,-2,-2, 0, 0, 0,-2,-2, 0, 0, 0, 0, 0, 0, 0, 0, & ! r12
0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-2, 2, 0, 0, 0,-2, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0,-1, 1,-1, 1,-1, & ! r3
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-2,-2,-2,-2,-1, 2, 0, 0,-2,-2, 0, 0, 0, 0, 0, 0, 0, 0,-2,-2, 0, & ! r31
0, 0, 0, 0, 0, 0, 0,-2,-1, 0,-2,-2, 0,-2, 2, 0, 0, 0,-2, 0, 0, 0, 0, 0, 0, 0,-2,-2, 0,-2,-2, 0, 0, 0 / ! r23
!1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34
! We do not really have to do this, but it makes reading the assignments later
! easier to follow instead of assigning index numbers to each of k, l, m, etc.
! In the power tables, we have the following indices:
! k = 1, l = 2, m = 3, n = 4, p = 5, q = 6
ki = PowerTablei(i,1)
li = PowerTablei(i,2)
mi = PowerTablei(i,3)
ni = PowerTablei(i,4)
pi = PowerTablei(i,5)
qi = PowerTablei(i,6)
kj = PowerTablej(j,1)
lj = PowerTablej(j,2)
mj = PowerTablej(j,3)
nj = PowerTablej(j,4)
pj = PowerTablej(j,5)
qj = PowerTablej(j,6)
! Combine powers of like r's from f_i, f_j and the powers in rpowers.pdf.
do n = 1, 34, 1
rPowerTable(n,1) = ki + kj + rPowers(n,1)
rPowerTable(n,2) = li + lj + rPowers(n,2)
rPowerTable(n,3) = mi + mj + rPowers(n,3)
rPowerTable(n,4) = ni + nj + rPowers(n,4)
rPowerTable(n,5) = pi + pj + rPowers(n,5)
rPowerTable(n,6) = qi + qj + rPowers(n,6)
enddo
return
end
subroutine GenCoeffTableSH(CoeffTable, PowerTablei, i, PowerTablej, j, NumTerms, Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, l1r, l2r, l3r)
implicit none
real*16, dimension(6) :: CoeffTable
integer i, j, NumTerms
integer, dimension(NumTerms,6) :: PowerTablei
integer, dimension(NumTerms,6) :: PowerTablej
real*16 Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj
integer kj, lj, mj, nj, pj, qj, l1r, l2r, l3r
kj = PowerTablej(j,1)
lj = PowerTablej(j,2)
mj = PowerTablej(j,3)
nj = PowerTablej(j,4)
pj = PowerTablej(j,5)
qj = PowerTablej(j,6)
CoeffTable(1) = mj
CoeffTable(2) = pj
CoeffTable(3) = qj
CoeffTable(4) = mj
CoeffTable(5) = pj
CoeffTable(6) = qj
return
end
! Note that the naming is a little different here. I used rPowerTable here as the parameter name but
! use rPowers elsewhere.
subroutine GenRPowerTableSH(rPowerTable, PowerTablei, i, PowerTablej, j, NumTerms)
integer, dimension(6,6) :: rPowerTable
integer, dimension(NumTerms,6) :: PowerTablei
integer, dimension(NumTerms,6) :: PowerTablej
integer i, j, NumTerms
integer ki, li, mi, ni, pi, qi, kj, lj, mj, nj, pj, qj
integer, dimension(6,6) :: rPowers
integer n
data rPowers / -1,-1, 0, 1, 1, 0, & ! r1
1, 0,-1,-1, 0, 1, & ! r2
-2, 0, 0,-2, 0, 0, & ! r12
0, 1, 1, 0,-1,-1, & ! r3
0,-2, 0, 0,-2, 0, & ! r31
0, 0,-2, 0, 0,-2 / ! r23
!1, 2, 3, 4, 5, 6
! In the power tables, we have the following indices:
! k = 1, l = 2, m = 3, n = 4, p = 5, q = 6
ki = PowerTablei(i,1)
li = PowerTablei(i,2)
mi = PowerTablei(i,3)
ni = PowerTablei(i,4)
pi = PowerTablei(i,5)
qi = PowerTablei(i,6)
kj = PowerTablej(j,1)
lj = PowerTablej(j,2)
mj = PowerTablej(j,3)
nj = PowerTablej(j,4)
pj = PowerTablej(j,5)
qj = PowerTablej(j,6)
! Combine powers of like r's from f_i, f_j and the powers in rpowers.pdf.
do n = 1, 6, 1
rPowerTable(n,1) = ki + kj + rPowers(n,1)
rPowerTable(n,2) = li + lj + rPowers(n,2)
rPowerTable(n,3) = mi + mj + rPowers(n,3)
rPowerTable(n,4) = ni + nj + rPowers(n,4)
rPowerTable(n,5) = pi + pj + rPowers(n,5)
rPowerTable(n,6) = qi + qj + rPowers(n,6)
enddo
return
end
! Sets all WMatrix elements equal to 0.
! TODO: Is there an equivalent to the C++ memset?
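! (In Fortran 90+, the whole-array assignment WMatrix = 0.0q0 would do the same
! thing in a single statement.)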
subroutine ClearWMatrix(WMatrix)
use WLimits
implicit none
real*16, dimension(lmin:lmax, mmin:mmax, nmin:nmax, 6) :: WMatrix
integer l, m, n
do l = lmin, lmax, 1
do m = mmin, mmax, 1
do n = nmin, nmax, 1
WMatrix(l, m, n, 1) = 0.0q0
WMatrix(l, m, n, 2) = 0.0q0
WMatrix(l, m, n, 3) = 0.0q0
WMatrix(l, m, n, 4) = 0.0q0
WMatrix(l, m, n, 5) = 0.0q0
WMatrix(l, m, n, 6) = 0.0q0
end do
end do
end do
return
end
! This precalculates the W matrices, since the W function is computationally expensive.
subroutine CalcWMatrices(Omega, WMatrix, Alpha, Beta, Gamma, pmax)
use WLimits
implicit none
integer Omega
real*16, dimension(lmin:lmax, mmin:mmax, nmin:nmax, 6) :: WMatrix
real*16 Alpha, Beta, Gamma
integer pmax
real*16 W
integer l, m, n, k
!$omp parallel do shared(lmin,lmax,mmin,mmax,nmin,nmax,pmax,Alpha,Beta,Gamma,WMatrix) private(m,n) schedule(dynamic,5)
do l = lmin, lmax, 1
write (*,*) "wmatrix l:", l, "/", lmax
!write (*,"(a)",advance='no') '.'
do m = mmin, mmax, 1
do n = nmin, nmax, 1
!if (l + m + n + 2 >= 0 .and. l + m + 1 >= 0) then ! TODO: Needed?
do k = 1, 6, 1
! Note that the ordering of alpha, beta and gamma is the same as the ordering
! in the terms of equation (6) of Drake/Yan '95.
if (WMatrix(l, m, n, k) /= 0.0q0) then
select case(k)
case (1)
WMatrix(l, m, n, 1) = W(l, m, n, Alpha, Beta, Gamma, pmax)
case (2)
WMatrix(l, m, n, 2) = W(l, m, n, Alpha, Gamma, Beta, pmax)
case (3)
WMatrix(l, m, n, 3) = W(l, m, n, Beta, Alpha, Gamma, pmax)
case (4)
WMatrix(l, m, n, 4) = W(l, m, n, Beta, Gamma, Alpha, pmax)
case (5)
WMatrix(l, m, n, 5) = W(l, m, n, Gamma, Alpha, Beta, pmax)
case (6)
WMatrix(l, m, n, 6) = W(l, m, n, Gamma, Beta, Alpha, pmax)
end select ! There is no need for a default case.
end if
end do
!end if
end do
end do
end do
write (*,*)
return
end
subroutine CalcMatricesSub(RunCalc, UsePreCalc, CalcSH, PowerTablei, PowerTablej, WMatrix, PhiPhi, Phi2HPhi, NumTerms, OffI, OffJ, Alphai, Alphaj, Betai, Betaj, &
Gammai, Gammaj, l1l, l2l, l3l, m1l, m2l, m3l, l1r, l2r, l3r, m1r, m2r, m3r, M12max, M23max, M31max, pmax, IsTriplet, Method)
use WLimits
implicit none
real*16, dimension(34) :: CoeffTable
real*16, dimension(6) :: CoeffTableSH
integer, dimension(34,6) :: rPowers
integer, dimension(6,6) :: rPowersSH
integer, dimension(6) :: sldata, srdata
integer, dimension(NumTerms,6) :: PowerTablei, PowerTablej
real*16, dimension(lmin:lmax, mmin:mmax, nmin:nmax, 6) :: WMatrix
real*16, dimension(NumTerms*2,NumTerms*2) :: PhiPhi, Phi2HPhi
integer M12max, M23max, M31max, pmax, NumTerms, OffI, OffJ, Method
logical RunCalc, UsePreCalc, CalcSH
integer l1l, l2l, l3l, m1l, m2l, m3l, l1r, l2r, l3r, m1r, m2r, m3r
real*16 Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj
real*16 HylleraasIntegral, HylleraasIntegralGeneral, CP12Angular
real*16 Sum, SumPiHalf
real*16 PI
integer i, j, n, IsTriplet
real*16 RemoveMe, SHPart, SHPartPiHalf
real*16 PiHalf
data sldata / 2, 3, 3, 1, 1, 2 /
data srdata / 1, 1, 2, 2, 3, 3 /
    ! 1/(2pi) calculated in Mathematica (note: despite its name, PiHalf holds 1/(2*pi), not pi/2)
PiHalf = 0.1591549430918953357688837633725143620345_16
!call omp_set_num_threads(1)
!$omp parallel do shared(NumTerms) private(j,Sum,SHPart,RemoveMe,CoeffTable,CoeffTableSH,rPowers,rPowersSH) schedule(dynamic,10)
do i = 1, NumTerms, 1
write (*,*) i
!write (*,"(i7)",advance='no') i
!do j = 1, i, 1
do j = 1, NumTerms, 1
Sum = 0.0q0
call GenCoeffTable(CoeffTable, PowerTablei, i, PowerTablej, j, NumTerms, &
Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, l1r, l2r, l3r)
call GenRPowerTable(rPowers, PowerTablei, i, PowerTablej, j, NumTerms)
call GenCoeffTableSH(CoeffTableSH, PowerTablei, i, PowerTablej, j, NumTerms, &
Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, l1r, l2r, l3r)
call GenRPowerTableSH(rPowersSH, PowerTablei, i, PowerTablej, j, NumTerms)
do n = 1, 34, 1
!do n = 4, 4, 1
if (CoeffTable(n) /= 0.0q0) then
! Note that the order of the parameters to the HylleraasIntegral function looks strange. This is
! because the Drake and Yan paper (1995) expressed the integrand in the following order:
! r1 r2 r3 r12 r23 r31, and the Armour and Humberston article has them in this order:
! r1 r2 r12 r3 r13 r23 (equation 3.14). So we have to swap the third and fourth parameters, along
! with the fifth and sixth.
! Do the PhiPhi inner product
RemoveMe = HylleraasIntegralGeneral(RunCalc, UsePreCalc, rPowers(n,1), rPowers(n,2), rPowers(n,4), rPowers(n,3), rPowers(n,6), &
rPowers(n,5), Alphai+Alphaj, Betai+Betaj, Gammai+Gammaj, &
l1l, l2l, l3l, m1l, m2l, m3l, l1r, l2r, l3r, m1r, m2r, m3r, M12max, M23max, M31max, pmax, 0, 0, 0, WMatrix, Method)
Sum = Sum + RemoveMe * CoeffTable(n)
end if
enddo
! TODO: Check for whether these terms should be 0 before calculating.
! Now we need to take care of the terms that act on the spherical harmonics. There are 6 terms total from this.
            if (CalcSH) then
SHPart = 0.0q0
do n = 1, 6, 1
if (CoeffTableSH(n) /= 0.0q0) then
RemoveMe = HylleraasIntegralGeneral(RunCalc, UsePreCalc, rPowersSH(n,1), rPowersSH(n,2), rPowersSH(n,4), rPowersSH(n,3), rPowersSH(n,6), &
rPowersSH(n,5), Alphai+Alphaj, Betai+Betaj, Gammai+Gammaj, &
l1l, l2l, l3l, m1l, m2l, m3l, l1r, l2r, l3r, m1r, m2r, m3r, M12max, M23max, M31max, pmax, n, sldata(n), srdata(n), WMatrix, Method)
SHPart = SHPart + RemoveMe * CoeffTableSH(n)
end if
end do
!SumPiHalf = Sum * PiHalf ! TODO: Remove this.
!SHPartPiHalf = SHPart * PiHalf ! TODO: Remove this.
Sum = Sum + SHPart * 2 ! The other terms have a 1/2 in front, and we are calculating 2H
end if
if (IsTriplet == 0) then
Phi2HPhi(i+OffI,j+OffJ) = Phi2HPhi(i+OffI,j+OffJ) + Sum * PiHalf
else
Phi2HPhi(i+OffI,j+OffJ) = Phi2HPhi(i+OffI,j+OffJ) - Sum * PiHalf
end if
! Do the PhiPhi inner product
RemoveMe = HylleraasIntegralGeneral(RunCalc, UsePreCalc, PowerTablei(i,1)+PowerTablej(j,1), PowerTablei(i,2)+PowerTablej(j,2), &
PowerTablei(i,4)+PowerTablej(j,4), PowerTablei(i,3)+PowerTablej(j,3), &
PowerTablei(i,6)+PowerTablej(j,6), PowerTablei(i,5)+PowerTablej(j,5), Alphai+Alphaj, Betai+Betaj, Gammai+Gammaj, &
l1l, l2l, l3l, m1l, m2l, m3l, l1r, l2r, l3r, m1r, m2r, m3r, M12max, M23max, M31max, pmax, 0, 0, 0, WMatrix, Method)
if (IsTriplet == 0) then
PhiPhi(i+OffI,j+OffJ) = PhiPhi(i+OffI,j+OffJ) + RemoveMe * PiHalf
else
PhiPhi(i+OffI,j+OffJ) = PhiPhi(i+OffI,j+OffJ) - RemoveMe * PiHalf
endif
enddo
enddo
write (*,*)
return
end
subroutine CalcMatrices(RunCalc, UsePreCalc, PowerTabler1i, PowerTabler1j, PowerTabler2i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms, &
Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, LValue, M12max, M23max, M31max, pmax, IsTriplet, Method, Exchanged)
use WLimits
implicit none
integer, dimension(NumTerms/2,6) :: PowerTabler1i, PowerTabler1j, PowerTabler2i, PowerTabler2j
real*16, dimension(lmin:lmax, mmin:mmax, nmin:nmax, 6) :: WMatrix
real*16, dimension(NumTerms,NumTerms) :: PhiPhi, Phi2HPhi
integer NumTerms, LValue, M12max, M23max, M31max, pmax, IsTriplet, Method
logical RunCalc, UsePreCalc
real*16 Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj
logical Exchanged
! 1-1 matrix elements
write (*,*) "Calculating 1-1 matrix elements"
call CalcMatricesSub(RunCalc, UsePreCalc, .false., PowerTabler1i, PowerTabler1j, WMatrix, PhiPhi, Phi2HPhi, NumTerms/2, 0, 0, Alphai, Alphaj, Betai, Betaj, &
Gammai, Gammaj, LValue, 0, 0, 0, 0, 0, LValue, 0, 0, 0, 0, 0, M12max, M23max, M31max, pmax, IsTriplet, Method)
if (LValue == 0) then ! S-wave only has one symmetry
return
end if
! 2-2 matrix elements
write (*,*) "Calculating 2-2 matrix elements"
    if (Exchanged) then ! Swap l2r and l3r
call CalcMatricesSub(RunCalc, UsePreCalc, .true., PowerTabler2i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms/2, NumTerms/2, NumTerms/2, Alphai, Alphaj, Betai, Betaj, &
Gammai, Gammaj, 0, LValue, 0, 0, 0, 0, 0, 0, LValue, 0, 0, 0, M12max, M23max, M31max, pmax, IsTriplet, Method)
else
call CalcMatricesSub(RunCalc, UsePreCalc, .false., PowerTabler2i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms/2, NumTerms/2, NumTerms/2, Alphai, Alphaj, Betai, Betaj, &
Gammai, Gammaj, 0, LValue, 0, 0, 0, 0, 0, LValue, 0, 0, 0, 0, M12max, M23max, M31max, pmax, IsTriplet, Method)
end if
! 1-2 matrix elements
write (*,*) "Calculating 1-2 matrix elements"
    if (Exchanged) then ! Swap l2r and l3r
call CalcMatricesSub(RunCalc, UsePreCalc, .true., PowerTabler1i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms/2, 0, NumTerms/2, Alphai, Alphaj, Betai, Betaj, &
Gammai, Gammaj, LValue, 0, 0, 0, 0, 0, 0, 0, LValue, 0, 0, 0, M12max, M23max, M31max, pmax, IsTriplet, Method)
else
call CalcMatricesSub(RunCalc, UsePreCalc, .true., PowerTabler1i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms/2, 0, NumTerms/2, Alphai, Alphaj, Betai, Betaj, &
Gammai, Gammaj, LValue, 0, 0, 0, 0, 0, 0, LValue, 0, 0, 0, 0, M12max, M23max, M31max, pmax, IsTriplet, Method)
end if
! 2-1 matrix elements
write (*,*) "Calculating 2-1 matrix elements"
call CalcMatricesSub(RunCalc, UsePreCalc, .true., PowerTabler2i, PowerTabler1j, WMatrix, PhiPhi, Phi2HPhi, NumTerms/2, NumTerms/2, 0, Alphai, Alphaj, Betai, Betaj, &
Gammai, Gammaj, 0, LValue, 0, 0, 0, 0, LValue, 0, 0, 0, 0, 0, M12max, M23max, M31max, pmax, IsTriplet, Method)
return
end
!real*16 function ederivwrapper(Omega, NumTerms, M12max, pmax, IsTriplet, Ordering, Method, EigenRoutine, Alpha, Beta, Gamma, Iter, Energy)
! implicit none
! real*16, allocatable, dimension(:,:) :: PhiPhi, Phi2HPhi
! real*16, allocatable, dimension(:,:) :: PhiPhi8, Phi2HPhi8
! real*16, allocatable, dimension(:) :: Energies, Workspace
! integer Omega, NumTerms, M12max, pmax, IsTriplet, Ordering, Method, EigenRoutine, Info, Iter
! real*16 Alpha, Beta, Gamma, Energy
! real*16 EDeriv
!
! allocate(PhiPhi(NumTerms,NumTerms))
! allocate(Phi2HPhi(NumTerms,NumTerms))
! allocate(Energies(NumTerms))
! allocate(Workspace(3*NumTerms-1)) ! Not really needed for Newdsygv?
!
! Energies = 0.0q0
!
! Energy = EDeriv(Omega, NumTerms, Alpha, Beta, Gamma, 1, PhiPhi, Phi2HPhi, PhiPhi8, Phi2HPhi8, &
! M12max, pmax, 0, IsTriplet, Ordering, Method, EigenRoutine)
!
! Phi2HPhi = Phi2HPhi / 2.0q0 ! TODO: Remove?
! if (EigenRoutine == 1) then
! allocate(PhiPhi8(NumTerms,NumTerms))
! allocate(Phi2HPhi8(NumTerms,NumTerms))
! PhiPhi8 = PhiPhi
! Phi2HPhi8 = Phi2HPhi
! call dsygv(1, 'N', 'L', NumTerms, Phi2HPhi8, NumTerms, PhiPhi8, NumTerms, Energies, Workspace, 3*NumTerms-1, Info)
! ! TODO: Test Info!
! write (*,*) "Info: ", Info
! deallocate(PhiPhi8, Phi2HPhi8)
! else ! EigenRoutine == 2
! call Newdsygv(1, 'N', 'L', NumTerms, Phi2HPhi, NumTerms, PhiPhi, NumTerms, Energies, Workspace, 3*NumTerms-1, Info)
! end if
!
! Energy = Energies(1)
!
! write (*,"(A,3F8.5)") "Alpha, Beta, Gamma: ", Alpha, Beta, Gamma
! write (10,"(I4,A,3F8.5)") Iter, "; Alpha, Beta, Gamma: ", Alpha, Beta, Gamma ! I dislike hardcoding the 10, but it's easier at this point.
! write (10,"(A,F16.12)") "Energy: ", Energy
! write (10,*)
! flush(10)
!
! deallocate(PhiPhi, Phi2HPhi, Energies, Workspace)
! ederivwrapper = 1.0q0
!end
! Calculates the derivative of the energy function. If the Deriv parameter = 0, then it exits early with
! PhiPhi and Phi2HPhi filled.
real*16 function EDeriv(Omega, NumTerms, Alpha, Beta, Gamma, EigenNum, PhiPhi, Phi2HPhi, &
M12max, M23max, M31max, pmax, Deriv, IsTriplet, Ordering, Method, EigenRoutine, LValue)
use WLimits
!use nag_nsym_gen_eig
implicit none
real*16 Alpha, Beta, Gamma, Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, e
integer, allocatable, dimension(:,:) :: PowerTabler1i, PowerTabler1j, PowerTabler2i, PowerTabler2j ! This contains the powers of the Hylleraas terms.
integer CalcPowerTableSize, PreCalcGammaSize ! Function definitions
integer EigenNum ! This is the energy eigenvalue number we are dealing with.
integer Omega, NumTerms, M12max, M23max, M31max, pmax, Deriv, IsTriplet, Method, Ordering, i, j, n, TempJ, Info, EigenRoutine, LValue
real*16, dimension(NumTerms,NumTerms) :: PhiPhi, Phi2HPhi
real*16, allocatable, dimension(:,:,:,:) :: WMatrix
real*16 PhiHPhiSum, PhiPhiSum, Norm
integer l, m, k
logical UsePreCalc
UsePreCalc = .true.
!UsePreCalc = .false.
allocate(PowerTabler1i(NumTerms/2, 6))
call GenOmegaPowerTable(Omega, PowerTabler1i, NumTerms/2, Ordering, LValue, 0)
allocate(PowerTabler1j(NumTerms/2, 6))
call GenOmegaPowerTable(Omega, PowerTabler1j, NumTerms/2, Ordering, LValue, 0)
allocate(PowerTabler2i(NumTerms/2, 6))
call GenOmegaPowerTable(Omega, PowerTabler2i, NumTerms/2, Ordering, 0, LValue)
allocate(PowerTabler2j(NumTerms/2, 6))
call GenOmegaPowerTable(Omega, PowerTabler2j, NumTerms/2, Ordering, 0, LValue)
! Set this matrix equal to 0 because of the Phi2HPhi(i,j) = Phi2HPhi(i,j) + Sum line in CalcMatrices
! (and equivalent for PhiPhi). This allows us to use the same subroutine for the direct and exchange elements.
Phi2HPhi = 0.0_16
PhiPhi = 0.0_16
! For the direct integration, f_i and f_j have beta and gamma in the same place.
Alphai = Alpha; Alphaj = Alpha; Betai = Beta; Betaj = Beta; Gammai = Gamma; Gammaj = Gamma;
! Precalculate the W matrices. lmin, lmax, etc. are the upper and lower limits for each of the three
! parameters to the W function (in HylleraasIntegral.f90). These limits were determined by me, and
! there is a document detailing these limits (W Function Limits.pdf).
!!lmin = 1; lmax = 2*(Omega+LValue) + 4*max(linf,minf) + 4;
!!mmin = -1 - 3*max(linf,minf); mmax = 2*(Omega+LValue) + 3*max(linf,minf) + 3;
!!nmin = -2 - 4*max(linf,minf); nmax = 2*(Omega+LValue) + 2;
lmin = 1; lmax = min(M12max, min(M23max, M31max))*2 + LValue*2 + Omega*2 + 5;
mmin = -1; mmax = 2*(LValue+Omega) + 3;
nmin = -(min(M12max, min(M23max, M31max))*3 + 2*Omega); nmax = 2*(Omega+LValue) + 2;
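! Illustrative instance of these limits (hypothetical values, not from a real run):
! with Omega = 4, LValue = 1 and M12max = M23max = M31max = 2, the formulas above give
! lmax = 2*2 + 2*1 + 2*4 + 5 = 19, mmax = 2*(1+4) + 3 = 13,
! nmin = -(3*2 + 2*4) = -14 and nmax = 2*(4+1) + 2 = 12.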
allocate(WMatrix(lmin:lmax, mmin:mmax, nmin:nmax, 6))
call GenFactorial(400)
write (*,*)
write (*,*) "Starting calculation of direct-direct terms"
if (UsePreCalc) then
write (*,*) "Precomputing direct-direct W matrix"
call ClearWMatrix(WMatrix)
! First call it with RunCalc = false to determine what W functions to calculate.
call CalcMatrices(.false., .true., PowerTabler1i, PowerTabler1j, PowerTabler2i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms, &
Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, LValue, M12max, M23max, M31max, pmax, IsTriplet, Method, .false.)
! Then precalculate all W functions that are used.
call CalcWMatrices(Omega, WMatrix, Alphai+Alphaj, Betai+Betaj, Gammai+Gammaj, pmax)
write (*,*) "Finished precomputing direct-direct W matrix"
end if
! This subroutine calculates the PhiPhi and Phi2HPhi matrices for the direct integrals.
call CalcMatrices(.true., UsePreCalc, PowerTabler1i, PowerTabler1j, PowerTabler2i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms, &
Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, LValue, M12max, M23max, M31max, pmax, IsTriplet, Method, .false.)
if (IsTriplet == 1) then ! Only do this for the triplet case.
PhiPhi = -PhiPhi
Phi2HPhi = -Phi2HPhi
endif
!goto 300 ! Skip permutation for testing
! Now we have to do the same thing for the exchange operator, P_23. We operate it on f_j by switching the
! 2<->3 powers in PowerTablej and recalculating, along with beta and gamma.
do n = 1, NumTerms/2, 1
! This exchanges r2 and r3.
TempJ = PowerTabler1j(n,2)
PowerTabler1j(n,2) = PowerTabler1j(n,4) ! l with n
PowerTabler1j(n,4) = TempJ
TempJ = PowerTabler2j(n,2)
PowerTabler2j(n,2) = PowerTabler2j(n,4) ! l with n
PowerTabler2j(n,4) = TempJ
! This exchanges r12 and r13 (r31).
TempJ = PowerTabler1j(n,3)
PowerTabler1j(n,3) = PowerTabler1j(n,5) ! m with p
PowerTabler1j(n,5) = TempJ
TempJ = PowerTabler2j(n,3)
PowerTabler2j(n,3) = PowerTabler2j(n,5) ! m with p
PowerTabler2j(n,5) = TempJ
enddo
! Swap beta and gamma for f_j.
Alphai = Alpha; Alphaj = Alpha; Betaj = Gamma; Betai = Beta; Gammaj = Beta; Gammai = Gamma;
!i = 0
!do l = lmin, lmax, 1
! do m = mmin, mmax, 1
! do n = nmin, nmax, 1
! do k = 1, 6, 1
! if (WMatrix(l, m, n, k) == 1000.0q0) i = i + 1
! end do
! end do
! end do
!end do
!
!write (*,*) "Total WMatrix used: ", i
!write (*,*) "Total WMatrix size: ", (lmax-lmin+1) * (mmax-mmin+1) * (nmax-nmin+1) * 6
!stop
! We have to recalculate the W matrices again, since we had the swaps.
write (*,*) "Starting calculation of direct-exchange terms"
if (UsePreCalc) then
write (*,*) "Precomputing direct-exchange W matrix"
call ClearWMatrix(WMatrix)
! The W functions to calculate are not, in general, the same as that for the direct-direct case.
call CalcMatrices(.false., UsePreCalc, PowerTabler1i, PowerTabler1j, PowerTabler2i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms, &
Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, LValue, M12max, M23max, M31max, pmax, IsTriplet, Method, .true.)
call CalcWMatrices(Omega, WMatrix, Alphai+Alphaj, Betai+Betaj, Gammai+Gammaj, pmax)
write (*,*) "Finished precomputing direct-exchange W matrix"
end if
! Calculate the matrix elements for the exchanged terms, and add them to the direct terms.
call CalcMatrices(.true., UsePreCalc, PowerTabler1i, PowerTabler1j, PowerTabler2i, PowerTabler2j, WMatrix, PhiPhi, Phi2HPhi, NumTerms, &
Alphai, Alphaj, Betai, Betaj, Gammai, Gammaj, LValue, M12max, M23max, M31max, pmax, IsTriplet, Method, .true.)
! If we are not optimizing the nonlinear parameters, return since matrices have been calculated.
300 EDeriv = 1.0q0 ! Arbitrary value; the return value is not used in this case.
! Clean up memory before exiting
deallocate(WMatrix)
deallocate(PowerTabler1i)
deallocate(PowerTabler1j)
deallocate(PowerTabler2i)
deallocate(PowerTabler2j)
return
end
|
{"hexsha": "648aeec2f89ad6dbc100a864365f2de470d0f460", "size": 44989, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "General Code/Short-Range/Quadruple Precision/PsHMain.f90", "max_stars_repo_name": "DentonW/Ps-H-Scattering", "max_stars_repo_head_hexsha": "943846d1deadbe99a98d2c2e26bcebf55986d8e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-08-02T03:50:06.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-02T03:50:06.000Z", "max_issues_repo_path": "General Code/Short-Range/Quadruple Precision/PsHMain.f90", "max_issues_repo_name": "DentonW/Ps-H-Scattering", "max_issues_repo_head_hexsha": "943846d1deadbe99a98d2c2e26bcebf55986d8e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "General Code/Short-Range/Quadruple Precision/PsHMain.f90", "max_forks_repo_name": "DentonW/Ps-H-Scattering", "max_forks_repo_head_hexsha": "943846d1deadbe99a98d2c2e26bcebf55986d8e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-28T22:09:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T22:09:05.000Z", "avg_line_length": 37.9974662162, "max_line_length": 190, "alphanum_fraction": 0.6548934184, "num_tokens": 17302}
|
# -*- coding:utf-8 -*-
from __future__ import division, absolute_import
import numpy as np
from simple_ml.base.base_error import *
from simple_ml.base.base_model import BaseTransform
__all__ = ['PCA', 'SuperPCA']
class PCA(BaseTransform):
def __init__(self, top_n):
super(PCA, self).__init__()
self.top_n = top_n
self._variable_num = None
self._eigen_value = None
self._eigen_vector = None
self._explain = None
self._top_n_index = None
def fit(self, x, y=None):
super(PCA, self).fit(x, y)
self._variable_num = x.shape[1]
if self.top_n > self._variable_num:
raise TopNTooLargeError
self._fit(x)
@property
def eigen_value(self):
return self._eigen_value
@property
    def eigen_vector(self):
return self._eigen_vector
def _fit(self, x):
cov_mat = np.cov(x.T)
        # The eigenvalues and eigenvector matrix returned below have two quirks:
        # 1. each column of the eigenvector matrix is the eigenvector for the
        #    corresponding eigenvalue
        # 2. the eigenvalues are not sorted in descending order
self._eigen_value, self._eigen_vector = np.linalg.eig(cov_mat)
self._top_n_index = self._eigen_value.argsort()[-self.top_n:]
@property
def explain_ratio(self):
return self._explain
def transform(self, x):
super(PCA, self).transform(x)
if self._eigen_value is None:
raise ModelNotFittedError
self._explain = np.sum(self._eigen_value[self._top_n_index]) / np.sum(self._eigen_value)
new_x = np.array([self._transform_single(i) for i in x])
return new_x
def _transform_single(self, row):
res = [np.dot(row, i) for i in self._eigen_vector.T[self._top_n_index]]
return np.array(res)
def fit_transform(self, x, y=None):
self.fit(x)
return self.transform(x)
class SuperPCA(PCA):
def __init__(self, top_n):
"""
针对数据维度大于样本数目的情况,可以通过矩阵分解简化计算,但是最多只能得到等同于样本数目的主成分个数:
Pv = lambda v
XX'v = lambda v
X'XX'v = X'lambda v
sigma x' v = lambda X'v
sigma (X'v) = lambda (X'v)
所以只要求 XX'的主成分即可,其中X为去每列减去均值后除以根号(总行数-1),保证协方差矩阵sigma等于 X'X
:param top_n: 主成分个数,当大于样本个数时报错
"""
super(SuperPCA, self).__init__(top_n)
def fit(self, x, y=None):
super(PCA, self).fit(x, y)
_sample_number = x.shape[0]
self._variable_num = x.shape[1]
if _sample_number == 1:
raise NeedMoreSampleError
if _sample_number > self._variable_num:
            raise SampleNumberMismatchError("SuperPCA requires the number of samples to be smaller than the number of variables")
if self.top_n > _sample_number:
raise TopNTooLargeError
self._fit(x)
    def _fit(self, x):
        x_new = x.astype(float)  # astype copies, so the input is left untouched
        for i in range(x_new.shape[1]):  # normalize every column, not just the first n
            x_new[:, i] = (x_new[:, i] - np.mean(x_new[:, i])) * 1.0 / np.sqrt(x.shape[0] - 1)
        p = np.dot(x_new, x_new.T)
        self._eigen_value, _eigen_vector = np.linalg.eig(p)  # yields n eigenvalues
        self._eigen_vector = np.dot(x.T, _eigen_vector)  # yields a p-by-n eigenvector matrix, n << p
self._top_n_index = self._eigen_value.argsort()[-self.top_n:]
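

# Minimal usage sketch (illustrative only; the random data and shapes below are
# hypothetical, and BaseTransform.fit/transform are assumed to accept them):
if __name__ == '__main__':
    tall_x = np.random.rand(100, 5)       # 100 samples, 5 features
    pca = PCA(top_n=2)
    pca.fit(tall_x)
    reduced = pca.transform(tall_x)       # shape (100, 2)
    print(reduced.shape, pca.explain_ratio)

    wide_x = np.random.rand(10, 50)       # fewer samples than features
    spca = SuperPCA(top_n=5)
    spca.fit(wide_x)
    print(spca.transform(wide_x).shape)   # (10, 5)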
|
{"hexsha": "1bf8380cd3c5417c15203471b6005d8a4252bc27", "size": 3119, "ext": "py", "lang": "Python", "max_stars_repo_path": "simple_ml/pca.py", "max_stars_repo_name": "Yangruipis/simple_ml", "max_stars_repo_head_hexsha": "09657f6b017b973a5201aa611774d6ac8f0fc0a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-04-17T04:38:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-09T04:07:53.000Z", "max_issues_repo_path": "simple_ml/pca.py", "max_issues_repo_name": "Yangruipis/simple_ml", "max_issues_repo_head_hexsha": "09657f6b017b973a5201aa611774d6ac8f0fc0a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simple_ml/pca.py", "max_forks_repo_name": "Yangruipis/simple_ml", "max_forks_repo_head_hexsha": "09657f6b017b973a5201aa611774d6ac8f0fc0a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-04-17T05:27:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-01T02:55:15.000Z", "avg_line_length": 28.8796296296, "max_line_length": 96, "alphanum_fraction": 0.6050016031, "include": true, "reason": "import numpy", "num_tokens": 1024}
|
great_ne(q1,h1).
great_ne(t1,hh1).
great_ne(v1,w1).
great_ne(kk1,ff1).
great_ne(bb1,f1).
great_ne(jj1,f1).
great_ne(m1,c1).
great_ne(jj1,aa1).
great_ne(p1,ii1).
great_ne(p1,dd1).
great_ne(t1,m1).
great_ne(v1,i1).
great_ne(dd1,bb1).
great_ne(jj1,k1).
great_ne(cc1,kk1).
great_ne(aa1,i1).
great_ne(m1,v1).
great_ne(cc1,y1).
great_ne(jj1,i1).
great_ne(y1,a1).
great_ne(z1,kk1).
great_ne(ii1,f1).
great_ne(cc1,m1).
great_ne(hh1,y1).
great_ne(kk1,k1).
great_ne(p1,ee1).
great_ne(dd1,b1).
great_ne(t1,i1).
great_ne(q1,a1).
great_ne(u1,h1).
great_ne(k1,i1).
great_ne(t1,v1).
great_ne(u1,jj1).
great_ne(dd1,w1).
|
{"hexsha": "13ce3a30e97be9870d0d5231d7680b33c5aa6cda", "size": 604, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "foldsCreator/files/datasets/alzheimer_anime_noiseless/test10.f", "max_stars_repo_name": "manoelfranca/cilppp", "max_stars_repo_head_hexsha": "732189159a64e56cd32a5ad8a9010d928f4d41c6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-27T04:33:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-13T21:06:29.000Z", "max_issues_repo_path": "foldsCreator/files/datasets/alzheimer_anime_noiseless/test10.f", "max_issues_repo_name": "manoelfranca/cilppp", "max_issues_repo_head_hexsha": "732189159a64e56cd32a5ad8a9010d928f4d41c6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-09T16:54:52.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-09T16:54:52.000Z", "max_forks_repo_path": "foldsCreator/files/datasets/alzheimer_anime_noiseless/test10.f", "max_forks_repo_name": "manoelfranca/cilppp", "max_forks_repo_head_hexsha": "732189159a64e56cd32a5ad8a9010d928f4d41c6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.2571428571, "max_line_length": 18, "alphanum_fraction": 0.7185430464, "num_tokens": 251}
|
import os.path
import random
import numpy as np
from matplotlib import pyplot as plt
import mnist
from network import NeuralNetwork
# Download dataset
if(not os.path.exists('mnist.pkl')): mnist.get()
# Load dataset
training_data, training_labels, testing_data, testing_labels = mnist.load()
# Create NN
nn = NeuralNetwork(784, 100, 10)
# Train
nn.train(training_data, training_labels, 1200, 50, 0.02)
# Print acc and plot perf
acc = nn.accuracy(training_data, training_labels)
print('Train Accuracy:', acc)
acc = nn.accuracy(testing_data, testing_labels)
print('Test Accuracy:', acc)
nn.plot_performance()
# Pick a random example from testing data
index = random.randint(0, 9999)
# Show the test data and the label
plt.imshow(testing_data[index].reshape(28, 28))
plt.show()
# Show prediction
nn.feed(testing_data[index])
model_output = nn.get_output()
print('Predicted: ', np.argmax(model_output))
|
{"hexsha": "eb55a1ec805b40571728fbdd8ed6e9bdb33e4573", "size": 910, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "TheGameDevDude/DigitRecognition", "max_stars_repo_head_hexsha": "59269d640b89f991ab32b50042e5a26179f89b2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "TheGameDevDude/DigitRecognition", "max_issues_repo_head_hexsha": "59269d640b89f991ab32b50042e5a26179f89b2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "TheGameDevDude/DigitRecognition", "max_forks_repo_head_hexsha": "59269d640b89f991ab32b50042e5a26179f89b2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.75, "max_line_length": 75, "alphanum_fraction": 0.7626373626, "include": true, "reason": "import numpy", "num_tokens": 224}
|
# Diffusion maps
# --------------
# Diffusion maps,
# Coifman, R. & Lafon, S., Applied and Computational Harmonic Analysis, Elsevier, 2006, 21, 5-30
#### DiffMap type
struct DiffMap{T <: AbstractFloat} <: SpectralResult
t::Int
ɛ::Float64
K::AbstractMatrix{T}
proj::Projection{T}
DiffMap{T}(t::Int, ɛ::T, K::AbstractMatrix{T}, proj::Projection{T}) where T = new(t, ɛ, K, proj)
end
## properties
outdim(M::DiffMap) = size(M.proj, 1)
projection(M::DiffMap) = M.proj
kernel(M::DiffMap) = M.K
## show & dump
function show(io::IO, M::DiffMap)
print(io, "Diffusion Maps(outdim = $(outdim(M)), t = $(M.t), ɛ = $(M.ɛ))")
end
function Base.dump(io::IO, M::DiffMap)
println(io, "Dimensionality:")
show(io, outdim(M))
print(io, "\n\n")
println(io, "Timesteps:")
show(io, M.t)
print(io, "\n\n")
println(io, "Kernel: ")
Base.showarray(io, M.K, false, header=false)
println(io)
println(io, "Embedding:")
Base.showarray(io, projection(M), false, header=false)
end
## interface functions
function transform(::Type{DiffMap}, X::DenseMatrix{T};
d::Int=2, t::Int=1, ɛ::T=1.0) where T<:AbstractFloat
transform!(fit(UnitRangeTransform, X), X)
sumX = sum(X.^ 2, dims=1)
    K = exp.(-( transpose(sumX) .+ sumX .- 2*transpose(X) * X ) ./ ɛ) # Gaussian kernel of pairwise squared distances
p = transpose(sum(K, dims=1))
K ./= ((p * transpose(p)) .^ t)
p = transpose(sqrt.(sum(K, dims=1)))
K ./= (p * transpose(p))
U, S, V = svd(K, full=true)
U ./= U[:,1]
Y = U[:,2:(d+1)]
return DiffMap{T}(t, ɛ, K, transpose(Y))
end
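## Usage sketch (illustrative; the random data below is hypothetical, and
## StatsBase's UnitRangeTransform and LinearAlgebra's svd are assumed available,
## as used above):
# X = rand(3, 100)                           # 3 features × 100 observations
# M = transform(DiffMap, X; d = 2, t = 1, ɛ = 1.0)
# Y = projection(M)                          # 2 × 100 embedding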
|
{"hexsha": "16eb59e45769b7106e83ff0bfb848ac999f4caf7", "size": 1581, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/diffmaps.jl", "max_stars_repo_name": "simonschoelly/ManifoldLearning.jl", "max_stars_repo_head_hexsha": "ac0f75fd0ea71a625e939d36d3f2ae118fdb2069", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/diffmaps.jl", "max_issues_repo_name": "simonschoelly/ManifoldLearning.jl", "max_issues_repo_head_hexsha": "ac0f75fd0ea71a625e939d36d3f2ae118fdb2069", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/diffmaps.jl", "max_forks_repo_name": "simonschoelly/ManifoldLearning.jl", "max_forks_repo_head_hexsha": "ac0f75fd0ea71a625e939d36d3f2ae118fdb2069", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7966101695, "max_line_length": 101, "alphanum_fraction": 0.5831752056, "num_tokens": 538}
|
import os
extension_modules = {}
directory = 'src/xxdata_11'
sources = ['xxdata_11.for', 'xxrptn.for', 'i4unit.for',
'i4fctn.for', 'xxword.for', 'xxcase.for', 'xfelem.for', 'xxslen.for',
'../xxdata_11.pyf', '../helper_functions.for']
extension_modules['_xxdata_11'] = dict(sources=sources, directory=directory)
directory = 'src/xxdata_15'
sources = ['xxdata_15.for', 'xxrptn.for', 'xxmkrp.for', 'i4unit.for',
'i4fctn.for', 'r8fctn.for', 'xxhkey.for', 'xxword.for', 'xxcase.for',
'i4eiz0.for', 'xfelem.for', 'xxslen.for',
'../xxdata_15.pyf', '../helper_functions.for']
extension_modules['_xxdata_15'] = dict(sources=sources, directory=directory)
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('atomic', parent_package, top_path)
    for module, values in extension_modules.items():
directory = values['directory']
sources = values['sources']
sources = [os.path.join(directory, i) for i in sources]
config.add_extension(module, sources)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
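
# Typical build invocation (illustrative, not from the original source):
#   python setup.py build_ext --inplace
# which compiles each Fortran extension listed above through numpy.distutils/f2py.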
|
{"hexsha": "717dbf662027141d041c7f93c7269922a2586b17", "size": 1230, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "wagdav/atomic", "max_stars_repo_head_hexsha": "c54225abdb607c53a9d35658d381635403c751fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-03-05T21:20:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T06:49:15.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "wagdav/atomic", "max_issues_repo_head_hexsha": "c54225abdb607c53a9d35658d381635403c751fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "wagdav/atomic", "max_forks_repo_head_hexsha": "c54225abdb607c53a9d35658d381635403c751fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2016-09-16T17:23:06.000Z", "max_forks_repo_forks_event_max_datetime": "2017-03-03T15:38:29.000Z", "avg_line_length": 33.2432432432, "max_line_length": 76, "alphanum_fraction": 0.6861788618, "include": true, "reason": "from numpy", "num_tokens": 339}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Ryan L. Collins <rlcollins@g.harvard.edu>
# and the Talkowski Laboratory
# Distributed under terms of the MIT license.
"""
Create single BED file of pext scores per base per gene
"""
import json
from pandas import to_numeric
from numpy import nanmax
import argparse
from sys import stdout
from os import path
from pysam import TabixFile
import subprocess
import gzip
def process_pext_line(line, gene_field, pext_field, pan_tissue):
"""
Extract single line from data in pext tsv and reformat as BED
"""
dat = line.split('\t')
chrom = dat[0]
start = int(dat[1])
end = start + 1
annos = json.loads(dat[-1])[0]
gene = annos[gene_field]
if pan_tissue:
ignore_fields = 'ensg csq symbol lof lof_flag'.split()
vals = [v for k, v in annos.items() if k not in ignore_fields]
val = str(nanmax(to_numeric(vals, errors='coerce')))
else:
val = str(annos[pext_field])
# In raw pext file, nan corresponds to not expressed
if val in 'NaN nan'.split():
val = '0'
return chrom, start, end, gene, val
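

# Illustrative example of the parsing above (hypothetical input line, default
# field names):
#   process_pext_line('1\t12344\t[{"symbol": "GENE1", "mean_proportion": 0.85}]',
#                     'symbol', 'mean_proportion', False)
# returns ('1', 12344, 12345, 'GENE1', '0.85')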
def main():
"""
Main block
"""
# Parse command line arguments and options
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('pext', help='TSV of all pext scores')
parser.add_argument('--gene-field', help='Field name in pext tsv to use as ' +
'gene identifier [default: "symbol"]', default='symbol')
parser.add_argument('--pext-field', help='Field name in pext tsv to report as ' +
'pext value [default: "mean_proportion"]',
default='mean_proportion')
parser.add_argument('--pan-tissue', action='store_true', help='Report max pext ' +
'across all tissues. Ignores --pext-field. [Default: false]')
parser.add_argument('--contig', help='Specify a single contig to process. ' +
'[default: process all autosomes]')
parser.add_argument('-o', '--outbed', help='Path to output BED file. ' +
'[default: stdout]')
parser.add_argument('-z', '--bgzip', dest='bgzip', action='store_true',
help='Compress output BED with bgzip.')
args = parser.parse_args()
# Open connection to output file
if args.outbed is None \
or args.outbed in 'stdout -'.split():
outbed = stdout
else:
if path.splitext(args.outbed)[-1] in '.gz .bz .bgz .bgzip .gzip'.split():
outbed_path = path.splitext(args.outbed)[0]
else:
outbed_path = args.outbed
outbed = open(outbed_path, 'w')
    outbed_header = '\t'.join('#chr start end gene pext'.split())
    outbed.write(outbed_header + '\n')  # the value column name 'pext' is a placeholder
# Iterate over chromosomes with tabix and process pext data
with TabixFile(args.pext) as pextfile:
if args.contig is None:
            contigs = [str(i + 1) for i in range(22)]  # pysam expects contig names as strings
else:
contigs = [args.contig]
for contig in contigs:
processed = {}
for line in pextfile.fetch(contig):
chrom, start, end, gene, val \
= process_pext_line(line, args.gene_field, args.pext_field,
args.pan_tissue)
                if gene not in processed.keys():
                    # write the first record seen for this gene, then start
                    # tracking its positions for deduplication
                    pext_data = [chrom, start, end, gene, val]
                    outbed.write('\t'.join([str(x) for x in pext_data]) + '\n')
                    processed[gene] = [start]
                else:
                    if start not in processed[gene]:
                        pext_data = [chrom, start, end, gene, val]
                        outbed.write('\t'.join([str(x) for x in pext_data]) + '\n')
                        processed[gene].append(start)
# Bgzip output, if optioned
if args.outbed is not None \
and args.outbed not in 'stdout -'.split() \
and args.bgzip:
subprocess.run(['bgzip', '-f', outbed_path])
if __name__ == '__main__':
main()
|
{"hexsha": "344315eb327c3f2c441564e0a509f8b3e96b105d", "size": 3989, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_curation/gene/process_pext.py", "max_stars_repo_name": "talkowski-lab/rCNV2", "max_stars_repo_head_hexsha": "fcc1142d8c13b58d18a37fe129e9bb4d7bd6641d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-01-28T15:46:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T06:50:40.000Z", "max_issues_repo_path": "data_curation/gene/process_pext.py", "max_issues_repo_name": "talkowski-lab/rCNV2", "max_issues_repo_head_hexsha": "fcc1142d8c13b58d18a37fe129e9bb4d7bd6641d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-02T01:33:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-02T01:33:53.000Z", "max_forks_repo_path": "data_curation/gene/process_pext.py", "max_forks_repo_name": "talkowski-lab/rCNV2", "max_forks_repo_head_hexsha": "fcc1142d8c13b58d18a37fe129e9bb4d7bd6641d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-21T19:49:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T15:56:21.000Z", "avg_line_length": 33.8050847458, "max_line_length": 86, "alphanum_fraction": 0.5863624969, "include": true, "reason": "from numpy", "num_tokens": 977}
|
line_defaults() = Dict{Symbol, Any}([
:labelV => nothing
]);
"""
$(SIGNATURES)
Line graph. Simple wrapper around `lines`.
"""
function line_plot(xV, yV :: AbstractVector{F};
fig = blank_plot(), pos = (1,1), kwargs ...) where F
args = merge(line_defaults(), kwargs);
ax = make_axis(fig, pos; args...);
lines!(ax, xV, yV; args...);
return fig, ax
end
function line_plot(yV :: AbstractVector{F};
fig = blank_plot(), pos = (1,1), kwargs ...) where F
return line_plot(1 : length(yV), yV; fig = fig, pos = pos, kwargs...);
end
"""
$(SIGNATURES)
Multi-line plot from data matrix. Columns are series.
This is really the same as `Makie.series`.
# Arguments
- legPos: `:below` places a legend below the graph (in a new frame).
- labelV: labels for legend (optional).
Fix: Legend colors are wrong +++
# Example
```julia
fig, ax = line_plot(1:4, rand(4,3); labelV = ["Lbl \$j" for j = 1 : 3]);
axislegend()
```
"""
function line_plot(xV, yM :: AbstractMatrix{F};
fig = blank_plot(), pos = (1,1),
legPos = :none,
kwargs...) where F
args = merge(line_defaults(), kwargs);
ax = make_axis(fig, pos; args...);
nr, nc = size(yM);
@assert nr == length(xV);
for j = 1 : nc
add_line!(ax, xV, yM[:, j];
label = get_idx(args[:labelV], j),
color = get_colors(j, nc), # (fill(j, nr), nc),
# color = fill(j, nr), colorrange = (1, nc),
args...);
end
if !isnothing(args[:labelV]) && (legPos == :below)
legPos = (pos[1] + 1, pos[2]);
fig[legPos...] = Legend(fig, ax;
orientation = :horizontal,
tellwidth = false, tellheight = true);
end
return fig, ax
end
"""
$(SIGNATURES)
Add line to a plot.
"""
function add_line!(ax :: Axis, x, y; kwargs...)
lines!(ax, x, y; kwargs...);
end
add_line!(p :: Makie.FigureAxisPlot, x, y; kwargs...) =
add_line!(p.axis, x, y; kwargs...);
"""
$(SIGNATURES)
Add error band.
How to get these colored is not clear. Adding
`color = (get_colors(j, n), 0.2)`
achieves transparency, but no color (all grey).
"""
add_error_band!(ax :: Axis, x, y, errorV; kwargs...) =
band!(ax, x, y .- errorV, y .+ errorV; kwargs...)
add_error_band!(ax :: Axis, x, y, errorV :: Nothing; kwargs...) = nothing;
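# Example (sketch; hypothetical data):
#   y = rand(10);
#   fig, ax = line_plot(1:10, y);
#   add_error_band!(ax, 1:10, y, fill(0.1, 10));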
# -----------
|
{"hexsha": "b20f660699d8a84ff6b076ccbc4d635e003aac46", "size": 2334, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/line_graphs.jl", "max_stars_repo_name": "hendri54/CollegeStratMakie", "max_stars_repo_head_hexsha": "ece1b3be11b2a8528ae7b4eba78b9e9e1b457ec0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/line_graphs.jl", "max_issues_repo_name": "hendri54/CollegeStratMakie", "max_issues_repo_head_hexsha": "ece1b3be11b2a8528ae7b4eba78b9e9e1b457ec0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/line_graphs.jl", "max_forks_repo_name": "hendri54/CollegeStratMakie", "max_forks_repo_head_hexsha": "ece1b3be11b2a8528ae7b4eba78b9e9e1b457ec0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5757575758, "max_line_length": 74, "alphanum_fraction": 0.5702656384, "num_tokens": 716}
|
# Inspired by Self-Driving-Car Nano Degree from Udacity
# Assuming that we know the region which is of interest to us
# Eg: This could be our knowledge of how and where the camera is mounted, therefore what part of the image would have
# the road
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
image_path = os.path.join(os.getcwd(), "../../samples/roads/road_1.jpg")
if not os.path.exists(image_path):
print("Image does not exist!")
exit()
image = mpimg.imread(image_path)
ysize = image.shape[0]
xsize = image.shape[1]
region_select = np.copy(image)
# Define a triangle region of interest
# Keep in mind the origin (x=0, y=0) is in the upper left in image processing
# Note: if you run this code, you'll find these are not sensible values!!
left_bottom = [0, 539]
right_bottom = [900, 300]
apex = [400, 0]
# Fit lines (y=Ax+B) to identify the 3 sided region of interest
# np.polyfit() returns the coefficients [A, B] of the fit
fit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)
fit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)
fit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1)
# Find the region inside the lines
XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))
region_thresholds = (YY > (XX * fit_left[0] + fit_left[1])) & \
(YY > (XX * fit_right[0] + fit_right[1])) & \
(YY < (XX * fit_bottom[0] + fit_bottom[1]))
# Color pixels red which are inside the region of interest
region_select[region_thresholds] = [255, 0, 0]
# Display the image
plt.imshow(region_select)
plt.show()
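
# For reference (illustrative): with exactly two points np.polyfit returns the
# line through them, e.g. np.polyfit((0, 2), (1, 5), 1) -> array([2., 1.]),
# i.e. y = 2*x + 1.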
|
{"hexsha": "db90d9531243f73018719802d29296aeacafb0f3", "size": 1710, "ext": "py", "lang": "Python", "max_stars_repo_path": "image_processing/basics/002_region_selection.py", "max_stars_repo_name": "eshanmherath/AV-Perception", "max_stars_repo_head_hexsha": "ec56065621141c436d8be39094f4505a6971e796", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-09T09:31:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-09T12:23:34.000Z", "max_issues_repo_path": "image_processing/basics/002_region_selection.py", "max_issues_repo_name": "eshanmherath/AV-Perception", "max_issues_repo_head_hexsha": "ec56065621141c436d8be39094f4505a6971e796", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image_processing/basics/002_region_selection.py", "max_forks_repo_name": "eshanmherath/AV-Perception", "max_forks_repo_head_hexsha": "ec56065621141c436d8be39094f4505a6971e796", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5294117647, "max_line_length": 117, "alphanum_fraction": 0.6929824561, "include": true, "reason": "import numpy", "num_tokens": 484}
|
[STATEMENT]
theorem load_after_alloc_2:
assumes "alloc h c s = Success (h', cap)"
and "|t|\<^sub>\<tau> \<le> s"
and "block_id cap \<noteq> block_id cap'"
shows "load h' cap' t = load h cap' t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. load h' cap' t = load h cap' t
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
alloc h c s = Success (h', cap)
|t|\<^sub>\<tau> \<le> s
block_id cap \<noteq> block_id cap'
goal (1 subgoal):
1. load h' cap' t = load h cap' t
[PROOF STEP]
unfolding alloc_def load_def
[PROOF STATE]
proof (prove)
using this:
(let cap = \<lparr>block_id = next_block h, offset = 0, base = 0, len = s, perm_load = True, perm_cap_load = c, perm_store = True, perm_cap_store = c, perm_cap_store_local = c, perm_global = False, tag = True\<rparr>; h' = h\<lparr>next_block := next_block h + 1, heap_map := Mapping.update (next_block h) (Map \<lparr>bounds = (0, s), content = Mapping.empty, tags = Mapping.empty\<rparr>) (heap_map h)\<rparr> in Success (h', cap)) = Success (h', cap)
|t|\<^sub>\<tau> \<le> s
block_id cap \<noteq> block_id cap'
goal (1 subgoal):
1. (if tag cap' = False then Error (C2Err TagViolation) else if perm_load cap' = False then Error (C2Err PermitLoadViolation) else if int (base cap' + len cap') < offset cap' + int |t|\<^sub>\<tau> then Error (C2Err LengthViolation) else if offset cap' < int (base cap') then Error (C2Err LengthViolation) else if offset cap' mod int |t|\<^sub>\<tau> \<noteq> 0 then Error (C2Err BadAddressViolation) else let obj = Mapping.lookup (heap_map h') (block_id cap') in case obj of None \<Rightarrow> Error (LogicErr MissingResource) | Some Freed \<Rightarrow> Error (LogicErr UseAfterFree) | Some (Map m) \<Rightarrow> if offset cap' < int (fst (bounds m)) \<or> int (snd (bounds m)) < offset cap' + int |t|\<^sub>\<tau> then Error (LogicErr BufferOverrun) else Success (retrieve_tval m (nat (offset cap')) t (perm_cap_load cap'))) = (if tag cap' = False then Error (C2Err TagViolation) else if perm_load cap' = False then Error (C2Err PermitLoadViolation) else if int (base cap' + len cap') < offset cap' + int |t|\<^sub>\<tau> then Error (C2Err LengthViolation) else if offset cap' < int (base cap') then Error (C2Err LengthViolation) else if offset cap' mod int |t|\<^sub>\<tau> \<noteq> 0 then Error (C2Err BadAddressViolation) else let obj = Mapping.lookup (heap_map h) (block_id cap') in case obj of None \<Rightarrow> Error (LogicErr MissingResource) | Some Freed \<Rightarrow> Error (LogicErr UseAfterFree) | Some (Map m) \<Rightarrow> if offset cap' < int (fst (bounds m)) \<or> int (snd (bounds m)) < offset cap' + int |t|\<^sub>\<tau> then Error (LogicErr BufferOverrun) else Success (retrieve_tval m (nat (offset cap')) t (perm_cap_load cap')))
[PROOF STEP]
by force
|
{"llama_tokens": 1021, "file": "CHERI-C_Memory_Model_CHERI_C_Concrete_Memory_Model", "length": 3}
|
from sympy import Symbol, integrate, Eq, Mul, Add, Float, cos, sin, Pow, Unequality, core
import sympy
import re
"""First try to read general sin or cos functions from arguments"""
def gchsnew(funct, der, leng, symbo):
inte = 0
inte2 = 1
    print('in gcf')
if re.search("sin", str(funct)) is not None or re.search("cos", str(funct)) is not None:
print('in f')
funct1 = funct
for i in range(0, der):
funct1 = integrate(funct1, symbo)
print('After integration: ' + str(funct1))
try:
int_aa = []
for aa in range(0, funct1.args.__len__()):
print('in aa')
                print('Number of first-level args: ' + str(funct1.args.__len__()))
                print('First-level arg: ' + str(funct1.args[aa]))
print(funct1.args[aa].func)
if funct1.args[aa] == -1 or funct1.args[aa] == 1 or funct1.args[aa].func == sin or funct1.args[aa].func == cos or funct1.args[aa].func == Pow:
int_aa.append(funct1.args[aa])
elif re.search('Unequality', str(funct1.args[aa].func)) is not None or re.search('Boolean', str(funct1.args[aa].func)) is not None\
or re.search('Zero', str(funct1.args[aa].func)) is not None:
print('Unequality aa')
continue
else:
print('In else')
int_aa.append(Adding_args_of_one_layer(funct1.args[aa].args, funct1.args[aa]))
for a in int_aa:
print(a)
for a in int_aa:
print('Should only be seen once')
print(funct.func)
if re.search('Mul', str(funct.func)) is not None:
inte2 = inte2 * a[0]
else:
print('in else')
print(a)
inte = inte + a[0]
print('Inte: ' +str(inte))
if re.search('Mul', str(funct.func)) is not None:
inte = inte2
print(inte)
inte = inte.subs(symbo, leng) - inte.subs(symbo, 0)
except:
print('In exception')
return funct1
else:
inte = funct
for i in range(0, der - 1):
inte = integrate(inte, symbo)
inte = integrate(inte, (symbo, 0, leng))
return inte
def gchs(funct, der, leng, symbo):
inte = 0
inte2 = 1
    print('in gcf')
if re.search("sin", str(funct)) is not None or re.search("cos", str(funct)) is not None:
print('in f')
funct1 = funct
for i in range(0, der):
funct1 = integrate(funct1, symbo)
print('After integration: ' + str(funct1))
try:
int_aa = []
for aa in range(0, funct1.args.__len__()):
print('in aa')
                print('Number of first-level args: ' + str(funct1.args.__len__()))
                print('First-level arg: ' + str(funct1.args[aa]))
print(funct1.args[aa].func)
if funct1.args[aa] == -1 or funct1.args[aa] == 1 or funct1.args[aa].func == sin or funct1.args[aa].func == cos or funct1.args[aa].func == Pow:
int_aa.append(funct1.args[aa])
elif re.search('Unequality', str(funct1.args[aa].func)) is not None or re.search('Boolean', str(funct1.args[aa].func)) is not None\
or re.search('Zero', str(funct1.args[aa].func)) is not None:
print('Unequality aa')
continue
else:
b_parameters = []
for ba in range(0, funct1.args[aa].__len__()):
arg_ba = funct1.args[aa][ba]
print('in ba')
print(funct1.args[aa][ba].func)
                        print('Number of second-level args: ' + str(funct1.args[aa].__len__()))
                        print('Second-level arg: ' + str(funct1.args[aa][ba]))
if check_if_further(arg_ba) == False or re.search('Mul', str(funct1.args[aa][ba].func)) is not None:
print('if ba')
b_parameters.append(arg_ba)
print(b_parameters)
elif check_if_stop(arg_ba):
print('Unequality aa')
continue
else:
continue
print('lö')
int_a = 1
int_a2 = 0
for bam in b_parameters:
print('in bam')
print(bam)
if re.search('Mul', str(funct1.args[aa].func)) is not None:
int_a = int_a * bam
else:
int_a2 = int_a2 + bam
b_parameters.clear()
if re.search('Mul', str(funct1.args[aa].func)) is not None:
int_aa.append(int_a)
else:
int_aa.append(int_a2)
print('Results from second level:')
print(int_aa)
for a in int_aa:
print('Should only be seen once')
if funct1.func == Mul:
inte2 = inte2 * a
else:
inte = inte + a
if funct1.args[aa].func == Mul:
inte = inte2
inte = inte.subs(symbo, leng) - inte.subs(symbo, 0)
except:
        print('In exception')
else:
inte = funct
for i in range(0, der - 1):
inte = integrate(inte, symbo)
inte = integrate(inte, (symbo, 0, leng))
return inte
def check_if_further(funct):
if funct == -1 or funct == 1 or re.search('sin', str(funct.func)) is not None or \
re.search('cos', str(funct.func)) is not None or re.search('Pow', str(funct.func)) is not None:
answer = False
else:
answer = True
return answer
def check_if_stop(funct):
if re.search('Unequality', str(funct.func)) is not None or re.search('Boolean', str(funct.func)) is not None\
or re.search('Zero', str(funct.func)) is not None:
answer = True
else:
answer = False
return answer
def Adding_args_of_one_layer(funct, func_prior):
print('In extra function')
print(func_prior)
print(func_prior.func)
result = []
try:
b_parameters = []
for ba in range(0, funct.__len__()):
arg_ba = funct[ba]
print('in range')
            print('Function: ' + str(arg_ba.func))
            print('Function value: ' + str(arg_ba))
if check_if_further(arg_ba) == False:
print('if ba')
b_parameters.append(arg_ba)
print(b_parameters)
elif check_if_stop(arg_ba):
print('Unequality aa')
continue
else:
b_parameters = Adding_args_of_one_layer(arg_ba.args, arg_ba)
print('lö')
int_a = 1
int_a2 = 0
for bam in b_parameters:
print('in bam')
print(bam)
print(func_prior.func)
if re.search('Mul', str(func_prior.func)) is not None:
int_a = int_a * bam
else:
int_a2 = int_a2 + bam
b_parameters.clear()
if re.search('Mul', str(func_prior.func)) is not None:
result.append(int_a)
else:
result.append(int_a2)
print('Results from second level:')
print(result)
return result
except:
return funct
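

if __name__ == '__main__':
    # Minimal sketch (hypothetical inputs): a polynomial takes the plain
    # integration branch above, returning the definite double integral over
    # [0, leng].
    x = Symbol('x')
    leng = Symbol('L', positive=True)
    print(gchs(x**2, 2, leng, x))  # -> L**4/12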
|
{"hexsha": "c3299999beda3d1ee5a23fd0e1e7b82ce9800ce4", "size": 7823, "ext": "py", "lang": "Python", "max_stars_repo_path": "System_of_Beams/testing_further_possibilties/integ_general.py", "max_stars_repo_name": "ChairOfStructuralMechanicsTUM/Mechanics_Apps", "max_stars_repo_head_hexsha": "b064a42d4df3fa9bde62a5cff9cb27ca61b0127c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2017-05-06T17:05:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T09:26:47.000Z", "max_issues_repo_path": "System_of_Beams/testing_further_possibilties/integ_general.py", "max_issues_repo_name": "ChairOfStructuralMechanicsTUM/Mechanics_Apps", "max_issues_repo_head_hexsha": "b064a42d4df3fa9bde62a5cff9cb27ca61b0127c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 49, "max_issues_repo_issues_event_min_datetime": "2017-04-20T11:26:11.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-29T13:18:06.000Z", "max_forks_repo_path": "System_of_Beams/testing_further_possibilties/integ_general.py", "max_forks_repo_name": "ChairOfStructuralMechanicsTUM/Mechanics_Apps", "max_forks_repo_head_hexsha": "b064a42d4df3fa9bde62a5cff9cb27ca61b0127c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-02-14T12:55:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T15:07:07.000Z", "avg_line_length": 39.3115577889, "max_line_length": 158, "alphanum_fraction": 0.4824236227, "include": true, "reason": "import sympy,from sympy", "num_tokens": 1866}
|
from model import create_model
from ..data import DataLoader
from pathlib import Path
import numpy as np
import os
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import confusion_matrix
def shuffle(x_train, y_train):
idx = np.arange(x_train.shape[0])
np.random.shuffle(idx)
return x_train[idx], y_train[idx]
def plot_loss_graph(history):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.savefig('./../../reports/figures/loss-curve.png')
return 0
def plot_confusion_matrix(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize=True):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citiation
---------
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.savefig('./../../reports/figures/confusion_matrix.png')
return 0
if __name__ == '__main__':
model = create_model()
project_dir = Path(__file__).resolve().parents[2]
print("Project dir {0}".format(project_dir))
loader = DataLoader(os.path.join(project_dir, "data/external"))
x_train, x_test, y_train, y_test = loader.load_data()
x_train, y_train = shuffle(x_train, y_train)
x_train = x_train / 255.0
x_test = x_test / 255.0
history = model.fit(x_train, y_train, batch_size=16, epochs=200, validation_data=(x_test, y_test))
    plot_loss_graph(history)
y_pred_class = model.predict(x_test)
y_pred_class[y_pred_class < 0.5] = 0
y_pred_class[y_pred_class >= 0.5] = 1
cm = confusion_matrix(y_test, y_pred_class)
plot_confusion_matrix(cm,
normalize=False,
target_names=['normal', 'cataract'],
title="Confusion Matrix")
model.save('final-700imgs.h5')
|
{"hexsha": "8facd435d8b201a8ca8750cc44ea5ae355d3b794", "size": 4892, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/train_model.py", "max_stars_repo_name": "akash-harijan/cataract-detection", "max_stars_repo_head_hexsha": "ccb7045290a7a002bba1ff68220d19ec3a79ea2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/train_model.py", "max_issues_repo_name": "akash-harijan/cataract-detection", "max_issues_repo_head_hexsha": "ccb7045290a7a002bba1ff68220d19ec3a79ea2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/train_model.py", "max_forks_repo_name": "akash-harijan/cataract-detection", "max_forks_repo_head_hexsha": "ccb7045290a7a002bba1ff68220d19ec3a79ea2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7662337662, "max_line_length": 102, "alphanum_fraction": 0.5983237939, "include": true, "reason": "import numpy", "num_tokens": 1139}
|
[STATEMENT]
lemma \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_skeleton_wf [simp]:
"wellformed \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_skeleton"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_skeleton
[PROOF STEP]
proof (rule, intro allI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>pn pn'. call(pn') \<notin> stermsl (\<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_skeleton pn)
[PROOF STEP]
fix pn pn'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>pn pn'. call(pn') \<notin> stermsl (\<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_skeleton pn)
[PROOF STEP]
show "call(pn') \<notin> stermsl (\<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_skeleton pn)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. call(pn') \<notin> stermsl (\<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_skeleton pn)
[PROOF STEP]
by (cases pn) simp_all
[PROOF STATE]
proof (state)
this:
call(pn') \<notin> stermsl (\<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V_skeleton pn)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 459, "file": "AODV_Aodv", "length": 5}
|
#include <CExprI.h>
#include <boost/math/special_functions/erf.hpp>
#include <CMathGen.h>
#include <CInvNorm.h>
#include <COSNaN.h>
#include <cmath>
#include <complex>
#include <cstdlib>
#include <cstring>
// NOTE: types are only needed if normal conversion rules don't handle the type correctly
#ifdef GNUPLOT_EXPR
namespace {
double invnorm(double x) {
//return sqrt(2)/erf(2*x - 1);
double y = CInvNorm::calc(x);
if (COSNaN::is_pos_inf(y) || COSNaN::is_neg_inf(y))
COSNaN::set_nan(y);
return y;
}
double norm(double x)
{
x = 0.5*sqrt(2)*x;
x = 0.5*erfc(-x);
return x;
}
double inverf(double x) {
try {
return boost::math::erf_inv(x);
} catch (...) {
return CMathGen::getNaN();
}
}
// TODO
std::complex<double> cerf(const std::complex<double> &c) {
double r = ::erf(c.real());
return std::complex<double>(r, 0);
}
// TODO
std::complex<double> cerfc(const std::complex<double> &c) {
double r = ::erfc(c.real());
return std::complex<double>(r, 0);
}
double RadToDeg(double x) {
return 180.0*x/M_PI;
}
double DegToRad(double x) {
return M_PI*x/180.0;
}
}
class CExprSubStr {
public:
std::string operator()(const std::string &str, int i1, int i2) {
if (i1 < 1 ) i1 = 1;
if (i1 > int(str.size())) i1 = str.size();
if (i2 < 0 ) i2 = str.size();
if (i2 > int(str.size())) i2 = str.size();
std::string str1;
for (int j = i1 - 1; j <= i2 - 1; ++j)
str1 += str[j];
return str1;
}
};
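// e.g. CExprSubStr()("hello", 2, 4) returns "ell" (1-based, inclusive bounds).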
class CExprStrStrT {
public:
// find position of str2 in str1
int operator()(const std::string &str1, const std::string &str2) {
auto p = str1.find(str2);
if (p == std::string::npos)
return 0;
else
return p + 1;
}
};
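// e.g. CExprStrStrT()("hello", "ll") returns 3 (1-based position; 0 if absent).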
class CExprWords {
public:
int operator()(const std::string &str) {
std::vector<std::string> words;
(void) CStrUtil::addWords(str, words);
return words.size();
}
};
class CExprWord {
public:
std::string operator()(const std::string &str, int i) {
std::vector<std::string> words;
(void) CStrUtil::addWords(str, words);
if (i > 0 && i <= int(words.size()))
return words[i - 1];
else
return "";
}
};
class CExprStrLen {
public:
int operator()(const std::string &str) {
return str.size();
}
};
class CExprSystem {
public:
std::string operator()(const std::string &str) {
FILE *fp = popen(str.c_str(), "r");
if (! fp) return "";
std::string res;
int c;
while ((c = fgetc(fp)) != EOF)
res += char(c);
pclose(fp);
return res;
}
};
#endif
struct CExprBuiltinFunction {
const char *name;
const char *args;
CExprFunctionProc proc;
};
static CExprValuePtr
CExprFunctionSqrt(CExpr *expr, const CExprValueArray &values)
{
assert(values.size() == 1);
double real;
if (! values[0]->getRealValue(real))
return CExprValuePtr();
if (real >= 0.0) {
double real1 = ::sqrt(real);
return expr->createRealValue(real1);
}
else {
double real1 = ::sqrt(-real);
return expr->createComplexValue(std::complex<double>(0, real1));
}
}
static CExprValuePtr
CExprFunctionExp(CExpr *expr, const CExprValueArray &values)
{
assert(values.size() == 1);
double r;
std::complex<double> c;
if (values[0]->isComplexValue()) {
if (values[0]->getComplexValue(c)) {
double r1 = exp(c.real())*cos(c.imag());
double c1 = exp(c.real())*sin(c.imag());
return expr->createComplexValue(std::complex<double>(r1, c1));
}
else
return CExprValuePtr();
}
else if (values[0]->getRealValue(r)) {
double r1 = exp(r);
return expr->createRealValue(r1);
}
else
return CExprValuePtr();
}
#define CEXPR_REAL_TO_REAL_FUNC(NAME, F) \
static CExprValuePtr \
CExprFunction##NAME(CExpr *expr, const CExprValueArray &values) { \
assert(values.size() == 1); \
double r = 0.0; \
if (values[0]->getRealValue(r)) { \
} \
else \
return CExprValuePtr(); \
double r1 = F(r); \
return expr->createRealValue(r1); \
}
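
// For example, CEXPR_REAL_TO_REAL_FUNC(Sin, ::sin) would expand to a
// CExprFunctionSin wrapper that reads one real argument and returns
// expr->createRealValue(::sin(r)). (Illustrative expansion; the actual
// instantiations live where the builtin function table is defined.)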
#define CEXPR_REALC_TO_REAL_FUNC(NAME, F) \
static CExprValuePtr \
CExprFunction##NAME(CExpr *expr, const CExprValueArray &values) { \
assert(values.size() == 1); \
double r = 0.0; \
if (values[0]->isComplexValue()) { \
std::complex<double> c; \
if (values[0]->getComplexValue(c)) { \
r = c.real(); \
} \
else \
return CExprValuePtr(); \
} \
else if (values[0]->getRealValue(r)) { \
} \
else \
return CExprValuePtr(); \
double r1 = F(r); \
return expr->createRealValue(r1); \
}
#define CEXPR_REALC_TO_REALC_FUNC(NAME, F) \
static CExprValuePtr \
CExprFunction##NAME(CExpr *expr, const CExprValueArray &values) { \
assert(values.size() == 1); \
double r; \
if (values[0]->isComplexValue()) { \
std::complex<double> c; \
if (values[0]->getComplexValue(c)) { \
errno = 0; \
std::complex<double> c1 = F(c); \
if (errno != 0) return CExprValuePtr(); \
return expr->createComplexValue(c1); \
} \
return CExprValuePtr(); \
} \
else if (values[0]->getRealValue(r)) { \
double r1 = F(r); \
return expr->createRealValue(r1); \
} \
else \
return CExprValuePtr(); \
}
#define CEXPR_ANGLE_TO_REAL_FUNC(NAME, F) \
static CExprValuePtr \
CExprFunction##NAME(CExpr *expr, const CExprValueArray &values) { \
assert(values.size() == 1); \
double real; \
if (values[0]->getRealValue(real)) { \
if (expr->getDegrees()) \
real = DegToRad(real); \
double real1 = F(real); \
return expr->createRealValue(real1); \
} \
return CExprValuePtr(); \
}
#define CEXPR_REALC_TO_ANGLE_FUNC(NAME, F) \
static CExprValuePtr \
CExprFunction##NAME(CExpr *expr, const CExprValueArray &values) { \
assert(values.size() == 1); \
double r; \
if (values[0]->isComplexValue()) { \
std::complex<double> c; \
if (values[0]->getComplexValue(c)) { \
errno = 0; \
std::complex<double> c1 = F(c); \
if (errno != 0) return CExprValuePtr(); \
if (expr->getDegrees()) \
c1 = std::complex<double>(RadToDeg(c1.real()), RadToDeg(c1.imag())); \
return expr->createComplexValue(c1); \
} \
else \
return CExprValuePtr(); \
} \
else if (values[0]->getRealValue(r)) { \
errno = 0; \
double r1 = F(r); \
if (errno == 0) { \
if (expr->getDegrees()) \
r1 = RadToDeg(r1); \
return expr->createRealValue(r1); \
} \
else if (errno == EDOM) { \
std::complex<double> c(r,0); \
errno = 0; \
std::complex<double> c1 = F(c); \
if (errno != 0) return CExprValuePtr(); \
if (expr->getDegrees()) \
c1 = std::complex<double>(RadToDeg(c1.real()), RadToDeg(c1.imag())); \
return expr->createComplexValue(c1); \
} \
else { \
return CExprValuePtr(); \
} \
} \
else \
return CExprValuePtr(); \
}
#define CEXPR_REAL2_TO_ANGLE_FUNC(NAME, F) \
static CExprValuePtr \
CExprFunction##NAME(CExpr *expr, const CExprValueArray &values) { \
assert(values.size() == 2); \
double real1, real2; \
if (values[0]->getRealValue(real1) && values[1]->getRealValue(real2)) { \
double real = F(real1, real2); \
if (expr->getDegrees()) \
real = RadToDeg(real); \
return expr->createRealValue(real); \
} \
return CExprValuePtr(); \
}
#define CEXPR_COMPLEX_TO_COMPLEX_FUNC(NAME, F) \
static CExprValuePtr \
CExprFunction##NAME(CExpr *expr, const CExprValueArray &values) { \
assert(values.size() == 1); \
std::complex<double> c; \
if (! values[0]->getComplexValue(c)) { \
double r; \
if (! values[0]->getRealValue(r)) \
return CExprValuePtr(); \
c = std::complex<double>(r, 0); \
} \
std::complex<double> c1 = F(c); \
return expr->createComplexValue(c1); \
}
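// Illustrative sketch (not in the original source): each CEXPR_*_FUNC macro above
// stamps out a uniformly-shaped wrapper named CExprFunction<NAME>. For example,
// CEXPR_REALC_TO_REALC_FUNC(Log, std::log) below expands to, roughly:
//
//   static CExprValuePtr
//   CExprFunctionLog(CExpr *expr, const CExprValueArray &values) {
//     // complex input -> std::log(std::complex<double>), real input -> std::log(double),
//     // anything else  -> a null CExprValuePtr
//   }
//
// so the builtinFns table further below can refer to every wrapper by name.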
class CExprFunctionAbs : public CExprFunctionObj {
public:
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 1);
if (values[0]->isRealValue()) {
double real;
if (values[0]->getRealValue(real))
return expr->createRealValue(std::abs(real));
}
else if (values[0]->isIntegerValue()) {
long integer;
if (values[0]->getIntegerValue(integer))
return expr->createIntegerValue(std::abs(integer));
}
else if (values[0]->isComplexValue()) {
std::complex<double> c;
if (values[0]->getComplexValue(c))
return expr->createRealValue(std::abs(c));
}
return CExprValuePtr();
}
};
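// Usage sketch (illustrative; the complex-literal syntax assumes gnuplot-style input):
//   abs(-3)     -> integer 3   (std::abs on long)
//   abs(-2.5)   -> real 2.5    (std::abs on double)
//   abs({3,4})  -> real 5.0    (std::abs on std::complex<double>, i.e. the magnitude)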
class CExprFunctionCArg : public CExprFunctionObj {
public:
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 1);
if (values[0]->isComplexValue()) {
std::complex<double> c;
if (values[0]->getComplexValue(c)) {
double r = std::arg(c);
if (expr->getDegrees())
  r = RadToDeg(r); // std::arg returns radians; convert the result for degrees mode
return expr->createRealValue(r);
}
}
return CExprValuePtr();
}
};
class CExprFunctionImag : public CExprFunctionObj {
public:
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 1);
if (values[0]->isComplexValue()) {
std::complex<double> c;
if (values[0]->getComplexValue(c))
return expr->createRealValue(c.imag());
}
return CExprValuePtr();
}
};
class CExprFunctionSign : public CExprFunctionObj {
public:
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 1);
if (values[0]->isRealValue()) {
double real;
if (values[0]->getRealValue(real))
return expr->createIntegerValue(real >= 0 ? (real == 0 ? 0 : 1) : -1);
}
else if (values[0]->isIntegerValue()) {
long integer;
if (values[0]->getIntegerValue(integer))
return expr->createIntegerValue(integer >= 0 ? (integer == 0 ? 0 : 1) : -1);
}
else if (values[0]->isComplexValue()) {
std::complex<double> c;
if (values[0]->getComplexValue(c))
return expr->createIntegerValue(c.real() >= 0 ? (c.real() == 0 ? 0 : 1) : -1);
}
return CExprValuePtr();
}
};
class CExprFunctionExpr : public CExprFunctionObj {
public:
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 1);
std::string exprStr;
if (! values[0]->getStringValue(exprStr))
return CExprValuePtr();
expr->saveCompileState();
CExprValuePtr value;
if (! expr->evaluateExpression(exprStr, value))
value = CExprValuePtr();
expr->restoreCompileState();
return value;
}
};
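// Usage sketch (illustrative): expr("1 + 2*3") parses and evaluates the string as a
// nested expression, yielding 7; saveCompileState()/restoreCompileState() bracket the
// inner evaluation so the outer expression's partially-compiled state survives intact.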
#ifdef GNUPLOT_EXPR
class CExprFunctionRand : public CExprFunctionObj {
public:
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 1);
if (values[0]->isIntegerValue()) {
long integer = 0;
(void) values[0]->getIntegerValue(integer);
if (integer < 0)
srand(0);
else if (integer > 0)
srand(integer);
double r = (1.0*rand())/RAND_MAX;
return expr->createRealValue(r);
}
return CExprValuePtr();
}
};
#endif
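// Behaviour sketch of the rand wrapper above (illustrative): rand(0) draws the next
// uniform value in [0, 1]; a negative argument reseeds with srand(0) and a positive
// argument reseeds with srand(n) before drawing, so e.g. rand(5) always restarts the
// same pseudo-random sequence.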
#define CEXPR_REALC_TO_REAL_FOBJ(NAME, F) \
class CExprFunction##NAME : public CExprFunctionObj { \
public: \
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) { \
assert(values.size() == 1); \
double r = 0.0; \
if (values[0]->isRealValue()) { \
if (! values[0]->getRealValue(r)) return CExprValuePtr(); \
} \
else if (values[0]->isIntegerValue()) { \
long i = 0; \
if (! values[0]->getIntegerValue(i)) return CExprValuePtr(); \
r = i; \
} \
else if (values[0]->isComplexValue()) { \
std::complex<double> c; \
if (! values[0]->getComplexValue(c)) return CExprValuePtr(); \
r = c.real(); \
} \
else { \
return CExprValuePtr(); \
} \
double r1 = F(r); \
return expr->createRealValue(r1); \
} \
};
class CExprFunctionInt : public CExprFunctionObj {
public:
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 1);
double r = 0.0;
if (values[0]->isRealValue()) {
if (! values[0]->getRealValue(r)) return CExprValuePtr();
}
else if (values[0]->isIntegerValue()) {
long i = 0;
if (! values[0]->getIntegerValue(i)) return CExprValuePtr();
r = i;
}
else if (values[0]->isComplexValue()) {
std::complex<double> c;
if (! values[0]->getComplexValue(c)) return CExprValuePtr();
r = c.real();
}
else {
return CExprValuePtr();
}
int i1 = static_cast<int>(r);
return expr->createIntegerValue(i1);
}
};
CEXPR_REALC_TO_REAL_FOBJ(Ceil , std::ceil)
CEXPR_REALC_TO_REAL_FOBJ(Floor, std::floor)
CEXPR_REALC_TO_REAL_FOBJ(Real , static_cast<double>)
#ifdef GNUPLOT_EXPR
class CExprFunctionSPrintF : public CExprFunctionObj {
public:
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() >= 1);
std::string fmt;
if (! values[0]->getStringValue(fmt))
return CExprValuePtr();
CExprValueArray values1;
for (uint i = 1; i < values.size(); ++i)
values1.push_back(values[i]);
std::string res = expr->printf(fmt, values1);
return expr->createStringValue(res);
}
};
#endif
template<typename T, typename R, typename FUNC>
class CExprFunctionObjT1 : public CExprFunctionObj {
public:
CExprFunctionObjT1(CExprFunctionMgr *mgr, const std::string &name) {
std::string argsStr = CExprUtil<T>::argTypeStr();
func_ = mgr->addObjFunction(name, argsStr, this);
}
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 1);
T v;
if (CExprUtil<T>::getTypeValue(values[0], v))
return CExprUtil<R>::createValue(expr, f_(v));
return CExprValuePtr();
}
void setBuiltin(bool b) {
func_->setBuiltin(b);
}
private:
FUNC f_;
CExprFunctionPtr func_;
};
template<typename T1, typename T2, typename R, typename FUNC>
class CExprFunctionObjT2 : public CExprFunctionObj {
public:
CExprFunctionObjT2(CExprFunctionMgr *mgr, const std::string &name) {
std::string argsStr = CExprUtil<T1>::argTypeStr() + "," + CExprUtil<T2>::argTypeStr();
func_ = mgr->addObjFunction(name, argsStr, this);
}
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 2);
T1 v1; T2 v2;
if (CExprUtil<T1>::getTypeValue(values[0], v1) &&
CExprUtil<T2>::getTypeValue(values[1], v2))
return CExprUtil<R>::createValue(expr, f_(v1, v2));
return CExprValuePtr();
}
void setBuiltin(bool b) {
func_->setBuiltin(b);
}
private:
FUNC f_;
CExprFunctionPtr func_;
};
template<typename T1, typename T2, typename T3, typename R, typename FUNC>
class CExprFunctionObjT3 : public CExprFunctionObj {
public:
CExprFunctionObjT3(CExprFunctionMgr *mgr, const std::string &name) {
std::string argsStr = CExprUtil<T1>::argTypeStr() + "," +
CExprUtil<T2>::argTypeStr() + "," + CExprUtil<T3>::argTypeStr();
func_ = mgr->addObjFunction(name, argsStr, this);
}
CExprValuePtr operator()(CExpr *expr, const CExprValueArray &values) {
assert(values.size() == 3);
T1 v1; T2 v2; T3 v3;
if (CExprUtil<T1>::getTypeValue(values[0], v1) &&
CExprUtil<T2>::getTypeValue(values[1], v2) &&
CExprUtil<T3>::getTypeValue(values[2], v3))
return CExprUtil<R>::createValue(expr, f_(v1, v2, v3));
return CExprValuePtr();
}
void setBuiltin(bool b) {
func_->setBuiltin(b);
}
private:
FUNC f_;
CExprFunctionPtr func_;
};
CEXPR_REALC_TO_REALC_FUNC(Log , std::log)
CEXPR_REALC_TO_REALC_FUNC(Log10, std::log10)
CEXPR_ANGLE_TO_REAL_FUNC(Sin, ::sin)
CEXPR_ANGLE_TO_REAL_FUNC(Cos, ::cos)
CEXPR_ANGLE_TO_REAL_FUNC(Tan, ::tan)
CEXPR_REALC_TO_ANGLE_FUNC(ACos, std::acos)
CEXPR_REALC_TO_ANGLE_FUNC(ASin, std::asin)
CEXPR_REALC_TO_ANGLE_FUNC(ATan, std::atan)
CEXPR_REAL2_TO_ANGLE_FUNC(ATan2, ::atan2)
CEXPR_REALC_TO_REALC_FUNC(SinH , std::sinh)
CEXPR_REALC_TO_REALC_FUNC(CosH , std::cosh)
CEXPR_REALC_TO_REALC_FUNC(TanH , std::tanh)
CEXPR_REALC_TO_REALC_FUNC(ASinH, std::asinh)
CEXPR_REALC_TO_REALC_FUNC(ACosH, std::acosh)
CEXPR_REALC_TO_REALC_FUNC(ATanH, std::atanh)
#ifdef GNUPLOT_EXPR
// TODO: besy0, besy1
CEXPR_REAL_TO_REAL_FUNC(BesJ0 , ::j0)
CEXPR_REAL_TO_REAL_FUNC(BesJ1 , ::j1)
CEXPR_REALC_TO_REAL_FUNC(Erf , ::erf)
CEXPR_REALC_TO_REAL_FUNC(ErfC , ::erfc)
CEXPR_REALC_TO_REAL_FUNC(InvErf, ::inverf)
CEXPR_COMPLEX_TO_COMPLEX_FUNC(CErf , ::cerf)
CEXPR_COMPLEX_TO_COMPLEX_FUNC(CErfC, ::cerfc)
// TODO: invnorm, norm
CEXPR_REALC_TO_REAL_FUNC(Gamma , ::gamma)
// TODO: igamma
CEXPR_REALC_TO_REAL_FUNC(LGamma , ::lgamma)
CEXPR_REALC_TO_REAL_FUNC(Norm , ::norm)
CEXPR_REALC_TO_REAL_FUNC(InvNorm, ::invnorm)
// TODO: lambertw
#endif
static CExprBuiltinFunction
builtinFns[] = {
{ "sqrt" , "r" , CExprFunctionSqrt },
{ "exp" , "rc" , CExprFunctionExp },
{ "log" , "rc" , CExprFunctionLog },
{ "log10" , "rc" , CExprFunctionLog10 },
{ "sin" , "rc" , CExprFunctionSin },
{ "cos" , "rc" , CExprFunctionCos },
{ "tan" , "rc" , CExprFunctionTan },
{ "asin" , "rc" , CExprFunctionASin },
{ "acos" , "rc" , CExprFunctionACos },
{ "atan" , "rc" , CExprFunctionATan },
{ "atan2" , "r,r", CExprFunctionATan2 },
{ "sinh" , "rc" , CExprFunctionSinH },
{ "cosh" , "rc" , CExprFunctionCosH },
{ "tanh" , "rc" , CExprFunctionTanH },
{ "asinh" , "rc" , CExprFunctionASinH },
{ "acosh" , "rc" , CExprFunctionACosH },
{ "atanh" , "rc" , CExprFunctionATanH },
#ifdef GNUPLOT_EXPR
// EllipticK, EllipticE, EllipticPi
{ "besj0" , "r" , CExprFunctionBesJ0 },
{ "besj1" , "r" , CExprFunctionBesJ1 },
// besy0, besy1
{ "erf" , "rc" , CExprFunctionErf },
{ "erfc" , "rc" , CExprFunctionErfC },
{ "inverf" , "rc" , CExprFunctionInvErf },
{ "cerf" , "c" , CExprFunctionCErf },
{ "cerfc" , "c" , CExprFunctionCErfC },
{ "gamma" , "r" , CExprFunctionGamma },
{ "lgamma" , "r" , CExprFunctionLGamma },
// norm/invnorm wrappers are only defined under GNUPLOT_EXPR, so keep them inside the guard
{ "norm"   , "r"  , CExprFunctionNorm    },
{ "invnorm", "r"  , CExprFunctionInvNorm },
#endif
{ "", "", 0 }
};
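// Key to the argument strings above (as parseArgs below consumes them): each
// comma-separated token lists the accepted type letters -- 'b' boolean, 'i' integer,
// 'r' real, 's' string, 'c' complex, 'n' null -- so "rc" is one real-or-complex
// argument, "r,r" is two reals, and a trailing "..." marks variable arguments.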
//------
CExprFunctionMgr::
CExprFunctionMgr(CExpr *expr) :
expr_(expr)
{
}
void
CExprFunctionMgr::
addFunctions()
{
for (uint i = 0; builtinFns[i].proc; ++i) {
CExprFunctionPtr function =
addProcFunction(builtinFns[i].name, builtinFns[i].args, builtinFns[i].proc);
function->setBuiltin(true);
}
addObjFunction("abs" , "ric", new CExprFunctionAbs )->setBuiltin(true);
addObjFunction("arg" , "c" , new CExprFunctionCArg )->setBuiltin(true);
addObjFunction("ceil" , "rc" , new CExprFunctionCeil )->setBuiltin(true);
addObjFunction("floor", "rc" , new CExprFunctionFloor)->setBuiltin(true);
// TODO: use conversion rules
addObjFunction("int" , "ric", new CExprFunctionInt )->setBuiltin(true);
addObjFunction("real" , "ric", new CExprFunctionReal )->setBuiltin(true);
addObjFunction("imag" , "c" , new CExprFunctionImag )->setBuiltin(true);
addObjFunction("sgn" , "ric", new CExprFunctionSign )->setBuiltin(true);
#ifdef GNUPLOT_EXPR
addObjFunction("rand", "i", new CExprFunctionRand)->setBuiltin(true);
// input types ..., return type, function
(new CExprFunctionObjT1<std::string,long,CExprStrLen>
(this, "strlen" ))->setBuiltin(true);
(new CExprFunctionObjT2<std::string,std::string,long,CExprStrStrT>
(this, "strstrt"))->setBuiltin(true);
(new CExprFunctionObjT3<std::string,long,long,std::string,CExprSubStr>
(this, "substr" ))->setBuiltin(true);
(new CExprFunctionObjT1<std::string,std::string,CExprSystem>
(this, "system" ))->setBuiltin(true);
(new CExprFunctionObjT2<std::string,long,std::string,CExprWord>
(this, "word" ))->setBuiltin(true);
(new CExprFunctionObjT1<std::string,long,CExprWords>
(this, "words" ))->setBuiltin(true);
// gprintf ?
addObjFunction("sprintf", "s,...", new CExprFunctionSPrintF)->setBuiltin(true);
#endif
addObjFunction("expr", "s", new CExprFunctionExpr)->setBuiltin(true);
}
CExprFunctionPtr
CExprFunctionMgr::
getFunction(const std::string &name)
{
for (const auto &func : functions_)
if (func->name() == name)
return func;
return CExprFunctionPtr();
}
void
CExprFunctionMgr::
getFunctions(const std::string &name, Functions &functions)
{
for (const auto &func : functions_)
if (func->name() == name)
functions.push_back(func);
}
CExprFunctionPtr
CExprFunctionMgr::
addProcFunction(const std::string &name, const std::string &argsStr, CExprFunctionProc proc)
{
Args args;
bool variableArgs;
(void) parseArgs(argsStr, args, variableArgs);
CExprFunctionPtr function(new CExprProcFunction(name, args, proc));
function->setVariableArgs(variableArgs);
removeFunction(name);
functions_.push_back(function);
resetCompiled(name);
return function;
}
CExprFunctionPtr
CExprFunctionMgr::
addObjFunction(const std::string &name, const std::string &argsStr,
CExprFunctionObj *proc, bool resetCompiled)
{
Args args;
bool variableArgs;
(void) parseArgs(argsStr, args, variableArgs);
CExprFunctionPtr function(new CExprObjFunction(name, args, proc));
function->setVariableArgs(variableArgs);
if (! proc->isOverload())
removeFunction(name);
functions_.push_back(function);
if (resetCompiled)
this->resetCompiled(name);
return function;
}
CExprFunctionPtr
CExprFunctionMgr::
addUserFunction(const std::string &name, const std::vector<std::string> &args,
const std::string &proc)
{
CExprFunctionPtr function(new CExprUserFunction(name, args, proc));
removeFunction(name);
functions_.push_back(function);
resetCompiled(name);
return function;
}
void
CExprFunctionMgr::
removeFunction(const std::string &name)
{
removeFunction(getFunction(name));
}
void
CExprFunctionMgr::
removeFunction(CExprFunctionPtr function)
{
if (function.isValid())
functions_.remove(function);
}
void
CExprFunctionMgr::
getFunctionNames(std::vector<std::string> &names) const
{
for (const auto &func : functions_)
names.push_back(func->name());
}
void
CExprFunctionMgr::
resetCompiled(const std::string &name)
{
for (const auto &func : functions_) {
if (func->hasFunction(name))
func->reset();
}
}
bool
CExprFunctionMgr::
parseArgs(const std::string &argsStr, Args &args, bool &variableArgs)
{
variableArgs = false;
bool rc = true;
std::vector<std::string> args1;
CStrUtil::addTokens(argsStr, args1, ", ");
uint num_args = args1.size();
for (uint i = 0; i < num_args; ++i) {
const std::string &arg = args1[i];
if (arg == "..." && i == num_args - 1) {
variableArgs = true;
break;
}
uint types = uint(CExprValueType::NONE);
uint len = arg.size();
for (uint j = 0; j < len; j++) {
char c = arg[j];
if (c == 'b') types |= uint(CExprValueType::BOOLEAN);
else if (c == 'i') types |= uint(CExprValueType::INTEGER);
else if (c == 'r') types |= uint(CExprValueType::REAL);
else if (c == 's') types |= uint(CExprValueType::STRING);
else if (c == 'c') types |= uint(CExprValueType::COMPLEX);
else if (c == 'n') types |= uint(CExprValueType::NUL);
else {
CExpr::instance()->
errorMsg("Invalid argument type char '" + std::string(&c, 1) + "'");
rc = false;
}
}
args.push_back(CExprFunctionArg((CExprValueType) types));
}
return rc;
}
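// Worked example (illustrative): parseArgs("ric,s,...", args, variableArgs) produces
// one argument accepting REAL|INTEGER|COMPLEX, one accepting STRING, and sets
// variableArgs = true because the final token is "...".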
//----------
CExprProcFunction::
CExprProcFunction(const std::string &name, const Args &args, CExprFunctionProc proc) :
CExprFunction(name), args_(args), proc_(proc)
{
}
bool
CExprProcFunction::
checkValues(const CExprValueArray &values) const
{
return (values.size() == numArgs());
}
CExprValuePtr
CExprProcFunction::
exec(CExpr *expr, const CExprValueArray &values)
{
assert(checkValues(values));
return (*proc_)(expr, values);
}
//----------
CExprObjFunction::
CExprObjFunction(const std::string &name, const Args &args, CExprFunctionObj *proc) :
CExprFunction(name), args_(args), proc_(proc)
{
}
CExprObjFunction::
~CExprObjFunction()
{
delete proc_;
}
bool
CExprObjFunction::
checkValues(const CExprValueArray &values) const
{
if (isVariableArgs())
return (values.size() >= numArgs());
else
return (values.size() == numArgs());
}
CExprValuePtr
CExprObjFunction::
exec(CExpr *expr, const CExprValueArray &values)
{
assert(checkValues(values));
return (*proc_)(expr, values);
}
//----------
CExprUserFunction::
CExprUserFunction(const std::string &name, const Args &args, const std::string &proc) :
CExprFunction(name), args_(args), proc_(proc), compiled_(false)
{
}
bool
CExprUserFunction::
checkValues(const CExprValueArray &values) const
{
return (values.size() >= numArgs());
}
void
CExprUserFunction::
reset()
{
compiled_ = false;
pstack_.clear();
cstack_.clear();
itoken_ = CExprITokenPtr();
}
CExprValuePtr
CExprUserFunction::
exec(CExpr *expr, const CExprValueArray &values)
{
assert(checkValues(values));
//---
if (! compiled_) {
pstack_ = expr->parseLine(proc_);
itoken_ = expr->interpPTokenStack(pstack_);
cstack_ = expr->compileIToken(itoken_);
compiled_ = true;
}
//---
typedef std::map<std::string,CExprValuePtr> VarValues;
VarValues varValues;
// set arg values (save previous values)
for (uint i = 0; i < numArgs(); ++i) {
const std::string &arg = args_[i];
CExprVariablePtr var = expr->getVariable(arg);
if (var.isValid()) {
varValues[arg] = var->getValue();
var->setValue(values[i]);
}
else {
varValues[arg] = CExprValuePtr();
expr->createVariable(arg, values[i]);
}
}
// run proc
expr->saveCompileState();
CExprValuePtr value;
//if (! expr->evaluateExpression(proc_, value))
// value = CExprValuePtr();
//if (! expr->executePTokenStack(pstack_, value))
// value = CExprValuePtr();
if (! expr->executeCTokenStack(cstack_, value))
value = CExprValuePtr();
expr->restoreCompileState();
// restore variables
for (const auto &v : varValues) {
  const std::string &varName  = v.first;
  CExprValuePtr      oldValue = v.second; // avoid shadowing the result 'value'
  if (oldValue.isValid()) {
    CExprVariablePtr var = expr->getVariable(varName);
    var->setValue(oldValue);
  }
  else
    expr->removeVariable(varName);
}
return value;
}
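// Flow sketch (illustrative): after addUserFunction("f", {"x"}, "x*x"), evaluating
// f(3) compiles "x*x" once into cstack_, temporarily binds the variable x to 3
// (saving any previous value of x in varValues), executes the compiled token stack
// to get 9, and finally restores or removes x so the caller's scope is unchanged.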
|
{"hexsha": "2e8b57b3bb6c997d99ecd4740a75a301d6f271c6", "size": 26651, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/CExprFunction.cpp", "max_stars_repo_name": "colinw7/CQGnuPlot", "max_stars_repo_head_hexsha": "8001b0a0d40c1fde8e5efe05ebe0c9b0541daa94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/CExprFunction.cpp", "max_issues_repo_name": "colinw7/CQGnuPlot", "max_issues_repo_head_hexsha": "8001b0a0d40c1fde8e5efe05ebe0c9b0541daa94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/CExprFunction.cpp", "max_forks_repo_name": "colinw7/CQGnuPlot", "max_forks_repo_head_hexsha": "8001b0a0d40c1fde8e5efe05ebe0c9b0541daa94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-04-01T13:08:45.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-01T13:08:45.000Z", "avg_line_length": 24.7685873606, "max_line_length": 92, "alphanum_fraction": 0.6343852013, "num_tokens": 8012}
|
"""
_to_union(datatype)
Make a minimal `Union` type from a collection of data types.
"""
_to_union(datatype) = Union{(isa(datatype, Type) ? [datatype] : datatype)...}
"""
_find_rand_argmax(d::DictionaryView)
Compute `argmax` of `d` and select one element randomly.
"""
function _find_rand_argmax(d::DictionaryView)
max = -Inf
argmax = Vector{Int}()
for (k, v) in pairs(d)
if v > max
max = v
argmax = [k]
elseif v == max
push!(argmax, k)
end
end
return rand(argmax)
end
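# Behaviour sketch (illustrative): for a view holding the pairs 1 => 2.0, 2 => 5.0,
# 3 => 5.0, the keys 2 and 3 tie for the maximum, so the function returns one of
# them uniformly at random via rand([2, 3]).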
abstract type FunctionContainer end
apply(fc::FC) where {FC <: FunctionContainer} = fc.f
apply(fc::FC, x, X) where {FC <: FunctionContainer} = convert(Float64, apply(fc)(x, X))
apply(fc::FC, x) where {FC <: FunctionContainer} = convert(Float64, apply(fc)(x))
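# Minimal usage sketch (the concrete container type here is hypothetical):
#     struct Objective <: FunctionContainer
#         f::Function
#     end
#     o = Objective(x -> 2x)
#     apply(o)       # the wrapped function itself
#     apply(o, 3)    # == 6.0, converted to Float64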
|
{"hexsha": "27c143498cc7bfbc698cfde58ed7467ea1a4b03b", "size": 818, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils.jl", "max_stars_repo_name": "JuliaConstraints/LocalSearchSolvers.jl", "max_stars_repo_head_hexsha": "cd34500d131b05afdef4383f9a3b32a7e0c4ec41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-12-18T11:29:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T03:40:38.000Z", "max_issues_repo_path": "src/utils.jl", "max_issues_repo_name": "JuliaConstraints/LocalSearchSolvers.jl", "max_issues_repo_head_hexsha": "cd34500d131b05afdef4383f9a3b32a7e0c4ec41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 88, "max_issues_repo_issues_event_min_datetime": "2020-12-18T09:04:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T09:00:01.000Z", "max_forks_repo_path": "src/utils.jl", "max_forks_repo_name": "JuliaConstraints/LocalSearchSolvers.jl", "max_forks_repo_head_hexsha": "cd34500d131b05afdef4383f9a3b32a7e0c4ec41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-13T10:15:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-15T00:36:50.000Z", "avg_line_length": 28.2068965517, "max_line_length": 87, "alphanum_fraction": 0.6246943765, "num_tokens": 229}
|
import pandas
class LiteralCache:
"""class which stores literals
and corresponding truth values
e.g. [
"food=banana": [True, True, False, False, True],
"food=apple" : [True, True, True, True, False]
]
"""
def __init__(self):
self.__cache = {}
def insert(self, literal, truth_values):
self.__cache[literal] = truth_values
def get(self, literal):
return self.__cache[literal]
def __contains__(self, literal):
"""function for using in
on LiteralCache object
"""
return literal in self.__cache.keys()
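# Usage sketch (illustrative):
#     cache = LiteralCache()
#     cache.insert("food=banana", [True, False, True])
#     "food=banana" in cache      # True, via __contains__
#     cache.get("food=banana")    # [True, False, True]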
import numpy as np
class QuantitativeDataFrame:
def __init__(self, dataframe):
        if not isinstance(dataframe, pandas.DataFrame):
            raise TypeError("dataframe must be a pandas.DataFrame")
self.__dataframe = dataframe
self.__dataframe.iloc[:,-1] = self.__dataframe.iloc[:,-1].astype(str)
# sorted and unique columns of the dataframe
# saved as a numpy array
self.__preprocessed_columns = self.__preprocess_columns(dataframe)
# literal cache for computing rule statistics
# - support and confidence
self.__literal_cache = LiteralCache()
# so that it doesn't have to be computed over and over
self.size = dataframe.index.size
@property
def dataframe(self):
return self.__dataframe
def column(self, colname):
return self.__preprocessed_columns[colname]
def mask(self, vals):
return self.__dataframe[vals]
def find_covered_by_antecedent_mask(self, antecedent):
"""
returns:
mask - an array of boolean values indicating which instances
are covered by antecedent
"""
# todo: compute only once to make function faster
dataset_size = self.__dataframe.index.size
        cummulated_mask = np.ones(dataset_size, dtype=bool)
for literal in antecedent:
attribute, interval = literal
# the column that concerns the
# iterated attribute
# instead of pandas.Series, grab the ndarray
# using values attribute
relevant_column = self.__dataframe[[attribute]].values.reshape(dataset_size)
# this tells us which instances satisfy the literal
current_mask = self.get_literal_coverage(literal, relevant_column)
# add cummulated and current mask using logical AND
cummulated_mask &= current_mask
return cummulated_mask
    def find_covered_by_literal_mask(self, literal):
        """
        returns:
            mask - an array of boolean values indicating which instances
            are covered by literal
        """
        dataset_size = self.__dataframe.index.size
        attribute, _interval = literal
        # instead of pandas.Series, grab the ndarray
        # using the values attribute
        relevant_column = self.__dataframe[[attribute]].values.reshape(dataset_size)
        # this tells us which instances satisfy the literal
        return self.get_literal_coverage(literal, relevant_column)
def find_covered_by_rule_mask(self, rule):
"""
returns:
covered_by_antecedent_mask:
- array of boolean values indicating which
dataset rows satisfy antecedent
covered_by_consequent_mask:
- array of boolean values indicating which
dataset rows satisfy conseqeunt
"""
dataset_size = self.__dataframe.index.size
# initialize a mask filled with True values
        # it will get modified as further literals get
# tested
# for optimization - create cummulated mask once
# in constructor
        cummulated_mask = np.ones(dataset_size, dtype=bool)
for literal in rule.antecedent:
attribute, interval = literal
# the column that concerns the
# iterated attribute
# instead of pandas.Series, grab the ndarray
# using values attribute
relevant_column = self.__dataframe[[attribute]].values.reshape(dataset_size)
# this tells us which instances satisfy the literal
current_mask = self.get_literal_coverage(literal, relevant_column)
# add cummulated and current mask using logical AND
cummulated_mask &= current_mask
instances_satisfying_antecedent_mask = cummulated_mask
instances_satisfying_consequent_mask = self.__get_consequent_coverage_mask(rule)
instances_satisfying_consequent_mask = instances_satisfying_consequent_mask.reshape(dataset_size)
return instances_satisfying_antecedent_mask, instances_satisfying_consequent_mask
def calculate_rule_statistics(self, rule):
"""calculates rule's confidence and
support using efficient numpy functions
returns:
--------
support:
float
confidence:
float
"""
dataset_size = self.__dataframe.index.size
# initialize a mask filled with True values
        # it will get modified as further literals get
# tested
# for optimization - create cummulated mask once
# in constructor
        cummulated_mask = np.ones(dataset_size, dtype=bool)
for literal in rule.antecedent:
attribute, interval = literal
# the column that concerns the
# iterated attribute
# instead of pandas.Series, grab the ndarray
# using values attribute
relevant_column = self.__dataframe[[attribute]].values.reshape(dataset_size)
# this tells us which instances satisfy the literal
current_mask = self.get_literal_coverage(literal, relevant_column)
# add cummulated and current mask using logical AND
cummulated_mask &= current_mask
instances_satisfying_antecedent = self.__dataframe[cummulated_mask].index
instances_satisfying_antecedent_count = instances_satisfying_antecedent.size
# using cummulated mask to filter out instances that satisfy consequent
# but do not satisfy antecedent
instances_satisfying_consequent_mask = self.__get_consequent_coverage_mask(rule)
instances_satisfying_consequent_mask = instances_satisfying_consequent_mask.reshape(dataset_size)
instances_satisfying_consequent_and_antecedent = self.__dataframe[
instances_satisfying_consequent_mask & cummulated_mask
].index
instances_satisfying_consequent_and_antecedent_count = instances_satisfying_consequent_and_antecedent.size
instances_satisfying_consequent_count = self.__dataframe[instances_satisfying_consequent_mask].index.size
        # instances satisfying both antecedent and consequent
support = instances_satisfying_antecedent_count / dataset_size
confidence = 0
if instances_satisfying_antecedent_count != 0:
confidence = instances_satisfying_consequent_and_antecedent_count / instances_satisfying_antecedent_count
return support, confidence
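    # Worked example (illustrative): with dataset_size = 100, 20 rows matching the
    # antecedent and 15 of those also matching the consequent, the method returns
    # support = 20 / 100 = 0.2 and confidence = 15 / 20 = 0.75.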
def __get_consequent_coverage_mask(self, rule):
consequent = rule.consequent
attribute, value = consequent
class_column = self.__dataframe[[attribute]].values
class_column = class_column.astype(str)
literal_key = "{}={}".format(attribute, value)
mask = []
if literal_key in self.__literal_cache:
mask = self.__literal_cache.get(literal_key)
        else:
            mask = class_column == value
            # cache the freshly computed mask so later lookups reuse it
            self.__literal_cache.insert(literal_key, mask)
        return mask
def get_literal_coverage(self, literal, values):
"""returns mask which describes the instances that
satisfy the interval
function uses cached results for efficiency
"""
        if not isinstance(values, np.ndarray):
            raise TypeError("values must be a numpy.ndarray")
mask = []
attribute, interval = literal
literal_key = "{}={}".format(attribute, interval)
# check if the result is already cached, otherwise
# calculate and save the result
if literal_key in self.__literal_cache:
mask = self.__literal_cache.get(literal_key)
else:
mask = None
if type(interval) == str:
mask = np.array([ val == interval for val in values ])
else:
mask = interval.test_membership(values)
self.__literal_cache.insert(literal_key, mask)
# reshape mask into single dimension
mask = mask.reshape(values.size)
return mask
def __preprocess_columns(self, dataframe):
        # convert to dict: column -> list of values
        # each list is then reduced to a sorted, unique numpy array
dataframe_dict = dataframe.to_dict(orient="list")
dataframe_ndarray = {}
for column, value_list in dataframe_dict.items():
            transformed_list = np.unique(value_list)  # np.unique already returns sorted values
dataframe_ndarray[column] = transformed_list
return dataframe_ndarray
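# End-to-end usage sketch (illustrative; assumes a rule object exposing `antecedent`
# as an iterable of (attribute, interval) pairs and `consequent` as an
# (attribute, value) pair, matching how the methods above unpack them):
#     qdf = QuantitativeDataFrame(dataframe)
#     support, confidence = qdf.calculate_rule_statistics(rule)
#     antecedent_mask = qdf.find_covered_by_antecedent_mask(rule.antecedent)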
|
{"hexsha": "2a7c6a6be2b6d0e86c04d8bdedf5377bc6ecd0eb", "size": 10187, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/pyarc/qcba/data_structures/quant_dataset.py", "max_stars_repo_name": "jirifilip/CBA", "max_stars_repo_head_hexsha": "59168ef6fb4c9e319475f9a7498446ba5ff306e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2019-08-05T12:02:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T08:26:56.000Z", "max_issues_repo_path": "build/lib/pyarc/qcba/data_structures/quant_dataset.py", "max_issues_repo_name": "jirifilip/CBA", "max_issues_repo_head_hexsha": "59168ef6fb4c9e319475f9a7498446ba5ff306e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2019-08-05T14:08:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-08T07:58:34.000Z", "max_forks_repo_path": "build/lib/pyarc/qcba/data_structures/quant_dataset.py", "max_forks_repo_name": "jirifilip/CBA", "max_forks_repo_head_hexsha": "59168ef6fb4c9e319475f9a7498446ba5ff306e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2018-08-09T06:41:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-17T07:37:11.000Z", "avg_line_length": 32.9676375405, "max_line_length": 117, "alphanum_fraction": 0.6032197899, "include": true, "reason": "import numpy", "num_tokens": 1937}
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Tensorflow TIN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.ops import nn_ops
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.python.framework import ops
from ult.config import cfg
from ult.visualization import draw_bounding_boxes_HOI
import numpy as np
import ipdb
def resnet_arg_scope(is_training=True,
weight_decay=cfg.TRAIN.WEIGHT_DECAY,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': False,
'updates_collections': ops.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d, slim.fully_connected],
weights_regularizer=tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
weights_initializer=slim.variance_scaling_initializer(),
biases_regularizer=tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
biases_initializer=tf.constant_initializer(0.0),
trainable=is_training,
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
class ResNet50(): # 64--128--256--512--512
def __init__(self):
self.visualize = {}
self.intermediate = {}
self.predictions = {}
self.score_summaries = {}
self.event_summaries = {}
self.train_summaries = []
self.losses = {}
self.image = tf.placeholder(tf.float32, shape=[1, None, None, 3], name='image')
self.spatial = tf.placeholder(tf.float32, shape=[None, 64, 64, 3], name='sp')
self.H_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='H_boxes')
self.O_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='O_boxes')
self.Part0 = tf.placeholder(tf.float32, shape=[None, 5], name='Part0_boxes')
self.Part1 = tf.placeholder(tf.float32, shape=[None, 5], name='Part1_boxes')
self.Part2 = tf.placeholder(tf.float32, shape=[None, 5], name='Part2_boxes')
self.Part3 = tf.placeholder(tf.float32, shape=[None, 5], name='Part3_boxes')
self.Part4 = tf.placeholder(tf.float32, shape=[None, 5], name='Part4_boxes')
self.Part5 = tf.placeholder(tf.float32, shape=[None, 5], name='Part5_boxes')
self.Part6 = tf.placeholder(tf.float32, shape=[None, 5], name='Part6_boxes')
self.Part7 = tf.placeholder(tf.float32, shape=[None, 5], name='Part7_boxes')
self.Part8 = tf.placeholder(tf.float32, shape=[None, 5], name='Part8_boxes')
self.Part9 = tf.placeholder(tf.float32, shape=[None, 5], name='Part9_boxes')
self.gt_binary_label = tf.placeholder(tf.float32, shape=[None, 2], name='gt_binary_label')
        self.gt_binary_label_10v = tf.placeholder(tf.float32, shape=[None, 10, 2], name='gt_binary_label_10v')
self.num_vec = 10
self.H_num = tf.placeholder(tf.int32)
self.binary_weight = np.array([1.6094379124341003, 0.22314355131420976], dtype='float32').reshape(1, 2)
self.num_classes = 600 # HOI
self.num_binary = 2 # existence (0 or 1) of HOI
self.num_fc = 1024
self.scope = 'resnet_v1_50'
self.stride = [16, ]
self.lr = tf.placeholder(tf.float32)
if tf.__version__ == '1.1.0':
self.blocks = [resnet_utils.Block('block1', resnet_v1.bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block('block2', resnet_v1.bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
resnet_utils.Block('block3', resnet_v1.bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 1)]),
resnet_utils.Block('block4', resnet_v1.bottleneck, [(2048, 512, 1)] * 3),
resnet_utils.Block('block5', resnet_v1.bottleneck, [(2048, 512, 1)] * 3)]
else: # we use tf 1.2.0 here, Resnet-50
from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_block
self.blocks = [resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
# a resnet_v1 bottleneck block
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=6, stride=1), # feature former
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
resnet_v1_block('block5', base_depth=512, num_units=3, stride=1)]
def build_base(self):
with tf.variable_scope(self.scope, self.scope):
net = resnet_utils.conv2d_same(self.image, 64, 7, stride=2, scope='conv1') # conv2d + subsample
net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
return net
# Number of fixed blocks during training, by default ***the first of all 4 blocks*** is fixed (Resnet-50 block)
# Range: 0 (none) to 3 (all)
# __C.RESNET.FIXED_BLOCKS = 1
# feature extractor
def image_to_head(self, is_training):
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net = self.build_base()
net, _ = resnet_v1.resnet_v1(net,
self.blocks[0:cfg.RESNET.FIXED_BLOCKS], # now 1, block 1
global_pool=False,
include_root_block=False,
scope=self.scope)
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
head, _ = resnet_v1.resnet_v1(net,
self.blocks[cfg.RESNET.FIXED_BLOCKS:-2], # now 1, block 2~3
global_pool=False,
include_root_block=False,
scope=self.scope)
return head
# spatial configuration, conv-pool-conv-pool-flatten
def sp_to_head(self):
with tf.variable_scope(self.scope, self.scope):
# (num_pos_neg,64,64,2)->(num_pos_neg,60,60,64)
conv1_sp = slim.conv2d(self.spatial[:, :, :, 0:2], 64, [5, 5], padding='VALID', scope='conv1_sp')
# (num_pos_neg,60,60,64)->(num_pos_neg,30,30,64)
pool1_sp = slim.max_pool2d(conv1_sp, [2, 2], scope='pool1_sp')
# (num_pos_neg,30,30,64)->(num_pos_neg,26,26,32)
conv2_sp = slim.conv2d(pool1_sp, 32, [5, 5], padding='VALID', scope='conv2_sp')
# (num_pos_neg,26,26,32)->(num_pos_neg,13,13,32)
pool2_sp = slim.max_pool2d(conv2_sp, [2, 2], scope='pool2_sp')
# (num_pos_neg,13,13,32)->(num_pos_neg,5408)
pool2_flat_sp = slim.flatten(pool2_sp)
return pool2_flat_sp
def res5(self, pool5_H, pool5_O, sp, is_training, name):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
fc7_H, _ = resnet_v1.resnet_v1(pool5_H, # H input, one block
self.blocks[-2:-1],
global_pool=False,
include_root_block=False,
reuse=False,
scope=self.scope)
fc7_H = tf.reduce_mean(fc7_H, axis=[1, 2])
fc7_O, _ = resnet_v1.resnet_v1(pool5_O, # O input, one block
self.blocks[-1:],
global_pool=False,
include_root_block=False,
reuse=False,
scope=self.scope)
fc7_O = tf.reduce_mean(fc7_O, axis=[1, 2])
return fc7_H, fc7_O
def head_to_tail(self, fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, name):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
fc7_SH = tf.reduce_mean(pool5_SH, axis=[1, 2])
fc7_SO = tf.reduce_mean(pool5_SO, axis=[1, 2])
Concat_SH = tf.concat([fc7_H, fc7_SH], 1)
fc8_SH = slim.fully_connected(Concat_SH, self.num_fc, scope='fc8_SH') # fc size = 1024
fc8_SH = slim.dropout(fc8_SH, keep_prob=0.5, is_training=is_training, scope='dropout8_SH')
fc9_SH = slim.fully_connected(fc8_SH, self.num_fc, scope='fc9_SH')
fc9_SH = slim.dropout(fc9_SH, keep_prob=0.5, is_training=is_training, scope='dropout9_SH')
Concat_SO = tf.concat([fc7_O, fc7_SO], 1)
fc8_SO = slim.fully_connected(Concat_SO, self.num_fc, scope='fc8_SO')
fc8_SO = slim.dropout(fc8_SO, keep_prob=0.5, is_training=is_training, scope='dropout8_SO')
fc9_SO = slim.fully_connected(fc8_SO, self.num_fc, scope='fc9_SO')
fc9_SO = slim.dropout(fc9_SO, keep_prob=0.5, is_training=is_training, scope='dropout9_SO')
Concat_SHsp = tf.concat([fc7_H, sp], 1)
Concat_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, scope='Concat_SHsp')
Concat_SHsp = slim.dropout(Concat_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout6_SHsp')
fc7_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, scope='fc7_SHsp')
fc7_SHsp = slim.dropout(fc7_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout7_SHsp')
return fc9_SH, fc9_SO, fc7_SHsp, fc7_SH, fc7_SO
def crop_pool_layer(self, bottom, rois, name, reuse=False):
with tf.variable_scope(name) as scope:
if reuse:
scope.reuse_variables()
batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
bottom_shape = tf.shape(bottom)
height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self.stride[0])
width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self.stride[0])
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
if cfg.RESNET.MAX_POOL:
pre_pool_size = cfg.POOLING_SIZE * 2
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size],
name="crops")
crops = slim.max_pool2d(crops, [2, 2], padding='SAME')
else:
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids),
[cfg.POOLING_SIZE, cfg.POOLING_SIZE], name="crops")
return crops
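    # Worked example of the box normalisation above (illustrative): with a feature
    # map of height 38 at stride 16, height = (38 - 1) * 16 = 592, so an RoI with
    # y1 = 296 maps to 296 / 592 = 0.5 -- the fractional coordinates that
    # tf.image.crop_and_resize expects.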
def attention_pool_layer_H(self, bottom, fc7_H, is_training, name):
with tf.variable_scope(name) as scope:
fc1 = slim.fully_connected(fc7_H, 512, scope='fc1_b')
fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training, scope='dropout1_b')
fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
return att
def attention_norm_H(self, att, name):
with tf.variable_scope(name) as scope:
att = tf.transpose(att, [0, 3, 1, 2])
att_shape = tf.shape(att)
att = tf.reshape(att, [att_shape[0], att_shape[1], -1])
att = tf.nn.softmax(att)
att = tf.reshape(att, att_shape)
att = tf.transpose(att, [0, 2, 3, 1])
return att
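    # Note on the normalisation above (illustrative): transposing to (N, C, H, W)
    # and flattening the last two axes makes tf.nn.softmax run over the H*W spatial
    # positions, so each attention map sums to 1 over space before being transposed
    # back to (N, H, W, C).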
def attention_pool_layer_O(self, bottom, fc7_O, is_training, name):
with tf.variable_scope(name) as scope:
fc1 = slim.fully_connected(fc7_O, 512, scope='fc1_b')
fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training, scope='dropout1_b')
fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
return att
def attention_norm_O(self, att, name):
with tf.variable_scope(name) as scope:
att = tf.transpose(att, [0, 3, 1, 2])
att_shape = tf.shape(att)
att = tf.reshape(att, [att_shape[0], att_shape[1], -1])
att = tf.nn.softmax(att) ###
att = tf.reshape(att, att_shape)
att = tf.transpose(att, [0, 2, 3, 1])
return att
def bottleneck(self, bottom, is_training, name, reuse=False):
with tf.variable_scope(name) as scope:
if reuse:
scope.reuse_variables()
head_bottleneck = slim.conv2d(bottom, 1024, [1, 1], scope=name) # 1x1, 1024, fc
return head_bottleneck
def ho_att(self, head, fc7_H, fc7_O, is_training, name):
with tf.variable_scope(name) as scope:
# whole image feature
head_phi = slim.conv2d(head, 512, [1, 1], scope='head_phi')
# whole image feature
head_g = slim.conv2d(head, 512, [1, 1], scope='head_g')
Att_H = self.attention_pool_layer_H(head_phi, fc7_H, is_training, 'Att_H')
Att_H = self.attention_norm_H(Att_H, 'Norm_Att_H') # softmax
att_head_H = tf.multiply(head_g, Att_H)
Att_O = self.attention_pool_layer_O(head_phi, fc7_O, is_training, 'Att_O')
Att_O = self.attention_norm_O(Att_O, 'Norm_Att_O') # softmax
att_head_O = tf.multiply(head_g, Att_O)
pool5_SH = self.bottleneck(att_head_H, is_training, 'bottleneck', False)
pool5_SO = self.bottleneck(att_head_O, is_training, 'bottleneck', True)
fc7_SH = tf.reduce_mean(pool5_SH, axis=[1, 2])
fc7_SO = tf.reduce_mean(pool5_SO, axis=[1, 2])
return fc7_SH, fc7_SO
def ROI_for_parts(self, head, name):
with tf.variable_scope(name) as scope:
pool5_P0 = self.crop_pool_layer(head, self.Part0, 'crop_P0') # RAnk
pool5_P1 = self.crop_pool_layer(head, self.Part1, 'crop_P1') # RKnee
pool5_P2 = self.crop_pool_layer(head, self.Part2, 'crop_P2') # LKnee
pool5_P3 = self.crop_pool_layer(head, self.Part3, 'crop_P3') # LAnk
pool5_P4 = self.crop_pool_layer(head, self.Part4, 'crop_P4') # Hip
pool5_P5 = self.crop_pool_layer(head, self.Part5, 'crop_P5') # Head
pool5_P6 = self.crop_pool_layer(head, self.Part6, 'crop_P6') # RHand
pool5_P7 = self.crop_pool_layer(head, self.Part7, 'crop_P7') # RSho
pool5_P8 = self.crop_pool_layer(head, self.Part8, 'crop_P8') # LSho
pool5_P9 = self.crop_pool_layer(head, self.Part9, 'crop_P9') # LHand
return pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4, pool5_P5, pool5_P6, pool5_P7, pool5_P8, pool5_P9
def vec_classification(self, pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4, pool5_P5, pool5_P6, pool5_P7,
pool5_P8, pool5_P9, is_training, initializer,
name):
with tf.variable_scope(name) as scope:
pool5_all = tf.concat(
[pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4, pool5_P5, pool5_P6, pool5_P7, pool5_P8, pool5_P9],
                axis=3)  # (?, 7, 7, 64*10 = 640)
            pool5_all_flat = slim.flatten(pool5_all)  # (?, 7*7*640)
fc8_vec_roi = slim.fully_connected(pool5_all_flat, 1024, weights_initializer=initializer,
trainable=is_training)
fc8_vec_roi = slim.dropout(fc8_vec_roi, keep_prob=0.5, is_training=is_training)
fc9_vec_roi = slim.fully_connected(fc8_vec_roi, 1024, weights_initializer=initializer,
trainable=is_training)
fc9_vec_roi = slim.dropout(fc9_vec_roi, keep_prob=0.5, is_training=is_training)
cls_score_vec = slim.fully_connected(fc9_vec_roi, self.num_vec,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None)
cls_prob_vec = tf.nn.sigmoid(cls_score_vec)
self.predictions["cls_score_vec"] = cls_score_vec
self.predictions["cls_prob_vec"] = cls_prob_vec
return cls_prob_vec
def vec_attention(self, cls_prob_vec, pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4, pool5_P5, pool5_P6,
pool5_P7, pool5_P8, pool5_P9, is_training,
name):
with tf.variable_scope(name) as scope:
# att * roi feature, not the fc feature
pool5_P0 = tf.reduce_mean(pool5_P0, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P1 = tf.reduce_mean(pool5_P1, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P2 = tf.reduce_mean(pool5_P2, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P3 = tf.reduce_mean(pool5_P3, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P4 = tf.reduce_mean(pool5_P4, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P5 = tf.reduce_mean(pool5_P5, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P6 = tf.reduce_mean(pool5_P6, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P7 = tf.reduce_mean(pool5_P7, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P8 = tf.reduce_mean(pool5_P8, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P9 = tf.reduce_mean(pool5_P9, axis=[1, 2]) #(?,7,7,64)->(?,64)
pool5_P0 = tf.multiply(pool5_P0, cls_prob_vec[:, 0:1])
pool5_P1 = tf.multiply(pool5_P1, cls_prob_vec[:, 1:2])
pool5_P2 = tf.multiply(pool5_P2, cls_prob_vec[:, 2:3])
pool5_P3 = tf.multiply(pool5_P3, cls_prob_vec[:, 3:4])
pool5_P4 = tf.multiply(pool5_P4, cls_prob_vec[:, 4:5])
pool5_P5 = tf.multiply(pool5_P5, cls_prob_vec[:, 5:6])
pool5_P6 = tf.multiply(pool5_P6, cls_prob_vec[:, 6:7])
pool5_P7 = tf.multiply(pool5_P7, cls_prob_vec[:, 7:8])
pool5_P8 = tf.multiply(pool5_P8, cls_prob_vec[:, 8:9])
pool5_P9 = tf.multiply(pool5_P9, cls_prob_vec[:, 9:10]) # (?,64)
pool5_P_att = tf.concat([pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4, pool5_P5, pool5_P6,
pool5_P7, pool5_P8, pool5_P9], axis=1) #(?,640)
return pool5_P_att
def binary_discriminator(self, fc7_H, fc7_O, fc7_SH, fc7_SO, fc7_P, sp, is_training, name):
with tf.variable_scope(name) as scope:
# print('---<', name, '>---')
# print("fc7_P.shape", fc7_P.shape) #(?,64) or (?,31360)
conv1_pose_map = slim.conv2d(self.spatial[:, :, :, 2:], 32, [5, 5], padding='VALID', scope='conv1_pose_map')
pool1_pose_map = slim.max_pool2d(conv1_pose_map, [2, 2], scope='pool1_pose_map')
conv2_pose_map = slim.conv2d(pool1_pose_map, 16, [5, 5], padding='VALID', scope='conv2_pose_map')
pool2_pose_map = slim.max_pool2d(conv2_pose_map, [2, 2], scope='pool2_pose_map')
pool2_flat_pose_map = slim.flatten(pool2_pose_map)
# fc7_H + fc7_SH + sp---fc1024---fc8_binary_1
            fc_binary_1 = tf.concat([fc7_H, fc7_SH, fc7_P], 1)  # (?, 2048+1024+64=3136), or 2048+1024+640 with the attended part feature
fc_binary_1 = tf.concat([fc_binary_1, sp, pool2_flat_pose_map], 1) # (?,11248)
fc8_binary_1 = slim.fully_connected(fc_binary_1, 1024, scope='fc8_binary_1') # 1024? we should try
fc8_binary_1 = slim.dropout(fc8_binary_1, keep_prob=cfg.TRAIN_DROP_OUT_BINARY, is_training=is_training,
scope='dropout8_binary_1') # [pos + neg,1024]
# fc7_O + fc7_SO---fc1024---fc8_binary_2
fc_binary_2 = tf.concat([fc7_O, fc7_SO], 1) # (?, 3072)
fc8_binary_2 = slim.fully_connected(fc_binary_2, 1024, scope='fc8_binary_2')
fc8_binary_2 = slim.dropout(fc8_binary_2, keep_prob=cfg.TRAIN_DROP_OUT_BINARY, is_training=is_training,
scope='dropout8_binary_2') # (?,1024)
# fc8_binary_1 + fc8_binary_2---fc1024---fc9_binary
fc8_binary = tf.concat([fc8_binary_1, fc8_binary_2], 1) # (?,2048)
fc9_binary = slim.fully_connected(fc8_binary, 1024, scope='fc9_binary')
fc9_binary = slim.dropout(fc9_binary, keep_prob=cfg.TRAIN_DROP_OUT_BINARY, is_training=is_training,
scope='dropout9_binary')
# print('---</', name, '>---')
return fc9_binary
def binary_classification(self, fc9_binary, is_training, initializer, name, mode=True):
with tf.variable_scope(name) as scope:
cls_score_binary = slim.fully_connected(fc9_binary, self.num_binary,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None, scope='cls_score_binary')
cls_prob_binary = tf.nn.sigmoid(cls_score_binary, name='cls_prob_binary')
if mode: # restore instance binary score in predictions , for part , not change
self.predictions["cls_score_binary"] = cls_score_binary
self.predictions["cls_prob_binary"] = cls_prob_binary
return cls_score_binary
def binary_cls_part(self, fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P, sp, is_training, initializer, name):
with tf.variable_scope(name) as scope:
fc7_P = tf.reduce_mean(pool5_P, axis=[1, 2]) # (?, 7,7,64)->(?, 64)
fc9_P = self.binary_discriminator(fc7_H, fc7_O, fc7_SH, fc7_SO, fc7_P, sp, is_training, 'fc_0')
cls_score_binary_P = self.binary_classification(fc9_P, is_training, initializer, 'cls_P', False)
return cls_score_binary_P
def binary_cls_all(self, fc7_H, fc7_O, fc7_SH, fc7_SO, sp, pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4,
pool5_P5, pool5_P6, pool5_P7, pool5_P8, pool5_P9, is_training, initializer, name):
with tf.variable_scope(name) as scope:
cls_score_binary0 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P0, sp, is_training,
initializer, 'score_0') # (None,2)
cls_score_binary1 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P1, sp, is_training,
initializer, 'score_1')
cls_score_binary2 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P2, sp, is_training,
initializer, 'score_2')
cls_score_binary3 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P3, sp, is_training,
initializer, 'score_3')
cls_score_binary4 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P4, sp, is_training,
initializer, 'score_4')
cls_score_binary5 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P5, sp, is_training,
initializer, 'score_5')
cls_score_binary6 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P6, sp, is_training,
initializer, 'score_6')
cls_score_binary7 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P7, sp, is_training,
initializer, 'score_7')
cls_score_binary8 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P8, sp, is_training,
initializer, 'score_8')
cls_score_binary9 = self.binary_cls_part(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P9, sp, is_training,
initializer, 'score_9')
cls_score_binary_all = tf.concat(
[tf.expand_dims(cls_score_binary0, 1), tf.expand_dims(cls_score_binary1, 1),
tf.expand_dims(cls_score_binary2, 1), tf.expand_dims(cls_score_binary3, 1),
tf.expand_dims(cls_score_binary4, 1), tf.expand_dims(cls_score_binary5, 1),
tf.expand_dims(cls_score_binary6, 1), tf.expand_dims(cls_score_binary7, 1),
tf.expand_dims(cls_score_binary8, 1), tf.expand_dims(cls_score_binary9, 1)
], axis=1) # (None,10,2)
cls_score_binary_all = tf.reduce_max(cls_score_binary_all, axis=1, name='cls_score_binary_all') # (None,2)
self.predictions["cls_score_binary_all"] = cls_score_binary_all
cls_prob_binary_all = tf.nn.sigmoid(cls_score_binary_all, name='cls_prob_binary_all')
self.predictions["cls_prob_binary_all"] = cls_prob_binary_all
return cls_score_binary_all
def compress(self, head, is_training, initializer, name):
with tf.variable_scope(name) as scope:
head = slim.conv2d(head, 64, [1, 1], padding='VALID', scope='dim_red_head')
return head
def build_network(self, is_training):
initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
# ResNet Backbone
head = self.image_to_head(is_training) # (1, ?, ?, 1024)
sp = self.sp_to_head() # (?,5408)
pool5_H = self.crop_pool_layer(head, self.H_boxes, 'Crop_H') # (?, 7, 7, 1024)
pool5_O = self.crop_pool_layer(head, self.O_boxes, 'Crop_O') # (?, 7, 7, 1024)
fc7_H, fc7_O = self.res5(pool5_H, pool5_O, sp, is_training, 'res5') # (?, 2048)
fc7_SH, fc7_SO = self.ho_att(head, fc7_H, fc7_O, is_training, 'ho_att') # (?, 1024)
head = self.compress(head, is_training, initializer, 'Compress') # (1, ?, ?, 64)
pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4, pool5_P5, pool5_P6, pool5_P7, pool5_P8, pool5_P9 = self.ROI_for_parts(
head, 'ROI_for_parts') # (?,7,7,64)
        # get the 10 per-part binary scores, fused via reduce-max
cls_score_binary_all = self.binary_cls_all(fc7_H, fc7_O, fc7_SH, fc7_SO, sp, pool5_P0, pool5_P1, pool5_P2,
pool5_P3, pool5_P4, pool5_P5, pool5_P6, pool5_P7, pool5_P8, pool5_P9,
is_training, initializer,
'binary_cls_all')
        # get the 10 part attention scores, used for part-level attention over the human feature
cls_prob_vec = self.vec_classification(pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4, pool5_P5, pool5_P6,
pool5_P7, pool5_P8, pool5_P9, is_training,
initializer=initializer, name='vec_classification') # 10v
pool5_P_att = self.vec_attention(cls_prob_vec, pool5_P0, pool5_P1, pool5_P2, pool5_P3, pool5_P4, pool5_P5,
pool5_P6, pool5_P7, pool5_P8, pool5_P9, is_training, name='vec_attention_roi')
# concat attention to cls binary
fc9_binary = self.binary_discriminator(fc7_H, fc7_O, fc7_SH, fc7_SO, pool5_P_att, sp, is_training, 'fc_binary')
cls_score_binary = self.binary_classification(fc9_binary, is_training, initializer, 'binary_classification')
self.score_summaries.update(self.predictions)
# self.visualize["attention_map_H"] = (Att_H - tf.reduce_min(Att_H[0,:,:,:])) / tf.reduce_max((Att_H[0,:,:,:] - tf.reduce_min(Att_H[0,:,:,:])))
# self.visualize["attention_map_O"] = (Att_O - tf.reduce_min(Att_O[0,:,:,:])) / tf.reduce_max((Att_O[0,:,:,:] - tf.reduce_min(Att_O[0,:,:,:])))
return
def create_architecture(self, is_training):
self.build_network(is_training)
for var in tf.trainable_variables():
self.train_summaries.append(var)
self.add_loss()
layers_to_output = {}
layers_to_output.update(self.losses)
val_summaries = []
with tf.device("/cpu:0"):
# val_summaries.append(self.add_gt_image_summary_H())
# val_summaries.append(self.add_gt_image_summary_HO())
# tf.summary.image('ATTENTION_MAP_H', self.visualize["attention_map_H"], max_outputs=1)
# tf.summary.image('ATTENTION_MAP_O', self.visualize["attention_map_O"], max_outputs=1)
for key, var in self.event_summaries.items():
val_summaries.append(tf.summary.scalar(key, var))
# for key, var in self.score_summaries.items():
# self.add_score_summary(key, var)
# for var in self.train_summaries:
# self.add_train_summary(var)
val_summaries.append(tf.summary.scalar('lr', self.lr))
self.summary_op = tf.summary.merge_all()
self.summary_op_val = tf.summary.merge(val_summaries)
return layers_to_output
def add_loss(self):
with tf.variable_scope('LOSS') as scope:
cls_score_binary = self.predictions["cls_score_binary"]
cls_score_binary_all = self.predictions["cls_score_binary_all"] # here use cls_score, not cls_prob
cls_score_binary_with_weight = tf.multiply(cls_score_binary, self.binary_weight)
cls_score_binary_all_with_weight = tf.multiply(cls_score_binary_all, self.binary_weight)
label_binary = self.gt_binary_label
binary_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label_binary, logits=cls_score_binary_with_weight))
binary_all_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label_binary, logits=cls_score_binary_all_with_weight))
mse = tf.losses.mean_squared_error(cls_score_binary_with_weight, cls_score_binary_all_with_weight)
loss = binary_cross_entropy + binary_all_cross_entropy + mse
self.losses['binary_cross_entropy'] = binary_cross_entropy
self.losses['binary_all_cross_entropy'] = binary_all_cross_entropy
self.losses['mse'] = mse
self.losses['total_loss'] = loss
self.event_summaries.update(self.losses)
return loss
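    # Loss sketch (illustrative): with weighted instance logits s and part-branch
    # logits s_all (the reduce-max over the 10 per-part scores), the total is
    #     L = BCE(s, y) + BCE(s_all, y) + MSE(s, s_all)
    # i.e. both heads are trained on the interactiveness label while the MSE term
    # keeps the instance-level and part-level scores consistent.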
def add_score_summary(self, key, tensor):
tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)
def add_train_summary(self, var):
tf.summary.histogram('TRAIN/' + var.op.name, var)
def train_step(self, sess, blobs, lr, train_op):
feed_dict = {self.image: blobs['image'], self.H_boxes: blobs['H_boxes'],
self.O_boxes: blobs['O_boxes'],
self.spatial: blobs['sp'], self.lr: lr,
self.H_num: blobs['H_num'],
self.gt_binary_label: blobs['binary_label'], self.Part0: blobs['Part0'],
self.Part1: blobs['Part1'], self.Part2: blobs['Part2'], self.Part3: blobs['Part3'],
self.Part4: blobs['Part4'], self.Part5: blobs['Part5'], self.Part6: blobs['Part6'],
self.Part7: blobs['Part7'], self.Part8: blobs['Part8'], self.Part9: blobs['Part9'],
self.gt_binary_label_10v: blobs['binary_label_10v']}
loss, _ = sess.run([self.losses['total_loss'],
train_op],
feed_dict=feed_dict)
return loss
def train_step_with_summary(self, sess, blobs, lr, train_op):
feed_dict = {self.image: blobs['image'], self.H_boxes: blobs['H_boxes'],
self.O_boxes: blobs['O_boxes'],
self.spatial: blobs['sp'], self.lr: lr,
self.H_num: blobs['H_num'],
self.gt_binary_label: blobs['binary_label'], self.Part0: blobs['Part0'],
self.Part1: blobs['Part1'], self.Part2: blobs['Part2'], self.Part3: blobs['Part3'],
self.Part4: blobs['Part4'], self.Part5: blobs['Part5'], self.Part6: blobs['Part6'],
self.Part7: blobs['Part7'], self.Part8: blobs['Part8'], self.Part9: blobs['Part9'],
self.gt_binary_label_10v: blobs['binary_label_10v']
}
loss, summary, _ = sess.run([self.losses['total_loss'],
self.summary_op,
train_op],
feed_dict=feed_dict)
return loss, summary
# return late fusion prediction, cls_prob
def test_image_HO(self, sess, image, blobs):
feed_dict = {self.image: image, self.H_boxes: blobs['H_boxes'], self.O_boxes: blobs['O_boxes'],
self.spatial: blobs['sp'],
self.H_num: blobs['H_num'], self.Part0: blobs['Part0'],
self.Part1: blobs['Part1'], self.Part2: blobs['Part2'], self.Part3: blobs['Part3'],
self.Part4: blobs['Part4'], self.Part5: blobs['Part5'], self.Part6: blobs['Part6'],
self.Part7: blobs['Part7'], self.Part8: blobs['Part8'], self.Part9: blobs['Part9'],
self.gt_binary_label_10v: blobs['binary_label_10v']
}
cls_prob_HO = sess.run([self.predictions["cls_prob_HO"]], feed_dict=feed_dict)
return cls_prob_HO
def test_image_binary(self, sess, image, blobs):
feed_dict = {self.image: image, self.H_boxes: blobs['H_boxes'], self.O_boxes: blobs['O_boxes'],
self.spatial: blobs['sp'],
self.H_num: blobs['H_num'], self.Part0: blobs['Part0'],
self.Part1: blobs['Part1'], self.Part2: blobs['Part2'], self.Part3: blobs['Part3'],
self.Part4: blobs['Part4'], self.Part5: blobs['Part5'], self.Part6: blobs['Part6'],
self.Part7: blobs['Part7'], self.Part8: blobs['Part8'], self.Part9: blobs['Part9'],
self.gt_binary_label_10v: blobs['binary_label_10v']
}
cls_prob_binary = sess.run([self.predictions["cls_prob_binary"]], feed_dict=feed_dict)
return cls_prob_binary
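# A minimal usage sketch (hypothetical names: `net` stands for an instance of the
# network class defined earlier in this file, and `blobs` follows the feed_dict
# keys used above):
#
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       prob_binary = net.test_image_binary(sess, image, blobs)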
|
{"hexsha": "a9f45b2dc2f00774d784f3850af0496aa2a8631d", "size": 35705, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/networks/TIN_HICO_with_part.py", "max_stars_repo_name": "enlighten0707/Transferable-Interactiveness-Network", "max_stars_repo_head_hexsha": "5ffbf1d0779702225bac37d6cc5d3ddf9b17a1cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/networks/TIN_HICO_with_part.py", "max_issues_repo_name": "enlighten0707/Transferable-Interactiveness-Network", "max_issues_repo_head_hexsha": "5ffbf1d0779702225bac37d6cc5d3ddf9b17a1cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/networks/TIN_HICO_with_part.py", "max_forks_repo_name": "enlighten0707/Transferable-Interactiveness-Network", "max_forks_repo_head_hexsha": "5ffbf1d0779702225bac37d6cc5d3ddf9b17a1cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.4952531646, "max_line_length": 151, "alphanum_fraction": 0.5889371237, "include": true, "reason": "import numpy", "num_tokens": 9380}
|
import pandas as pd
import numpy as np
import os
import time
import regex as re
import math
from underthesea import word_tokenize
from utils import remove_html, remove_emojis, covert_unicode, lowercase_remove_noise_character
def clean_review(review_str):
clean_string = review_str.replace("\n","")
clean_string = " ".join(clean_string.split())
clean_string = remove_html(clean_string)
clean_string = remove_emojis(clean_string)
unicode_string = covert_unicode(clean_string)
sign_string = lowercase_remove_noise_character(unicode_string)
sign_string = re.sub(r'[^\s\wáàảãạăắằẳẵặâấầẩẫậéèẻẽẹêếềểễệóòỏõọôốồổỗộơớờởỡợíìỉĩịúùủũụưứừửữựýỳỷỹỵđ_]','',sign_string)
sign_string = " ".join(sign_string.split())
if len(sign_string) <= 1:
return '', False
token_string = word_tokenize(sign_string, format="text")
return token_string, True
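# Minimal usage sketch; the sample review string below is hypothetical.
if __name__ == "__main__":
    sample = "<p>Sản phẩm rất tốt 😀</p>"
    tokens, ok = clean_review(sample)
    if ok:
        print(tokens)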
|
{"hexsha": "0bd7599b9bafb7f88382f3595698ab168b0b3ef6", "size": 890, "ext": "py", "lang": "Python", "max_stars_repo_path": "Model/preprocess.py", "max_stars_repo_name": "Haiduongcable/DataScience-SentimentAnalysis", "max_stars_repo_head_hexsha": "2500b70d2ef66202d47e5277c8bd6ebcb8a6a905", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-11-22T15:26:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T03:59:18.000Z", "max_issues_repo_path": "Model/preprocess.py", "max_issues_repo_name": "Haiduongcable/DataScience-SentimentAnalysis", "max_issues_repo_head_hexsha": "2500b70d2ef66202d47e5277c8bd6ebcb8a6a905", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Model/preprocess.py", "max_forks_repo_name": "Haiduongcable/DataScience-SentimentAnalysis", "max_forks_repo_head_hexsha": "2500b70d2ef66202d47e5277c8bd6ebcb8a6a905", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6, "max_line_length": 119, "alphanum_fraction": 0.7584269663, "include": true, "reason": "import numpy", "num_tokens": 266}
|
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras.applications import MobileNet
import os
import numpy as np
import csv
from sklearn.metrics import classification_report, confusion_matrix
from collections import Counter
from sklearn.utils import class_weight
from utils import plot_cm
def task2():
# Create a MobileNet model
mobile = MobileNet(weights='imagenet')
# See a summary of the layers in the model
mobile.summary()
# Modify the model
# Exclude the last 5 layers of the model
x = mobile.layers[-6].output
# Add a dropout and dense layer for predictions
x = Dropout(0.25)(x)
predictions = Dense(7, activation='softmax')(x)
# Create a new model with the new outputs
model = Model(inputs=mobile.input, outputs=predictions)
# See a summary of the new layers in the model
model.summary()
# Freeze the weights of the layers that we aren't training (training the last 23)
for layer in model.layers[:-23]:
layer.trainable = False
# Compile the model
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Useful variables
data_folder = '../res/Task 2/Training'
test_folder = '../res/Task 2/Test'
total_train = 8012
total_test = 2003
labels = ["AK", "BCC", "BK", "D", "MN", "M", "VL"]
batch_size = 100
epochs = 10
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
data_folder, class_mode='categorical', batch_size=batch_size, target_size=(224, 224),)
test_generator = test_datagen.flow_from_directory(
test_folder, class_mode='categorical', batch_size=batch_size, target_size=(224, 224))
# Try to deal with class imbalance: calculate class_weights so that the minority classes have a larger weight
# than the majority classes.
class_weights = class_weight.compute_class_weight(
'balanced',
np.unique(train_generator.classes),
train_generator.classes)
class_weights = dict(enumerate(class_weights))
# Train the model
model.fit_generator(
train_generator,
steps_per_epoch=total_train // batch_size,
epochs=epochs,
class_weight=class_weights
)
# Evaluate the model accuracy with the testing dataset
scores = model.evaluate_generator(test_generator, total_test // batch_size)
print("Test accuracy = ", scores[1])
# Generate predictions with the test dataset
# softmax returns a value for each class
# the predicted class for a given sample will be the one that has the maximum value
predictions = model.predict_generator(test_generator, total_test // batch_size + 1)
y_pred = np.argmax(predictions, axis=1)
# Save the predictions in a csv file
with open('results2.csv', mode="w") as results_file:
writer = csv.writer(results_file, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
for x in predictions:
writer.writerow(x)
# Generate confusion matrix and classification report
# Helps to evaluate metrics such as accuracy, precision, recall
print('Confusion Matrix')
cm = confusion_matrix(test_generator.classes, y_pred)
print(cm)
plot_cm(cm, labels, "second.png")
print('Classification Report')
print(classification_report(test_generator.classes, y_pred, target_names=labels))
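# Entry-point guard (an assumption; the original may invoke task2() from elsewhere):
if __name__ == "__main__":
    task2()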
|
{"hexsha": "b9dd9e73252395dbca1691e25725df8c454e2328", "size": 4022, "ext": "py", "lang": "Python", "max_stars_repo_path": "T2/src/task2.py", "max_stars_repo_name": "pedromsfernandes/VCOM1920", "max_stars_repo_head_hexsha": "c50874c32e1e470bd30bed5b732737ac55ef40a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "T2/src/task2.py", "max_issues_repo_name": "pedromsfernandes/VCOM1920", "max_issues_repo_head_hexsha": "c50874c32e1e470bd30bed5b732737ac55ef40a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "T2/src/task2.py", "max_forks_repo_name": "pedromsfernandes/VCOM1920", "max_forks_repo_head_hexsha": "c50874c32e1e470bd30bed5b732737ac55ef40a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-04T01:29:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-04T01:29:57.000Z", "avg_line_length": 35.592920354, "max_line_length": 113, "alphanum_fraction": 0.6964196917, "include": true, "reason": "import numpy", "num_tokens": 896}
|
import numpy as np
from sklearn.svm import SVC
def run(x_train, y_train, x_test, y_test, clf):
clf.fit(x_train, y_train)
return clf.score(x_test, y_test)
def split(x,y,k,m):
ns = int(y.shape[0]/m)
s = []
for i in range(m):
s.append([x[(ns*i):(ns*i+ns)], y[(ns*i):(ns*i+ns)]])
x_test, y_test = s[k]
x_train = []
y_train = []
for i in range(m):
if (i==k):
continue
else:
a,b = s[i]
x_train.append(a)
y_train.append(b)
x_train = np.array(x_train).reshape(((m-1)*ns,30))
y_train = np.array(y_train).reshape((m-1)*ns)
return [x_train, y_train, x_test, y_test]
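# Example with hypothetical shapes: for m=5 folds over 565 samples of 30 features,
# split(x, y, k=0, m=5) yields 452 training rows and 113 test rows (any remainder
# samples beyond a multiple of m are dropped). Note the feature width 30 is
# hardcoded above to match the breast cancer dataset loaded in main().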
def main():
m = 5
x = np.load("../data/breast/bc_features_standard.npy")
y = np.load("../data/breast/bc_labels.npy")
idx = np.argsort(np.random.random(y.shape[0]))
x = x[idx]
y = y[idx]
Cs = np.array([0.01,0.1,1.0,2.0,10.0,50.0,100.0])
gs = (1./30)*2.0**np.array([-4,-3,-2,-1,0,1,2,3])
zmax = 0.0
for C in Cs:
for g in gs:
z = np.zeros(m)
for k in range(m):
x_train, y_train, x_test, y_test = split(x,y,k,m)
z[k] = run(x_train, y_train, x_test, y_test, SVC(C=C,gamma=g,kernel="rbf"))
if (z.mean() > zmax):
zmax = z.mean()
bestC = C
bestg = g
print("best C = %0.5f" % bestC)
print(" gamma = %0.5f" % bestg)
print(" accuracy= %0.5f" % zmax)
main()
|
{"hexsha": "15b4bab6c47fac7f74056bd62e3e4f1c59fb3d46", "size": 1520, "ext": "py", "lang": "Python", "max_stars_repo_path": "chapter_07/bc_rbf_svm_search.py", "max_stars_repo_name": "haloway13/PracticalDeepLearningPython", "max_stars_repo_head_hexsha": "c3760b17945c9389421c2970a3d16c6528fb7af6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2021-02-25T00:52:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T02:04:50.000Z", "max_issues_repo_path": "chapter_07/bc_rbf_svm_search.py", "max_issues_repo_name": "rkneusel9/PracticalDeepLearningWithPython", "max_issues_repo_head_hexsha": "561004e76b3e0828a59952874443384c31b6d84e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapter_07/bc_rbf_svm_search.py", "max_forks_repo_name": "rkneusel9/PracticalDeepLearningWithPython", "max_forks_repo_head_hexsha": "561004e76b3e0828a59952874443384c31b6d84e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2021-03-18T11:22:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T21:10:42.000Z", "avg_line_length": 28.1481481481, "max_line_length": 91, "alphanum_fraction": 0.5, "include": true, "reason": "import numpy", "num_tokens": 507}
|
# This third demo shows how a robot swarm can autonomously choose an open curve shape and form
# the shape in a distributed way. This simulation shares the same strategy with the second demo
# for organizing the robots, but it needs no role assignment on the open curve.
# input arguments:
# '-n': number of robots
# '--manual': manual mode, press ENTER to proceed between simulations
# Description:
# Starting dispersed at random positions, the swarm aggregates to form a straight line
# with the merging method. From there, the following steps run repeatedly. The robots first make
# a collective decision on which open curve shape to form, then adjust the local shapes so the
# curve reshapes to the target shape.
# The simulations run consecutively:
# Simulation 1: aggregation to form a straight line
# Simulation 2: consensus decision making for the target curve shape
# Simulation 3: curve reshape
# It's not a good demo.
from __future__ import print_function
import pygame
import sys, os, getopt, math
import numpy as np
import pickle
swarm_size = 30 # default swarm size
manual_mode = False # manually press enter key to proceed between simulations
# read command line options
try:
opts, args = getopt.getopt(sys.argv[1:], 'n:', ['manual'])
except getopt.GetoptError as err:
print(str(err))
sys.exit()
for opt,arg in opts:
if opt == '-n':
swarm_size = int(arg)
elif opt == '--manual':
manual_mode = True
# calculate world size and screen size
power_exponent = 1.3 # between 1.0 and 2.0
# the larger the parameter, the slower the window grows with swarm size; and vice versa
pixels_per_length = 50 # fixed
# calculate world_side_coef from a desired screen size for 30 robots
def cal_world_side_coef():
desired_screen_size = 600 # desired screen size for 30 robots
desired_world_size = float(desired_screen_size) / pixels_per_length
return desired_world_size / pow(30, 1/power_exponent)
world_side_coef = cal_world_side_coef()
world_side_length = world_side_coef * pow(swarm_size, 1/power_exponent)
world_size = (world_side_length, world_side_length) # square physical world
# screen size calculated from world size
screen_side_length = int(pixels_per_length * world_side_length)
screen_size = (screen_side_length, screen_side_length) # square display world
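# Worked example with the defaults above: for 30 robots the desired world side is
# 600/50 = 12.0, so world_side_coef = 12.0/30**(1/1.3) and the display window is
# 600x600 pixels; larger swarms enlarge the window only sublinearly, governed by
# power_exponent.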
# formation configuration
comm_range = 0.65
desired_space_ratio = 0.8
desired_space = comm_range * desired_space_ratio
# deviate robot heading, so as to avoid robots travelling perpendicular to the walls
perp_thres = math.pi/18 # threshold range around the perpendicular direction
devia_angle = math.pi/9 # deviate this much from the perpendicular direction
# consensus configuration
curve_folder = "curve-data" # folder to store the curve shapes
shape_catalog = ["ARM", "CWRU", "DIRL", "KID", "MAD", "squarehelix"]
shape_quantity = len(shape_catalog)
shape_decision = -1 # the index of chosen decision, in range(shape_quantity)
# also the index in shape_catalog
# variable to force shape to different choices, for video recording
force_shape_set = range(shape_quantity)
# robot properties
robot_poses = np.random.rand(swarm_size, 2) * world_side_length # initialize the robot poses
dist_table = np.zeros((swarm_size, swarm_size)) # distances between robots
conn_table = np.zeros((swarm_size, swarm_size)) # connections between robots
# 0 for disconnected, 1 for connected
conn_lists = [[] for i in range(swarm_size)] # lists of robots connected
# function for all simulations, update the distances and connections between the robots
def dist_conn_update():
global dist_table
global conn_table
global conn_lists
conn_lists = [[] for i in range(swarm_size)] # empty the lists
for i in range(swarm_size):
for j in range(i+1, swarm_size):
dist_temp = np.linalg.norm(robot_poses[i] - robot_poses[j])
dist_table[i,j] = dist_temp
dist_table[j,i] = dist_temp
if dist_temp > comm_range:
conn_table[i,j] = 0
conn_table[j,i] = 0
else:
conn_table[i,j] = 1
conn_table[j,i] = 1
conn_lists[i].append(j)
conn_lists[j].append(i)
dist_conn_update() # update the distances and connections
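# Note: the pairwise update above is O(swarm_size^2) per call, which is fine at
# these swarm sizes.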
disp_poses = [] # display positions
# function for all simulations, update the display positions
def disp_poses_update():
global disp_poses
poses_temp = robot_poses / world_side_length
poses_temp[:,1] = 1.0 - poses_temp[:,1]
poses_temp = poses_temp * screen_side_length
disp_poses = poses_temp.astype(int) # convert to int and assign to disp_poses
disp_poses_update()
# deciding the seed robots, used in simulations with moving robots
seed_percentage = 0.1 # the percentage of seed robots in the swarm
seed_quantity = min(max(int(swarm_size*seed_percentage), 1), swarm_size)
# no smaller than 1, and no larger than swarm_size
robot_seeds = [False for i in range(swarm_size)] # whether a robot is a seed robot
# only seed robot can initialize the forming a new group
seed_list_temp = np.arange(swarm_size)
np.random.shuffle(seed_list_temp)
for i in seed_list_temp[:seed_quantity]:
robot_seeds[i] = True
# visualization configuration
color_white = (255,255,255)
color_black = (0,0,0)
color_grey = (128,128,128)
color_red = (255,0,0)
distinct_color_set = ((230,25,75), (60,180,75), (255,225,25), (0,130,200), (245,130,48),
(145,30,180), (70,240,240), (240,50,230), (210,245,60), (250,190,190),
(0,128,128), (230,190,255), (170,110,40), (128,0,0),
(170,255,195), (128,128,0), (0,0,128))
color_quantity = 17
robot_size = 5
robot_empty_width = 2
conn_width = 2
# sizes for consensus simulation
robot_size_consensus = 7
conn_width_thin_consensus = 2
conn_width_thick_consensus = 4
# set up the simulation window
pygame.init()
font = pygame.font.SysFont("Cabin", 12)
icon = pygame.image.load("icon_geometry_art.jpg")
pygame.display.set_icon(icon)
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Demo 3")
# draw the network
screen.fill(color_white)
for i in range(swarm_size):
pygame.draw.circle(screen, color_black, disp_poses[i], robot_size,
robot_empty_width)
pygame.display.update()
# pause to check the network before the simulations, or for screen recording
raw_input("<Press Enter to continue>")
# function for simulation 1, group robots by their group ids, and find the largest group
def S1_robot_grouping(robot_list, robot_group_ids, groups):
# the input list 'robot_list' should not be empty
groups_temp = {} # key is group id, value is list of robots
for i in robot_list:
group_id_temp = robot_group_ids[i]
if group_id_temp not in groups_temp.keys():
groups_temp[group_id_temp] = [i]
else:
groups_temp[group_id_temp].append(i)
group_id_max = -1 # the group with most members
    # handle both cases: a single group or multiple groups in groups_temp
if len(groups_temp.keys()) > 1: # there is more than one group
# find the largest group and disassemble the rest
group_id_max = groups_temp.keys()[0]
size_max = len(groups[group_id_max][0])
for group_id_temp in groups_temp.keys()[1:]:
size_temp = len(groups[group_id_temp][0])
if size_temp > size_max:
group_id_max = group_id_temp
size_max = size_temp
else: # only one group, automatically the largest one
group_id_max = groups_temp.keys()[0]
return groups_temp, group_id_max
# function for simulation 1, find the closest robot to a host robot
# use global variable "dist_table"
def S1_closest_robot(robot_host, robot_neighbors):
# "robot_host": the robot to measure distance from
# "robot_neighbors": a list of robots to be compared with
robot_closest = robot_neighbors[0]
dist_closest = dist_table[robot_host,robot_closest]
for i in robot_neighbors[1:]:
dist_temp = dist_table[robot_host,i]
if dist_temp < dist_closest:
robot_closest = i
dist_closest = dist_temp
return robot_closest
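# (For reference, the scan above is equivalent to
#  min(robot_neighbors, key=lambda j: dist_table[robot_host, j]).)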
# general function to steer robot away from wall if out of boundary (following physics)
# use global variable "world_side_length"
def robot_boundary_check(robot_pos, robot_ori):
new_ori = robot_ori
if robot_pos[0] >= world_side_length: # outside of right boundary
if math.cos(new_ori) > 0:
new_ori = reset_radian(2*(math.pi/2) - new_ori)
            # further check whether the new angle is too close to perpendicular
if new_ori > 0:
if (math.pi - new_ori) < perp_thres:
new_ori = new_ori - devia_angle
else:
if (new_ori + math.pi) < perp_thres:
new_ori = new_ori + devia_angle
elif robot_pos[0] <= 0: # outside of left boundary
if math.cos(new_ori) < 0:
new_ori = reset_radian(2*(math.pi/2) - new_ori)
if new_ori > 0:
if new_ori < perp_thres:
new_ori = new_ori + devia_angle
else:
if (-new_ori) < perp_thres:
new_ori = new_ori - devia_angle
if robot_pos[1] >= world_side_length: # outside of top boundary
if math.sin(new_ori) > 0:
new_ori = reset_radian(2*(0) - new_ori)
if new_ori > -math.pi/2:
if (new_ori + math.pi/2) < perp_thres:
new_ori = new_ori + devia_angle
else:
if (-math.pi/2 - new_ori) < perp_thres:
new_ori = new_ori - devia_angle
elif robot_pos[1] <= 0: # outside of bottom boundary
if math.sin(new_ori) < 0:
new_ori = reset_radian(2*(0) - new_ori)
if new_ori > math.pi/2:
if (new_ori - math.pi/2) < perp_thres:
new_ori = new_ori + devia_angle
else:
if (math.pi/2 - new_ori) < perp_thres:
new_ori = new_ori - devia_angle
return new_ori
# general function to reset radian angle to [-pi, pi)
def reset_radian(radian):
while radian >= math.pi:
radian = radian - 2*math.pi
while radian < -math.pi:
radian = radian + 2*math.pi
return radian
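# Examples: reset_radian(3*math.pi/2) returns -math.pi/2, and reset_radian(-math.pi)
# returns -math.pi unchanged, since the range is half-open: [-pi, pi).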
########### simulation 1: aggregate together to form a straight line ###########
print("##### simulation 1: line formation #####")
# robot properties
# all robots start with state '-1'
robot_states = np.array([-1 for i in range(swarm_size)])
# '-1' for wandering around, ignoring all connections
# '0' for wandering around, available to connection
# '1' for in a group, transit state, only one key neighbor
# '2' for in a group, both key neighbors secured
n1_life_lower = 2 # inclusive
n1_life_upper = 6 # exclusive
robot_n1_lives = np.random.uniform(n1_life_lower, n1_life_upper, swarm_size)
robot_oris = np.random.rand(swarm_size) * 2 * math.pi - math.pi # in range of [-pi, pi)
robot_key_neighbors = [[] for i in range(swarm_size)] # key neighbors for robot on the line
# for state '1' robot: one key neighbor
    # for state '2' robot: two key neighbors, on its left and right sides
# robots on the two ends will have one key neighbor being '-1'
# group properties
groups = {}
# key is the group id, value is a list, in the list:
# [0]: a list of robots in the group, both state '1' and '2'
# [1]: remaining life time of the group
# [2]: whether or not being the dominant group
life_incre = 5 # number of seconds added to the life of a group when new robot joins
group_id_upper = swarm_size # upper limit of group id
robot_group_ids = np.array([-1 for i in range(swarm_size)]) # group id for the robots
# '-1' for not in a group
# movement configuration
step_moving_dist = 0.05
destination_error = 0.08
# for adjusting line space
space_good_thres = desired_space * 0.9
# the loop for simulation 1
sim_halted = False
time_last = pygame.time.get_ticks()
time_now = time_last
frame_period = 50
sim_freq_control = True
iter_count = 0
line_formed = False
ending_period = 5.0 # grace period
print("swarm robots are forming a straight line ...")
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: # close window button is clicked
print("program exit in simulation 1")
sys.exit() # exit the entire program
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
                sim_halted = not sim_halted # reverse the pause flag
    if sim_halted: continue
# simulation frequency control
if sim_freq_control:
time_now = pygame.time.get_ticks()
if (time_now - time_last) > frame_period:
time_last = time_now
else:
continue
# increase iteration count
iter_count = iter_count + 1
# state transition variables
st_n1to0 = [] # robot '-1' gets back to '0' after life time ends
# list of robots changing to '0' from '-1'
st_gton1 = [] # group disassembles either because life expires, or triggered by others
# list of groups to be disassembled
    st_0to1 = {} # robot '0' detects robot '2', joins its group
# key is the robot '0', value is the group id
st_0to2 = {} # robot '0' detects another robot '0', forming a new group
# key is the robot '0', value is the other neighbor robot '0'
st_1to2 = {} # robot '1' finds another key neighbor, becoming '2'
# key is the robot '1', value is its key neighbor and merging side
# sides: 0 for left side, 1 for right side
# check state transitions, and schedule the tasks
dist_conn_update()
for i in range(swarm_size):
if robot_states[i] == -1: # for host robot with state '-1'
if robot_n1_lives[i] < 0:
st_n1to0.append(i)
else:
if len(conn_lists[i]) == 0: continue
state12_list = []
for j in conn_lists[i]:
if robot_states[j] == 1 or robot_states[j] == 2:
state12_list.append(j)
# disassemble minority groups
if len(state12_list) != 0:
groups_local, group_id_max = S1_robot_grouping(
state12_list, robot_group_ids, groups)
if len(groups_local.keys()) > 1:
# disassemble all groups except the largest one
for group_id_temp in groups_local.keys():
if ((group_id_temp != group_id_max) and
(group_id_temp not in st_gton1)):
# schedule to disassemble this group
st_gton1.append(group_id_temp)
elif robot_states[i] == 0: # for host robot with state '0'
if len(conn_lists[i]) == 0: continue
state2_list = []
state1_list = []
state0_list = []
for j in conn_lists[i]:
if robot_states[j] == 2:
state2_list.append(j)
elif robot_states[j] == 1:
state1_list.append(j)
elif robot_states[j] == 0:
state0_list.append(j)
state2_quantity = len(state2_list)
state1_quantity = len(state1_list)
state0_quantity = len(state0_list)
# disassemble minority groups if there are multiple groups
if state2_quantity + state1_quantity > 1:
# there is in-group robot in the neighbors
groups_local, group_id_max = S1_robot_grouping(state2_list+state1_list,
robot_group_ids, groups)
                # disassemble all groups except the largest one
for group_id_temp in groups_local.keys():
if (group_id_temp != group_id_max) and (group_id_temp not in st_gton1):
st_gton1.append(group_id_temp) # schedule to disassemble this group
# responses to the state '2' and '0' robots
if state2_quantity != 0:
# join the group with state '2' robots
if state2_quantity == 1: # only one state '2' robot
# join the group of the state '2' robot
st_0to1[i] = robot_group_ids[state2_list[0]]
robot_key_neighbors[i] = [state2_list[0]] # add key neighbor
else: # multiple state '2' robots
# it's possible that the state '2' robots are in different groups
# find the closest one in the largest group, and join the group
groups_local, group_id_max = S1_robot_grouping(state2_list,
robot_group_ids, groups)
robot_closest = S1_closest_robot(i, groups_local[group_id_max])
st_0to1[i] = group_id_max
robot_key_neighbors[i] = [robot_closest] # add key neighbor
elif state0_quantity != 0:
# form new group with state '0' robots
st_0to2[i] = S1_closest_robot(i, state0_list)
elif (robot_states[i] == 1) or (robot_states[i] == 2):
# disassemble the minority groups
            state12_list = [] # list of state '1' and '2' robots among the neighbors
has_other_group = False
host_group_id = robot_group_ids[i]
for j in conn_lists[i]:
if (robot_states[j] == 1) or (robot_states[j] == 2):
state12_list.append(j)
if robot_group_ids[j] != host_group_id:
has_other_group = True
if has_other_group:
groups_local, group_id_max = S1_robot_grouping(state12_list,
robot_group_ids, groups)
for group_id_temp in groups_local.keys():
if (group_id_temp != group_id_max) and (group_id_temp not in st_gton1):
st_gton1.append(group_id_temp) # schedule to disassemble this group
# check if state '1' robot is qualified for becoming state '2'
if robot_states[i] == 1:
key = robot_key_neighbors[i][0]
if (robot_key_neighbors[key][0] == -1 or robot_key_neighbors[key][1] == -1):
# the key neighbor is on one end of the line
if (robot_key_neighbors[key][0] == -1 and
robot_key_neighbors[key][1] != -1): # key is at left end
key_next = robot_key_neighbors[key][1]
vect_next = robot_poses[key_next] - robot_poses[key]
vect_i = robot_poses[i] - robot_poses[key]
# whether to merge depends on the side
if np.dot(vect_next,vect_i) > 0:
if key_next in conn_lists[i]:
st_1to2[i] = [key, 1]
else:
st_1to2[i] = [key, 0]
elif (robot_key_neighbors[key][0] != -1 and
robot_key_neighbors[key][1] == -1): # key is at right end
key_next = robot_key_neighbors[key][0]
vect_next = robot_poses[key_next] - robot_poses[key]
vect_i = robot_poses[i] - robot_poses[key]
if np.dot(vect_next,vect_i) > 0:
if key_next in conn_lists[i]:
st_1to2[i] = [key, 0]
else:
st_1to2[i] = [key, 1]
else:
print("key neighbor error(st check)")
sys.exit()
else: # the key neighbor is in the middle of the line
key_left = robot_key_neighbors[key][0]
key_right = robot_key_neighbors[key][1]
if (key_left in conn_lists[i] and key_right in conn_lists[i]):
side = -1
if dist_table[i,key_left] < dist_table[i,key_right]: side = 0
else: side = 1
st_1to2[i] = [key, side]
elif (key_left in conn_lists[i] and key_right not in conn_lists[i]):
st_1to2[i] = [key, 0]
elif (key_left not in conn_lists[i] and key_right in conn_lists[i]):
st_1to2[i] = [key, 1]
# check the life time of the groups; schedule disassembling if expired
for group_id_temp in groups.keys():
if groups[group_id_temp][1] < 0: # life time of a group ends
if group_id_temp not in st_gton1:
st_gton1.append(group_id_temp)
# process the state transitions
# 1.st_1to2, state '1' becomes state '2'
while len(st_1to2.keys()) != 0:
joiner = st_1to2.keys()[0]
key = st_1to2[joiner][0]
side = st_1to2[joiner][1]
side_other = 1 - side
st_1to2.pop(joiner)
if robot_key_neighbors[key][side] == -1: # join at one end
            # check if other robots are joining the same slot
key_rest = st_1to2.keys()[:]
for joiner_temp in key_rest:
if (st_1to2[joiner_temp][0] == key and st_1to2[joiner_temp][1] == side):
# "joiner_temp" is joining same slot as "joiner"
st_1to2.pop(joiner_temp)
if dist_table[key,joiner] > dist_table[key,joiner_temp]:
joiner = joiner_temp
# merge the robot at the end
robot_states[joiner] = 2
if side == 0: robot_key_neighbors[joiner] = [-1,key]
else: robot_key_neighbors[joiner] = [key,-1]
robot_key_neighbors[key][side] = joiner
else: # join in between
key_other = robot_key_neighbors[key][side]
side_other = 1 - side
des_pos = (robot_poses[key] + robot_poses[key_other]) / 2.0
            # check if other robots are joining the same slot
key_rest = st_1to2.keys()[:]
for joiner_temp in key_rest:
if ((st_1to2[joiner_temp][0] == key and st_1to2[joiner_temp][1] == side) or
                    (st_1to2[joiner_temp][0] == key_other and st_1to2[joiner_temp][1] == side_other)):
# "joiner_temp" is joining same slot as "joiner"
st_1to2.pop(joiner_temp)
if dist_table[key,joiner] > dist_table[key,joiner_temp]:
joiner = joiner_temp
# merge the robot in between
robot_states[joiner] = 2
if side == 0:
robot_key_neighbors[joiner] = [key_other,key]
else:
robot_key_neighbors[joiner] = [key,key_other]
robot_key_neighbors[key][side] = joiner
robot_key_neighbors[key_other][side_other] = joiner
# 2.st_0to1, robot '0' joins a group, becoming '1'
for joiner in st_0to1.keys():
group_id_temp = st_0to1[joiner]
# update the robot properties
robot_states[joiner] = 1
robot_group_ids[joiner] = group_id_temp
# update the group properties
groups[group_id_temp][0].append(joiner)
groups[group_id_temp][1] = groups[group_id_temp][1] + life_incre
# 3.st_0to2, robot '0' forms new group with '0', both becoming '2'
while len(st_0to2.keys()) != 0:
pair0 = st_0to2.keys()[0]
pair1 = st_0to2[pair0]
st_0to2.pop(pair0)
if (pair1 in st_0to2.keys()) and (st_0to2[pair1] == pair0):
st_0to2.pop(pair1)
# only forming a group if there is at least one seed robot in the pair
if robot_seeds[pair0] or robot_seeds[pair1]:
# forming new group for robot pair0 and pair1
group_id_temp = np.random.randint(0, group_id_upper)
while group_id_temp in groups.keys():
group_id_temp = np.random.randint(0, group_id_upper)
# update properties of the robots
robot_states[pair0] = 2
robot_states[pair1] = 2
robot_group_ids[pair0] = group_id_temp
robot_group_ids[pair1] = group_id_temp
robot_key_neighbors[pair0] = [-1,pair1] # pair0 automatically becomes left end
robot_key_neighbors[pair1] = [pair0,-1] # pair1 becomes right end
# update properties of the group
groups[group_id_temp] = [[pair0, pair1], life_incre*2, False]
# 4.st_gton1, groups get disassembled, life time ends or triggered by others
for group_id_temp in st_gton1:
for robot_temp in groups[group_id_temp][0]:
robot_states[robot_temp] = -1
robot_n1_lives[robot_temp] = np.random.uniform(n1_life_lower, n1_life_upper)
robot_group_ids[robot_temp] = -1
robot_oris[robot_temp] = np.random.rand() * 2 * math.pi - math.pi
robot_key_neighbors[robot_temp] = []
groups.pop(group_id_temp)
# 5.st_n1to0, life time of robot '-1' ends, get back to '0'
for robot_temp in st_n1to0:
robot_states[robot_temp] = 0
# check if a group becomes dominant
for group_id_temp in groups.keys():
if len(groups[group_id_temp][0]) > swarm_size/2.0:
groups[group_id_temp][2] = True
else:
groups[group_id_temp][2] = False
# update the physics
robot_poses_t = np.copy(robot_poses) # as old poses
no_state1_robot = True
    # space on the line is good (not too crowded)
line_space_good = np.array([-1 for i in range(swarm_size)])
for i in range(swarm_size):
if robot_states[i] == 2:
if robot_key_neighbors[i][0] == -1:
key_next = robot_key_neighbors[i][1]
if (dist_table[i,key_next] > space_good_thres):
line_space_good[i] = 1
else:
line_space_good[i] = 0
elif robot_key_neighbors[i][1] == -1:
key_next = robot_key_neighbors[i][0]
if (dist_table[i,key_next] > space_good_thres):
line_space_good[i] = 1
else:
line_space_good[i] = 0
else:
key_left = robot_key_neighbors[i][0]
key_right = robot_key_neighbors[i][1]
if (dist_table[i,key_left] > space_good_thres and
dist_table[i,key_right] > space_good_thres):
line_space_good[i] = 1
else:
line_space_good[i] = 0
for i in range(swarm_size):
# adjusting moving direction for state '1' and '2' robots
if robot_states[i] == 1:
no_state1_robot = False
# rotating around its key neighbor, get closer to the other key neighbor
center = robot_key_neighbors[i][0] # the center robot
if dist_table[i,center] > (desired_space + step_moving_dist):
# moving toward the center robot
vect_temp = robot_poses_t[center] - robot_poses_t[i]
robot_oris[i] = math.atan2(vect_temp[1], vect_temp[0])
elif (dist_table[i,center] + step_moving_dist) < desired_space:
# moving away from the center robot
vect_temp = robot_poses_t[i] - robot_poses_t[center]
robot_oris[i] = math.atan2(vect_temp[1], vect_temp[0])
else:
# moving tangent along the circle of radius of "desired_space"
# find the rotating direction to the closer potential neighbor
rotate_dir = 0 # 1 for ccw, -1 for cw
if (robot_key_neighbors[center][0] == -1 or
robot_key_neighbors[center][1] == -1):
key_next = -1 # rotating toward to key_next
if (robot_key_neighbors[center][0] == -1 and
robot_key_neighbors[center][1] != -1):
key_next = robot_key_neighbors[center][1]
elif (robot_key_neighbors[center][0] != -1 and
robot_key_neighbors[center][1] == -1):
key_next = robot_key_neighbors[center][0]
else:
print("key neighbor error(physics update1)")
sys.exit()
vect_next = robot_poses[key_next] - robot_poses[center]
vect_i = robot_poses[i] - robot_poses[center]
if np.dot(vect_next, vect_i) > 0:
if np.cross(vect_i, vect_next) > 0: rotate_dir = 1
else: rotate_dir = -1
else: continue # stay in position if out at one end
else:
key_left = robot_key_neighbors[center][0]
key_right = robot_key_neighbors[center][1]
key_next = -1
if dist_table[i,key_left] < dist_table[i,key_right]: key_next = key_left
else: key_next = key_right
vect_next = robot_poses[key_next] - robot_poses[center]
vect_i = robot_poses[i] - robot_poses[center]
if np.cross(vect_i, vect_next) > 0: rotate_dir = 1
else: rotate_dir = -1
# calculate the new moving direction
vect_i = robot_poses[i] - robot_poses[center]
robot_oris[i] = math.atan2(vect_i[1], vect_i[0])
int_angle_temp = math.acos((math.pow(dist_table[i,center],2) +
math.pow(step_moving_dist,2) - math.pow(desired_space,2)) /
(2.0*dist_table[i,center]*step_moving_dist))
robot_oris[i] = reset_radian(robot_oris[i] +
rotate_dir*(math.pi - int_angle_temp))
elif robot_states[i] == 2:
# adjusting position to maintain the line
if (robot_key_neighbors[i][0] == -1 or robot_key_neighbors[i][1] == -1):
key = -1
vect_line = np.zeros(2)
if (robot_key_neighbors[i][0] == -1 and robot_key_neighbors[i][1] != -1):
key = robot_key_neighbors[i][1]
if robot_key_neighbors[key][1] == -1:
vect_line = robot_poses[i] - robot_poses[key]
vect_line = vect_line / np.linalg.norm(vect_line)
else:
key_other = robot_key_neighbors[key][1]
vect_line = robot_poses[key] - robot_poses[key_other]
vect_line = vect_line / np.linalg.norm(vect_line)
elif (robot_key_neighbors[i][0] != -1 and robot_key_neighbors[i][1] == -1):
key = robot_key_neighbors[i][0]
if robot_key_neighbors[key][0] == -1:
vect_line = robot_poses[i] - robot_poses[key]
vect_line = vect_line / np.linalg.norm(vect_line)
else:
key_other = robot_key_neighbors[key][0]
vect_line = robot_poses[key] - robot_poses[key_other]
vect_line = vect_line / np.linalg.norm(vect_line)
else:
print("key neighbor error(physics update2)")
sys.exit()
des_pos = robot_poses[key] + vect_line*desired_space
vect_des = des_pos - robot_poses[i]
if np.linalg.norm(vect_des) < destination_error:
continue
else:
robot_oris[i] = math.atan2(vect_des[1], vect_des[0])
else:
key_left = robot_key_neighbors[i][0]
key_right = robot_key_neighbors[i][1]
if (line_space_good[i] == 0 and
line_space_good[key_left] != line_space_good[key_right]):
if line_space_good[key_left] == 1:
vect_left = robot_poses[key_left] - robot_poses[i]
robot_oris[i] = math.atan2(vect_left[1], vect_left[0])
else:
vect_right = robot_poses[key_right] - robot_poses[i]
robot_oris[i] = math.atan2(vect_right[1], vect_right[0])
else:
des_pos = (robot_poses[key_left] + robot_poses[key_right])/2.0
des_vect = des_pos - robot_poses[i]
if np.linalg.norm(des_vect) < destination_error:
continue # stay in position if within destination error
else:
robot_oris[i] = math.atan2(des_vect[1], des_vect[0])
# check if out of boundaries
if (robot_states[i] == -1) or (robot_states[i] == 0):
# only applies for state '-1' and '0'
robot_oris[i] = robot_boundary_check(robot_poses_t[i], robot_oris[i])
# update one step of move
robot_poses[i] = robot_poses_t[i] + (step_moving_dist *
np.array([math.cos(robot_oris[i]), math.sin(robot_oris[i])]))
# update the graphics
disp_poses_update()
screen.fill(color_white)
# draw the robots of states '-1' and '0'
for i in range(swarm_size):
if robot_seeds[i]:
color_temp = color_red
else:
color_temp = color_grey
if robot_states[i] == -1: # empty circle for state '-1' robot
pygame.draw.circle(screen, color_temp, disp_poses[i],
robot_size, robot_empty_width)
elif robot_states[i] == 0: # full circle for state '0' robot
pygame.draw.circle(screen, color_temp, disp_poses[i],
robot_size, 0)
# if robot_states[i] == -1: # empty circle for state '-1' robot
# pygame.draw.circle(screen, color_grey, disp_poses[i],
# robot_size, robot_empty_width)
# elif robot_states[i] == 0: # full circle for state '0' robot
# if robot_seeds[i]: # black color for seed robot
# pygame.draw.circle(screen, color_black, disp_poses[i],
# robot_size, 0)
# else: # grey for non-seed robot
# pygame.draw.circle(screen, color_grey, disp_poses[i],
# robot_size, 0)
# draw the in-group robots by group
for group_id_temp in groups.keys():
if groups[group_id_temp][2]:
# highlight the dominant group with black color
color_group = color_black
else:
color_group = color_grey
        conn_draw_sets = [] # avoid drawing the same connection twice
# draw the robots and connections in the group
for i in groups[group_id_temp][0]:
for j in robot_key_neighbors[i]:
if j == -1: continue
if set([i,j]) not in conn_draw_sets:
pygame.draw.line(screen, color_group, disp_poses[i],
disp_poses[j], conn_width)
conn_draw_sets.append(set([i,j]))
# draw robots in the group
# if robot_seeds[i]: # force color black for seed robot
# pygame.draw.circle(screen, color_black, disp_poses[i],
# robot_size, 0)
# else:
# pygame.draw.circle(screen, color_group, disp_poses[i],
# robot_size, 0)
if robot_seeds[i]: # force color red for seed robot
pygame.draw.circle(screen, color_red, disp_poses[i],
robot_size, 0)
else:
pygame.draw.circle(screen, color_group, disp_poses[i],
robot_size, 0)
pygame.display.update()
# reduce life time of robot '-1' and groups
for i in range(swarm_size):
if robot_states[i] == -1:
robot_n1_lives[i] = robot_n1_lives[i] - frame_period/1000.0
for group_id_temp in groups.keys():
if not groups[group_id_temp][2]: # skip dominant group
groups[group_id_temp][1] = groups[group_id_temp][1] - frame_period/1000.0
# check exit condition of simulation 1
if not line_formed:
if ((len(groups.keys()) == 1) and (len(groups.values()[0][0]) == swarm_size)
and no_state1_robot):
line_formed = True
if line_formed:
if ending_period <= 0:
print("simulation 1 is finished")
if manual_mode: raw_input("<Press Enter to continue>")
print("") # empty line
break
else:
ending_period = ending_period - frame_period/1000.0
# check if the line is complete; list robots' order on the line
robot_starter = -1
for i in range(swarm_size):
if robot_key_neighbors[i][0] == -1:
robot_starter = i
break
line_robots = [robot_starter] # robots on the line, in order
robot_curr = robot_starter
while (robot_key_neighbors[robot_curr][1] != -1):
robot_next = robot_key_neighbors[robot_curr][1]
line_robots.append(robot_next)
robot_curr = robot_next
if (len(set(line_robots)) != swarm_size):
print("line is incomplete after line formation")
sys.exit()
# # store the variable "robot_poses", "robot_key_neighbors"
# tmp_filepath = os.path.join('tmp', 'demo3_30_robot_poses')
# # tmp_filepath = os.path.join('tmp', 'demo3_100_robot_poses')
# with open(tmp_filepath, 'w') as f:
# pickle.dump([robot_poses, robot_key_neighbors, line_robots], f)
# raw_input("<Press Enter to continue>")
# sys.exit()
# # restore variable "robot_poses", "robot_key_neighbors"
# tmp_filepath = os.path.join('tmp', 'demo3_30_robot_poses')
# # tmp_filepath = os.path.join('tmp', 'demo3_100_robot_poses')
# with open(tmp_filepath) as f:
# robot_poses, robot_key_neighbors, line_robots = pickle.load(f)
# simulation 2 and 3 will run repeatedly since here
while True:
########### simulation 2: consensus decision making for target curve shape ###########
print("##### simulation 2: consensus decision making #####")
# shift the robots to the middle of the window
x_max, y_max = np.amax(robot_poses, axis=0)
x_min, y_min = np.amin(robot_poses, axis=0)
robot_middle = np.array([(x_max+x_min)/2.0, (y_max+y_min)/2.0])
world_middle = np.array([world_side_length/2.0, world_side_length/2.0])
for i in range(swarm_size):
robot_poses[i] = robot_poses[i] - robot_middle + world_middle
# draw the network for the first time
disp_poses_update()
screen.fill(color_white)
for i in range(swarm_size):
pygame.draw.circle(screen, color_black, disp_poses[i], robot_size, 0)
if robot_key_neighbors[i][1] == -1: continue
pygame.draw.line(screen, color_black, disp_poses[i],
disp_poses[robot_key_neighbors[i][1]], conn_width)
pygame.display.update()
# initialize the decision making variables
shape_decision = -1
deci_dist = np.random.rand(swarm_size, shape_quantity)
sum_temp = np.sum(deci_dist, axis=1)
for i in range(swarm_size):
deci_dist[i] = deci_dist[i] / sum_temp[i]
deci_domi = np.argmax(deci_dist, axis=1)
groups = [] # group robots by local consensus
robot_group_sizes = [0 for i in range(swarm_size)]
# color assignment
color_initialized = False
deci_colors = [-1 for i in range(shape_quantity)]
color_assigns = [0 for i in range(color_quantity)]
group_colors = []
robot_colors = [0 for i in range(swarm_size)]
# decision making control variables
dist_diff_thres = 0.3
dist_diff_ratio = [0.0 for i in range(swarm_size)]
dist_diff_power = 0.3
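    # Summary of the update rule in the loop below: when neighbors agree on the
    # dominant decision, a robot takes an equal-weight average of the distributions
    # and then sharpens it with a linear multiplier (entries range from small_end to
    # large_end and sum to 1); when they disagree, it takes a group-size-weighted
    # average instead, so larger groups pull harder toward their decision.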
# the loop for simulation 2
    sim_halted = False
time_last = pygame.time.get_ticks()
time_now = time_last
frame_period = 500
sim_freq_control = True
iter_count = 0
sys.stdout.write("iteration {}".format(iter_count))
sys.stdout.flush()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: # close window button is clicked
print("program exit in simulation 2")
sys.exit() # exit the entire program
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
                    sim_halted = not sim_halted # reverse the pause flag
        if sim_halted: continue
# simulation frequency control
if sim_freq_control:
time_now = pygame.time.get_ticks()
if (time_now - time_last) > frame_period:
time_last = time_now
else:
continue
# increase iteration count
iter_count = iter_count + 1
sys.stdout.write("\riteration {}".format(iter_count))
sys.stdout.flush()
# update the dominant decision for all robot
deci_domi = np.argmax(deci_dist, axis=1)
# update the groups
robot_curr = line_robots[0]
groups = [[robot_curr]] # empty the group container
        # traverse the line from one end, grouping consecutive robots that share the same dominant decision
while (robot_key_neighbors[robot_curr][1] != -1):
robot_next = robot_key_neighbors[robot_curr][1]
if (deci_domi[robot_curr] == deci_domi[robot_next]):
groups[-1].append(robot_next)
else:
groups.append([robot_next])
robot_curr = robot_next
# the decisions for the groups
group_deci = [deci_domi[groups[i][0]] for i in range(len(groups))]
# update group sizes for robots
for group_temp in groups:
group_temp_size = len(group_temp)
for i in group_temp:
robot_group_sizes[i] = group_temp_size
# update colors for the decisions
if not color_initialized:
color_initialized = True
color_set = range(color_quantity)
deci_set = set(group_deci)
for deci in deci_set:
if len(color_set) == 0:
color_set = range(color_quantity)
chosen_color = np.random.choice(color_set)
color_set.remove(chosen_color)
deci_colors[deci] = chosen_color
color_assigns[chosen_color] = color_assigns[chosen_color] + 1
else:
# remove the color for a decision, if it's no longer the decision of any group
deci_set = set(group_deci)
for deci_temp in range(shape_quantity):
color_temp = deci_colors[deci_temp] # corresponding color for deci_temp
if (color_temp != -1 and deci_temp not in deci_set):
color_assigns[color_temp] = color_assigns[color_temp] - 1
deci_colors[deci_temp] = -1
# assign color for a new decision
color_set = []
for i in range(len(groups)):
if deci_colors[group_deci[i]] == -1:
if len(color_set) == 0:
# construct a new color set
color_assigns_min = min(color_assigns)
for color_temp in range(color_quantity):
if color_assigns[color_temp] == color_assigns_min:
color_set.append(color_temp)
# if here, the color set is good to go
chosen_color = np.random.choice(color_set)
color_set.remove(chosen_color)
deci_colors[group_deci[i]] = chosen_color
color_assigns[chosen_color] = color_assigns[chosen_color] + 1
# update the colors for the groups and robots
group_colors = []
for i in range(len(groups)):
color_temp = deci_colors[group_deci[i]]
group_colors.append(color_temp)
for j in groups[i]:
robot_colors[j] = color_temp
# decision distribution evolution
converged_all = True
deci_dist_t = np.copy(deci_dist) # deep copy the 'deci_dist'
for i in range(swarm_size):
if robot_key_neighbors[i][0] == -1 or robot_key_neighbors[i][1] == -1:
# for robots on the two ends
i_next = -1
if robot_key_neighbors[i][0] == -1:
i_next = robot_key_neighbors[i][1]
elif robot_key_neighbors[i][1] == -1:
i_next = robot_key_neighbors[i][0]
if deci_domi[i] == deci_domi[i_next]: # locally converged
# step 1: take equal weight average
deci_dist[i] = deci_dist_t[i] + deci_dist_t[i_next]
dist_sum = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i] / dist_sum
# step 2: increase the unipolarity by applying the linear multiplier
dist_diff = np.linalg.norm(deci_dist_t[i]-deci_dist_t[i_next], 1)
if dist_diff < dist_diff_thres:
dist_diff_ratio[i] = dist_diff/dist_diff_thres # for debugging
small_end = 1.0/shape_quantity * np.power(dist_diff/dist_diff_thres,
dist_diff_power)
large_end = 2.0/shape_quantity - small_end
# sort the magnitude of processed distribution
dist_t = np.copy(deci_dist[i]) # temporary distribution
sort_index = range(shape_quantity)
for j in range(shape_quantity-1): # bubble sort, ascending order
for k in range(shape_quantity-1-j):
if dist_t[k] > dist_t[k+1]:
# exchange values in 'dist_t'
temp = dist_t[k]
dist_t[k] = dist_t[k+1]
dist_t[k+1] = temp
# exchange values in 'sort_index'
temp = sort_index[k]
sort_index[k] = sort_index[k+1]
sort_index[k+1] = temp
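                        # (for reference, this bubble sort is equivalent to
                        #  sort_index = list(np.argsort(deci_dist[i])))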
# applying the linear multiplier
dist_sum = 0
for j in range(shape_quantity):
multiplier = (small_end +
float(j)/(shape_quantity-1) * (large_end-small_end))
deci_dist[i,sort_index[j]] = deci_dist[i,sort_index[j]] * multiplier
dist_sum = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i] / dist_sum
else:
dist_diff_ratio[i] = 1.0 # for debugging, ratio overflowed
else: # not converged on the ends
if converged_all: converged_all = False
dist_diff_ratio[i] = -1.0 # indicating linear multiplier was not used
# take unequal weight in the averaging process based on group property
deci_dist[i] = (deci_dist_t[i] +
robot_group_sizes[i_next] * deci_dist_t[i_next])
dist_sum = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i] / dist_sum
else:
i_l = robot_key_neighbors[i][0] # index of neighbor on the left
i_r = robot_key_neighbors[i][1] # index of neighbor on the right
# deciding if two neighbors have converged ideas with host robot
converged_l = False
if (deci_domi[i_l] == deci_domi[i]): converged_l = True
converged_r = False
if (deci_domi[i_r] == deci_domi[i]): converged_r = True
# weighted averaging depending on group property
if converged_l and converged_r: # all three robots are locally converged
# step 1: take equal weight average on all three distributions
deci_dist[i] = deci_dist_t[i_l] + deci_dist_t[i] + deci_dist_t[i_r]
dist_sum = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i] / dist_sum
# step 2: increase the unipolarity by applying the linear multiplier
dist_diff = [np.linalg.norm(deci_dist_t[i_l]-deci_dist_t[i], 1),
np.linalg.norm(deci_dist_t[i_r]-deci_dist_t[i], 1),
np.linalg.norm(deci_dist_t[i_l]-deci_dist_t[i_r], 1)]
dist_diff_max = max(dist_diff) # maximum distribution difference
if dist_diff_max < dist_diff_thres:
dist_diff_ratio[i] = dist_diff_max/dist_diff_thres # for debugging
small_end = 1.0/shape_quantity * np.power(dist_diff_max/dist_diff_thres,
dist_diff_power)
large_end = 2.0/shape_quantity - small_end
# sort the magnitude of processed distribution
dist_t = np.copy(deci_dist[i]) # temporary distribution
sort_index = range(shape_quantity)
for j in range(shape_quantity-1): # bubble sort, ascending order
for k in range(shape_quantity-1-j):
if dist_t[k] > dist_t[k+1]:
# exchange values in 'dist_t'
temp = dist_t[k]
dist_t[k] = dist_t[k+1]
dist_t[k+1] = temp
# exchange values in 'sort_index'
temp = sort_index[k]
sort_index[k] = sort_index[k+1]
sort_index[k+1] = temp
# applying the linear multiplier
dist_sum = 0
for j in range(shape_quantity):
multiplier = (small_end +
float(j)/(shape_quantity-1) * (large_end-small_end))
deci_dist[i,sort_index[j]] = deci_dist[i,sort_index[j]] * multiplier
dist_sum = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i] / dist_sum
else:
dist_diff_ratio[i] = 1.0 # for debugging, ratio overflowed
else: # at least one side has not converged yet
if converged_all: converged_all = False
dist_diff_ratio[i] = -1.0 # indicating linear multiplier was not used
# take unequal weight in the averaging process based on group property
deci_dist[i] = (robot_group_sizes[i_l] * deci_dist_t[i_l] +
deci_dist_t[i] +
robot_group_sizes[i_r] * deci_dist_t[i_r])
dist_sum = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i] / dist_sum
# update the graphics
screen.fill(color_white)
# draw the connections first
for i in range(swarm_size):
i_next = robot_key_neighbors[i][1]
if i_next == -1: continue
if (deci_domi[i] == deci_domi[i_next]):
pygame.draw.line(screen, distinct_color_set[robot_colors[i]],
disp_poses[i], disp_poses[i_next], conn_width_thick_consensus)
else:
pygame.draw.line(screen, color_black, disp_poses[i], disp_poses[i_next],
conn_width_thin_consensus)
# draw the robots
for i in range(swarm_size):
pygame.draw.circle(screen, distinct_color_set[robot_colors[i]], disp_poses[i],
robot_size_consensus, 0)
pygame.display.update()
# check exit condition for simulations 2
if converged_all:
shape_decision = deci_domi[0]
print("") # move cursor to the new line
print("converged to decision {}".format(shape_decision))
print("simulation 2 is finished")
if manual_mode: raw_input("<Press Enter to continue>")
print("") # empty line
break
########### simulation 3: curve reshape ###########
print("##### simulation 3: curve reshape #####")
print("chosen shape {}: {}".format(shape_decision, shape_catalog[shape_decision]))
# # force the choice of shape, for video recording
# if len(force_shape_set) == 0: force_shape_set = range(shape_quantity)
# forced_choice = np.random.choice(force_shape_set)
# force_shape_set.remove(forced_choice)
# shape_decision = forced_choice
# print("force shape to {}: {} (for video recording)".format(shape_decision,
# shape_catalog[shape_decision]))
# read the loop shape from file
filename = str(swarm_size) + "-" + shape_catalog[shape_decision]
filepath = os.path.join(os.getcwd(), curve_folder, filename)
if os.path.isfile(filepath):
with open(filepath, 'r') as f:
target_poses = pickle.load(f)
else:
print("fail to locate shape file: {}".format(filepath))
sys.exit()
# calculate the interior angles for the robots
inter_target = np.zeros(swarm_size)
for i in range(swarm_size): # i on the target loop
if i == 0 or i == (swarm_size-1): # robots on the ends
inter_target[i] = 0
continue
vect_l = target_poses[i-1] - target_poses[i]
vect_r = target_poses[i+1] - target_poses[i]
dist_l = np.linalg.norm(vect_l)
dist_r = np.linalg.norm(vect_r)
inter_target[i] = math.acos(np.around(
np.dot(vect_l, vect_r) / (dist_l * dist_r) ,6))
if np.cross(vect_r, vect_l) < 0:
inter_target[i] = 2*math.pi - inter_target[i]
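    # Example: three collinear target points give an interior angle of pi; when the
    # cross product test detects a reflex angle, the acos result is replaced by its
    # 2*pi complement.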
# draw the network for the first time
screen.fill(color_white)
for i in range(swarm_size):
pygame.draw.circle(screen, color_black, disp_poses[i], robot_size, 0)
if robot_key_neighbors[i][1] != -1:
pygame.draw.line(screen, color_black, disp_poses[i],
disp_poses[robot_key_neighbors[i][1]], conn_width)
pygame.display.update()
# formation control variables
inter_err_thres = 0.1
inter_target_line = math.pi # interior angle for straight line
formation_stretched = False # whether the stretching process is done
formation_stretched_err = inter_target_line*0.1
# spring constants in SMA
linear_const = 1.0
bend_const = 0.8
disp_coef = 0.05
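    # Summary of the feedback law in the loop below: each interior robot feels linear
    # spring forces toward desired_space from both key neighbors, plus a bending force
    # along the normal of the chord joining its two key neighbors, proportional to the
    # interior-angle error (first toward a straight line while stretching, then toward
    # the target curve).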
# the loop for simulation 3
    sim_halted = False
time_last = pygame.time.get_ticks()
time_now = time_last
frame_period = 200
sim_freq_control = True
print("line is stretching ...")
iter_count = 0
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: # close window button is clicked
print("program exit in simulation 3")
sys.exit() # exit the entire program
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
                    sim_halted = not sim_halted # reverse the pause flag
        if sim_halted: continue
# simulation frequency control
if sim_freq_control:
time_now = pygame.time.get_ticks()
if (time_now - time_last) > frame_period:
time_last = time_now
else:
continue
# increase iteration count
iter_count = iter_count + 1
# update the physics
robot_poses_t = np.copy(robot_poses) # as old poses
inter_curr = np.zeros(swarm_size)
for i in range(swarm_size):
if robot_key_neighbors[i][0] == -1 or robot_key_neighbors[i][1] == -1:
i_next = -1
if robot_key_neighbors[i][0] == -1:
i_next = robot_key_neighbors[i][1]
elif robot_key_neighbors[i][1] == -1:
i_next = robot_key_neighbors[i][0]
vect_next = robot_poses_t[i_next]-robot_poses_t[i]
dist_next = np.linalg.norm(vect_next)
u_vect_next = vect_next / dist_next
fb_vect = (dist_next - desired_space) * linear_const * u_vect_next
robot_poses[i] = robot_poses_t[i] + disp_coef * fb_vect
else:
i_l = robot_key_neighbors[i][0]
i_r = robot_key_neighbors[i][1]
# vectors
vect_l = robot_poses_t[i_l] - robot_poses_t[i]
vect_r = robot_poses_t[i_r] - robot_poses_t[i]
vect_lr = robot_poses_t[i_r] - robot_poses_t[i_l]
# distances
dist_l = np.linalg.norm(vect_l)
dist_r = np.linalg.norm(vect_r)
dist_lr = np.linalg.norm(vect_lr)
# unit vectors
u_vect_l = vect_l / dist_l
u_vect_r = vect_r / dist_r
u_vect_in = np.array([-vect_lr[1], vect_lr[0]]) / dist_lr
# calculate current interior angle
inter_curr[i] = math.acos(np.around(
np.dot(vect_l, vect_r) / (dist_l * dist_r), 6))
if np.cross(vect_r, vect_l) < 0:
inter_curr[i] = 2*math.pi - inter_curr[i]
# feedback vector for the SMA algorithm
fb_vect = np.zeros(2)
fb_vect = fb_vect + (dist_l - desired_space) * linear_const * u_vect_l
fb_vect = fb_vect + (dist_r - desired_space) * linear_const * u_vect_r
if formation_stretched:
fb_vect = (fb_vect +
(inter_target[i] - inter_curr[i]) * bend_const * u_vect_in)
else:
fb_vect = (fb_vect +
(inter_target_line - inter_curr[i]) * bend_const * u_vect_in)
# update one step of position
robot_poses[i] = robot_poses_t[i] + disp_coef * fb_vect
# check if the stretching process is done
if not formation_stretched:
formation_stretched = True
for i in range(swarm_size):
if robot_key_neighbors[i][0] != -1 and robot_key_neighbors[i][1] != -1:
if abs(inter_curr[i] - inter_target_line) > formation_stretched_err:
formation_stretched = False
break
if formation_stretched:
# stretching finished
print("line is reshaping to " + shape_catalog[shape_decision] + " ...")
# update the graphics
disp_poses_update()
screen.fill(color_white)
# draw the connections first
for i in range(swarm_size):
i_next = robot_key_neighbors[i][1]
if i_next != -1:
pygame.draw.line(screen, color_black, disp_poses[i], disp_poses[i_next],
conn_width)
# draw the robots
for i in range(swarm_size):
pygame.draw.circle(screen, color_black, disp_poses[i], robot_size, 0)
pygame.display.update()
# calculate the maximum error of interior angle
inter_err_max = 0
for i in range(swarm_size):
err_curr = abs(inter_curr[i] - inter_target[i])
if err_curr > inter_err_max: inter_err_max = err_curr
# check exit condition of simulation 3
if converged_all and inter_err_max < inter_err_thres:
print("simulation 3 is finished")
        if manual_mode: input("<Press Enter to continue>")
print("") # empty line
break
|
{"hexsha": "be0ae99da0ea54f21092034b028e6b9dffd4751c", "size": 60234, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo_3.py", "max_stars_repo_name": "yangliu28/swarm_formation_sim", "max_stars_repo_head_hexsha": "5c6dd025667338103500c35b2ecee7aceec886a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2018-06-22T17:57:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T11:19:28.000Z", "max_issues_repo_path": "demo_3.py", "max_issues_repo_name": "yangliu28/swarm_formation_sim", "max_issues_repo_head_hexsha": "5c6dd025667338103500c35b2ecee7aceec886a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo_3.py", "max_forks_repo_name": "yangliu28/swarm_formation_sim", "max_forks_repo_head_hexsha": "5c6dd025667338103500c35b2ecee7aceec886a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-06-20T09:18:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T19:39:35.000Z", "avg_line_length": 47.9952191235, "max_line_length": 101, "alphanum_fraction": 0.5786266892, "include": true, "reason": "import numpy", "num_tokens": 14204}
|
\documentclass[12pt,titlepage]{article}
\setlength{\oddsidemargin}{0in}
\setlength{\evensidemargin}{0in}
\setlength{\textwidth}{6.5in}
%
\setlength{\textheight}{9in}
\setlength{\topmargin}{0in}
\setlength{\headsep}{0in}
\setlength{\topskip}{0in}
\setlength{\headheight}{0in}
\usepackage{graphicx}
\usepackage{times}
\usepackage[plainpages=false, colorlinks=true, anchorcolor=blue, linkcolor=blue, citecolor=blue, bookmarks=false, urlcolor=blue]
{hyperref}
\usepackage[square,comma,authoryear]{natbib}
\title{DOE Office of Science INCITE Project:\\
{\it Extreme-scale Simulation of Supernovae and Magnetars from Realistic Progenitors}\\
2019 Q3 Report}
\author{Principal Investigator:\\Sean M. Couch\\
Michigan State University \vspace{0.1in}\\
Co-Investigators: \\
Andrew Christlieb (Michigan State University) \\
Evan O'Connor (Stockholm University)\\
Kuo-Chuan Pan (National Tsing Hua University) \\
Luke Roberts (Michigan State University) \\
MacKenzie Warren (Michigan State University) \\
}
\date{October 1, 2019}
\begin{document}
\maketitle
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Project Usage}
%%%%%%%%%%%%%%%%%%%
\begin{figure}
% \begin{tabular}{cc}
\includegraphics[width=3.25in]{on_track_graph_mira.png}
\includegraphics[width=3.25in]{categorized_hours_graph_mira.png} \\
% \includegraphics[width=3.25in]{on_track_graph}
% \includegraphics[width=3.25in]{categorized_hours_graph}
% \end{tabular}
\caption{Allocation usage.}
\label{fig:usage}
\end{figure}
So far in 2019 we have expended 129.8M core-hours on Mira out of our total 2019 allocation of 150M core-hours (86.5\% usage).
This is just slightly ahead of the linear usage curve.
Our burn rate in Q3 was substantially larger than for Q2.
We are now running two of our primary simulation milestones in the Capability queue and will start a second Capability-scale simulation in the next week or two.
In Figure \ref{fig:usage} we show our current usage and categorized hours on Mira.
After completing code tuning, and overcoming an issue with constructing a new set of initial conditions, we began production simulations on Theta in Q3.
In just a couple weeks of production, we have expended 2.2M core-hours on Theta (12.5\% of total allocation) and the simulation has already grown to the Capability scale.
Based on our experience last year running on Theta, we do not anticipate any difficulty in expending our entire allocation before the end of the calendar year.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Report on Project Milestones}
%%%%%%%%%%%%%%%%%%%
Our milestones for Year 2, and corresponding progress, were:
\begin{enumerate}
\item Long time simulations of MHD CCSNe - These simulations have been restarted from simulations carried over from 2018 and are running in the Capability queue. Substantial progress has been made on these simulations in Q3 and they are nearing completion.
\item High-resolution simulation of MHD dynamos in the proto-neutron star - So far in Q1/Q2 we have analyzed simulations from 2018 that will serve as the initial conditions for this high-resolution simulation. We have started this simulation in Q3. Given the extreme resolution of this simulation, it will run in the Capability queue from the outset.
\item MHD simulation of CCSN progenitors - These simulations will be started in Q2. We are tuning our progenitor application to make better use of OpenMP threading.
\item CCSN simulation with 3D progenitors - This simulation is now running at Capability scale on Theta.
\item Implement microphysics from TEAMS SciDAC collaboration and neutrino-electron scattering (NES) - the TEAMS microphysics package is not yet ready for production simulations. During Q1 we finished an implementation of NES and are now using it in production on Theta.
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Project Productivity}
%%%%%%%%%%%%%%%%%%%
\subsection{Primary}
\noindent {\bf Publications}
\begin{itemize}
\item \href{https://ui.adsabs.harvard.edu/#abs/2019arXiv190201340C/abstract}{``Simulating Turbulence-aided Neutrino-driven Core-collapse Supernova Explosions in One Dimension''}, Couch, S. M., Warren, M. L., O'Connor, E. P. 2019, {\itshape arXiv e-prints}, arXiv:1902.01340
\item \href{https://ui.adsabs.harvard.edu/#abs/2019arXiv190109055P/abstract}{``Features of Accretion Phase Gravitational Wave Emission from Two-dimensional Rotating Core-Collapse Supernovae''}, Pajkos, M. A., Couch, S. M., Pan, K., O'Connor, E. P. 2019, {\itshape Astrophysical Journal}, 878, 13
\end{itemize}
\noindent {\bf Presentations}
\begin{itemize}
\item ``Gravitational Waves from Core-collapse Supernovae,'' S.M. Couch, LIGO SN Group Seminar, March 2019
\item ``Multidimensional Supernova Progenitors,'' S.M. Couch, SciDAC TEAMS Collaboration Meeting, May 2019
\item ``Predicting Supernova Neutrino Signals,'' M. Warren, Supernova Early Warning System (SNEWS) 2.0 Workshop, June 2019
\item ``High-order MHD for Supernovae,'' S.M. Couch, ASTRONUM 2019, July 2019
\end{itemize}
% \subsubsection{Secondary}
% \begin{itemize}
% \item Co-I and postdoc Kuo-Chuan Pan started a tenure-track faculty position at National Tsing Hua University in Taiwan.
% \item Co-I and postdoc MacKenzie Warren won a prestigious NSF Postdoctoral Fellowship.
% \end{itemize}
\section{Center Feedback}
Our catalyst, Adrian Pope, has been extremely helpful.
He is now helping us tune our code for Theta.
\section{Code Description and Characterization}
\texttt{FLASH} is a highly capable, fully modular, extensible,
community code that is widely used in astrophysics, cosmology, fluid
dynamics, plasma physics, and other fields. The capabilities of
the FLASH code include adaptive mesh refinement (AMR), several
self-gravity solvers, an advection-diffusion-reaction (ADR) flame
model, an accurate and detailed treatment of nuclear burning, and a
sophisticated two-moment neutrino transport scheme based on an
explicit hyperbolic solver. The neutrino interactions are included
through the open-source neutrino interaction library
\texttt{NuLib}. We have enhanced the
performance of the two-moment neutrino transport scheme significantly
as well as upgraded the transport to now include full velocity and
gravitational red-shift dependence in the evolution equations.
\texttt{FLASH} is written in modern Fortran, with some utility
functions written in C, and a build system written in Python. It
requires MPI library support, and either HDF5 or P-NetCDF for I/O.
Additional mathematical software, such as \texttt{Hypre}, may be
required to configure \texttt{FLASH} for particular simulations.
Algorithm classes used within \texttt{FLASH} include Sparse Linear
Algebra solvers, FFT, active and passive particles, structured grids,
and AMR.
\end{document}
|
{"hexsha": "2b1f8781b61c0fd34e9a5ee757a0b87a19ac002c", "size": 6859, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "reports/2019Q3/Couch_Q3_CY2019.tex", "max_stars_repo_name": "smcouch/INCITE_2018", "max_stars_repo_head_hexsha": "dd87ad70e934d4c274ab90e37b90b3c7dc2f61e7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reports/2019Q3/Couch_Q3_CY2019.tex", "max_issues_repo_name": "smcouch/INCITE_2018", "max_issues_repo_head_hexsha": "dd87ad70e934d4c274ab90e37b90b3c7dc2f61e7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reports/2019Q3/Couch_Q3_CY2019.tex", "max_forks_repo_name": "smcouch/INCITE_2018", "max_forks_repo_head_hexsha": "dd87ad70e934d4c274ab90e37b90b3c7dc2f61e7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.3445945946, "max_line_length": 355, "alphanum_fraction": 0.7614812655, "num_tokens": 1734}
|
import Mathlib.Data.Int.Basic
import Mathlib.Data.Nat.Prime
import Mathlib.Tactic.LibrarySearch
import Mathlib.Tactic.Linarith
import Aesop
import Mathlib.Data.Set.Basic
/-
Lean is a language that we will be using in CS22 this year.
If you're in this class, you've most likely used a programming language before.
Lean is a programming language too. But we'll be using it for a different reason:
Lean lets us state *propositions*, write *proofs* of these propositions, and *check*
automatically that these proofs are correct.
Some basic Lean syntax:
* Block comments, like this one, are written as /- ... -/.
* Line comments begin with --
* If a file imports other files, this appears at the very top of the file.
You shouldn't change these imports!
**Definition**. A *proposition* is a statement with a truth value.
That is, it is a statement that could be either true or false.
"Lean is a language" is a proposition. "Lean is not a language" is a proposition.
"1 + 1 = 2" is a proposition, as is "1 + 1 = 3".
But "Lean" is not a proposition, nor is "1 + 1", nor is "is Lean a language?".
-/
#check Prop
#check ℤ
/-
Lean uses the shorthand `Prop` for proposition.
The `#check` command asks Lean to tell us "what kind of thing" something is.
(This will be very useful for us!)
Lean tells us that `Prop` is a Type, that is, a "kind of thing" -- not very enlightening!
But what if we try some of our examples from above?
-/
#check 1 + 1 = 2
#check 1 + 1 = 3
#check True
#check False
/-
In normal math, it's common for us to write things like
"let p, q, and r be propositions" or "let x and y be integers".
In Lean, we write:
-/
variable (p q r : Prop)
#check p
#check p ∧ q
#check p ∧ q → r
#check p ∨ q ∨ p ∧ r ∧ ¬ (p ∧ q ∧ r)
/-
A few things to note here.
* In the third `#check` above, if you hover over the output in the infoview,
you can see how this formula is parenthesized!
* Those unicode symbols are input using \ . To write the third line I typed
`p \and q \to r`. But there are lots of variants.
They usually match the LaTeX command.
* `∧`: and, wedge
* `∨`: or, vee
* `¬`: not, neg
* `→`: to, imp, rightarrow
* `↔`: iff
* `ℕ`: N, nat
* `ℤ`: Z, int
* `∀`: all, forall
* `∃`: ex, exist
* `∣` (divides): | (note, you need to type `\|`, this isn't the normal pipe character)
You may have guessed from the list, Lean lets us write first-order propositions
(i.e. with quantifiers). The syntax here looks like:
-/
#check ∀ x : ℕ, ∃ y : ℕ, x < y ∧ 1 = 1
#check ∀ x : ℕ, ∃ y : ℕ, Prime x ∧ x ∣ y
/-
Try it out yourself: write some propositions.
If you want to use things like `Prime`, you can try to guess with
auto-complete: write `#check Pr` and hit ctrl-space.
-/
#check Prime
def f (x : ℕ) : ℕ := x + 2
/-
The real magic of Lean is that we can *prove* these propositions.
For today we'll stick mostly to basic logic.
There is an array of *tactics* which represent individual proof steps.
To write a proof, we state a theorem, and then write a sequence of tactics.
The tactics manipulate the *proof state* by changing our *hypotheses* and *goals*.
-/
theorem my_first_theorem : p ∧ q → q ∧ p := by
intro hpq -- Assume we know `p ∧ q`.
cases' hpq with hp hq -- This means that we know `p` and we know `q`.
apply And.intro -- In order to prove `q ∧ p`, we must prove `q` and then prove `p`.
. exact hq -- We can prove `q`, since we know `q`!
. exact hp -- And we can prove `p`, since we know `p`.
theorem false_context : 1 = 2 → 1 = 2 := by
intro h12
exact h12
/-
Some notes here:
* `intro`, `cases'`, `apply`, `exact` are *tactics*.
* At each line in the proof we have 1 or more *goals*.
* In each goal, there are 0 or more *hypotheses*.
* The distinction here: hypotheses are what we know,
and the goal is what we are trying to show.
* Some tactics take arguments. These can be fresh names (`intro hpq`),
or names of hypotheses (`exact hq`), or names of rules (`apply And.intro`).
* When we applied a tactic that left us with multiple goals,
I tried to solve each one individually, indenting with `.`
Here are some useful tactics:
* `intro`: when our goal is of the form `_ → _`,
`intro h` will move the left hand side into a hypothesis,
like saying "Assume _".
When our goal is `∀ _, _`, `intro x` will create a new variable named `x`.
* `cases'`: note the '. If `h` is a hypothesis proving an `and`,
`cases' h` will split it into its components.
If `h` is a hypothesis proving an `or`, `cases' h` will set up a
proof by cases with two goals.
If `h` is a hypothesis proving an `exists`, `cases' h` will find a witness.
Use the syntax `with` to name the new hypotheses.
In general, `cases'` is something we do to *hypotheses* only,
to "extract" information from them.
* `apply`: uses a rule from the library, or from your context.
If a rule `r` says "to prove `b`, it suffices to prove `a`"
and your goal is to prove `b`, then
`apply r` will change your goal to proving `a`.
* `exact`: when a hypothesis `h` matches the goal exactly,
`exact h` finishes that part of the proof.
* `use`: when the goal is `∃ x, P(x)`,
`use z` will change the goal to `P(z)`.
Essentially, this is providing a *witness* to prove the existential.
* `contradiction`: if we have hypotheses `p` and `¬ p`, we can prove anything!
* `linarith`: does "easy" arithmetic to prove inequalities and equalities.
Try out a few examples. Some useful rules from the library:
-/
#check Or.inl -- to prove `a ∨ b`, it suffices to prove `a`.
#check Or.inr -- to prove `a ∨ b`, it suffices to prove `b`.
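/- A worked example (an added sketch, not one of the exercises below):
   to prove a disjunction, commit to one side with `Or.inl` or `Or.inr`. -/
example : p → p ∨ q := by
  intro hp
  apply Or.inl
  exact hp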
theorem flip_ors : p ∨ q → q ∨ p := by
sorry
theorem from_above : ∀ x : ℕ, ∃ y : ℕ, x < y := by
sorry
theorem other_order : ∃ x : ℕ, ∀ y : ℕ, x ≤ y := by
sorry
theorem two_imp
(h1 : p → q) (h2 : q → ¬ r) : r → ¬ p := by
sorry
/- together: -/
theorem quantifier_switch (P : ℕ → Prop) :
(¬ ∃ x, P x) → (∀ x, ¬ P x) := by
sorry
/- Some other cool things we can do: induction!
Note: `n ∣ m` is defined to be `∃ k, n * k = m`. -/
lemma div_by_5 : ∀ n : ℕ, 5 ∣ 11^n - 6 := by
intro n
induction' n with k ih
. simp
. cases' ih with w hw
have : 11 ^ k = 5*w + 6
. sorry
simp [Nat.pow_succ, this, add_mul]
use w*11
ring
section
variable (P : ℕ → Prop)
example : ∀ n : ℕ, P n := by
  intro n
  induction' n with k ih
  all_goals sorry -- placeholder so the demo skeleton compiles
example : ∀ n : ℕ, P n := by
  intro n
  induction' n using Nat.strong_induction_on with k ih
  all_goals sorry -- placeholder so the demo skeleton compiles
example : ∀ n : ℕ, P n := by
  intro n
  induction' n using Nat.two_step_induction with k ih1 ih2
  all_goals sorry -- placeholder so the demo skeleton compiles
end
/- sets! -/
theorem sets_eq (s t u : Set ℕ) : (s ∪ t) ∩ u = (s ∩ u) ∪ (t ∩ u) := by
ext x
  constructor
  all_goals sorry -- placeholder so the demo skeleton compiles
#check ℕ
|
{"author": "robertylewis", "repo": "leanclass", "sha": "f609276675431388632d46619581bdb7c557be50", "save_path": "github-repos/lean/robertylewis-leanclass", "path": "github-repos/lean/robertylewis-leanclass/leanclass-f609276675431388632d46619581bdb7c557be50/BrownCs22/Demos/01-intro.lean"}
|
import easycorrector.ngram_model.load_model as load_model
import numpy as np
import easycorrector.common.common as common
model_name = "ngram_model"
def correct(text):
lm = load_model.get_char_ngram_lm_model()
maybe_errors = []
if not text.strip():
return maybe_errors
ngram_avg_scores = []
for n in [2, 3]:
scores = []
for i in range(len(text) - n + 1):
word = text[i:i + n]
score = lm.score(" ".join(word), bos=False, eos=False)
scores.append(score)
for _ in range(n - 1):
scores.insert(0, scores[0])
scores.append(scores[-1])
avg_scores = [sum(scores[i:i + n]) / len(scores[i:i + n]) for i in range(len(text))]
ngram_avg_scores.append(avg_scores)
char_scores = list(np.average(np.array(ngram_avg_scores), axis=0))
result = _compute_errors_and_correct(text, char_scores, lm)
return result
def _compute_errors_and_correct(text, char_scores, lm, ratio=0.6745, threshold=3):
"""
取疑似错字的位置,通过平均绝对离差(MAD)
:param scores: np.array
:param ratio: 正态分布表参数
:param threshold: 阈值越小,得到疑似错别字越多
:return: 全部疑似错误字的index: list
"""
result = []
scores = np.array(char_scores)
if len(scores.shape) == 1:
scores = scores[:, None]
median = np.median(scores, axis=0) # get median of all scores
margin_median = np.abs(scores - median).flatten() # deviation from the median
    # median absolute deviation (MAD)
med_abs_deviation = np.median(margin_median)
if med_abs_deviation == 0:
return result
y_score = ratio * margin_median / med_abs_deviation
    # flatten the scores
scores = scores.flatten()
maybe_error_indices = np.where((y_score > threshold) & (scores < median))
    # indices of all suspected wrong characters
    possible = list(maybe_error_indices[0])
    # generate correction suggestions
result = []
for idx in possible:
replace_char = get_replace_char(text, idx, lm)
        if replace_char != text[idx]: # append one correction item
result.append(common.CorrectItem(start=idx, end=idx + 1, replace=replace_char))
text = text[:idx] + replace_char + text[idx + 1:]
return result
def get_replace_char(text, idx, lm):
cur_char = text[idx]
candidates = common.get_same_pinyin_or_same_stroke(cur_char)
if cur_char not in candidates:
candidates.add(cur_char)
scores = {i: lm.score(" ".join(text[:idx] + i + text[idx + 1:])) for i in candidates}
sorted_scores = sorted(scores.items(), key=lambda d: d[1], reverse=True)
return sorted_scores[0][0]
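if __name__ == "__main__":
    # Illustrative sketch (added; the scores below are made up, not model
    # output) of the MAD rule used in _compute_errors_and_correct. Index 3
    # is far below the median, so it is the only flagged position.
    demo = np.array([-3.1, -3.0, -3.2, -9.5, -3.1])
    med = np.median(demo)
    mad = np.median(np.abs(demo - med))
    y = 0.6745 * np.abs(demo - med) / mad
    print(np.where((y > 3) & (demo < med))[0])  # -> [3]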
|
{"hexsha": "3f086fdf2c8d8c08eb7e43d7442b1c010e69d795", "size": 2535, "ext": "py", "lang": "Python", "max_stars_repo_path": "easycorrector/ngram_model/ngram_correct.py", "max_stars_repo_name": "milter001/text_corrector", "max_stars_repo_head_hexsha": "ca28fe0ebc008c1c9b1c640eacb78e876e9a3e84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-07-19T03:14:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T09:21:14.000Z", "max_issues_repo_path": "easycorrector/ngram_model/ngram_correct.py", "max_issues_repo_name": "milter001/text_corrector", "max_issues_repo_head_hexsha": "ca28fe0ebc008c1c9b1c640eacb78e876e9a3e84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "easycorrector/ngram_model/ngram_correct.py", "max_forks_repo_name": "milter001/text_corrector", "max_forks_repo_head_hexsha": "ca28fe0ebc008c1c9b1c640eacb78e876e9a3e84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-19T01:39:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-22T07:31:00.000Z", "avg_line_length": 32.9220779221, "max_line_length": 92, "alphanum_fraction": 0.6437869822, "include": true, "reason": "import numpy", "num_tokens": 736}
|
import os
import difflib
from textblob import TextBlob
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from src.helper.collection import handle_error, light_error_handle, get_response
from src.helper.constant import ANSWER, QUESTION, TAG
class MLHandler(object):
@staticmethod
def process(text):
try:
text_blob_text = TextBlob(text)
            text_blob_text = text_blob_text.correct()  # correct() returns a new TextBlob
#print text_blob_text.polarity, text_blob_text.subjectivity
question_path = os.path.join(os.getcwd(), 'questions.csv')
answer_path = os.path.join(os.getcwd(), 'answers.csv')
tag_path = os.path.join(os.getcwd(), 'tags.csv')
#map correct question and get its answer
q_dataset = pd.read_csv(question_path)
q_dataset = q_dataset[QUESTION]
title_array = np.array(q_dataset['Title'])
id_array = np.array(q_dataset['Id'])
answer = ''
result_array = difflib.get_close_matches(str(text_blob_text), list(title_array), 10, 0.5)
            print(result_array)
if len(result_array) > 0:
#getting index
a_dataset = pd.read_csv(answer_path)
a_dataset = a_dataset[ANSWER]
body_array = np.array(a_dataset['Body'])
parent_id_array = np.array(a_dataset['ParentId'])
for _item in result_array:
#print _item
_index = np.nonzero(title_array == _item)[0][0]
#print _index
_id = id_array[_index]
#print _id
if _id is not None:
__index = np.nonzero(parent_id_array == _id)[0][0]
#print __index
if __index is not None:
__body = body_array[__index]
#print __body
answer = answer + 'Question:::::\n\n' + str(_item) + '\n------\n' + 'Answer::::\n' + str(__body)
            if answer:  # the original `is not ''` compared identity, not equality
                soup = BeautifulSoup(answer, 'lxml')
                answer = soup.get_text()
                answer = 'Results\n\n\n' + answer
            else:
                answer = "Didn't find anything. Try something else."
#print result_array
#print q_dataset.head(2)
#print q_dataset.head(5)
#print '888888888888888888888888'
#print q_dataset.describe()
#print q_dataset.groupby('author').size()
res_json = get_response({'text': answer})
return res_json
except Exception as exception:
return light_error_handle(exception)
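if __name__ == "__main__":
    # Standalone sketch (added; the titles are made up) of the fuzzy matching
    # used in process(): difflib.get_close_matches returns up to n candidates
    # whose SequenceMatcher ratio against the query meets the cutoff.
    titles = ["how to parse json in python", "what is a lambda function"]
    print(difflib.get_close_matches("parse json in python", titles, 10, 0.5))
    # -> ['how to parse json in python']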
|
{"hexsha": "08807f646e803dcbfb95945906db2526e48e4794", "size": 2273, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python-Server/src/module/ml_handler.py", "max_stars_repo_name": "MCD-50/OpenEd", "max_stars_repo_head_hexsha": "e6cf6d05b3c002de21bdd69b3adbe9c6602d3cc3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python-Server/src/module/ml_handler.py", "max_issues_repo_name": "MCD-50/OpenEd", "max_issues_repo_head_hexsha": "e6cf6d05b3c002de21bdd69b3adbe9c6602d3cc3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python-Server/src/module/ml_handler.py", "max_forks_repo_name": "MCD-50/OpenEd", "max_forks_repo_head_hexsha": "e6cf6d05b3c002de21bdd69b3adbe9c6602d3cc3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7195121951, "max_line_length": 103, "alphanum_fraction": 0.6911570612, "include": true, "reason": "import numpy", "num_tokens": 598}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
CSV File for NAB
Usage:
nabcsv.py [--cmd cmdtype] --threshold threshold --input inputfile --output outputfile
cmdtype ; score by default, to convert nab result file
; flag, to convert flaginfo.csv
; prune, to remove datapoints not in green flag status
"""
import sys,os,re
import datetime
import random
import numpy as np
import logging
from optparse import OptionParser
logger = logging.getLogger(__name__)
def prune(csvfile, flagfile, outfile):
"""
csvfile ; timestamp
flagfile; np array
"""
flaginfo = np.loadtxt(flagfile)
inf = open(csvfile,'r')
outf = open(outfile, 'w')
lineid = 0
prunecnt = 0
curend = flaginfo[0,0] + flaginfo[0,1]
curflag = flaginfo[0,2]
curidx = 0
rowcnt, colcnt = flaginfo.shape
for line in inf:
if lineid == 0:
#head
outf.write('%s'%(line))
lineid += 1
continue
items = line.strip().split(',')
tmall = (items[0].split())[1].split('.')
tms = [int(x) for x in tmall[0].split(':')]
tmms = (tms[0] * 3600 + tms[1] * 60 + tms[2])*1000. + int(tmall[1])
if tmms >= curend:
#get to the end, check flag
curidx += 1
if curidx < rowcnt:
curend = flaginfo[curidx,0] + flaginfo[curidx,1]
curflag = flaginfo[curidx,2]
else:
curflag = 0
if (curflag == 0):
outf.write('%s'%(line))
else:
prunecnt += 1
lineid += 1
logger.info('%d lines processed, %d pruned.', lineid, prunecnt)
inf.close()
outf.close()
def convert_flag(csvfile, outfile):
"""
input: (flag result csv file)
timestamp(10 thousands), len, flag, ....
ret:
ms, len, flag
"""
inf = open(csvfile,'r')
outf = open(outfile, 'w')
lineid = 0
for line in inf:
items = line.strip().split('\t')
tmall = items[0].split('.')
tms = [int(x) for x in tmall[0].split(':')]
        tmms = (tms[0] * 3600 + tms[1] * 60 + tms[2])*1000. + int(tmall[1])//10
        length = int(items[1])//10
outf.write('%s %s %s\n'%(tmms, length, 0 if items[2]=='G' else 1))
lineid += 1
logger.info('%d lines converted.', lineid)
inf.close()
outf.close()
def _timestr(timestamp, scale=10000):
s, ms = divmod(timestamp, scale)
hours, remainder = divmod(s, 3600)
minutes, seconds = divmod(remainder, 60)
timestr = '{:02}:{:02}:{:02}.{:03}'.format(int(hours), int(minutes), int(seconds), int(ms))
return timestr
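# e.g. _timestr(12345678, scale=1000) -> '03:25:45.678'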
def convert_confuse(csvfile, outfile):
"""
add random timestamp
input: (flag result csv file)
timestamp(10 thousands), len, flag, ....
"""
inf = open(csvfile,'r')
outf = open(outfile, 'w')
lineid = 0
timestamp = 0
for line in inf:
if lineid == 0:
#head
outf.write('%s'%(line))
lineid += 1
continue
items = line.strip().split(',')
#tmall = (items[0].split())[1].split('.')
#tms = [int(x) for x in tmall[0].split(':')]
#tmms = (tms[0] * 3600 + tms[1] * 60 + tms[2])*1000. + int(tmall[1])
#get random timestamp
timestamp += random.randint(1,100)
outf.write('2000-01-01 %s,%s\n'%(_timestr(timestamp, scale=1000), items[1]))
lineid += 1
logger.info('%d lines converted.', lineid)
inf.close()
outf.close()
def convert_score(csvfile, outfile, threshold):
"""
input: (NAB result csv file)
timestamp, value, anomly_score, ....
ret:
ms, value, 1/0
"""
inf = open(csvfile,'r')
outf = open(outfile, 'w')
lineid = 0
anomalycnt = 0
for line in inf:
#skip header
if lineid == 0 :
lineid += 1
header = line.strip().split(',')
#outf.write('%s,%s,%s\n'%(header[0],header[1],'anomaly'))
continue
items = line.strip().split(',')
tmall = (items[0].split())[1].split('.')
tms = [int(x) for x in tmall[0].split(':')]
tmms = (tms[0] * 3600 + tms[1] * 60 + tms[2])*1000. + float(tmall[1])
score = 1 if float(items[2]) > threshold else 0
outf.write('%s %s %s\n'%(tmms, items[1], score))
lineid += 1
if score == 1:
anomalycnt += 1
logger.info('%d lines converted, %d anomaly records found.', lineid, anomalycnt)
inf.close()
outf.close()
if __name__=="__main__":
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.DEBUG)
logger.info("running %s" % ' '.join(sys.argv))
# cmd argument parser
usage = 'nabcsv.py --threshold threshold --input inputfile --output outputfile'
parser = OptionParser(usage)
parser.add_option("--input", dest="inputfile")
parser.add_option("--output", dest="outputfile")
parser.add_option("--threshold", type=float, dest="threshold")
parser.add_option("--flagfile", default="")
parser.add_option("--cmd", dest="cmd", default='score')
opt, args = parser.parse_args()
if opt.inputfile is None:
logger.error(globals()['__doc__'] % locals())
sys.exit(1)
if opt.cmd == 'score':
convert_score(opt.inputfile, opt.outputfile, opt.threshold)
elif opt.cmd == 'flag':
convert_flag(opt.inputfile, opt.outputfile)
elif opt.cmd == 'prune':
prune(opt.inputfile, opt.flagfile,opt.outputfile)
elif opt.cmd == 'confuse':
convert_confuse(opt.inputfile, opt.outputfile)
|
{"hexsha": "862ea5b8ed58bc0289746d10cf93d8bd486946e7", "size": 5866, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/report/fa20-523-349/project/RankNet/indycar/nabcsv.py", "max_stars_repo_name": "mikahla1/cybertraining-dsc.github.io", "max_stars_repo_head_hexsha": "168cadb2f755cb6ad4907e5656bd879d57e01e43", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-10-16T21:59:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-27T16:32:50.000Z", "max_issues_repo_path": "docs/report/fa20-523-349/project/RankNet/indycar/nabcsv.py", "max_issues_repo_name": "mikahla1/cybertraining-dsc.github.io", "max_issues_repo_head_hexsha": "168cadb2f755cb6ad4907e5656bd879d57e01e43", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-09-04T13:14:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-19T09:05:27.000Z", "max_forks_repo_path": "docs/report/fa20-523-349/project/RankNet/indycar/nabcsv.py", "max_forks_repo_name": "mikahla1/cybertraining-dsc.github.io", "max_forks_repo_head_hexsha": "168cadb2f755cb6ad4907e5656bd879d57e01e43", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2020-08-16T17:17:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-08T22:54:34.000Z", "avg_line_length": 26.3049327354, "max_line_length": 95, "alphanum_fraction": 0.5533583362, "include": true, "reason": "import numpy", "num_tokens": 1668}
|
library(tidyverse)
df <- read_csv("Top2000_extra_columns_spotify_genre.csv")
df$...1 <- NULL
colnames(df) <- tolower(colnames(df))
audio_features <- c("tempo","danceability","energy","valence","loudness","instrumentalness")
col_years <- colnames(df)[grepl("[0-9]{4}", colnames(df))]
non_col_years <- colnames(df)[!grepl("[0-9]{4}", colnames(df))]
get_legend <- function(a.gplot){
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
  legend <- tmp$grobs[[leg]]
  return(legend)
}
df[df$artist == 'Pearl Jam',c('genre_groups')] <- "rock"
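# for each artist keep the single most frequent genre; 'unknown' counts are
# zeroed so that any known genre wins the sort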
df_unique_genres <- df %>%
count(artist, genre_groups) %>%
mutate(n = ifelse(genre_groups == 'unknown', 0, n)) %>%
arrange(artist, desc(n)) %>%
distinct(artist, .keep_all = TRUE) %>%
select(artist,genre_groups)
df$genre_groups <- NULL
df <- df %>% left_join(df_unique_genres, by = 'artist')
|
{"hexsha": "e61703d3607cbff99bc00cbaf431ecb86aa2c3f8", "size": 906, "ext": "r", "lang": "R", "max_stars_repo_path": "shiny/global.r", "max_stars_repo_name": "AzucenaMV/top2000-dashboard", "max_stars_repo_head_hexsha": "d1fa465469024e7b97d8db8160ae85199b8f5642", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "shiny/global.r", "max_issues_repo_name": "AzucenaMV/top2000-dashboard", "max_issues_repo_head_hexsha": "d1fa465469024e7b97d8db8160ae85199b8f5642", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shiny/global.r", "max_forks_repo_name": "AzucenaMV/top2000-dashboard", "max_forks_repo_head_hexsha": "d1fa465469024e7b97d8db8160ae85199b8f5642", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2413793103, "max_line_length": 92, "alphanum_fraction": 0.6721854305, "num_tokens": 273}
|
#!/usr/bin/python
import cv2
from Brain import Brain
from os import listdir
from os.path import isfile, join
import numpy as np
imageSize = Brain.IMAGE_SIZE
# tools
def read_image(path, scale_size=imageSize):
img = cv2.resize(cv2.imread(path, 0), scale_size)
d = np.asarray(img)
d = d.reshape((1, scale_size[0] * scale_size[1]))
return d
class Core(object):
def __init__(self):
self.brain = Brain()
self.mode = "prediction"
def load_data(self, dataset_root_dir):
self.mode = "train"
# -- Scaning dataset directory
dataset_pos = dataset_root_dir + "_pos"
dataset_neg = dataset_root_dir + "_neg"
# [TODO] : check if the directory exists
onlyfiles_pos = [f for f in listdir(dataset_pos) if isfile(join(dataset_pos, f))]
onlyfiles_neg = [f for f in listdir(dataset_neg) if isfile(join(dataset_neg, f))]
# -- Initializing data
datasetSize = len(onlyfiles_pos) + len(onlyfiles_neg)
self.brain.data = np.zeros((datasetSize, imageSize[0] * imageSize[1]))
self.brain.labels = np.zeros((datasetSize, 1))
# -- Loading files
for i in range(0, len(onlyfiles_pos)):
f = dataset_pos + "/" + onlyfiles_pos[i]
print "reading : ", f
self.brain.data[i] = read_image(f, imageSize)
self.brain.labels[i] = 1
offset = len(onlyfiles_pos)
for i in range(0, len(onlyfiles_neg)):
f = dataset_neg + "/" + onlyfiles_neg[i]
print "reading : ", f
self.brain.data[i + offset] = read_image(f, imageSize)
self.brain.labels[i + offset] = 0
def train(self):
if self.mode != "train":
raise Exception("can not use train in modes other than 'train'")
self.brain.train()
self.brain.save('model')
def predict_ball(self, data):
if self.mode != "prediction":
raise Exception("can not use prediction in modes other than 'prediction'")
self.brain.data = data
self.brain.predict()
result = self.brain.labels[0]
return result
def init(mode="prediction"):
global core
core = Core()
if mode == "train":
core.load_data("./dataset")
core.train()
elif mode == "prediction":
pass
else:
raise Exception("undefined behavior")
def run(chars):
global core
    res = np.frombuffer(chars, dtype=np.uint8)  # np.fromstring is deprecated
res = res.reshape((1, -1))
return core.predict_ball(res[0:1, :])[0]
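if __name__ == "__main__":
    # Sketch (added) of the byte-string protocol run() expects: a flattened
    # grayscale patch serialized to raw bytes. Calling run() itself requires
    # a trained Brain model, so only the encode/decode round trip is shown.
    patch = np.zeros(imageSize[0] * imageSize[1], dtype=np.uint8)
    chars = patch.tobytes()
    decoded = np.frombuffer(chars, dtype=np.uint8).reshape((1, -1))
    print(decoded.shape)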
|
{"hexsha": "cac46b5a64415a03e254ca0739bc84016cf0549e", "size": 2515, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/modules/ml/mmlcore.py", "max_stars_repo_name": "ArefMq/SoccerBallDetection", "max_stars_repo_head_hexsha": "3df55ed96fb42c5bc85d10f69a21eaa5bc3a948e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2017-04-23T16:42:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-05T12:49:20.000Z", "max_issues_repo_path": "src/modules/ml/mmlcore.py", "max_issues_repo_name": "ArefMq/SoccerBallDetection", "max_issues_repo_head_hexsha": "3df55ed96fb42c5bc85d10f69a21eaa5bc3a948e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-10-20T17:38:52.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-20T17:38:52.000Z", "max_forks_repo_path": "src/modules/ml/mmlcore.py", "max_forks_repo_name": "ArefMq/SoccerBallDetection", "max_forks_repo_head_hexsha": "3df55ed96fb42c5bc85d10f69a21eaa5bc3a948e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2017-10-27T08:35:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-13T02:59:09.000Z", "avg_line_length": 29.2441860465, "max_line_length": 89, "alphanum_fraction": 0.5996023857, "include": true, "reason": "import numpy", "num_tokens": 624}
|
# code for running scalability experiments in JAIR submission
import sys
import numpy as NP
import random
import math
import time
import scipy
from tensorlog import comline
from tensorlog import dataset
from tensorlog import declare
from tensorlog import expt
from tensorlog import funs
from tensorlog import interp
from tensorlog import learn
from tensorlog import matrixdb
from tensorlog import ops
from tensorlog import plearn
from tensorlog import program
from tensorlog import simple
EDGE_WEIGHT = 0.2
SUBGRID = 10
# BATCHSIZE is referenced by the parallel learner below but was never defined
# in this script; 125 is an assumed placeholder value
BATCHSIZE = 125
def nodeName(i,j):
return '%d,%d' % (i,j)
def generateGrid(n,outf):
fp = open(outf,'w')
for i in range(1,n+1):
for j in range(1,n+1):
for di in [-1,0,+1]:
for dj in [-1,0,+1]:
if (1 <= i+di <= n) and (1 <= j+dj <= n):
fp.write('edge\t%s\t%s\t%f\n' % (nodeName(i,j),nodeName(i+di,j+dj),EDGE_WEIGHT))
def generateData(n,trainFile,testFile):
fpTrain = open(trainFile,'w')
fpTest = open(testFile,'w')
r = random.Random()
for i in range(1,n+1):
for j in range(1,n+1):
#target - note early version used i,j < n/2 which is a bug
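      # the target is the center of the SUBGRID x SUBGRID block containing (i,j)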
      ti = (i//SUBGRID)*SUBGRID + SUBGRID//2
      tj = (j//SUBGRID)*SUBGRID + SUBGRID//2
x = nodeName(i,j)
y = nodeName(ti,tj)
fp = fpTrain if r.random()<0.67 else fpTest
fp.write('\t'.join(['path',x,y]) + '\n')
# parse command line args
def getargs():
goal = 'acc'
if len(sys.argv)>1:
goal = sys.argv[1]
n = 6
if len(sys.argv)>2:
n = int(sys.argv[2])
  maxD = int(round(n/2.0))
if len(sys.argv)>3:
maxD = int(sys.argv[3])
epochs = 30
if len(sys.argv)>4:
epochs = int(sys.argv[4])
return (goal,n,maxD,epochs)
# generate all inputs for an accuracy (or timing) experiment
def genInputs(n):
#generate grid
stem = 'inputs/g%d' % n
factFile = stem+'.cfacts'
trainFile = stem+'-train.exam'
testFile = stem+'-test.exam'
generateGrid(n,factFile)
generateData(n,trainFile,testFile)
return (factFile,trainFile,testFile)
# run timing experiment
def timingExpt(prog,maxD,trainFile,minibatch):
times = []
print('depth',maxD,'minibatch',minibatch)
ti = interp.Interp(prog)
ti.prog.maxDepth = maxD
tlog = simple.Compiler(db=prog.db,prog=prog)
dset = tlog.load_dataset(trainFile)
if minibatch:
batchSize = minibatch
quitAfter = 1
else:
batchSize = 1
quitAfter = 25
start = time.time()
for k,(mode,(X0,Y0)) in enumerate(tlog.minibatches(dset,batch_size=batchSize)):
print('batch',k)
X = scipy.sparse.csr_matrix(X0)
Y = scipy.sparse.csr_matrix(Y0)
ti.prog.eval(declare.asMode(mode), [X])
if k>=quitAfter:
break
elapsed = time.time() - start
print(k*batchSize,'examples','miniBatchSize',batchSize,'time',elapsed,'qps',k*batchSize/elapsed)
return elapsed
# run accuracy experiment
def accExpt(prog,trainFile,testFile,n,maxD,epochs):
print('grid-acc-expt: %d x %d grid, %d epochs, maxPath %d' % (n,n,epochs,maxD))
trainData = dataset.Dataset.loadExamples(prog.db,trainFile)
testData = dataset.Dataset.loadExamples(prog.db,testFile)
prog.db.markAsParameter('edge',2)
prog.maxDepth = maxD
# 20 epochs and rate=0.01 is ok for grid size 16 depth 10
# then it gets sort of chancy
#learner = learn.FixedRateGDLearner(prog,epochs=epochs,epochTracer=learn.EpochTracer.cheap)
learner = learn.FixedRateGDLearner(prog,epochs=epochs,epochTracer=learn.EpochTracer.cheap,rate=0.005)
plearner = plearn.ParallelFixedRateGDLearner(
prog,
epochs=epochs,
parallel=40,
miniBatchSize=BATCHSIZE,
regularizer=learn.L2Regularizer(),
epochTracer=learn.EpochTracer.cheap,
rate=0.01)
params = {'prog':prog,
'trainData':trainData, 'testData':testData,
'savedTestPredictions':'tmp-cache/test.solutions.txt',
'savedTestExamples':'tmp-cache/test.examples',
'learner':learner,
}
NP.seterr(divide='raise')
t0 = time.time()
result = expt.Expt(params).run()
print('elapsed time',time.time()-t0)
return result
def runMain():
# usage: acc [grid-size] [maxDepth] [epochs]"
# time [grid-size] [maxDepth] [no-minibatch]"
(goal,n,maxD,epochsOrMinibatch) = getargs()
print('args',(goal,n,maxD,epochsOrMinibatch))
(factFile,trainFile,testFile) = genInputs(n)
db = matrixdb.MatrixDB.loadFile(factFile)
prog = program.Program.loadRules("grid.ppr",db)
if goal=='time':
print(timingExpt(prog,maxD,trainFile,epochsOrMinibatch))
elif goal=='acc':
print(accExpt(prog,trainFile,testFile,n,maxD,epochsOrMinibatch))
print('prog.maxDepth',prog.maxDepth)
else:
assert False,'bad goal %s' % goal
if __name__=="__main__":
runMain()
|
{"hexsha": "c01ea3c7d501217fcafe986e90d1f985677a3433", "size": 4991, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/grid/bigexpt.py", "max_stars_repo_name": "saraswat/TensorLog", "max_stars_repo_head_hexsha": "c56cebfa33b5123d5340a7b429e333da09d223d8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 108, "max_stars_repo_stars_event_min_datetime": "2016-05-24T16:49:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T19:06:14.000Z", "max_issues_repo_path": "datasets/grid/bigexpt.py", "max_issues_repo_name": "saraswat/TensorLog", "max_issues_repo_head_hexsha": "c56cebfa33b5123d5340a7b429e333da09d223d8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2016-09-07T18:04:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-07T01:18:08.000Z", "max_forks_repo_path": "datasets/grid/bigexpt.py", "max_forks_repo_name": "saraswat/TensorLog", "max_forks_repo_head_hexsha": "c56cebfa33b5123d5340a7b429e333da09d223d8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2016-06-17T18:59:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-28T02:13:59.000Z", "avg_line_length": 31.3899371069, "max_line_length": 105, "alphanum_fraction": 0.6349428972, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1430}
|
submodule (real_transform_routines) real_initialization_routines
contains
module subroutine rfft1i(n, wsave, lensav, ierror)
!
! rfft1i: initialization for rfft1b and rfft1f.
!
! purpose:
!
! rfft1i initializes array wsave for use in its companion routines
! rfft1b and rfft1f. the prime factorization of n together with a
! tabulation of the trigonometric functions are computed and stored
! in array wsave. separate wsave arrays are required for different
! values of n.
!
! parameters:
!
! integer n, the length of the sequence to be
! transformed. the transform is most efficient when n is a product of
! small primes.
!
! real wsave(lensav), containing the prime factors of
! n and also containing certain trigonometric values which will be used in
! routines rfft1b or rfft1f.
!
! integer lensav, the dimension of the wsave array.
! lensav must be at least n + int(log(real(n))) + 4.
!
! integer ierror, error_flag.
! 0, successful exit;
! 2, input parameter lensav not big enough.
!
!--------------------------------------------------------------
! Dummy arguments
!--------------------------------------------------------------
integer (ip), intent (in) :: n
real (wp), intent (out) :: wsave(lensav)
integer (ip), intent (in) :: lensav
integer (ip), intent (out) :: ierror
!--------------------------------------------------------------
        if (lensav < n + int(log(real(n, kind=wp) )/log(TWO)) + 4) then
            ierror = 2
            call fft_error_handler('rfft1i ', 3)
            return
        else
            ierror = 0
        end if
if (n /= 1) call rffti1(n, wsave(1), wsave(n+1))
end subroutine rfft1i
module subroutine rfft2i(l, m, wsave, lensav, ierror)
!
! rfft2i: initialization for rfft2b and rfft2f.
!
! purpose:
! rfft2i initializes real array wsave for use in its companion routines
! rfft2f and rfft2b for computing the two-dimensional fast fourier
! transform of real data. prime factorizations of l and m, together with
! tabulations of the trigonometric functions, are computed and stored in
! array wsave. rfft2i must be called prior to the first call to rfft2f
! or rfft2b. separate wsave arrays are required for different values of
! l or m.
!
!
! integer l, the number of elements to be transformed
! in the first dimension. the transform is most efficient when l is a
! product of small primes.
!
! integer m, the number of elements to be transformed
! in the second dimension. the transform is most efficient when m is a
! product of small primes.
!
! integer lensav, the number of elements in the wsave
! array. lensav must be at least l + m + int(log(real(l)))
! + int(log(real(m))) + 8.
!
! real wsave(lensav), containing the prime factors
! of l and m, and also containing certain trigonometric values which
! will be used in routines rfft2b or rfft2f.
!
! integer ier, error_flag.
! 0, successful exit;
! 2, input parameter lensav not big enough;
! 20, input error returned by lower level routine.
!
!--------------------------------------------------------------
! Dummy arguments
!--------------------------------------------------------------
integer (ip), intent (in) :: l
integer (ip), intent (in) :: m
real (wp), intent (out) :: wsave(lensav)
integer (ip), intent (in) :: lensav
integer (ip), intent (out) :: ierror
!--------------------------------------------------------------
! Dummy arguments
!--------------------------------------------------------------
integer (ip) :: local_error_flag, lwsav, mmsav, mwsav
!--------------------------------------------------------------
! initialize error flag
ierror = 0
!
!==> verify lensav
!
lwsav = l+int(log(real(l, kind=wp) )/log(TWO)) + 4
mwsav = 2*m+int(log(real(m, kind=wp) )/log(TWO)) + 4
mmsav = m+int(log(real(m, kind=wp) )/log(TWO)) + 4
if (lensav < lwsav+mwsav+mmsav) then
ierror = 2
call fft_error_handler('rfft2i', 4)
return
end if
call rfftmi(l, wsave(1), lwsav, local_error_flag)
if (local_error_flag /= 0) then
ierror = 20
call fft_error_handler('rfft2i',-5)
return
end if
call cfftmi(m, wsave(lwsav+1), mwsav, local_error_flag)
if (local_error_flag /= 0) then
ierror = 20
call fft_error_handler('rfft2i',-5)
return
end if
call rfftmi(m, wsave(lwsav+mwsav+1), mmsav, local_error_flag)
if (local_error_flag /= 0) then
ierror = 20
call fft_error_handler('rfft2i',-5)
return
end if
end subroutine rfft2i
module subroutine rfftmi(n, wsave, lensav, ierror)
!
! rfftmi: initialization for rfftmb and rfftmf.
!
! purpose:
!
! rfftmi initializes array wsave for use in its companion routines
! rfftmb and rfftmf. the prime factorization of n together with a
! tabulation of the trigonometric functions are computed and stored
! in array wsave. separate wsave arrays are required for different
! values of n.
!
! input
!
! integer n, the length of each sequence to be
! transformed. the transform is most efficient when n is a product of
! small primes.
!
! integer lensav, the dimension of the wsave array.
! lensav must be at least n + int(log(real(n))) + 4.
!
! output
! real wsave(lensav), work array containing the prime
! factors of n and also containing certain trigonometric
! values which will be used in routines rfftmb or rfftmf.
!
! integer ierror, error_flag.
! 0, successful exit;
! 2, input parameter lensav not big enough.
!
!--------------------------------------------------------------
! Dummy arguments
!--------------------------------------------------------------
integer (ip), intent (in) :: n
integer (ip), intent (in) :: lensav
real (wp), intent (out) :: wsave(lensav)
integer (ip), intent (out) :: ierror
!--------------------------------------------------------------
!
!==> Check validity of input arguments
!
if (lensav < n + int(log(real(n, kind=wp) )/log(TWO)) + 4) then
ierror = 2
call fft_error_handler('rfftmi ', 3)
return
else
ierror = 0
end if
!
!==> Perform transform
!
if (n /= 1) call mrfti1(n, wsave(1), wsave(n+1))
end subroutine rfftmi
subroutine rffti1(n, wa, fac)
!
! Parameters:
!
! input
!
! n, the number for which factorization
! and other information is needed.
!
! output
! wa(n), trigonometric information.
!
! output
!
! fac(15), factorization information.
! fac(1) is n, fac(2) is nf, the number of factors, and fac(3:nf+2) are the
! factors.
!
!--------------------------------------------------------------
! Dummy arguments
!--------------------------------------------------------------
integer (ip), intent (in) :: n
real (wp), intent (out) :: fac(15)
real (wp), intent (out) :: wa(n)
!--------------------------------------------------------------
! Local variables
!--------------------------------------------------------------
integer (ip) :: i, ib, ido, ii, iip, ipm, is
integer (ip) :: j, k1, l1, l2, ld
integer (ip) :: nf, nfm1, nl, nq, nr, ntry
integer (ip), parameter :: NTRYH(*) = [4, 2, 3, 5]
real (wp), parameter :: TWO_PI = TWO * acos(-ONE)
real (wp) :: arg, argh, argld, fi
!--------------------------------------------------------------
ntry = 0
nl = n
nf = 0
j = 0
factorize_loop: do
! Increment j
j = j+1
! Choose ntry
if (j <= 4) then
ntry = NTRYH(j)
else
ntry = ntry+2
end if
inner_loop: do
nq = nl/ntry
nr = nl-ntry*nq
if (nr < 0) then
cycle factorize_loop
else if (nr == 0) then
nf = nf+1
fac(nf+2) = ntry
nl = nq
if (ntry == 2 .and. nf /= 1) then
do i=2,nf
ib = nf-i+2
fac(ib+2) = fac(ib+1)
end do
fac(3) = 2
end if
if (nl /= 1) cycle inner_loop
else
cycle factorize_loop
end if
exit inner_loop
end do inner_loop
exit factorize_loop
end do factorize_loop
fac(1) = n
fac(2) = nf
argh = TWO_PI/n
is = 0
nfm1 = nf-1
l1 = 1
if (nfm1 /= 0) then
do k1=1,nfm1
iip = int(fac(k1+2), kind=ip)
ld = 0
l2 = l1*iip
ido = n/l2
ipm = iip-1
do j=1,ipm
ld = ld+l1
i = is
argld = real(ld, kind=wp) * argh
fi = ZERO
do ii=3,ido,2
i = i+2
fi = fi + ONE
arg = fi*argld
wa(i-1) = cos(arg)
wa(i) = sin(arg)
end do
is = is+ido
end do
l1 = l2
end do
end if
end subroutine rffti1
subroutine mrfti1(n, wa, fac)
!
! input
! n, the number for which factorization and
! other information is needed.
!
! output
! wa(n), trigonometric information.
!
! output
! fac(15), factorization information. fac(1) is
! n, fac(2) is nf, the number of factors, and fac(3:nf+2) are the factors.
!
!--------------------------------------------------------------
! Dummy arguments
!--------------------------------------------------------------
integer (ip), intent (in) :: n
real (wp), intent (out) :: wa(n)
real (wp), intent (out) :: fac(15)
!--------------------------------------------------------------
! Local variables
!--------------------------------------------------------------
integer (ip) :: i, ib, ido, ii, iip, ipm, is
integer (ip) :: j, k1, l1, l2, ld
integer (ip) :: nf, nfm1, nl, nq, nr, ntry
integer (ip), parameter :: NTRYH(*) = [4, 2, 3, 5]
real (wp), parameter :: TWO_PI = TWO * acos(-ONE)
real (wp) :: arg, argh, argld, fi
!--------------------------------------------------------------
ntry = 0
nl = n
nf = 0
j = 0
factorize_loop: do
! Increment j
j = j+1
! Choose ntry
if (j <= 4) then
ntry = NTRYH(j)
else
ntry = ntry+2
end if
inner_loop: do
nq = nl/ntry
nr = nl-ntry*nq
if (nr < 0) then
cycle factorize_loop
else if (nr == 0) then
nf = nf+1
fac(nf+2) = ntry
nl = nq
if (ntry == 2 .and. nf /= 1) then
do i=2,nf
ib = nf-i+2
fac(ib+2) = fac(ib+1)
end do
fac(3) = 2
end if
if (nl /= 1) then
cycle inner_loop
end if
else
cycle factorize_loop
end if
exit inner_loop
end do inner_loop
exit factorize_loop
end do factorize_loop
fac(1) = n
fac(2) = nf
argh = TWO_PI/n
is = 0
nfm1 = nf-1
l1 = 1
do k1=1,nfm1
iip = int(fac(k1+2), kind=ip)
ld = 0
l2 = l1*iip
ido = n/l2
ipm = iip-1
do j=1,ipm
ld = ld+l1
i = is
argld = real(ld, kind=wp) * argh
fi = ZERO
do ii=3,ido,2
i = i+2
fi = fi + ONE
arg = fi*argld
wa(i-1) = cos(arg)
wa(i) = sin(arg)
end do
is = is+ido
end do
l1 = l2
end do
end subroutine mrfti1
end submodule real_initialization_routines
|
{"hexsha": "df9d1394ad6b65124afb63689ebeca644be0b9ce", "size": 14053, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/real_initialization_routines.f90", "max_stars_repo_name": "jbdv-no/modern_fftpack", "max_stars_repo_head_hexsha": "6909d44988925dcae1ee478c06be31e5605d3974", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-05-06T18:42:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T11:35:56.000Z", "max_issues_repo_path": "src/real_initialization_routines.f90", "max_issues_repo_name": "jlokimlin/fftpack6.0", "max_issues_repo_head_hexsha": "6909d44988925dcae1ee478c06be31e5605d3974", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2016-05-07T21:36:00.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-09T16:44:47.000Z", "max_forks_repo_path": "src/real_initialization_routines.f90", "max_forks_repo_name": "jlokimlin/fftpack6.0", "max_forks_repo_head_hexsha": "6909d44988925dcae1ee478c06be31e5605d3974", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-05-27T12:03:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T05:49:34.000Z", "avg_line_length": 32.2316513761, "max_line_length": 84, "alphanum_fraction": 0.4135771721, "num_tokens": 3555}
|
[STATEMENT]
lemma sameDom_sym:
"sameDom inp inp' = sameDom inp' inp"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sameDom inp inp' = sameDom inp' inp
[PROOF STEP]
unfolding sameDom_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>i. (inp i = None) = (inp' i = None)) = (\<forall>i. (inp' i = None) = (inp i = None))
[PROOF STEP]
by auto
|
{"llama_tokens": 162, "file": "Binding_Syntax_Theory_Preliminaries", "length": 2}
|
#coding:utf-8
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import cross_val_score
import csv
from scipy import sparse
def read_file():
    print("read_train")
    f = open("train.tsv", newline='')
    reader = csv.reader(f, delimiter='\t')
    # holds the training-set documents
    text_train = []
    # holds the training-set labels
    label = []
    # holds the test-set documents
    text_test = []
    # holds the url ids
    urlid = []
    extra_train = []
    extra_test = []
    a = 0
    print("read train file begin")
    for row in reader:
        if a == 0:
            a = a + 1
        else:
            # the label is the last field
            label.append(int(row[len(row)-1]))
            # take column 2 (the boilerplate text) plus column 0 (the url) as the training document
            #text_train.append(row[2]+" "+row[0]+" "+row[3])
            text_train.append(row[2]+" "+row[0])
            # extra hand-crafted features
            #extra_train.append([float(i)/100.0 for i in row[5:len(row)-1]])
            #extra_train.append([float(row[13]),float(row[6])/10.0,float(row[7])/10.0,float(row[8])/10.0,float(row[9])/10.0])
            extra_train.append([float(row[7])])
    f.close()
    print("read test")
    f = open("test.tsv", newline='')
    reader = csv.reader(f, delimiter='\t')
    a = 0
    for row in reader:
        if a == 0:
            a = a + 1
        else:
            urlid.append(row[1])
            #text_test.append(row[2]+" "+row[0]+" "+row[3])
            text_test.append(row[2]+" "+row[0])
            #extra_test.append([float(i)/100.0 for i in row[5:len(row)]])
            #extra_test.append([float(row[13]),float(row[6])/10.0,float(row[7])/10.0,float(row[8])/10.0,float(row[9])/10.0])
            extra_test.append([float(row[7])])
    return text_train, label, text_test, urlid, extra_train, extra_test
def remain(answer):
    """
    Clamp very confident predictions to exactly 1.0.
    :param answer: array of predicted probabilities
    """
    for i in range(len(answer)):
        if answer[i] > 0.9725:
            answer[i] = 1.0
    return answer
if __name__ == "__main__":
train,label,test,urlid,extra_train1,extra_test1 = read_file()
print "train length",len(train)
print "test length",len(test)
vectorizer = TfidfVectorizer(sublinear_tf=True,min_df = 3,ngram_range=(1,2),smooth_idf=True,token_pattern=r'\w{1,}',use_idf=1,analyzer='word',strip_accents='unicode')
print "transform train to tf matrix"
print "transform test to tf matrix"
length_train = len(train)
x_all = train + test
x_all = vectorizer.fit_transform(x_all)
x = x_all[:length_train]
t = x_all[length_train:]
extra_train,extra_test = [],[]
print "读topic"
f1 = open("topic_train.txt")
for line in f1.readlines():
sp = line.split()
sp = [float(j) for j in sp]
extra_train.append(sp)
f2 = open("topic_test.txt")
for line in f2.readlines():
sp = line.split()
sp = [float(j) for j in sp]
extra_test.append(sp)
extra_train = np.array(extra_train)
extra_test = np.array(extra_test)
print "topic num",extra_train.shape
print "合并特征"
x = sparse.hstack((x,extra_train)).tocsr()
t = sparse.hstack((t,extra_test)).tocsr()
#x = sparse.hstack((x,extra_train1)).tocsr()
#t = sparse.hstack((t,extra_test1)).tocsr()
label = np.array(label)
clf = LogisticRegression(penalty='l1',C=30,tol=1e-9)
x = clf.fit_transform(x,label)
t = clf.transform(t)
print "x shape",x.shape
print "t.shape",t.shape
#clf = svm.SVC(kernel='sigmoid',degree=9,gamma=10)
#clf = svm.SVC(degree=9,gamma=0.001)
#clf = KNeighborsClassifier(n_neighbors=1)
#
#clf = SGDClassifier(loss="log",n_iter=300, penalty="l2",alpha=0.0003)
clf = LogisticRegression(penalty='l2',dual=True,fit_intercept=False,C=3.2,tol=1e-9,class_weight=None, random_state=None, intercept_scaling=1.0)
print "交叉验证"
print np.mean(cross_validation.cross_val_score(clf,x,label,cv=20,scoring='roc_auc'))
clf.fit(x,label)
#验一下自己的结果
print "训练自己",clf.score(x,label)
answer = clf.predict_proba(t)[:,1]
#answer = remain(answer)
f = open("hand_answer.csv","w")
f.write('urlid,label\n')
for i in xrange(len(test)):
f.write("%s,%s\n"%(urlid[i],answer[i]))
|
{"hexsha": "b87be4f8ec1f43d0c87370f86c8a1a45b3382db5", "size": 4569, "ext": "py", "lang": "Python", "max_stars_repo_path": "tfidf.py", "max_stars_repo_name": "ezhouyang/class", "max_stars_repo_head_hexsha": "6bef374a78bdef8df946680c283df9643ac91ed7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-11-18T13:44:39.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-18T13:44:39.000Z", "max_issues_repo_path": "tfidf.py", "max_issues_repo_name": "ezhouyang/class", "max_issues_repo_head_hexsha": "6bef374a78bdef8df946680c283df9643ac91ed7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tfidf.py", "max_forks_repo_name": "ezhouyang/class", "max_forks_repo_head_hexsha": "6bef374a78bdef8df946680c283df9643ac91ed7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.101910828, "max_line_length": 170, "alphanum_fraction": 0.6130444299, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1335}
|
# String matching functions for Aaron
using StringDistances
# Fairly arbitrarily chosen list of stop words
const stopwords = [
"the",
"is",
"at",
"which",
"on",
"in",
"for",
"with"
]
function clean_string(x)
xlwr = lowercase(x)
    xcln = replace(xlwr, r"[^-a-z]" => " ") # keep only letters and hyphens
return xcln
end
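# Illustrative check: clean_string("Left-Ventricular Fxn!") == "left-ventricular fxn "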
function cut_stopwords(x_arr, stopwords)
res = [z for z in x_arr if z ∉ stopwords]
return res
end
function is_anagram(a, b)
if length(a) ≠ length(b)
res = false
else
a_sorted = sort(collect(a))
b_sorted = sort(collect(b))
res = a_sorted == b_sorted
end
return res
end
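# Illustrative checks (not part of the original tests):
# is_anagram("listen", "silent") # true
# is_anagram("alpha", "beta")    # false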
# Given two strings, this function returns a boolean indicating
# whether or not the first letters of all words in the phrases
# are the same.
function same_first_letters(x, y)
x_arr = split(x)
y_arr = split(y)
n_words = length(x_arr)
res = true
if n_words ≠ length(y_arr)
warn("Attempted to compare phrases with differing number of words")
res = false
else
for i = 1:n_words
if first(x_arr[i]) ≠ first(y_arr[i])
res = false
break
end
end
end
return res
end
## testing
same_first_letters("this is a phrase", "this is also phrase")
same_first_letters("this is a phrase", "this also phrase")
# This function uses a moving window to compare word combinations
# from two strings (each given as an array of words).
function found_match(x_arr, y_arr, thresh)
# By convention, y_arr will be longer
len_y = length(y_arr)
len_x = length(x_arr)
x_str = join(x_arr, " ")
windowsize = len_x
for i = 1:(len_y - windowsize + 1)
y_str = join(y_arr[i:(i + windowsize - 1)], " ")
n_char = length(y_str)
if !same_first_letters(x_str, y_str)
continue
end
println("Comparing: \'$y_str\' and \'$x_str\'")
similarity = compare(DamerauLevenshtein(), y_str, x_str)
if n_char ≤ 8 && similarity ≥ thresh
println("Matched \'$y_str\' and \'$x_str\'")
return true
elseif n_char ≤ 12 && similarity ≥ thresh - 0.05
println("Matched \'$y_str\' and \'$x_str\' with similarity $similarity")
return true
elseif n_char ≤ 18 && similarity ≥ thresh - 0.1
println("Matched \'$y_str\' and \'$x_str\' with similarity $similarity")
return true
elseif similarity ≥ thresh - 0.15
println("Matched \'$y_str\' and \'$x_str\' with similarity $similarity")
return true
end
end
return false
end
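# The similarity threshold is relaxed as the compared strings grow longer,
# since a fixed Damerau-Levenshtein similarity is harder to reach on long
# phrases. Illustrative call (bypassing the stopword filter):
# found_match(["pericardial"], ["the", "pericardial", "effusion"], 0.75) # true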
function is_string_match(x, y, thresh, stopwords)
x_cln = clean_string(x)
y_cln = clean_string(y)
x_arr = cut_stopwords(split(x_cln), stopwords)
y_arr = cut_stopwords(split(y_cln), stopwords)
len_x = length(x_arr)
len_y = length(y_arr)
if len_x < len_y
res = found_match(x_arr, y_arr, thresh)
else
# switching position of x and y
res = found_match(y_arr, x_arr, thresh)
end
return res
end
# testing our functions
target1 = "pericardial effusion"
candidate1 = "Metastatic non-small cell lung cancer. Dyspnea. No pulsus on exam. Evaluate for perracardial perracardial effusion/interval change, signs of tamponade."
threshold1 = 0.75
is_string_match(target1, candidate1, threshold1, stopwords)
is_string_match(candidate1, target1, threshold1, stopwords)
x1 = "left ventricular left ventricular hypertophy"
y1 = "left ventricular fxn"
is_string_match(x1, y1, 0.95, stopwords)
is_string_match(y1, x1, 0.95, stopwords)
|
{"hexsha": "c41d11ea11add4a81167bc273785d6d40ca51c83", "size": 3668, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "string_matching.jl", "max_stars_repo_name": "paulstey/string_matching", "max_stars_repo_head_hexsha": "2cd307eecd732d3ff674764f0e80e34fbc5658bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "string_matching.jl", "max_issues_repo_name": "paulstey/string_matching", "max_issues_repo_head_hexsha": "2cd307eecd732d3ff674764f0e80e34fbc5658bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "string_matching.jl", "max_forks_repo_name": "paulstey/string_matching", "max_forks_repo_head_hexsha": "2cd307eecd732d3ff674764f0e80e34fbc5658bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8309859155, "max_line_length": 167, "alphanum_fraction": 0.6303162486, "num_tokens": 997}
|
import numpy as np
#from gym.envs.mujoco import mujoco_env
#from gym import utils
import os
import gym
from meta_mb.logger import logger
from gym.envs.mujoco.mujoco_env import MujocoEnv
from meta_mb.meta_envs.base import MetaEnv
from meta_mb.meta_envs.base import RandomEnv
class FetchJellyEnv(RandomEnv, gym.utils.EzPickle):
def __init__(self):
xml_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'assets', 'jelly.xml')
self.goal = np.append(np.random.uniform(-5, 5, 2), np.random.uniform(0, 0.15))
RandomEnv.__init__(self, 0, xml_file, 2)
gym.utils.EzPickle.__init__(self)
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
self.sim.data.body_xpos.flat[:3],
self.get_body_com("base_link") - self.goal
])
def step(self, action):
self.prev_pos = self.get_body_com("base_link")
self.do_simulation(action, self.frame_skip)
self.curr_pos = self.get_body_com("base_link")
vec_to_goal = self.get_body_com("base_link") - self.goal
        self.reward_dist = -(np.linalg.norm(self.curr_pos - self.prev_pos)) / self.dt
        reward_ctrl = -0.5*0.1*np.square(action).sum()
        reward_fetch = -np.linalg.norm(vec_to_goal)
        reward = 1.25e-4 * (self.reward_dist - reward_ctrl + 1) + reward_fetch
        observation = self._get_obs()
        done = False
        info = dict(reward_dist=self.reward_dist, reward_ctrl=reward_ctrl)
return observation, reward, done, info
def reward(self, obs, act, obs_next):
assert obs.ndim == act.ndim == obs_next.ndim
if obs.ndim == 2:
assert obs.shape == obs_next.shape and act.shape[0] == obs.shape[0]
            vec_to_goal = self.get_body_com("base_link") - self.goal
            reward_dist = -(np.linalg.norm(self.curr_pos - self.prev_pos)) / self.dt
            reward_ctrl = -0.5*0.1*np.square(act).sum()
            reward_fetch = -np.linalg.norm(vec_to_goal)
            reward = 1.25e-4 * (reward_dist - reward_ctrl + 1) + reward_fetch
            return reward
elif obs.ndim == 1:
return self.reward(obs[None], act[None], obs_next[None])[0]
else:
raise NotImplementedError
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
self.sim.model.body_pos[-1] = np.append(np.random.uniform(-5, 5, 2), np.random.uniform(0, 0.15))
self.set_state(qpos, qvel)
return self._get_obs()
def body_position(self):
return self.get_body_com("base_link")
if __name__ == "__main__":
env = FetchJellyEnv()
while True:
env.reset()
for _ in range(10000):
action = env.action_space.sample()
env.step(action)
env.render()
|
{"hexsha": "c5a79aa1470ede48b0c7f0a43ff6ee1745f80c0e", "size": 2971, "ext": "py", "lang": "Python", "max_stars_repo_path": "meta_mb/envs/jelly/fetch_jelly.py", "max_stars_repo_name": "iclavera/meta-mb", "max_stars_repo_head_hexsha": "a1204e573c1415161129403cfb287bf120488fd0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-01-07T08:22:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T10:53:14.000Z", "max_issues_repo_path": "meta_mb/envs/jelly/fetch_jelly.py", "max_issues_repo_name": "iclavera/meta-mb", "max_issues_repo_head_hexsha": "a1204e573c1415161129403cfb287bf120488fd0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "meta_mb/envs/jelly/fetch_jelly.py", "max_forks_repo_name": "iclavera/meta-mb", "max_forks_repo_head_hexsha": "a1204e573c1415161129403cfb287bf120488fd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6790123457, "max_line_length": 104, "alphanum_fraction": 0.6300908785, "include": true, "reason": "import numpy", "num_tokens": 769}
|
import numbers
import time
import numpy as np
import scipy
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.decomposition.nmf import _beta_divergence, _beta_loss_to_float
from scipy.special import expit
from scipy.sparse import issparse
USE_CYTHON = False # currently, cython is disabled due to unsolved numerical bugs
EPSILON = np.finfo(np.float32).eps
INTEGER_TYPES = (numbers.Integral, np.integer)
# utility functions
def sigmoid(M):
return expit(M)
def d_sigmoid(M):
sgm = sigmoid(M)
return sgm * (1 - sgm)
def inverse(x, link):
if link == "linear":
return x
elif link == "logit":
return sigmoid(x)
else:
raise ValueError("Invalid link function {}".format(link))
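# Illustrative check: inverse(0.0, "logit") == 0.5, while inverse(0.0, "linear") == 0.0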
def compute_factorization_error(target, left_factor, right_factor, link, beta_loss):
    if target is None:
        return 0
    elif link == "linear":
        return _beta_divergence(target, left_factor, right_factor, beta_loss, square_root=True)
    elif link == "logit":
        return np.linalg.norm(target - sigmoid(np.dot(left_factor, right_factor)))
    else:
        raise ValueError("Invalid link function {}".format(link))
class _IterativeCMFSolver:
"""Boilerplate for iterative solvers (mu and newton)
Implement the update_step method in concrete subclasses to use.
Parameters
----------
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
l1_reg : double, default: 0.
L1 regularization parameter. Currently same for all matrices.
l2_reg : double, default: 0.
L2 regularization parameter
alpha: double, default: 0.5
Determines trade-off between optimizing for X and Y.
The larger the value, the more X is prioritized in optimization.
beta_loss : float or string, default 'frobenius'
Currently disabled. Used only in 'mu' solver.
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits.
update_H : boolean, default: True
Currently disabled. Need to enable in future to implement transform method in CNMF.
verbose : integer, default: 0
The verbosity level.
U_non_negative: bool, default: True
Whether to enforce non-negativity for U. Only applicable for the newton solver.
V_non_negative: bool, default: True
Whether to enforce non-negativity for V. Only applicable for the newton solver.
Z_non_negative: bool, default: True
Whether to enforce non-negativity for Z. Only applicable for the newton solver.
    x_link: str, default: "linear"
        One of either "logit" or "linear". The link function for transforming UV^T to approximate X
    y_link: str, default: "linear"
        One of either "logit" or "linear". The link function for transforming VZ^T to approximate Y
    hessian_pertubation: double, default: 0.2
        The perturbation added to the Hessian in the newton solver to maintain positive definiteness
"""
def __init__(self, max_iter=200, tol=1e-4, beta_loss="frobenius",
l1_reg=0, l2_reg=0, alpha=0.5, verbose=0,
U_non_negative=True, V_non_negative=True, Z_non_negative=True,
update_U=True, update_V=True, update_Z=True,
x_link="linear", y_link="linear", hessian_pertubation=0.2,
sg_sample_ratio=1., random_state=None):
self.max_iter = max_iter
self.tol = tol
self.beta_loss = _beta_loss_to_float(beta_loss)
self.l1_reg = l1_reg
self.l2_reg = l2_reg
self.alpha = alpha
self.verbose = verbose
self.U_non_negative = U_non_negative
self.V_non_negative = V_non_negative
self.Z_non_negative = Z_non_negative
self.update_U = update_U
self.update_V = update_V
self.update_Z = update_Z
self.x_link = x_link
self.y_link = y_link
self.hessian_pertubation = hessian_pertubation
self.sg_sample_ratio = sg_sample_ratio
if random_state is not None:
np.random.seed(random_state)
def update_step(self, X, Y, U, V, Z, l1_reg, l2_reg, alpha):
"""A single update step for all the matrices in the factorization."""
raise NotImplementedError("Implement in concrete subclass to use")
def compute_error(self, X, Y, U, V, Z):
return self.alpha * compute_factorization_error(X, U, V.T, self.x_link, self.beta_loss) + \
(1 - self.alpha) * compute_factorization_error(Y, V, Z.T, self.y_link, self.beta_loss)
def fit_iterative_update(self, X, Y, U, V, Z):
"""Compute CMF with iterative methods.
The objective function is minimized with an alternating minimization of U, V
and Z. Regularly prints error and stops update when improvement stops.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
First data matrix to be decomposed
Y : {array-like, sparse matrix}, shape (n_features, n_labels)
Second data matrix to be decomposed
U : array-like, shape (n_samples, n_components)
V : array-like, shape (n_features, n_components)
Z : array-like, shape (n_labels, n_components)
Returns
-------
U : array, shape (n_samples, n_components)
Transformed data.
V : array, shape (n_features, n_components)
Transformed data.
Z : array, shape (n_labels, n_components)
Transformed data.
n_iter : int
The number of iterations done by the algorithm.
"""
start_time = time.time()
# TODO: handle beta loss other than fnorm
previous_error = error_at_init = self.compute_error(X, Y, U, V, Z)
for n_iter in range(1, self.max_iter + 1):
self.update_step(X, Y, U, V, Z, self.l1_reg, self.l2_reg, self.alpha)
# test convergence criterion every 10 iterations
if self.tol > 0 and n_iter % 10 == 0:
error = self.compute_error(X, Y, U, V, Z)
if self.verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
improvement_stopped = (previous_error - error) / error_at_init < self.tol
if improvement_stopped:
break
previous_error = error
# do not print if we have already printed in the convergence test
if self.verbose and (self.tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return U, V, Z, n_iter
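# Illustrative usage sketch (hypothetical shapes and data; the MU solver below
# assumes non-negative inputs and factors):
#   rng = np.random.RandomState(0)
#   X, Y = rng.rand(20, 10), rng.rand(10, 5)
#   U, V, Z = rng.rand(20, 3), rng.rand(10, 3), rng.rand(5, 3)
#   solver = MUSolver(max_iter=50, verbose=1)
#   U, V, Z, n_iter = solver.fit_iterative_update(X, Y, U, V, Z)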
class MUSolver(_IterativeCMFSolver):
"""Internal solver that solves by iteratively multiplying the matrices element wise.
The multiplying factors are always positive, meaning this solver can only return positive matrices.
References
----------
Wang, Y., Yanchunzhangvueduau, E., & Zhou, B. (n.d.).
Semi-supervised collective matrix factorization for topic detection and document clustering.
Lee, D., & Seung, H. (2001). Algorithms for non-negative matrix factorization.
Advances in Neural Information Processing Systems, (1), 556–562.
https://doi.org/10.1109/IJCNN.2008.4634046
"""
@classmethod
def _regularized_delta(cls, numerator, denominator, l1_reg, l2_reg, gamma, H):
# Add L1 and L2 regularization
if l1_reg > 0:
denominator += l1_reg
if l2_reg > 0:
denominator = denominator + l2_reg * H
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta **= gamma
return delta
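    # The caller multiplies its factor element-wise by the returned delta,
    # i.e. H <- H * (numerator / (denominator + reg)) ** gamma, which keeps
    # entries non-negative as long as H starts non-negative.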
@classmethod
def _multiplicative_update_u(cls, X, U, V, beta_loss, l1_reg, l2_reg, gamma):
numerator = safe_sparse_dot(X, V)
denominator = np.dot(np.dot(U, V.T), V)
return cls._regularized_delta(numerator, denominator, l1_reg, l2_reg, gamma, U)
@classmethod
def _multiplicative_update_z(cls, Y, V, Z, beta_loss, l1_reg, l2_reg, gamma):
numerator = safe_sparse_dot(Y.T, V)
denominator = np.dot(np.dot(Z, V.T), V)
return cls._regularized_delta(numerator, denominator, l1_reg, l2_reg, gamma, Z)
@classmethod
def _multiplicative_update_v(cls, X, Y, U, V, Z, beta_loss, l1_reg, l2_reg, gamma):
numerator = safe_sparse_dot(X.T, U) + safe_sparse_dot(Y, Z)
denominator = np.dot(V, (np.dot(U.T, U) + np.dot(Z.T, Z)))
return cls._regularized_delta(numerator, denominator, l1_reg, l2_reg, gamma, V)
def update_step(self, X, Y, U, V, Z, l1_reg, l2_reg, alpha):
# TODO: Enable specification of gamma
gamma = 1.
if self.update_V:
delta_V = self._multiplicative_update_v(X, Y, U, V, Z, self.beta_loss, l1_reg,
l2_reg, gamma)
V *= delta_V
if self.update_U:
delta_U = self._multiplicative_update_u(X, U, V, self.beta_loss, l1_reg, l2_reg, gamma)
U *= delta_U
if self.update_Z:
delta_Z = self._multiplicative_update_z(Y, V, Z, self.beta_loss, l1_reg, l2_reg, gamma)
Z *= delta_Z
if USE_CYTHON:
class NewtonSolver(_IterativeCMFSolver):
"""Internal solver that solves using the Newton-Raphson method.
Updates each row independently using a Newton-Raphson step. Can handle various link functions and settings.
The gradient and Hessian are computed based on the residual between the target and the estimate.
Computing the entire target/estimate can be memory intensive, so the option to compute the residual
based on a stochastic sample can be enabled by setting sg_sample_ratio < 1.0.
References
----------
Singh, A. P., & Gordon, G. J. (2008). Relational learning via collective matrix factorization.
Proceeding of the 14th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining
KDD 08, 650. https://doi.org/10.1145/1401890.1401969
"""
def fit_iterative_update(self, X, Y, U, V, Z):
# handle memory ordering and format issues for speed up
X_ = X.tocsr() if issparse(X) else np.ascontiguousarray(X) if X is not None else X
# instead of solving for Y = VZ^T, in order to make access to V continuous
# for approximating both X and Y, we will solve Y^T = ZV^T
Y_ = Y.T.tocsr() if issparse(Y) else np.ascontiguousarray(Y.T) if Y is not None else Y
# U, V, Z must be C-ordered for cython dot product to work
U = np.ascontiguousarray(U)
V = np.ascontiguousarray(V)
Z = np.ascontiguousarray(Z)
return super().fit_iterative_update(X_, Y_, U, V, Z)
def update_step(self, X, Y, U, V, Z, l1_reg, l2_reg, alpha):
if self.update_U:
_newton_update_left(U, V, X, alpha, l1_reg, l2_reg,
self.x_link, self.U_non_negative,
self.sg_sample_ratio,
self.hessian_pertubation)
if self.update_Z:
_newton_update_left(Z, V, Y, 1 - alpha, l1_reg, l2_reg,
self.y_link, self.Z_non_negative,
self.sg_sample_ratio,
self.hessian_pertubation)
if self.update_V:
_newton_update_V(V, U, Z, X, Y, alpha, l1_reg, l2_reg,
self.x_link, self.y_link,
self.V_non_negative,
self.sg_sample_ratio,
self.hessian_pertubation)
def compute_error(self, X, Y, U, V, Z):
# override because we are solving for Y^T = ZV^T
return self.alpha * compute_factorization_error(X, U, V.T, self.x_link, self.beta_loss) + \
(1 - self.alpha) * compute_factorization_error(Y, Z, V.T, self.y_link, self.beta_loss)
else:
class NewtonSolver(_IterativeCMFSolver):
"""Default implementation when Cython cannot be used."""
@classmethod
def _row_newton_update(cls, M, idx, dM, ddM_inv,
eta=1., non_negative=True):
M[idx, :] = M[idx, :] - eta * np.dot(dM, ddM_inv)
if non_negative:
M[idx, :][M[idx, :] < 0] = 0.
def _stochastic_sample(self, features, target, axis=0):
assert(features.shape[axis] == target.shape[axis])
if self.sg_sample_ratio < 1.:
sample_size = int(features.shape[axis] * self.sg_sample_ratio)
sample_mask = np.random.permutation(np.arange(features.shape[axis]))[:sample_size]
if axis == 0:
features_sampled = features[sample_mask, :]
target_sampled = target[sample_mask, :]
elif axis == 1:
features_sampled = features[:, sample_mask]
target_sampled = target[:, sample_mask]
else:
raise ValueError("Axis {} out of bounds".format(axis))
else:
features_sampled = features
target_sampled = target
return features_sampled, target_sampled
def _safe_invert(self, M):
"""Computed according to reccomendations of
http://web.stanford.edu/class/cme304/docs/newton-type-methods.pdf"""
if scipy.sparse.issparse(M):
eigs, V = scipy.sparse.linalg.eigsh(M)
else:
eigs, V = scipy.linalg.eigh(M)
# perturb hessian to be positive definite
eigs = np.abs(eigs)
eigs[eigs < self.hessian_pertubation] = self.hessian_pertubation
return np.dot(np.dot(V, np.diag(1 / eigs)), V.T)
def _force_flatten(self, v):
"""Forcibly flattens an indexed row or column of a matrix or sparse matrix"""
if np.ndim(v) > 1:
if issparse(v):
v_ = v.toarray()
elif isinstance(v, np.matrix):
v_ = np.asarray(v)
else:
                    raise ValueError(("Indexing array returns {} dimensions "
                                      "but is not sparse or a matrix").format(np.ndim(v)))
return v_.flatten()
else:
return v.flatten()
def _residual(self, left, right, target, link):
"""Computes residual:
inverse(left @ right, link) - target
The number of dimensions of the residual and estimate will be the same.
This is necessary because the indexing behavior of np.ndarrays and scipy sparse matrices are different.
Specifically, slicing scipy sparse matrices does not return a 1 dimensional vector.
e.g.
>>> import numpy as np; from scipy.sparse import csc_matrix
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> B = csc_matrix(A)
>>> A[:, 0].shape
(2,)
>>> B[:, 0].shape
(2, 1)
"""
estimate = inverse(np.dot(left, right), link)
ground_truth = target
if issparse(target) and np.ndim(estimate) == 1:
return estimate - ground_truth.toarray().flatten()
else:
return estimate - ground_truth
def _newton_update_U(self, U, V, X, alpha, l1_reg, l2_reg,
link="linear", non_negative=True):
precompute_dU = self.sg_sample_ratio == 1.
if precompute_dU:
# dU is constant across samples
res_X = inverse(np.dot(U, V.T), link) - X
dU_full = alpha * np.dot(res_X, V) + l1_reg * np.sign(U) + l2_reg * U
if issparse(dU_full):
dU_full = dU_full.toarray()
elif isinstance(dU_full, np.matrix):
dU_full = np.asarray(dU_full)
# iterate over rows
precompute_ddU_inv = (link == "linear" and self.sg_sample_ratio == 1.)
if precompute_ddU_inv:
# ddU_inv is constant across samples
ddU_inv = self._safe_invert(alpha * np.dot(V.T, V) + l2_reg * np.eye(U.shape[1]))
for i in range(U.shape[0]):
u_i = U[i, :]
V_T_sampled, X_sampled = self._stochastic_sample(V.T, X, axis=1)
if precompute_dU:
dU = dU_full[i, :]
assert(np.ndim(dU) == 1)
else:
res_X = self._residual(u_i, V_T_sampled, X_sampled[i, :], link)
dU = alpha * np.dot(res_X, V_T_sampled.T) + l1_reg * np.sign(u_i) + l2_reg * u_i
if not precompute_ddU_inv:
if link == "linear":
ddU_inv = self._safe_invert(alpha * np.dot(V_T_sampled, V_T_sampled.T) +
l2_reg * np.eye(U.shape[1]))
elif link == "logit":
D = np.diag(d_sigmoid(np.dot(u_i, V_T_sampled)))
ddU_inv = self._safe_invert(alpha * np.dot(np.dot(V_T_sampled, D), V_T_sampled.T))
self._row_newton_update(U, i, dU, ddU_inv, non_negative=non_negative)
def _newton_update_V(self, V, U, Z, X, Y, alpha, l1_reg, l2_reg,
x_link="linear", y_link="linear", non_negative=True):
precompute_dV = (self.sg_sample_ratio == 1.)
if precompute_dV:
res_X_T = inverse(np.dot(U, V.T), x_link) - X
res_Y_T = inverse(np.dot(Z, V.T), y_link) - Y.T
dV_full = alpha * np.dot(res_X_T.T, U) + \
(1 - alpha) * np.dot(res_Y_T.T, Z) + \
l1_reg * np.sign(V) + l2_reg * V
if isinstance(dV_full, np.matrix):
dV_full = np.asarray(dV_full)
precompute_ddV_inv = (x_link == "linear" and y_link == "linear" and self.sg_sample_ratio == 1.)
if precompute_ddV_inv:
# ddV_inv is constant w.r.t. the samples of V, so we precompute it to save computation
ddV_inv = self._safe_invert(alpha * np.dot(U.T, U) +
(1 - alpha) * np.dot(Z.T, Z) +
l2_reg * np.eye(V.shape[1]))
for i in range(V.shape[0]):
v_i = V[i, :]
U_sampled, X_sampled = self._stochastic_sample(U, X)
Z_T_sampled, Y_sampled = self._stochastic_sample(Z.T, Y, axis=1)
if not precompute_dV:
res_X = self._residual(U_sampled, v_i.T, X_sampled[:, i], x_link)
res_Y = self._residual(v_i, Z_T_sampled, Y_sampled[i, :], y_link)
dV = alpha * np.dot(res_X.T, U_sampled) + \
(1 - alpha) * np.dot(res_Y, Z_T_sampled.T) + \
l1_reg * np.sign(v_i) + l2_reg * v_i
else:
dV = dV_full[i, :]
if not precompute_ddV_inv:
if x_link == "logit":
D_u = np.diag(d_sigmoid(np.dot(U_sampled, v_i.T)))
ddV_wrt_U = np.dot(np.dot(U_sampled.T, D_u), U_sampled)
elif x_link == "linear":
ddV_wrt_U = np.dot(U_sampled.T, U_sampled)
if y_link == "logit":
# in the original paper, the equation was v_i.T @ Z,
# which clearly does not work due to the dimensionality
D_z = np.diag(d_sigmoid(np.dot(v_i, Z_T_sampled)))
ddV_wrt_Z = np.dot(np.dot(Z_T_sampled, D_z), Z_T_sampled.T)
elif y_link == "linear":
ddV_wrt_Z = np.dot(Z_T_sampled, Z_T_sampled.T)
ddV_inv = self._safe_invert(alpha * ddV_wrt_U +
(1 - alpha) * ddV_wrt_Z +
l2_reg * np.eye(V.shape[1]))
self._row_newton_update(V, i, dV, ddV_inv, non_negative=non_negative)
def _newton_update_Z(self, Z, V, Y, alpha, l1_reg, l2_reg,
link="linear", non_negative=True):
for i in range(Z.shape[0]):
z_i = Z[i, :]
V_sampled, Y_sampled = self._stochastic_sample(V, Y)
res_Y = self._residual(V_sampled, z_i.T, Y_sampled[:, i], link)
dZ = (1 - alpha) * np.dot(res_Y.T, V_sampled) + \
l1_reg * np.sign(z_i) + l2_reg * z_i
if link == "linear":
ddZ_inv = self._safe_invert((1 - alpha) * np.dot(V_sampled.T, V_sampled) +
l2_reg * np.eye(Z.shape[1]))
elif link == "logit":
D = np.diag(d_sigmoid(np.dot(V_sampled, z_i.T)))
ddZ_inv = self._safe_invert((1 - alpha) * np.dot(np.dot(V_sampled.T, D), V_sampled) +
l2_reg * np.eye(Z.shape[1]))
self._row_newton_update(Z, i, dZ, ddZ_inv, non_negative=non_negative)
def update_step(self, X, Y, U, V, Z, l1_reg, l2_reg, alpha):
if self.update_U:
self._newton_update_U(U, V, X, alpha, l1_reg, l2_reg,
non_negative=self.U_non_negative, link=self.x_link)
if self.update_Z:
self._newton_update_Z(Z, V, Y, alpha, l1_reg, l2_reg,
non_negative=self.Z_non_negative, link=self.y_link)
if self.update_V:
self._newton_update_V(V, U, Z, X, Y, alpha, l1_reg, l2_reg,
non_negative=self.V_non_negative, x_link=self.x_link,
y_link=self.y_link)
|
{"hexsha": "69671c4d6a40be260c03099c6cc0d08c58d76dec", "size": 22938, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycmf/cmf_solvers.py", "max_stars_repo_name": "smn-ailab/PyCMF", "max_stars_repo_head_hexsha": "f2c3b7117b2b3b59d73d472f11c9bb1a6d537fe0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2018-02-09T10:32:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-13T15:20:06.000Z", "max_issues_repo_path": "pycmf/cmf_solvers.py", "max_issues_repo_name": "smn-ailab/PyCMF", "max_issues_repo_head_hexsha": "f2c3b7117b2b3b59d73d472f11c9bb1a6d537fe0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-02-14T05:08:27.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-15T09:11:07.000Z", "max_forks_repo_path": "pycmf/cmf_solvers.py", "max_forks_repo_name": "smn-ailab/PyCMF", "max_forks_repo_head_hexsha": "f2c3b7117b2b3b59d73d472f11c9bb1a6d537fe0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-09-09T23:08:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T21:36:07.000Z", "avg_line_length": 43.8585086042, "max_line_length": 115, "alphanum_fraction": 0.5605545383, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 5519}
|
"""Numpy based linear algebra backend."""
import numpy as np
def det(*args, **kwargs):
return np.linalg.det(*args, **kwargs)
def norm(*args, **kwargs):
return np.linalg.norm(*args, **kwargs)
def inv(*args, **kwargs):
return np.linalg.inv(*args, **kwargs)
def matrix_rank(*args, **kwargs):
return np.linalg.matrix_rank(*args, **kwargs)
def eigvalsh(*args, **kwargs):
return np.linalg.eigvalsh(*args, **kwargs)
def svd(*args, **kwargs):
return np.linalg.svd(*args, **kwargs)
def eigh(*args, **kwargs):
return np.linalg.eigh(*args, **kwargs)
def eig(*args, **kwargs):
return np.linalg.eig(*args, **kwargs)
def exp(*args, **kwargs):
return np.exp(*args, **kwargs)
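# Editorial note: this module exposes a thin, backend-agnostic wrapper over
# np.linalg (plus np.exp), so calling code can write e.g. inv(A) or svd(A)
# without depending on numpy directly; sibling modules can provide the same
# names for other backends.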
|
{"hexsha": "7e99c3e06e97a25c70fa2ecb9b7c447f47a5024e", "size": 715, "ext": "py", "lang": "Python", "max_stars_repo_path": "geomstats/backend/numpy_linalg.py", "max_stars_repo_name": "effigies/geomstats", "max_stars_repo_head_hexsha": "0d6979a15cefcf98f7f92bade9d0e4abee3dde14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-05-23T20:18:23.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-23T20:18:23.000Z", "max_issues_repo_path": "geomstats/backend/numpy_linalg.py", "max_issues_repo_name": "leslie-chu/geomstats", "max_issues_repo_head_hexsha": "fbed39b47b16eab4a48179106e8d0c1a5891243d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "geomstats/backend/numpy_linalg.py", "max_forks_repo_name": "leslie-chu/geomstats", "max_forks_repo_head_hexsha": "fbed39b47b16eab4a48179106e8d0c1a5891243d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.875, "max_line_length": 49, "alphanum_fraction": 0.6377622378, "include": true, "reason": "import numpy", "num_tokens": 195}
|
# -*- coding: utf-8 -*-
"""\
Copyright (c) 2015-2018, MGH Computational Pathology
"""
from __future__ import print_function
from numpy.random.mtrand import RandomState
from calicoml.core.utils import with_numpy_arrays, format_p_value
import numpy as np
import pandas as pd
import sklearn
from scipy.stats import pearsonr
from sklearn.metrics import roc_curve, roc_auc_score
@with_numpy_arrays
def ppv(y_true, y_pred):
"""\
Calculates the positive predictive value
:param y_true: truth labels (0 or 1)
:param y_pred: predicted labels (0 or 1)
:return: the PPV
"""
assert len(y_true) == len(y_pred)
assert set(y_true).issubset({0, 1})
assert set(y_pred).issubset({0, 1})
return float(np.sum(y_true[y_pred == 1] == 1)) / np.sum(y_pred == 1)
def npv(y_true, y_pred):
"""\
Calculates the negative predictive value
:param y_true: truth labels (0 or 1)
:param y_pred: predicted labels (0 or 1)
:return: the NPV
"""
return ppv(np.ones(len(y_true)) - y_true, np.ones(len(y_pred)) - y_pred)
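# Illustrative check: with y_true=[1, 0, 1, 0] and y_pred=[1, 1, 0, 0],
# ppv == 0.5 (one true positive out of two positive calls) and npv == 0.5.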
def threshold_at_median(metric, y_true, y_pred):
"""\
Binarizes scores at the median, and then applies the given metric
:param metric: the metric to apply
:param y_true: truth labels (0 or 1)
:param y_pred: predicted scores
:return: metric applied to the binarized scores
"""
cutoff = np.median(y_pred)
return metric(y_true, [1 if score >= cutoff else 0 for score in y_pred])
def f_pearson(X, y):
""" Computes pearson correlation for columns of X vs. y\
TODO: for y with categorical values replace column y with expected means from each column of X"""
rs_pearson = []
ps_pearson = []
for feat_vals in X.T:
r_column, p_column = pearsonr(feat_vals, y)
rs_pearson.append(r_column)
ps_pearson.append(p_column)
return rs_pearson, ps_pearson
class ConditionalMeansSelector(object):
""" wrapper to be enable feature selection for multiclass"""
def __init__(self, selector, column_pairwise_selector=False):
"""\
:param selector: matrix or vector base correlation-like function for feature selection
:param column_pairwise_selector: True if selector is vector based, False if matrix based
"""
self.selector = selector
self.column_pairwise_selector = column_pairwise_selector
@staticmethod
def _conditional_map(y, feat_vals):
""" method to build conditional means map for given feature column"""
y_to_cond_mean = {}
y_to_cond_count = {}
for index, y_value in enumerate(y):
if y_value not in y_to_cond_mean:
y_to_cond_mean[y_value] = 0.0
y_to_cond_count[y_value] = 0
y_to_cond_mean[y_value] += feat_vals[index]
y_to_cond_count[y_value] += 1
for y_value in y_to_cond_mean:
y_to_cond_mean[y_value] /= y_to_cond_count[y_value]
return y_to_cond_mean
def selector_function(self, X, y):
""" apply selection function after replacing target column with conditional means"""
rs_result = []
ps_result = []
for feat_vals in X.T:
map_y_to_conditional_mean = ConditionalMeansSelector._conditional_map(y, feat_vals)
y_mapped = np.asarray([map_y_to_conditional_mean[y_value] for y_value in y])
if self.column_pairwise_selector:
rs_column, ps_column = self.selector(feat_vals, y_mapped)
rs_result.append(rs_column)
ps_result.append(ps_column)
else:
feat_arr2d = np.asarray(feat_vals).reshape((feat_vals.shape[0], 1))
rs_column, ps_column = self.selector(feat_arr2d, y_mapped)
rs_result.append(rs_column[0])
ps_result.append(ps_column[0])
return rs_result, ps_result
def compute_averaged_metrics(y_truth, y_score, compute_metric):
""" Compute metrics by averaging over target class values for multiclass
y_score should contain score vector for binary case and scores matrix for multiclass case"""
y_values = np.unique(y_truth)
sum_results = 0.0
count_results = 0
for y_value in y_values:
y_one_vs_all = [1 if y == y_value else 0 for y in y_truth]
has_0 = 0 in y_one_vs_all
has_1 = 1 in y_one_vs_all
if not (has_0 and has_1):
print("Warning: while computing metric ignored unknown class")
continue
scores_y_one_vs_all = [score[y_value] for score in y_score]
metrics_value = compute_metric(y_one_vs_all, scores_y_one_vs_all)
sum_results += metrics_value
count_results += 1
return 0.0 if count_results == 0 else sum_results / count_results
def accuracy_from_confusion_matrix(y_truth, y_score, confusion_matrix):
""" computes accuracy count from confusion matrix"""
sample_count = len(y_truth)
if sample_count < 1:
return 0.0
if len(y_score) != sample_count:
raise ValueError("Score size is different from sample size")
accurate_count = 0
for index in range(confusion_matrix.shape[0]):
accurate_count += confusion_matrix[index, index]
return float(accurate_count) / float(sample_count)
class ConfidenceInterval(object):
"""Represents a confidence interval for an estimate, including (optionally) a p value"""
def __init__(self, estimate, low, high, pval=None):
"""
:param estimate: point estimate
:param low: lower bound
:param high: upper bound
:param pval: p value (default: None)
"""
assert low <= estimate <= high
if pval is not None and not np.isnan(pval):
assert 0.0 <= pval <= 1.0
self.estimate, self.low, self.high, self.pval = estimate, low, high, pval
def __str__(self):
if self.pval is not None and not np.isnan(self.pval):
pval_str = ' p={}'.format(format_p_value(self.pval, True))
else:
pval_str = ''
return '{:.3f} ({:.3f} - {:.3f}){}'.format(self.estimate, self.low, self.high, pval_str)
class ROC(object):
"""\
Container for a receiver operating characteristic (ROC) curve.
"""
def __init__(self, fpr, tpr, thresholds, y_true=None, y_pred=None, ci_width=95):
"""Creates ROC from false positive rate, true positive rate, and scores"""
self.fpr = fpr
self.tpr = tpr
self.thresholds = thresholds
        self.y_true = np.asarray(y_true) if y_true is not None else None
        self.y_pred = np.asarray(y_pred) if y_pred is not None else None
self.ci_width = float(ci_width)
self._auc = sklearn.metrics.auc(self.fpr, self.tpr, reorder=True)
self._ci = None
@property
def auc_ci(self):
"""The AUC confidence interval computed with 10k rounds of bootstrapping"""
if self._ci is not None:
return self._ci
elif self.y_true is None or self.y_pred is None:
raise ValueError("Cannot compute confidence interval without y_true and y_pred")
        rnd = RandomState(seed=0xC0FFEE)
        aucs = []
        for _ in range(10000):
            idx = rnd.randint(0, len(self.y_true), len(self.y_true))
            if len(set(self.y_true[idx])) < 2:
                continue  # skip bootstrap samples without both labels
            aucs.append(roc_auc_score(self.y_true[idx], self.y_pred[idx]))
        delta = (100.0 - self.ci_width) / 2
        # cache the interval so repeated access does not redo the bootstrap
        self._ci = ConfidenceInterval(self.auc, np.percentile(aucs, delta),
                                      np.percentile(aucs, 100.0 - delta))
        return self._ci
@staticmethod
def from_scores(y_true, y_pred):
"""Creates a ROC from true/predicted scores"""
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
return ROC(fpr, tpr, thresholds, y_true, y_pred)
@property
def auc(self):
"""Computes and returns the area under the ROC curve"""
return self._auc
@property
def dataframe(self):
"""Builds a Pandas dataframe from the ROC"""
return pd.DataFrame({'fpr': self.fpr, 'tpr': self.tpr, 'thresholds': self.thresholds})
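# Illustrative usage:
#   roc = ROC.from_scores([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
#   roc.auc          # 0.75 for these toy scores
#   str(roc.auc_ci)  # bootstrapped 95% confidence interval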
|
{"hexsha": "38659f7da7f72ca443e2135d119f5426d5b0e444", "size": 8089, "ext": "py", "lang": "Python", "max_stars_repo_path": "calicoml/core/metrics.py", "max_stars_repo_name": "MGHComputationalPathology/CalicoML", "max_stars_repo_head_hexsha": "e296f1cc0a78c4bf05e719aae96d8ea2e9d6c03c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "calicoml/core/metrics.py", "max_issues_repo_name": "MGHComputationalPathology/CalicoML", "max_issues_repo_head_hexsha": "e296f1cc0a78c4bf05e719aae96d8ea2e9d6c03c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "calicoml/core/metrics.py", "max_forks_repo_name": "MGHComputationalPathology/CalicoML", "max_forks_repo_head_hexsha": "e296f1cc0a78c4bf05e719aae96d8ea2e9d6c03c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4212765957, "max_line_length": 107, "alphanum_fraction": 0.6503894177, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1994}
|
//----------------------------------*-C++-*----------------------------------//
/**
* @file Material.hh
* @author Jeremy Roberts
* @brief Material class definition.
*/
//---------------------------------------------------------------------------//
#ifndef detran_material_MATERIAL_HH_
#define detran_material_MATERIAL_HH_
#include "material/material_export.hh"
#include "utilities/Definitions.hh"
#include "utilities/SP.hh"
#include <string>
#ifdef DETRAN_ENABLE_BOOST
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/vector.hpp>
#endif
namespace detran_material
{
//---------------------------------------------------------------------------//
/**
* @class Material
* @brief Simple cross section container.
*
* All data is stored with the material index changing fastest. This
* appears to be the best storage scheme with respect to memory access.
*/
//---------------------------------------------------------------------------//
class MATERIAL_EXPORT Material
{
public:
//-------------------------------------------------------------------------//
// TYPEDEFS
//-------------------------------------------------------------------------//
typedef detran_utilities::SP<Material> SP_material;
typedef detran_utilities::vec_dbl vec_dbl;
typedef detran_utilities::vec2_dbl vec2_dbl;
typedef detran_utilities::vec3_dbl vec3_dbl;
typedef detran_utilities::vec_int vec_int;
typedef detran_utilities::vec2_int vec2_int;
typedef detran_utilities::vec_size_t vec_size_t;
typedef detran_utilities::vec2_size_t vec2_size_t;
typedef detran_utilities::size_t size_t;
//-------------------------------------------------------------------------//
// PUBLIC INTERFACE
//-------------------------------------------------------------------------//
/**
* @brief Constructor.
* @param number_materials Number of materials.
* @param number_groups Number of energy groups.
   * @param name              Name of the material database.
*/
Material(const size_t number_materials,
const size_t number_groups,
std::string name = "no name given");
/// Virtual destructor
virtual ~Material(){}
/// SP constructor
static SP_material Create(const size_t number_materials,
const size_t number_groups,
std::string name = "no name given");
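  // Illustrative usage (hypothetical values):
  //   Material::SP_material mat = Material::Create(1, 2, "fuel");
  //   mat->set_sigma_t(0, 0, 1.0);
  //   mat->set_sigma_t(0, 1, 0.5);
  //   mat->finalize();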
//--------------------------------------------------------------------------//
// Setters
//--------------------------------------------------------------------------//
/**
* @brief Explicitly turn on downscatter-only
*/
void set_downscatter(bool v, bool tran = false)
{
if (tran) d_downscatter[1] = v;
d_downscatter[0] = v;
if (d_finalized) finalize();
}
void set_sigma_t(size_t m, size_t g, double v);
void set_sigma_a(size_t m, size_t g, double v);
void set_nu_sigma_f(size_t m, size_t g, double v);
void set_sigma_f(size_t m, size_t g, double v);
void set_nu(size_t m, size_t g, double v);
void set_chi(size_t m, size_t g, double v);
void set_sigma_s(size_t m, size_t g, size_t gp, double v);
void set_diff_coef(size_t m, size_t g, double v);
// Vectorized setters
void set_sigma_t(size_t m, vec_dbl &v);
void set_sigma_a(size_t m, vec_dbl &v);
void set_nu_sigma_f(size_t m, vec_dbl &v);
void set_sigma_f(size_t m, vec_dbl &v);
void set_nu(size_t m, vec_dbl &v);
void set_chi(size_t m, vec_dbl &v);
void set_sigma_s(size_t m, size_t g, vec_dbl &v);
void set_diff_coef(size_t m, vec_dbl &v);
//------------------------------------------------------------------------//
// Getters
//------------------------------------------------------------------------//
virtual double sigma_t(size_t m, size_t g) const;
virtual double sigma_a(size_t m, size_t g) const;
virtual double nu_sigma_f(size_t m, size_t g) const;
virtual double sigma_f(size_t m, size_t g) const;
virtual double nu(size_t m, size_t g) const;
virtual double chi(size_t m, size_t g) const;
virtual double sigma_s(size_t m, size_t g, size_t gp) const;
virtual double diff_coef(size_t m, size_t g) const;
// Vectorized getters
virtual vec_dbl sigma_t(size_t m) const;
virtual vec_dbl sigma_a(size_t m) const;
virtual vec_dbl nu_sigma_f(size_t m) const;
virtual vec_dbl sigma_f(size_t m) const;
virtual vec_dbl nu(size_t m) const;
virtual vec_dbl chi(size_t m) const;
virtual vec2_dbl sigma_s(size_t m) const;
virtual vec_dbl diff_coef(size_t m) const;
//------------------------------------------------------------------------//
// OTHER ACCESSORS
//------------------------------------------------------------------------//
size_t number_groups() const
{
return d_number_groups;
}
size_t number_materials() const
{
return d_number_materials;
}
/**
* @brief Lower scatter group bound.
*
* This is the *lowest* index (highest energy) \f$ g' \f$
* that leads to downscatter for a given outgoing group \f$ g \f$.
*
* @param g Row of the scattering matrix
* @param tran Flag for accessing transpose of S
*/
size_t lower(size_t g, bool tran = false) const;
/**
* @brief Upper scatter group bound.
*
* This is the *highest* index (lowest energy) \f$ g' \f$
   * that upscatters into the outgoing group \f$ g \f$.
*
* @param g Row of the scattering matrix
* @param tran Flag for accessing transpose of S
*/
size_t upper(size_t g, bool tran = false) const;
/// Do we do only downscatter?
bool downscatter(bool tran = false) const;
/**
* @brief Index below which upscatter doesn't occur for any material.
*
   * For adjoint problems, the cutoff is defined analogously on the
   * transposed scattering matrix.
*/
size_t upscatter_cutoff(bool tran = false) const;
/**
* @brief Compute the absorption cross section from total and scattering.
* @note this overwrites any data for \f$ \Sigma_a \f$ already stored.
*/
void compute_sigma_a();
/**
* @brief Compute the diffusion coefficient from \f$ \Sigma_t \f$.
*
* Assuming isotropic scattering in the LAB, the diffusion
* coefficient is simply \f$ D = 1/3\Sigma_t \f$.
*
* @todo Update diffusion definition if anisotropic scattering
* is added.
* @note This overwrites any data for \f$ D \f$ already stored.
*/
void compute_diff_coef();
/// Computes scattering bounds and absorption cross section.
void finalize();
/// Pretty print the material database.
virtual void display();
protected:
//-------------------------------------------------------------------------//
// DATA
//-------------------------------------------------------------------------//
/// Material name
std::string d_name;
/// Number of groups
size_t d_number_groups;
/// Number of materials
size_t d_number_materials;
/// Downscatter switch (when true, upscatter ignored)
bool d_downscatter[2];
/// Total cross section [material, group]
vec2_dbl d_sigma_t;
/// Absorption cross section [material, group]
vec2_dbl d_sigma_a;
/// nu * Fission [material, group]
vec2_dbl d_nu_sigma_f;
/// Fission [material, group]
vec2_dbl d_sigma_f;
/// nu [material, group]
vec2_dbl d_nu;
/// Fission spectrum [material, group]
vec2_dbl d_chi;
/// Scatter [material, group<-, group']
vec3_dbl d_sigma_s;
/// Diffusion coefficient [material, group]
vec2_dbl d_diff_coef;
/// Scatter bounds applied to all materials [group, 2]
vec2_size_t d_scatter_bounds;
/// Groups equal to or above cutoff are subject to upscatter iterations
size_t d_upscatter_cutoff[2];
/// Are we ready to be used?
bool d_finalized;
//-------------------------------------------------------------------------//
// IMPLEMENTATION
//-------------------------------------------------------------------------//
void material_display();
#ifdef DETRAN_ENABLE_BOOST
/// Default constructor needed for serialization
Material(){}
friend class boost::serialization::access;
template<class Archive>
void serialize(Archive & ar, const unsigned int version)
{
ar & d_number_groups;
ar & d_number_materials;
ar & d_downscatter;
ar & d_sigma_t;
ar & d_sigma_a;
ar & d_nu_sigma_f;
ar & d_sigma_f;
ar & d_nu;
ar & d_chi;
ar & d_sigma_s;
ar & d_diff_coef;
ar & d_scatter_bounds;
ar & d_upscatter_cutoff;
ar & d_finalized;
}
#endif
};
MATERIAL_TEMPLATE_EXPORT(detran_utilities::SP<Material>)
} // end namespace detran_material
//---------------------------------------------------------------------------//
// INLINE FUNCTIONS
//---------------------------------------------------------------------------//
#include "Material.i.hh"
#endif /* detran_material_MATERIAL_HH_ */
|
{"hexsha": "e91828d43df5049e8086a77f4e5f5ae8fe428131", "size": 9008, "ext": "hh", "lang": "C++", "max_stars_repo_path": "src/material/Material.hh", "max_stars_repo_name": "baklanovp/libdetran", "max_stars_repo_head_hexsha": "820efab9d03ae425ccefb9520bdb6c086fdbf939", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2015-03-07T16:20:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-10T13:40:16.000Z", "max_issues_repo_path": "src/material/Material.hh", "max_issues_repo_name": "baklanovp/libdetran", "max_issues_repo_head_hexsha": "820efab9d03ae425ccefb9520bdb6c086fdbf939", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2018-02-27T21:24:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-16T00:56:44.000Z", "max_forks_repo_path": "src/material/Material.hh", "max_forks_repo_name": "baklanovp/libdetran", "max_forks_repo_head_hexsha": "820efab9d03ae425ccefb9520bdb6c086fdbf939", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9.0, "max_forks_repo_forks_event_min_datetime": "2015-03-07T16:20:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T00:14:23.000Z", "avg_line_length": 31.169550173, "max_line_length": 80, "alphanum_fraction": 0.5684946714, "num_tokens": 2103}
|
function [d,fp,dt,tc,t]=readhtk(file)
%READHTK read an HTK parameter file [D,FP,DT,TC,T]=(FILE)
%
% d is data, fp is frame period in seconds
% dt is data type, tc is full type code, t is a text version of the full typecode
% tc is the sum of the following values:
% 0 WAVEFORM
% 1 LPC
% 2 LPREFC
% 3 LPCEPSTRA
% 4 LPDELCEP
% 5 IREFC
% 6 MFCC
% 7 FBANK
% 8 MELSPEC
% 9 USER
% 10 DISCRETE
% 11 PLP
% 64 -E Includes energy terms
% 128 _N Suppress absolute energy
% 256 _D Include delta coefs
% 512 _A Include acceleration coefs
% 1024 _C Compressed
% 2048 _Z Zero mean static coefs
% 4096 _K CRC checksum (not implemented yet)
% 8192 _0 Include 0'th cepstral coef
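%  Example: tc = 6+64+256+512 = 838 denotes MFCC_E_D_A features.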
% Copyright (C) Mike Brookes 1997
%
% This version modified to read HTK's compressed feature files
% 2005-05-18 dpwe@ee.columbia.edu
%
% VOICEBOX home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% ftp://prep.ai.mit.edu/pub/gnu/COPYING-2.0 or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
fid=fopen(file,'r','b');
if fid < 0
    error('Cannot read from file %s',file);
end
nf=fread(fid,1,'long');
fp=fread(fid,1,'long')*1.E-7;
by=fread(fid,1,'short');
tc=fread(fid,1,'short');
hb=floor(tc*pow2(-14:-6));
hd=hb(9:-1:2)-2*hb(8:-1:1);
dt=tc-64*hb(9);
% hd(7)=1 CRC check
% hd(5)=1 compressed data
if ( dt == 0 ),
d=fread(fid,Inf,'short');
else
if (hd(5) == 1)
% compressed data - first read scales
nf = nf - 4;
ncol = by / 2;
scales = fread(fid, ncol, 'float');
biases = fread(fid, ncol, 'float');
d = fread(fid,[ncol, nf], 'short');
d = repmat(1./scales,1,nf).*(d+repmat(biases,1,nf));
d = d.';
else
d=fread(fid,[by/4,nf],'float').';
end
end;
fclose(fid);
if nargout > 2
hd(7)=0;
hd(5)=0;
ns=sum(hd);
kinds=['WAVEFORM ';'LPC ';'LPREFC ';'LPCEPSTRA ';'LPDELCEP ';'IREFC ';'MFCC ';'FBANK ';'MELSPEC ';'USER ';'DISCRETE ';'PLP ';'??? '];
kind=kinds(min(dt+1,size(kinds,1)),:);
cc='ENDACZK0';
t=[kind(1:min(find(kind==' '))-1) reshape(['_'*ones(1,ns);cc(hd>0)],1,2*ns)];
end
|
{"author": "stephencwelch", "repo": "Perceptual-Coding-In-Python", "sha": "2993f57570663768c02745019185091a23f021fe", "save_path": "github-repos/MATLAB/stephencwelch-Perceptual-Coding-In-Python", "path": "github-repos/MATLAB/stephencwelch-Perceptual-Coding-In-Python/Perceptual-Coding-In-Python-2993f57570663768c02745019185091a23f021fe/matlabCode/bark_domain_exploration/rastamat/readhtk.m"}
|
from util import *
from util.functions import *
from util.plotting import *
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras import initializers
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from scipy.optimize import linear_sum_assignment as linear_assignment
import sys
class DEC:
"""
Deep Embedded Clustering implementation.
"""
def __init__(self,
x,
dnn,
cluster_num):
# Construct DNN
self.cluster_num = cluster_num
self.model = dnn
self.model.compile(loss="mse",
optimizer='adam')
# Construct clustering
print(bcolors.WARNING, "CHECK: Clustering with", cluster_num, "clusters. ", bcolors.ENDC)
self.clustering = KMeans(n_clusters=cluster_num)
def train(self,
x,
labels,
preset_mu = None,
use_preset_mu=False):
self.model.compile(loss="mse",
optimizer='adam')
# Initial z's
self.z = self.predict_embedding(x)
# Cluster and obtain mu's
if use_preset_mu:
self.mu = preset_mu
else:
self.clustering.fit(self.z)
self.mu = self.clustering.cluster_centers_
for epoch in range(dec_train_epochs):
#* Update step
# Obtain p and q distributions for current epoch
self.q_dists = q_dist(self.z, self.mu)
self.p_dists = p_dist(self.q_dists)
# Update z's and mu's
self.update_z(x)
if not use_preset_mu:
self.update_mu()
# print("Cluster centroids at epoch ", epoch, ": ", self.mu)
#* Visualization step
# Print loss
if not use_preset_mu:
y = self.clustering.predict(self.z)
acc, w = self.cluster_acc(labels, y)
print("Epoch: ", epoch,
"Loss: ", DEC.loss(self.p_dists, self.q_dists),
"Cluster Accuracy: ", str(acc) + "%")
else:
print("Epoch: ", epoch,
"Loss: ", DEC.loss(self.p_dists, self.q_dists))
# Plot intermediate results
tsne_z = TSNE(n_components=representation_dim,
learning_rate='auto',
init='random').fit_transform(self.z)
plot_embeddings(plot_result=False,
embeddings=tsne_z,
cluster_centroids=self.mu,
labels=labels,
name=str(epoch))
#* Feedforward step
# Obtain embeddings z for current epoch
self.z = self.predict_embedding(x)
def predict_embedding(self,
x):
return self.model.predict(x)
def predict_cluster(self,
x):
return self.clustering.predict(x)
def update_z(self,
x):
"""
"""
for i, z in enumerate(self.z):
gradient = (alpha+1)/alpha * sum([(1 + np.linalg.norm(z-mu)**2/alpha)**-1 * (self.p_dists[i][j]-self.q_dists[i][j]) * (z-mu) for j, mu in enumerate(self.mu)])
# print("before: ", updated_z[i])
self.z[i] += gradient * z_learning_rate
# print("after: ", updated_z[i])
# print("gradient: ", gradient)
self.model.fit(x = x,
y = self.z,
epochs = dec_z_epochs,
verbose=0)
def update_mu(self):
"""
"""
for j, mu in enumerate(self.mu):
gradient = -(alpha+1)/alpha * sum([(1 + np.linalg.norm(z-mu)**2/alpha)**-1 * (self.p_dists[i][j]-self.q_dists[i][j]) * (z-mu) for i, z in enumerate(self.z)])
self.mu[j] += gradient * mu_learning_rate
@staticmethod
def loss(p, q):
return kl_div(p, q)
def cluster_acc(self, y_true, y_pred):
y_true = np.array(y_true)
y_pred = np.array(y_pred)
assert y_pred.size == y_true.size
D = max(y_pred.max(), y_true.max())+1
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
ind = linear_assignment(w.max() - w)
count = 0
for index, val in enumerate(ind[0]):
count += w[val, ind[1][index]]
return count*100/y_pred.size, w
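    # cluster_acc uses the Hungarian algorithm (linear_sum_assignment) to find
    # the one-to-one mapping between predicted cluster ids and true labels that
    # maximizes agreement; accuracy is that agreement count over all samples.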
def cluster_accuracy(self,
x,
labels):
"""
Checks cluster accuracy. Cluster centroid class is determined by majority.
Requires self.mu to be initialized.
"""
correct = 0
mu_votes = np.zeros((self.cluster_num, self.cluster_num))
cluster_to_label = {}
        # Determine by majority vote which class each centroid represents
if len(self.mu) != 0:
z = self.predict_embedding(x)
predictions = self.clustering.predict(z)
for index, prediction in enumerate(predictions):
mu_votes[prediction][labels[index]] += 1
for index, row in enumerate(mu_votes):
cluster_to_label[index] = list(row).index(max(row))
for index, prediction in enumerate(predictions):
if cluster_to_label[prediction] == labels[index]:
correct += 1
return str(correct/len(labels) * 100)
class SAE:
def __init__(self,
dataset):
"""
        Constructor for SAE. Constructs SAE and trains individual denoising
autoencoders, then finetunes the combined model.
INPUTS: Dataset (Default is MNIST)
RETURNS: Encoder
"""
# Construct loss monitor
# self.callback = keras.callbacks.EarlyStopping(monitor="loss",
# mode="min",
# patience=5,
# min_delta=0.01)
print("Training individual layers...")
# Train first layer
first_layer_encoder, first_layer_decoder = self.train_autoencoder_layer(dataset,
io_dim=sae_first_layer_dim,
hidden_dim=sae_hidden_layer_1_dim,
layer="first")
# Get encoder outputs from first layer
hidden_layer_dataset_1 = first_layer_encoder.predict(dataset)
# Train first hidden layer
hidden_layer_encoder_1, hidden_layer_decoder_1 = self.train_autoencoder_layer(hidden_layer_dataset_1,
io_dim=sae_hidden_layer_1_dim,
hidden_dim=sae_hidden_layer_2_dim,
layer="hidden")
# Get encoder outputs from hidden layer
hidden_layer_dataset_2 = hidden_layer_encoder_1.predict(hidden_layer_dataset_1)
# Train second hidden layer
hidden_layer_encoder_2, hidden_layer_decoder_2 = self.train_autoencoder_layer(hidden_layer_dataset_2,
io_dim=sae_hidden_layer_2_dim,
hidden_dim=sae_hidden_layer_3_dim,
layer="hidden")
# Get encoder outputs from hidden layer
final_layer_dataset = hidden_layer_encoder_2.predict(hidden_layer_dataset_2)
# Train first layer
last_layer_encoder, last_layer_decoder = self.train_autoencoder_layer(final_layer_dataset,
io_dim=sae_hidden_layer_3_dim,
hidden_dim=sae_last_layer_dim,
layer="last")
print("Trained individual layers.")
print("Training final model...")
#? Finetune model
# Construct final model
last_layer_encoder.layers[1].rate = 0
hidden_layer_encoder_2.layers[1].rate = 0
hidden_layer_encoder_1.layers[1].rate = 0
first_layer_encoder.layers[1].rate = 0
first_layer_decoder.layers[1].rate = 0
hidden_layer_decoder_1.layers[1].rate = 0
hidden_layer_decoder_2.layers[1].rate = 0
last_layer_decoder.layers[1].rate = 0
self.final_model_input = keras.Input(shape=(sae_first_layer_dim,))
self.final_model_encoder = last_layer_encoder(hidden_layer_encoder_2(hidden_layer_encoder_1(first_layer_encoder(self.final_model_input))))
self.final_model_decoder = first_layer_decoder(hidden_layer_decoder_1(hidden_layer_decoder_2(last_layer_decoder(self.final_model_encoder))))
self.model = keras.Model(inputs=self.final_model_input,
outputs=self.final_model_decoder)
# Construct encoder section
self.encoder_model = keras.Model(inputs=self.final_model_input,
outputs=self.final_model_encoder)
# Compile and train model
# opt = keras.optimizers.Adam(learning_rate=lr_initial_value)
# self.model.compile(loss="mse",
# optimizer=opt)
self.model.compile(loss="mse",
optimizer="adam")
self.model.fit(x=dataset,
y=dataset,
epochs=sae_full_train_epochs,)
# callbacks=[LearningRateScheduler(SAE.lr_scheduler,
# verbose=0)])
print("Trained final model.")
@staticmethod
def lr_scheduler(epoch, lr):
decay_rate = lr_decay_rate
decay_step = 50
if epoch % decay_step == 0 and epoch:
return lr * decay_rate
return lr
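    # Illustrative usage: pass as a callback so the learning rate is multiplied
    # by lr_decay_rate every 50 epochs, e.g.
    #   model.fit(x, y, callbacks=[LearningRateScheduler(SAE.lr_scheduler)])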
def train_autoencoder_layer(self,
dataset,
io_dim,
hidden_dim,
layer):
"""
Adds first layer of autoencoder to model.
Dataset default is MNIST dataset (784 dimensions).
"""
x = keras.Input(shape=(io_dim,))
x_tilde = keras.layers.Dropout(rate=sae_dropout_rate)(x)
if layer=="last":
h = keras.layers.Dense(hidden_dim,
kernel_initializer=initializers.RandomNormal(stddev=0.01))(x_tilde)
else:
h = keras.layers.Dense(hidden_dim,
activation="relu",
kernel_initializer=initializers.RandomNormal(stddev=0.01))(x_tilde)
h_tilde = keras.layers.Dropout(rate=sae_dropout_rate)(h)
if layer=="first":
y = keras.layers.Dense(io_dim,
kernel_initializer=initializers.RandomNormal(stddev=0.01))(h_tilde)
else:
y = keras.layers.Dense(io_dim,
activation="relu",
kernel_initializer=initializers.RandomNormal(stddev=0.01))(h_tilde)
# Construct model
training_model = keras.Model(inputs=x,
outputs=y)
# Train temporary model
# opt = keras.optimizers.Adam(learning_rate=lr_initial_value)
# training_model.compile(loss="mse",
# optimizer=opt)
# training_model.summary()
training_model.compile(loss="mse",
optimizer="adam")
training_model.fit(x=dataset,
y=dataset,
epochs=sae_train_epochs,)
# callbacks=[LearningRateScheduler(SAE.lr_scheduler,
# verbose=0)])
h_input = keras.Input(shape=(hidden_dim,))
decoder_dropout = training_model.layers[-2]
decoder_dense = training_model.layers[-1]
# Returns encoder and decoder part separately
encoder = keras.Model(inputs=x, outputs=h)
decoder = keras.Model(inputs=h_input, outputs=decoder_dense(decoder_dropout(h_input)))
return encoder, decoder
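# Minimal usage sketch (an illustrative assumption, not part of the original
# module): it relies on keras being imported above, as the class code does,
# and on the sae_* dimensions matching MNIST (sae_first_layer_dim == 784).
if __name__ == "__main__":
    (x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
    x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
    sae = SAE(x_train)
    # The greedily pretrained, finetuned encoder maps inputs to embeddings.
    embeddings = sae.encoder_model.predict(x_test)
    print(embeddings.shape)  # (10000, sae_last_layer_dim)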
|
{"hexsha": "23165e8ce15cf7e5928ddd6b09c9195cc90f3c0f", "size": 13266, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/methods.py", "max_stars_repo_name": "narutatsuri/deep_embedded_clustering", "max_stars_repo_head_hexsha": "b38be4742b31d651cf4a04152dd7fc4fd049b312", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util/methods.py", "max_issues_repo_name": "narutatsuri/deep_embedded_clustering", "max_issues_repo_head_hexsha": "b38be4742b31d651cf4a04152dd7fc4fd049b312", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/methods.py", "max_forks_repo_name": "narutatsuri/deep_embedded_clustering", "max_forks_repo_head_hexsha": "b38be4742b31d651cf4a04152dd7fc4fd049b312", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0714285714, "max_line_length": 173, "alphanum_fraction": 0.5073119252, "include": true, "reason": "from scipy", "num_tokens": 2499}
|
/** @file InterpolationTest.cpp
*
* Copyright (c) 2020 IACE
*/
#define BOOST_TEST_MODULE InterpolationTest
#include "Interpolation.h"
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_CASE( LinearInterpolatorBoundaryTest ) {
double dx[2] = {1, 2};
double dy[2] = {2, 4};
LinearInterpolator li(dx, dy, 2);
BOOST_CHECK_EQUAL(li(10), 4);
BOOST_CHECK_EQUAL(li(0), 2);
}
BOOST_AUTO_TEST_CASE( LinearInterpolatorInterpolateTest ) {
double dx[2] = {1, 2};
double dy[2] = {2, 4};
LinearInterpolator li(dx, dy, 2);
BOOST_CHECK_EQUAL(li(1.5), 3);
}
BOOST_AUTO_TEST_CASE( LinearInterpolatorUpdateDataTest ) {
double dx[2] = {1, 2};
double dy[2] = {2, 4};
LinearInterpolator li(dx, dy, 2);
BOOST_CHECK_EQUAL(li(1.5), 3);
double dxN[2] = {2, 3};
double dyN[2] = {4, 6};
li.updateData(dxN, dyN);
BOOST_CHECK_EQUAL(li(2.5), 5);
}
BOOST_AUTO_TEST_CASE( LinearInterpolatorInitUpdateDataTest ) {
double dx[2] = {};  // zero-length arrays are ill-formed in C++; use zero-initialised data
double dy[2] = {};
LinearInterpolator li(dx, dy, 2);
double dxN[2] = {2, 3};
double dyN[2] = {4, 6};
li.updateData(dxN, dyN);
BOOST_CHECK_EQUAL(li(2.5), 5);
}
BOOST_AUTO_TEST_CASE( LinearInterpolatorSetDataTest ) {
double dx[2] = {1, 2};
double dy[2] = {2, 4};
LinearInterpolator li(dx, dy, 2);
BOOST_CHECK_EQUAL(li(1.5), 3);
double dxN[4] = {1, 2, 3, 4};
double dyN[4] = {2, 4, 2, 4};
li.setData(dxN, dyN, 4);
BOOST_CHECK_EQUAL(li(0.5), 2);
}
BOOST_AUTO_TEST_CASE( LinearInterpolatorChangeDataTest ) {
LinearInterpolator li;
BOOST_CHECK_EQUAL(li(1), 0);
double dxN[4] = {1, 2, 3, 4};
double dyN[4] = {2, 4, 2, 4};
li.setData(dxN, dyN, 4);
BOOST_CHECK_EQUAL(li(0.5), 2);
}
BOOST_AUTO_TEST_CASE( LinearInterpolatorInterpolateMultipleTest ) {
double dx[4] = {1, 2, 3, 4};
double dy[4] = {2, 4, 2, 4};
LinearInterpolator li(dx, dy, 4);
BOOST_CHECK_EQUAL(li(0.5), 2);
BOOST_CHECK_EQUAL(li(1),2);
BOOST_CHECK_EQUAL(li(1.5), 3);
BOOST_CHECK_EQUAL(li(2),4);
BOOST_CHECK_EQUAL(li(2.5), 3);
BOOST_CHECK_EQUAL(li(3),2);
BOOST_CHECK_EQUAL(li(3.5), 3);
BOOST_CHECK_EQUAL(li(4),4);
BOOST_CHECK_EQUAL(li(4.5), 4);
}
|
{"hexsha": "562bb763258a4cb5d63dba66ee751188ea0712db", "size": 2232, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "utils/InterpolationTest.cpp", "max_stars_repo_name": "ummerland/tool-libs", "max_stars_repo_head_hexsha": "8c2d635e067e827b67678b45d17670dd62ef83a0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/InterpolationTest.cpp", "max_issues_repo_name": "ummerland/tool-libs", "max_issues_repo_head_hexsha": "8c2d635e067e827b67678b45d17670dd62ef83a0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2020-10-21T13:47:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-30T11:42:13.000Z", "max_forks_repo_path": "utils/InterpolationTest.cpp", "max_forks_repo_name": "ummerland/tool-libs", "max_forks_repo_head_hexsha": "8c2d635e067e827b67678b45d17670dd62ef83a0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-12-04T13:32:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-26T13:38:51.000Z", "avg_line_length": 22.32, "max_line_length": 67, "alphanum_fraction": 0.6227598566, "num_tokens": 773}
|
import matplotlib
matplotlib.use("agg")
from matplotlib import pyplot as plt
plt.style.use("ggplot")
import seaborn as sns
from networkx import DiGraph
from IPython.core.display import Image
from .export import to_agraph
from .AnalysisGraph import AnalysisGraph
from .utils.misc import _insert_line_breaks
from functools import singledispatch
from .GrFN.ProgramAnalysisGraph import ProgramAnalysisGraph
from pygraphviz import AGraph
# ==========================================================================
# Visualization
# ==========================================================================
@singledispatch
def visualize(x, *args, **kwargs):
    """ Fallback for unregistered types; the overloads below dispatch on the type of x. """
@visualize.register(AnalysisGraph)
def _(G: AnalysisGraph, *args, **kwargs):
""" Visualize the analysis graph in a Jupyter notebook cell. """
return Image(
to_agraph(G, *args, **kwargs).draw(
format="png", prog=kwargs.get("prog", kwargs.get("layout", "dot"))
),
retina=True,
)
@visualize.register(ProgramAnalysisGraph)
def _(
G: ProgramAnalysisGraph,
show_values=True,
save=False,
filename="program_analysis_graph.pdf",
**kwargs,
):
""" Visualizes ProgramAnalysisGraph in Jupyter notebook cell.
Args:
args
kwargs
Returns:
AGraph
"""
A = AGraph(directed=True)
A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"})
A.node_attr.update(
{
"shape": "rectangle",
"color": "#650021",
"style": "rounded",
"fontname": "Gill Sans",
}
)
color_str = "#650021"
for n in G.nodes():
A.add_node(n, label=n)
for e in G.edges(data=True):
A.add_edge(e[0], e[1], color=color_str, arrowsize=0.5)
if show_values:
for n in A.nodes():
# format the numeric node value to 4 significant digits
value = float(G.nodes[n]["value"])
n.attr["label"] = n.attr["label"] + f": {value:.4}"
if save:
A.draw(filename, prog=kwargs.get("layout", "dot"))
return Image(
A.draw(format="png", prog=kwargs.get("layout", "dot")), retina=True
)
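# The @singledispatch pattern above selects an implementation from the runtime
# type of the first argument. A self-contained toy sketch of the same mechanism
# (the classes here are hypothetical, not part of delphi):
if __name__ == "__main__":
    from functools import singledispatch as _singledispatch

    class _Circle:
        pass

    class _Square:
        pass

    @_singledispatch
    def _describe(shape):
        return "unknown shape"

    @_describe.register(_Circle)
    def _(shape):
        return "a circle"

    @_describe.register(_Square)
    def _(shape):
        return "a square"

    print(_describe(_Circle()), "/", _describe(_Square()))  # a circle / a square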
|
{"hexsha": "41c72a42d06204820a617fda41e523a9b8781ff2", "size": 2101, "ext": "py", "lang": "Python", "max_stars_repo_path": "delphi/visualization.py", "max_stars_repo_name": "cthoyt/delphi", "max_stars_repo_head_hexsha": "3df2de639905453f5d28d7a7b3b9f7e5a7a1fb0d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "delphi/visualization.py", "max_issues_repo_name": "cthoyt/delphi", "max_issues_repo_head_hexsha": "3df2de639905453f5d28d7a7b3b9f7e5a7a1fb0d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "delphi/visualization.py", "max_forks_repo_name": "cthoyt/delphi", "max_forks_repo_head_hexsha": "3df2de639905453f5d28d7a7b3b9f7e5a7a1fb0d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1494252874, "max_line_length": 78, "alphanum_fraction": 0.5773441218, "include": true, "reason": "from networkx", "num_tokens": 492}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.linear_model import LassoLars
from sklearn import metrics
from sklearn.model_selection import train_test_split
dataset = pd.read_csv('Weather.csv')
X = dataset['MinTemp'].values.reshape(-1, 1)
y = dataset['MaxTemp'].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
regressor = LassoLars(alpha=1.0, fit_intercept=True, verbose=False, normalize=False, precompute='auto',
max_iter=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True, positive=False)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print(regressor.intercept_)
print(regressor.coef_)
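# A small extension sketch (not in the original script): R^2 complements the
# absolute error metrics above with a scale-free goodness-of-fit score.
print('R2 Score:', metrics.r2_score(y_test, y_pred))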
|
{"hexsha": "dcecba5aa0605b42dfea86c3ca5c57ee7248ff4c", "size": 1033, "ext": "py", "lang": "Python", "max_stars_repo_path": "Sklearn_Regression_Portfolio/Reg_LassoLars.py", "max_stars_repo_name": "KiLJ4EdeN/Sklearn_Regression_Portfolio", "max_stars_repo_head_hexsha": "bc779b82b35583c6f75abe69739dc6b698934047", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-01-16T05:34:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-12T09:35:49.000Z", "max_issues_repo_path": "Sklearn_Regression_Portfolio/Reg_LassoLars.py", "max_issues_repo_name": "KiLJ4EdeN/Sklearn_Regression_Portfolio", "max_issues_repo_head_hexsha": "bc779b82b35583c6f75abe69739dc6b698934047", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sklearn_Regression_Portfolio/Reg_LassoLars.py", "max_forks_repo_name": "KiLJ4EdeN/Sklearn_Regression_Portfolio", "max_forks_repo_head_hexsha": "bc779b82b35583c6f75abe69739dc6b698934047", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-13T03:03:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T07:49:35.000Z", "avg_line_length": 44.9130434783, "max_line_length": 107, "alphanum_fraction": 0.7608906099, "include": true, "reason": "import numpy", "num_tokens": 262}
|
SUBROUTINE SCANINT( STRING, VALUE, NCHARS, NDIGITS )
C***********************************************************************
C Version "$Id: scanint.f 1 2017-06-10 18:05:20Z coats $"
C EDSS/Models-3 I/O API.
C Copyright (C) 1992-2002 MCNC and Carlie J. Coats, Jr., and
C (C) 2003-2010 by Baron Advanced Meteorological Systems.
C Distributed under the GNU LESSER GENERAL PUBLIC LICENSE version 2.1
C See file "LGPL.txt" for conditions of use.
C.........................................................................
C subroutine body starts at line 68
C
C RETURNS:
C INTEGER
C VALUE decoded from STRING, or IMISS3 for "missing",
C after skipping leading blanks.
C NCHARS the number of characters consumed (including
C                leading whitespace)
C NDIGITS the number of digits (counting leading
C minus-sign, if any)
C
C PRECONDITIONS REQUIRED:
C ASCII.
C Properly formatted integer in STRING. In particular:
C * no whitespace between sign and digits composing the rest
C of the value; and
C * leading whitespace is OK (and is skipped over and counted);
C whitespace is defined to be characters <= BLANK
C
C SUBROUTINES AND FUNCTIONS CALLED:
C M3WARN()
C
C REVISION HISTORY:
C Adapted 7/2001 by CJC from STR2INT()
C
C Modified 03/2010 by CJC: F90 changes for I/O API v3.1
C***********************************************************************
IMPLICIT NONE
C........... INCLUDES:
INCLUDE 'PARMS3.EXT'
C........... ARGUMENTS and their descriptions:
CHARACTER*(*), INTENT(IN ) :: STRING
INTEGER , INTENT( OUT) :: VALUE, NCHARS, NDIGITS
C........... PARAMETERS
CHARACTER*1, PARAMETER :: BLANK = ' '
C........... SCRATCH LOCAL VARIABLES and their descriptions:
INTEGER SUM, SIGN
INTEGER I, J, K, L
INTEGER IC, I0
CHARACTER*256 MESG
C***********************************************************************
C begin body of function SCANINT
L = LEN( STRING )
DO 11 I = 1, L ! skip leading whitespace
IF ( STRING( I:I ) .GT. BLANK ) GO TO 12
11 CONTINUE
C....... If you get to here: no number there
VALUE = IMISS3
NCHARS = L
NDIGITS = 0
RETURN
12 CONTINUE
IF( STRING( I:I ) .EQ. '-' ) THEN ! adjust for sign
SIGN = -1
I = I + 1
NDIGITS = 1
ELSE IF( STRING( I:I ) .EQ. '+' ) THEN
SIGN = 1
I = I + 1
NDIGITS = 0
ELSE
SIGN = 1
NDIGITS = 0
END IF
NCHARS = I
SUM = 0 ! accumulate as long as there are digits.
K = 0
I0 = ICHAR( '0' )
DO 22 J = I, L
IC = ICHAR( STRING( J:J ) ) - I0
IF ( IC .LT. 0 .OR. IC .GT. 9 ) GO TO 23
SUM = 10 * SUM + IC
K = K + 1
22 CONTINUE
23 CONTINUE
NCHARS = NCHARS + K
NDIGITS = NDIGITS + K
IF ( K .GT. 0 ) THEN
VALUE = SIGN * SUM
ELSE
MESG = 'No digits in "' // STRING // '"'
CALL M3WARN( 'SCANINT', 0, 0, MESG )
VALUE = IMISS3
END IF
RETURN
END SUBROUTINE SCANINT
|
{"hexsha": "a93c0eaf69f233684341e13037afdb0af9fdb7c5", "size": 3504, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "JPS_CITY_CHEM/citychem-1.3/preproc/bconcc2.2/ioapi3.2/ioapi/scanint.f", "max_stars_repo_name": "mdhillmancmcl/TheWorldAvatar-CMCL-Fork", "max_stars_repo_head_hexsha": "011aee78c016b76762eaf511c78fabe3f98189f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2021-03-08T01:58:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T15:46:16.000Z", "max_issues_repo_path": "JPS_CITY_CHEM/citychem-1.3/preproc/bconcc2.2/ioapi3.2/ioapi/scanint.f", "max_issues_repo_name": "mdhillmancmcl/TheWorldAvatar-CMCL-Fork", "max_issues_repo_head_hexsha": "011aee78c016b76762eaf511c78fabe3f98189f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 63, "max_issues_repo_issues_event_min_datetime": "2021-05-04T15:05:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T14:32:29.000Z", "max_forks_repo_path": "JPS_CITY_CHEM/citychem-1.3/preproc/bconcc2.2/ioapi3.2/ioapi/scanint.f", "max_forks_repo_name": "mdhillmancmcl/TheWorldAvatar-CMCL-Fork", "max_forks_repo_head_hexsha": "011aee78c016b76762eaf511c78fabe3f98189f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2021-03-08T07:52:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T04:46:20.000Z", "avg_line_length": 28.958677686, "max_line_length": 74, "alphanum_fraction": 0.4726027397, "num_tokens": 949}
|
"""
Parses Resume and returns skill,education,work experience
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import spacy
import pickle
import random
import sys, fitz
import docx
import docx2txt
import os
from utils import constants as cs
import re
def parse_resume(file_path):
text = get_text(file_path)
segment = classify_resume(text)
return segment
def tokenize(text):
    # NOTE: spacy.load is expensive; callers tokenizing many resumes should cache it
    nlp = spacy.load('en_core_web_sm')
nlp_text = nlp(text)
noun_chunks = nlp_text.noun_chunks
return nlp_text,noun_chunks
def extract_skills(text):
'''
Helper function to extract skills from resume text
:param text: plain resume text (tokenized internally with spaCy)
:return: list of skills extracted
'''
nlp_text, noun_chunks = tokenize(text)
tokens = [token.text for token in nlp_text if not token.is_stop]
data = pd.read_csv(os.path.join(os.path.dirname(__file__), '../data/skills.csv'))
skills = list(data.columns.values)
skillset = []
# check for one-grams
for token in tokens:
if token.lower() in skills:
skillset.append(token)
# check for bi-grams and tri-grams
for token in noun_chunks:
token = token.text.lower().strip()
if token in skills:
skillset.append(token)
return [i.capitalize() for i in set([i.lower() for i in skillset])]
def extract_education(nlp_text):
'''
Helper function to extract education from resume text
:param nlp_text: iterable of sentence strings from the resume
:return: tuple of (degree, year) when a year is found, else just the degree
'''
edu = {}
# Extract education degree
for index, text in enumerate(nlp_text):
for tex in text.split():
tex = re.sub(r'[?|$|.|!|,]', r'', tex)
if tex.upper() in cs.EDUCATION and tex not in cs.STOPWORDS:
edu[tex] = text + nlp_text[index + 1]
# Extract year
education = []
for key in edu.keys():
year = re.search(re.compile(cs.YEAR), edu[key])
if year:
education.append((key, ''.join(year.group(0))))
else:
education.append(key)
return education
#Extract text from DOCX
def get_text_doc(file_path):
'''
Helper function to extract the plain text from .docx files
:param pdf_path: path to PDF file to be extracted
:return: iterator of string of extracted text
'''
temp = docx2txt.process(file_path)
text = [line.replace('\t', ' ') for line in temp.split('\n') if line]
return ' '.join(text)
# Get text from pdf
def get_text_pdf(file_path):
'''
Helper function to extract the plain text from .pdf files
:param pdf_path: path to PDF file to be extracted
:return: iterator of string of extracted text
'''
doc = fitz.open(file_path)
txt = ""
for page in doc:
txt = txt + str(page.getText())
# print(txt)
text = " ".join(txt.split('\n'))
# print(text)
return text
def get_text(file_path):
'''
Wrapper function to detect the file extension and call text extraction function accordingly
:param file_path: path of file of which text is to be extracted
:param extension: extension of file `file_name`
'''
ext = os.path.splitext(file_path)[1]
if ext==".pdf":
txt = get_text_pdf(file_path)
elif ext==".doc" or ext==".docx":
txt=get_text_doc(file_path)
print(txt)
return txt
def classify_resume(txt):
"""
Classifying the resume using NER
"""
nlp_model = spacy.load('nlp_ner_model')
# Applying the model
doc = nlp_model(txt)
skills = extract_skills(txt)
education = extract_education(txt)
segment = ""
for ent in doc.ents:
segment += (f'{ent.label_.upper():{30}}- {ent.text}\n')
segment += f"{'SKILLS':{30}}- {','.join(skills)}\n"
segment += f"{'EDUCATION':{30}}- {','.join(education)}\n"
return segment
if __name__ == "__main__":
print(parse_resume("./data/resume/ivan_machine_learning_engineer.pdf"))
|
{"hexsha": "5a7291bf89b0d39e4e9f94f186ac9601fcb102fc", "size": 4182, "ext": "py", "lang": "Python", "max_stars_repo_path": "resume_parser/resume_parser.py", "max_stars_repo_name": "ExtremelySunnyYK/Resume-Parser-Name-Entity-Recognition", "max_stars_repo_head_hexsha": "3ebab6eacf01d20fc90325c6696781c14286e1bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-02-25T07:01:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T02:16:33.000Z", "max_issues_repo_path": "resume_parser/resume_parser.py", "max_issues_repo_name": "ExtremelySunnyYK/Resume-Parser-Name-Entity-Recognition", "max_issues_repo_head_hexsha": "3ebab6eacf01d20fc90325c6696781c14286e1bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "resume_parser/resume_parser.py", "max_forks_repo_name": "ExtremelySunnyYK/Resume-Parser-Name-Entity-Recognition", "max_forks_repo_head_hexsha": "3ebab6eacf01d20fc90325c6696781c14286e1bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-11T20:58:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T20:58:22.000Z", "avg_line_length": 28.8413793103, "max_line_length": 99, "alphanum_fraction": 0.6425155428, "include": true, "reason": "import numpy", "num_tokens": 1057}
|
#== # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Description
#
# Functions related to the analysis of the Right Ascension of the Ascending
# Node (RAAN).
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ==#
export compute_RAAN_lt, sim_RAAN_J2
"""
compute_RAAN_lt(JD::Number, asc_node_lt::Number)
Compute the RAAN (0,2π) \\[rad] so that the orbit plane local time is
`asc_node_lt` [hour] at the Julian day `JD`.
"""
function compute_RAAN_lt(JD::Number, asc_node_lt::Number)
# Get the sun position at noon (UT) represented in the Inertial ref. frame.
s_i = sun_position_i(JD)
# Get the desired angle between the Sun and the ascending node [rad].
alpha = (asc_node_lt-12)*π/12
# Get the right ascension of the Sun in the Inertial ref. frame. This is the
# Sun apparent local time.
SALT = atan(s_i[2],s_i[1])
# Get the equation of time to compute the Sun mean local time [rad].
eot = equation_of_time(JD)
# Compute the Sun mean local time.
SMLT = SALT + eot
# Compute the desired RAAN in the interval 0, 2*pi.
RAAN = mod(SMLT+alpha, 2π)
end
"""
sim_RAAN_J2(a::Number, e::Number, i::Number, RAAN_0::Number, numDays::Integer)
Simulate the RAAN of an orbit with semi-major axis `a` [m], eccentricity `e`,
inclination `i` [rad], and initial RAAN `RAAN_0` [rad] considering J2
perturbations. The analysis is performed for `numDays` days.
# Returns
A `numDays` × 2 matrix in which the i-th line is:
| day | RAAN (0,2π) [rad] |
"""
function sim_RAAN_J2(a::Number,
e::Number,
i::Number,
RAAN_0::Number,
numDays::Integer)
# Initialization of variables.
days = collect(0:1:numDays-1) # Vector of the days in which the RAAN will be
# simulated.
# RAAN rotation rate [rad/day].
dOmega = dRAAN(a, e, i, :J2)*24*3600
# Simulate the RAAN for each day considering just the J2 perturbations.
RAAN = mod.(RAAN_0 .+ dOmega.*days, 2π)
[days RAAN]
end
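# Minimal usage sketch (the values are illustrative assumptions, left as
# comments because these functions rely on the rest of SatelliteToolbox.jl):
#
#     JD = 2458849.5                         # 2020-01-01T00:00:00 UTC
#     RAAN0 = compute_RAAN_lt(JD, 10.5)      # 10:30 ascending-node local time
#     hist = sim_RAAN_J2(7078.137e3, 0.001, 98.2π/180, RAAN0, 30)
#     # hist[:, 2] holds 30 days of J2-driven RAAN drift [rad]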
|
{"hexsha": "e125e7a73afa63d3dd9f5996d386052b0c4b9251", "size": 2122, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/analysis/raan.jl", "max_stars_repo_name": "disberd/SatelliteToolbox.jl", "max_stars_repo_head_hexsha": "441470938af978e9d5653a9c4b36ccc107023960", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 157, "max_stars_repo_stars_event_min_datetime": "2018-06-19T21:11:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T19:24:41.000Z", "max_issues_repo_path": "src/analysis/raan.jl", "max_issues_repo_name": "disberd/SatelliteToolbox.jl", "max_issues_repo_head_hexsha": "441470938af978e9d5653a9c4b36ccc107023960", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 71, "max_issues_repo_issues_event_min_datetime": "2018-06-18T20:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T21:33:20.000Z", "max_forks_repo_path": "src/analysis/raan.jl", "max_forks_repo_name": "disberd/SatelliteToolbox.jl", "max_forks_repo_head_hexsha": "441470938af978e9d5653a9c4b36ccc107023960", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2018-10-02T02:42:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T20:36:51.000Z", "avg_line_length": 29.4722222222, "max_line_length": 82, "alphanum_fraction": 0.5918944392, "num_tokens": 661}
|
# Copyright (c) 2020 Hartmut Kaiser
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# #1267: np.random.random_integers is not available
from phylanx import Phylanx
import numpy as np
@Phylanx
def test_random_integers1(low):
return np.random.random_integers(low)
@Phylanx
def test_random_integers2(low, high):
return np.random.random_integers(low, high)
@Phylanx
def test_random_integers3(low, high, size):
return np.random.random_integers(low, high, size)
for i in range(10):
result = test_random_integers1(10)
assert result >= 1 and result <= 10, result
result = test_random_integers2(10, 20)
assert result >= 10 and result <= 20, result
result = test_random_integers3(10, 20, 10)
assert len(result) == 10, result
for v in result:
assert v >= 10 and v <= 20, v
result = test_random_integers3(10, 20, (10,))
assert len(result) == 10, result
for v in result:
assert v >= 10 and v <= 20, v
|
{"hexsha": "e335eb2163ecdbb2672e40752da69fa52e96b463", "size": 1075, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/regressions/python/1267_random_integers.py", "max_stars_repo_name": "NanmiaoWu/phylanx", "max_stars_repo_head_hexsha": "295b5f82cc39925a0d53e77ba3b6d02a65204535", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 83, "max_stars_repo_stars_event_min_datetime": "2017-08-27T15:09:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T17:03:41.000Z", "max_issues_repo_path": "tests/regressions/python/1267_random_integers.py", "max_issues_repo_name": "NanmiaoWu/phylanx", "max_issues_repo_head_hexsha": "295b5f82cc39925a0d53e77ba3b6d02a65204535", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 808, "max_issues_repo_issues_event_min_datetime": "2017-08-27T15:35:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T17:30:50.000Z", "max_forks_repo_path": "tests/regressions/python/1267_random_integers.py", "max_forks_repo_name": "NanmiaoWu/phylanx", "max_forks_repo_head_hexsha": "295b5f82cc39925a0d53e77ba3b6d02a65204535", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2017-08-27T15:09:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T12:07:34.000Z", "avg_line_length": 25.0, "max_line_length": 79, "alphanum_fraction": 0.6948837209, "include": true, "reason": "import numpy", "num_tokens": 318}
|
from typing import List, Optional
import copy
import numpy as np
class RuleBasedAgentWrapper(object):
def __init__(
self,
ruleBasedAgent:object,
player_idx:int,
nbr_actors:int
):
self.nbr_actors = nbr_actors
self.action_space_dim = ruleBasedAgent.action_space_dim
self.vocab_size = ruleBasedAgent.vocab_size
self.max_sentence_length = ruleBasedAgent.max_sentence_length
self.nbr_communication_rounds = ruleBasedAgent.nbr_communication_rounds
self.nbr_latents = ruleBasedAgent.nbr_latents
self.training = False
self.player_idx = player_idx
self.original_ruleBasedAgent = ruleBasedAgent
self.ruleBasedAgents = []
self.reset_actors()
self.nb_possible_sentences = self.vocab_size**self.max_sentence_length
# _build_sentenceId2sentence recomputes nb_possible_sentences so that it
# also counts sentences shorter than max_sentence_length.
self._build_sentenceId2sentence()
self.nb_decisions = (self.action_space_dim-1)//self.nb_possible_sentences
def _build_sentenceId2sentence(self):
self.nb_possible_sentences = 1 # empty string...
for pos in range(self.max_sentence_length):
# account for each string of length pos (before EoS)
self.nb_possible_sentences += (self.vocab_size)**(pos+1)
sentenceId2sentence = np.zeros( (self.nb_possible_sentences, self.max_sentence_length))
idx = 1
local_token_pointer = 0
global_token_pointer = 0
while idx != self.nb_possible_sentences:
sentenceId2sentence[idx] = sentenceId2sentence[idx-1]
sentenceId2sentence[idx][local_token_pointer] = (sentenceId2sentence[idx][local_token_pointer]+1)%(self.vocab_size+1)
while sentenceId2sentence[idx][local_token_pointer] == 0:
# remove the possibility of an empty symbol on the left of actual tokens:
sentenceId2sentence[idx][local_token_pointer] += 1
local_token_pointer += 1
sentenceId2sentence[idx][local_token_pointer] = (sentenceId2sentence[idx][local_token_pointer]+1)%(self.vocab_size+1)
idx += 1
local_token_pointer = 0
self.sentenceId2sentence = sentenceId2sentence
self.sentence2sentenceId = {}
for sid in range(self.nb_possible_sentences):
# .tobytes() replaces the deprecated ndarray.tostring()
self.sentence2sentenceId[ self.sentenceId2sentence[sid].tobytes() ] = sid
def _encode_action(self, action_dict, info_dict):
original_action_decision_id = action_dict['decision']
original_action_sentence = action_dict['communication_channel']
original_action_sentence_id = self.sentence2sentenceId[ original_action_sentence.tobytes() ]
# Are there actions available apart from No-op?
available_actions_ids_p1 = info_dict['legal_actions'][0]* np.arange(info_dict['legal_actions'].shape[-1]+1)[1:]
available_actions_set = set(available_actions_ids_p1.astype(int))
available_actions_set = available_actions_set.difference([0])
available_actions = [a-1 for a in available_actions_set]
if available_actions==[self.action_space_dim-1]:
encoded_action = self.action_space_dim-1
else:
encoded_action = original_action_decision_id*self.nb_possible_sentences+original_action_sentence_id
return encoded_action
def clone(self, **kwargs):
cloned_agent = copy.deepcopy(self)
cloned_agent.reset_actors()
return cloned_agent
@property
def handled_experiences(self):
return 0
@handled_experiences.setter
def handled_experiences(self, val):
pass
def get_experience_count(self):
return self.handled_experiences
def get_update_count(self):
return 0
def get_nbr_actor(self) -> int:
return self.nbr_actors
def parameters(self):
return []
def set_nbr_actor(self, nbr_actors:int):
self.nbr_actors = nbr_actors
self.reset_actors()
def get_rnn_states(self):
return copy.deepcopy(self.ruleBasedAgents)
def set_rnn_states(self, rnn_states):
self.ruleBasedAgents = rnn_states
def reset_actors(self, indices:List[int]=None):
if indices is None: indices = list(range(self.nbr_actors))
for idx in indices:
if len(self.ruleBasedAgents) <= idx:
self.ruleBasedAgents.append(copy.deepcopy(self.original_ruleBasedAgent))
continue
self.ruleBasedAgents[idx] = copy.deepcopy(self.original_ruleBasedAgent)
self.ruleBasedAgents[idx].reset()
def get_hidden_state(self):
return [self.ruleBasedAgents[a].get_hidden_state() for a in range(self.nbr_actors)]
def query_action(self, state, infos, as_logit=False):
return self.take_action(state=state, infos=infos, as_logit=as_logit)
def take_action(self, state, infos, as_logit=False):
"""
Convert the :param state: and :param infos:
into the input that the rule-based agent expects.
"""
actions = np.asarray([
self.action_space_dim-1 for _ in range(self.nbr_actors)
]
)
for pidx in range(self.nbr_actors):
next_action_dict = self.ruleBasedAgents[pidx].next_action(
state=state[pidx],
infos=infos[pidx]
)
actions[pidx] = self._encode_action(action_dict=next_action_dict, info_dict=infos[pidx])
return actions
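# A tiny self-contained check of the sentence enumeration above (vocab_size=2
# and max_sentence_length=2 are illustrative assumptions): the table holds the
# empty sentence plus every string of each length up to the maximum.
if __name__ == "__main__":
    vocab_size, max_sentence_length = 2, 2
    nb = 1 + sum(vocab_size ** (pos + 1) for pos in range(max_sentence_length))
    print(nb)  # 1 + 2 + 4 = 7 possible sentences, as in _build_sentenceId2sentence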
|
{"hexsha": "798b3a4e335c204c9e0c79c99b9ac28e42f0a10e", "size": 5621, "ext": "py", "lang": "Python", "max_stars_repo_path": "symbolic_behaviour_benchmark/utils/agent_wrappers.py", "max_stars_repo_name": "Near32/SymbolicBehaviourBenchmark", "max_stars_repo_head_hexsha": "d1f9f14ed186292e22802781f4737e6747cd8c64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "symbolic_behaviour_benchmark/utils/agent_wrappers.py", "max_issues_repo_name": "Near32/SymbolicBehaviourBenchmark", "max_issues_repo_head_hexsha": "d1f9f14ed186292e22802781f4737e6747cd8c64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "symbolic_behaviour_benchmark/utils/agent_wrappers.py", "max_forks_repo_name": "Near32/SymbolicBehaviourBenchmark", "max_forks_repo_head_hexsha": "d1f9f14ed186292e22802781f4737e6747cd8c64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9802631579, "max_line_length": 133, "alphanum_fraction": 0.657178438, "include": true, "reason": "import numpy", "num_tokens": 1212}
|
import numpy as np
import matplotlib.pyplot as plt
from photutils import Background2D, MedianBackground
from astropy.stats import SigmaClip
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage import util, filters, morphology, exposure, measure
from skimage.draw import circle
import pandas as pd
from ipywidgets import interact, fixed, interactive
from IPython.display import display
import holoviews as hv
import holoviews.util
from holoviews import streams
from bokeh.models import HoverTool
from skimage.color import label2rgb
def crop_show(image, size = 7000, save = False):
x, y = image.shape
x_crop = int((x - size) / 2)  # int() instead of the removed np.int alias
y_crop = int((y - size) / 2)
fig, ax = plt.subplots(figsize=(12,12))
ax.imshow(image)
# outline the crop window in red
ax.plot([y_crop, y_crop+size], [x_crop, x_crop], [y_crop, y_crop], [x_crop, x_crop+size],
        [y_crop, y_crop+size], [x_crop+size, x_crop+size], [y_crop+size, y_crop+size], [x_crop, x_crop+size],
        color='r')
if save == True:
fig.savefig('cropped_region.png', dpi =300)
return image[x_crop:x_crop+size, y_crop:y_crop+size]
def crop(image, size = 7000):
x, y = image.shape
x_crop = int((x - size) / 2)
y_crop = int((y - size) / 2)
return image[x_crop:x_crop+size, y_crop:y_crop+size]
def chunks(img, overlap = 100, chunks = 8):
x0, y0 = img.shape
x_block, y_block = int((x0 + overlap)/chunks), int((y0 + overlap)/chunks)
step = x_block - overlap
chunks_img = util.view_as_windows(img, (x_block,y_block), step = step)
chunks_img = np.squeeze(chunks_img)
return chunks_img, x_block, y_block, overlap
def canny_chunks(chunks):
lst = []
for i in range(chunks.shape[0]):
for j in range(chunks.shape[1]):
lst.append(canny(chunks[i,j], sigma=1))
return(lst)
def hough_circle_chunk(lst_canny, low = 17, high = 25):
lst = []
for k in range(len(lst_canny)):
# Should be able to change the range via the low/high arguments:
hough_radii = np.arange(low, high, 1)
#hough_radii = np.asarray([20])
hough_res = hough_circle(lst_canny[k], hough_radii)
# Select the most prominent 5 circles
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii,
min_xdistance=50, min_ydistance=50)
lst.append((accums, cx, cy, radii))
return(lst)
def draw_circle(lst_canny, lst_hough):
x,y = lst_canny[0].shape
lst = []
for l in range(len(lst_canny)):
img_mask = np.zeros((x+30,y + 30), dtype="bool")
_, cx, cy, _ = lst_hough[l]
for center_y, center_x in zip(cy, cx):
circy, circx = circle(center_y, center_x, 20)
img_mask[circy, circx] = True
lst.append(img_mask)
return(lst)
def stitched_x(lst_canny, list_mask, over = 200):
# `over` should equal half of the overlap
x,y = lst_canny[0].shape
stitched_x = np.hstack((list_mask[0][:, 0:y-over], list_mask[1][:, over:y-over], list_mask[2][:, over:y-over],
list_mask[3][:, over:y-over], list_mask[4][:, over:y-over], list_mask[5][:, over:y-over],
list_mask[6][:, over:y-over], list_mask[7][:, over:y-over], list_mask[8][:, over:y]))
return(stitched_x)
def stitched_y(list_mask, over = 200):
x, y = list_mask[0].shape
stitched_y = np.vstack((list_mask[0][0:x-over, :], list_mask[1][over:x-over, :], list_mask[2][over:x-over, :],
list_mask[3][over:x-over, :], list_mask[4][over:x-over, :], list_mask[5][over:x-over, :],
list_mask[6][over:x-over, :], list_mask[7][over:x-over, :], list_mask[8][over:x, :]))
return(stitched_y)
def stitched(lst_canny, list_mask):
lst_stitched_x = []
for x in range(0, len(list_mask), 9):
lst_stitched_x.append(stitched_x(lst_canny,list_mask[x:len(list_mask)], over = 100))
stitched_img = stitched_y(lst_stitched_x, over = 115)
return(measure.label(stitched_img, background = 0))
def bkg_correct(img):
sigma_clip = SigmaClip(sigma=3., iters=10)
bkg_estimator = MedianBackground()
bkg = Background2D(img, (50, 50), filter_size=(3, 3),
sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
correct = img - bkg.background
correct = correct+np.abs(correct.min())
return(correct)
def prop_measure(labeled_img, intensity_image, back_sub = False):
if back_sub:
intensity_image = bkg_correct(intensity_image)
#only use the labeled region
props = measure.regionprops(labeled_img, intensity_image=intensity_image)
mean_lst = [np.mean(prop.intensity_image[np.nonzero(prop.intensity_image)]) for prop in props]
median_lst = [np.median(prop.intensity_image[np.nonzero(prop.intensity_image)]) for prop in props]
std_lst = [np.std(prop.intensity_image[np.nonzero(prop.intensity_image)]) for prop in props]
percentile_75_lst = [np.percentile(prop.intensity_image[np.nonzero(prop.intensity_image)], 75) for prop in props]
percentile_50_lst = [np.percentile(prop.intensity_image[np.nonzero(prop.intensity_image)], 50) for prop in props]
percentile_25_lst = [np.percentile(prop.intensity_image[np.nonzero(prop.intensity_image)], 25) for prop in props]
property_lst = [mean_lst, median_lst, std_lst, percentile_75_lst,
percentile_50_lst, percentile_25_lst, props]
return property_lst, props
def data_panda(property_lst):
data = {"mean intensity": property_lst[0],
"median intensity": property_lst[1],
"standard deviation": property_lst[2],
"25th percentile ": property_lst[3],
"50th percentile ": property_lst[4],
"75th percentile ": property_lst[5]}
return(pd.DataFrame(data))
def new_label_holo(liste, labeled_img, int_img):
new_labeled_img = np.copy(labeled_img)
for roi in liste:
xx, yy = np.meshgrid(np.arange(int(roi[3]),int(roi[1])), np.arange(int(roi[0]),int(roi[2])))
new_labeled_img[xx, yy] = 0
props = measure.regionprops(new_labeled_img,
intensity_image=int_img)
circularity = [prop.eccentricity for prop in props]
new_labels = new_labeled_img.copy()
for circ, prop in zip(circularity, props):
if circ >=0.7:
new_labels[tuple(prop.coords.T)] = 0
else:
new_labels[tuple(prop.coords.T)] = prop.label
return(new_labels)
def prop_lab(labeled_img):
props = measure.regionprops(labeled_img)
lst_coord = [prop.centroid[::-1] for prop in props]
label = [prop.label for prop in props]
data = np.concatenate((np.asarray(lst_coord),
np.zeros(np.asarray(label)[:,np.newaxis].shape)),
axis = 1)
data = {"x": data[:,0],
"y": data[:,1],
"empty": data[:,2]}
return data
def equaliz(FAM_cropped):
# Need to crop a little more after stitching
FAM_cropped_n = FAM_cropped[0:6530, 0:6500]
to_show_FAM = FAM_cropped_n[::-1]
p2, p98 = np.percentile(to_show_FAM, (2, 98))
equal = exposure.rescale_intensity(to_show_FAM, in_range=(p2, p98))
return equal, FAM_cropped_n
def to_ROI(to_show_FAM, data):
hv.output(size=200)
dict_spec = {'Points':{'style':dict(cmap='gray', size=0.1, alpha=0.1),
'plot':dict(color_index=2, colorbar=True ,invert_yaxis=True, toolbar='above')},
'Image':{'style':dict(cmap= 'gray'),
'plot':dict(invert_yaxis=True)}}
image = hv.Image(to_show_FAM[::8,::8], bounds=(0,0,to_show_FAM.shape[1],to_show_FAM.shape[0]), label= "FAM")
label = hv.Points(data, vdims=['empty'])
box = streams.Bounds(source=label, bounds=(0,0,0,0))
bounds = hv.DynamicMap(lambda bounds: hv.Bounds(bounds), streams=[box])
return image, label, box, bounds, dict_spec
def show_RGB(labeled_img, s = 4):
hv.output(size=200)
dict_spec = {'RGB':{'plot':dict(xaxis=None, yaxis=None)}}
rescaled = labeled_img[::s,::s]
to_show = hv.RGB(label2rgb(rescaled, bg_label=0))
return to_show.opts(dict_spec)
def show_img(labeled_img, s = 4):
# Need to flip image
to_show = labeled_img[::-1]
hv.output(size=200)
dict_spec = {'Image':{'style':dict(cmap= 'nipy_spectral'),
'plot':dict(invert_yaxis=True)}}
display = hv.Image(to_show[::s,::s], bounds=(0,0,to_show.shape[1],to_show.shape[0]))
return display.opts(dict_spec)
def dict_for_plot(property_lst, props):
lst_coord = [prop.centroid[::-1] for prop in props]
data = np.concatenate((np.asarray(lst_coord),
np.asarray(property_lst[2])[:,np.newaxis],
np.asarray(property_lst[0])[:,np.newaxis],
np.asarray(property_lst[1])[:,np.newaxis],
np.asarray(property_lst[3])[:,np.newaxis],
np.asarray(property_lst[4])[:,np.newaxis],
np.asarray(property_lst[5])[:,np.newaxis]),
axis = 1)
data = {"x": data[:,0],
"y": data[:,1],
"std": data[:,2],
"mean": data[:,3],
"median": data[:,4],
"Q1": data[:,5],
"Q2": data[:,6],
"Q3": data[:,7]}
return data
def create_hover():
hover1 = HoverTool(
tooltips=[
("index", "$index"),
("(x,y)", "($x{0000}, $y{0000})"),
("std", "@std"),
]
)
hover2 = HoverTool(
tooltips=[
("index", "$index"),
("(x,y)", "($x{0000}, $y{0000})"),
("mean", "@mean"),
]
)
hover3 = HoverTool(
tooltips=[
("index", "$index"),
("(x,y)", "($x{0000}, $y{0000})"),
("median", "@median"),
]
)
hover4 = HoverTool(
tooltips=[
("index", "$index"),
("(x,y)", "($x{0000}, $y{0000})"),
("Q1", "@Q1"),
]
)
hover5 = HoverTool(
tooltips=[
("index", "$index"),
("(x,y)", "($x{0000}, $y{0000})"),
("Q2", "@Q2"),
]
)
hover6 = HoverTool(
tooltips=[
("index", "$index"),
("(x,y)", "($x{0000}, $y{0000})"),
("Q3", "@Q3"),
]
)
return [hover1, hover2, hover3, hover4, hover5, hover6]
def plot_result(property_lst, props, to_show_FAM, dye = "FAM"):
data = dict_for_plot(property_lst, props)
hover_lst = create_hover()
hv.output(size=200)
dict_spec = {'Points':{'style':dict(cmap='viridis', size=8),
'plot':dict(color_index=2, colorbar=True ,invert_yaxis=True, toolbar='above')},
'Image':{'style':dict(cmap= 'gray'),
'plot':dict(invert_yaxis=True)},
'Overlay':{'plot':dict(tabs=True)}}
image = hv.Image(to_show_FAM[::8,::8], bounds=(0,0,to_show_FAM.shape[1],to_show_FAM.shape[0]), label= dye)
std = hv.Points(data, vdims=['std'], label = 'std').opts(plot=dict(tools=[hover_lst[0]]))
mean = hv.Points(data, vdims=['mean'], label = 'mean').opts(plot=dict(tools=[hover_lst[1]]))
median = hv.Points(data, vdims=['median'], label = 'median').opts(plot=dict(tools=[hover_lst[2]]))
Q1 = hv.Points(data, vdims=['Q1'], label = 'Q1').opts(plot=dict(tools=[hover_lst[3]]))
Q2 = hv.Points(data, vdims=['Q2'], label = 'Q2').opts(plot=dict(tools=[hover_lst[4]]))
Q3 = hv.Points(data, vdims=['Q3'], label = 'Q3').opts(plot=dict(tools=[hover_lst[5]]))
dlayout = image * std * mean * median * Q1 * Q2 * Q3
return dlayout.opts(dict_spec)
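# End-to-end pipeline sketch (as comments rather than live code: stitched()
# hard-codes a 9x9 chunk layout, so the chunk/overlap values below are
# illustrative assumptions, not validated settings):
#
#     cropped = crop(img, size=7000)
#     chunked, xb, yb, ov = chunks(cropped, overlap=100, chunks=8)
#     edges = canny_chunks(chunked)
#     circles = hough_circle_chunk(edges, low=17, high=25)
#     masks = draw_circle(edges, circles)
#     labeled = stitched(edges, masks)
#     property_lst, props = prop_measure(labeled, cropped, back_sub=True)
#     df = data_panda(property_lst)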
'''
def coord_roi_exclude(w):
start, end = w.result
xx, yy = np.meshgrid(np.arange(start[0],end[0]), np.arange(start[1],end[1]))
return np.transpose(np.vstack([xx.ravel(), yy.ravel()]))
def lst_label_in_roitoexclude(propregion_all, positions_toexclude):
coords = [(prop.coords, prop.label) for prop in propregion_all]
lst_label = []
for coord in coords:
A = np.in1d(coord[0][:,0], positions_toexclude[:,0])
B = np.in1d(coord[0][:,1], positions_toexclude[:,1])
C = np.stack((A,B))
if np.any(np.all(C == True,axis=0)):
lst_label.append(coord[1])
return(np.asarray(mean_lst_label, dtype = int))
mean_labels_bis[tuple(region.coords.T)] = mean[0]
'''
|
{"hexsha": "cfed6962168fd9faf4969c247f08594102893d71", "size": 12703, "ext": "py", "lang": "Python", "max_stars_repo_path": "script/vincent.py", "max_stars_repo_name": "bioimage-analysis/microarray_analysis_vincent", "max_stars_repo_head_hexsha": "ed3401a895c06550f2f12e74cbaf971a7578963c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script/vincent.py", "max_issues_repo_name": "bioimage-analysis/microarray_analysis_vincent", "max_issues_repo_head_hexsha": "ed3401a895c06550f2f12e74cbaf971a7578963c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script/vincent.py", "max_forks_repo_name": "bioimage-analysis/microarray_analysis_vincent", "max_forks_repo_head_hexsha": "ed3401a895c06550f2f12e74cbaf971a7578963c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9858356941, "max_line_length": 117, "alphanum_fraction": 0.591986145, "include": true, "reason": "import numpy,from astropy", "num_tokens": 3434}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 16 13:30:45 2018
@author: Lionel Massoulard
"""
import pandas as pd
import numpy as np
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import train_test_split
from tests.helpers.testing_help_models import verif_model
from aikit.models.rotation_forest import (
GroupPCADecisionTreeClassifier,
GroupPCADecisionTreeRegressor,
RandomRotationForestClassifier,
RandomRotationForestRegressor,
)
from aikit.tools.data_structure_helper import DataTypes
import pytest
import itertools
# In[]
X, y = make_classification(random_state=123)
df1, df2, y1, y2 = train_test_split(X, y)
df1 = pd.DataFrame(df1, columns=["COL_%d" % i for i in range(df1.shape[1])])
df2 = pd.DataFrame(df2, columns=["COL_%d" % i for i in range(df1.shape[1])])
X, y = make_regression(random_state=123)
df1_reg, df2_reg, y1_reg, y2_reg = train_test_split(X, y)
df1_reg = pd.DataFrame(df1_reg, columns=["COL_%d" % i for i in range(df1_reg.shape[1])])
df2_reg = pd.DataFrame(df2_reg, columns=["COL_%d" % i for i in range(df1_reg.shape[1])])
# In[] : GroupPCADecisionTreeClassifier
def test_GroupPCADecisionTreeClassifier():
klass = GroupPCADecisionTreeClassifier
model_kwargs = {}
np.random.seed(1)
verif_model(
df1,
df2,
y1,
klass,
model_kwargs,
all_types=(DataTypes.DataFrame, DataTypes.NumpyArray), # , DataTypes.SparseArray, DataTypes.SparseDataFrame),
is_classifier=True,
)
@pytest.mark.longtest
@pytest.mark.parametrize(
"random_state, max_depth, criterion, pca_bootstrap",
list(itertools.product(range(100), (None, 2, 5), ("gini", "entropy"), (True, False))),
)
def test_GroupPCADecisionTreeClassifier_with_params(random_state, max_depth, criterion, pca_bootstrap):
klass = GroupPCADecisionTreeClassifier
model_kwargs = {
"max_depth": max_depth,
"criterion": criterion,
"pca_bootstrap": pca_bootstrap,
"random_state": random_state,
}
verif_model(
df1,
df2,
y1,
klass,
model_kwargs,
all_types=(DataTypes.DataFrame, DataTypes.NumpyArray), # , DataTypes.SparseArray, DataTypes.SparseDataFrame),
is_classifier=True,
)
def verif_all_GroupPCADecisionTreeClassifier():
for random_state, max_depth, criterion, pca_bootstrap in itertools.product(
range(100), (None, 2, 5), ("gini", "entropy"), (True, False)
):
test_GroupPCADecisionTreeClassifier_with_params(
random_state=random_state, max_depth=max_depth, criterion=criterion, pca_bootstrap=pca_bootstrap
)
# In[]
def test_RandomRotationForestClassifier():
klass = RandomRotationForestClassifier
model_kwargs = {}
np.random.seed(2)
verif_model(
df1,
df2,
y1,
klass,
model_kwargs,
all_types=(DataTypes.DataFrame, DataTypes.NumpyArray), # , DataTypes.SparseArray, DataTypes.SparseDataFrame),
is_classifier=True,
)
@pytest.mark.longtest
@pytest.mark.parametrize(
"random_state, max_depth, criterion, pca_bootstrap",
list(itertools.product(range(10), (None, 2, 5), ("gini", "entropy"), (True, False))),
)
def test_RandomRotationForestClassifier_with_params(random_state, max_depth, criterion, pca_bootstrap):
klass = RandomRotationForestClassifier
model_kwargs = {
"max_depth": max_depth,
"criterion": criterion,
"pca_bootstrap": pca_bootstrap,
"random_state": random_state,
}
verif_model(
df1,
df2,
y1,
klass,
model_kwargs,
all_types=(DataTypes.DataFrame, DataTypes.NumpyArray), # , DataTypes.SparseArray, DataTypes.SparseDataFrame),
is_classifier=True,
)
def verif_all_RandomRotationForestClassifier():
for random_state, max_depth, criterion, pca_bootstrap in itertools.product(
range(10), (None, 2, 5), ("gini", "entropy"), (True, False)
):
test_RandomRotationForestClassifier_with_params(
random_state=random_state, max_depth=max_depth, criterion=criterion, pca_bootstrap=pca_bootstrap
)
# In[]
def test_GroupPCADecisionTreeRegressor():
# klass = DecisionTreeClassifier
klass = GroupPCADecisionTreeRegressor
model_kwargs = {}
np.random.seed(3)
verif_model(
df1_reg,
df2_reg,
y1_reg,
klass,
model_kwargs,
all_types=(DataTypes.DataFrame, DataTypes.NumpyArray), # , DataTypes.SparseArray, DataTypes.SparseDataFrame),
is_classifier=False,
)
@pytest.mark.parametrize(
"random_state, max_depth, criterion, pca_bootstrap",
list(itertools.product(range(10), (None, 2, 5), ("mse", "mae"), (True, False))),
)
def test_GroupPCADecisionTreeRegressor_with_params(random_state, max_depth, criterion, pca_bootstrap):
klass = GroupPCADecisionTreeRegressor
model_kwargs = {
"max_depth": max_depth,
"criterion": criterion,
"pca_bootstrap": pca_bootstrap,
"random_state": random_state,
}
verif_model(
df1_reg,  # use the regression splits (df1/y1 hold the classification data)
df2_reg,
y1_reg,
klass,
model_kwargs,
all_types=(DataTypes.DataFrame, DataTypes.NumpyArray), # , DataTypes.SparseArray, DataTypes.SparseDataFrame),
is_classifier=False,
)
def verif_all_GroupPCADecisionTreeRegressor():
for random_state, max_depth, criterion, pca_bootstrap in itertools.product(
range(100), (None, 2, 5), ("mse", "mae"), (True, False)
):
test_GroupPCADecisionTreeRegressor_with_params(
random_state=random_state, max_depth=max_depth, criterion=criterion, pca_bootstrap=pca_bootstrap
)
# In[]
def test_RandomRotationForestRegressor():
klass = RandomRotationForestRegressor
model_kwargs = {}
np.random.seed(4)
verif_model(
df1_reg,  # use the regression splits (df1/y1 hold the classification data)
df2_reg,
y1_reg,
klass,
model_kwargs,
all_types=(DataTypes.DataFrame, DataTypes.NumpyArray), # , DataTypes.SparseArray, DataTypes.SparseDataFrame),
is_classifier=False,
)
@pytest.mark.longtest
@pytest.mark.parametrize(
"random_state, max_depth, criterion, pca_bootstrap",
list(itertools.product(range(10), (None, 2, 5), ("mse", "mae"), (True, False))),
)
def test_RandomRotationForestRegressor_with_params(random_state, max_depth, criterion, pca_bootstrap):
klass = RandomRotationForestRegressor
model_kwargs = {
"max_depth": max_depth,
"criterion": criterion,
"pca_bootstrap": pca_bootstrap,
"random_state": random_state,
}
verif_model(
df1_reg,  # use the regression splits (df1/y1 hold the classification data)
df2_reg,
y1_reg,
klass,
model_kwargs,
all_types=(DataTypes.DataFrame, DataTypes.NumpyArray), # , DataTypes.SparseArray, DataTypes.SparseDataFrame),
is_classifier=False,
)
def verif_all_test_RandomRotationForestRegressor():
for random_state, max_depth, criterion, pca_bootstrap in itertools.product(
range(100), (None, 2, 5), ("mse", "mae"), (True, False)
):
test_RandomRotationForestRegressor_with_params(
random_state=random_state, max_depth=max_depth, criterion=criterion, pca_bootstrap=pca_bootstrap
)
# In[]
|
{"hexsha": "94828cef8528be1c1572fa3a593df18acada9cfe", "size": 7297, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/models/test_rotation_forest.py", "max_stars_repo_name": "gfournier/aikit", "max_stars_repo_head_hexsha": "23257f365a4f387cbb86f0ed3994b696a81b57c6", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2018-09-14T07:29:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-08T19:48:23.000Z", "max_issues_repo_path": "tests/models/test_rotation_forest.py", "max_issues_repo_name": "gfournier/aikit", "max_issues_repo_head_hexsha": "23257f365a4f387cbb86f0ed3994b696a81b57c6", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 40, "max_issues_repo_issues_event_min_datetime": "2019-05-28T09:17:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T15:53:36.000Z", "max_forks_repo_path": "tests/models/test_rotation_forest.py", "max_forks_repo_name": "gfournier/aikit", "max_forks_repo_head_hexsha": "23257f365a4f387cbb86f0ed3994b696a81b57c6", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-11-21T09:38:45.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-25T10:24:54.000Z", "avg_line_length": 27.4323308271, "max_line_length": 118, "alphanum_fraction": 0.6797313965, "include": true, "reason": "import numpy", "num_tokens": 1767}
|
###############################################################################
#
# File: opencv_optical_flow.py
#
# Wrap OpenCV's optical flow functions to make them even easier to use
#
# History:
# 08-05-20 - Levi Burner - Created file
#
###############################################################################
import cv2
import numpy as np
def lucas_kanade(im1_gray, im2_gray,
feature_params = {
'maxCorners': 5000,
'qualityLevel': 0.1,
'minDistance': 7,
'blockSize': 7
},
lucas_kanade_params = {
'winSize': (15, 15),
'maxLevel': 2,
'criteria': (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
}):
start_points = cv2.goodFeaturesToTrack(im1_gray, mask = None, **feature_params)
end_points, status, error = cv2.calcOpticalFlowPyrLK(im1_gray, im2_gray, start_points, None, **lucas_kanade_params)
good_start_points = start_points[status == 1]
good_end_points = end_points[status == 1]
flow = good_end_points - good_start_points
return np.concatenate((good_start_points, flow), axis=1)
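# Minimal usage sketch (the file names are placeholders; any two same-sized
# grayscale frames will do):
if __name__ == "__main__":
    im1 = cv2.imread("frame1.png", cv2.IMREAD_GRAYSCALE)
    im2 = cv2.imread("frame2.png", cv2.IMREAD_GRAYSCALE)
    tracks = lucas_kanade(im1, im2)
    # Each row is (x, y, dx, dy): a tracked corner and its flow vector.
    print(tracks.shape)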
|
{"hexsha": "848532fe0ebc0825a719beed1f4d191c232b4d7b", "size": 1248, "ext": "py", "lang": "Python", "max_stars_repo_path": "motion_illusions/opencv_optical_flow.py", "max_stars_repo_name": "agganu/motion_illusions", "max_stars_repo_head_hexsha": "a5343bada7678827a53551e637e21fcd1a189a0d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-08-03T16:39:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-02T18:56:54.000Z", "max_issues_repo_path": "motion_illusions/opencv_optical_flow.py", "max_issues_repo_name": "ysnan/motion_illusions", "max_issues_repo_head_hexsha": "1b7e8901cbd228a6bdfc8762f6d4756f62361b1f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-09-26T01:28:34.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T16:11:40.000Z", "max_forks_repo_path": "motion_illusions/opencv_optical_flow.py", "max_forks_repo_name": "agganu/motion_illusions", "max_forks_repo_head_hexsha": "a5343bada7678827a53551e637e21fcd1a189a0d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-08-03T16:40:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-12T15:41:45.000Z", "avg_line_length": 32.8421052632, "max_line_length": 119, "alphanum_fraction": 0.5144230769, "include": true, "reason": "import numpy", "num_tokens": 290}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date  : 2021/8/5 3:12 PM
# @File  : filter_wrong.py
# @Author: johnson
# @Desc  : Train the model with several random seeds, then filter out all
#          wrongly predicted samples for later analysis.
import argparse
import json
import os
import pandas as pd
from experiments.myexample.mydata_prepro import do_prepro, absa_source_file, dem8_source_file, purchase_source_file
from predict import do_predict
import collections
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import pickle
mpl.rcParams['font.family'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
def got_args():
parser = argparse.ArgumentParser()
parser.add_argument("-t","--do_train_filter", action="store_true", help='训练模型并过滤badcase')
parser.add_argument("-d", type=str, default="1,2,3,4,5,6,7,8,9,10,11,12,13,14,15",help='随机数种子,用逗号隔开,有几个种子,就运行几次')
parser.add_argument("-k","--task", type=str, default="all", help='对哪个任务进行预测错误的筛选,默认所有')
parser.add_argument("-w","--wrong_path", type=str, default="wrong_sample/0811", help="预测错误的样本默认保存到哪个文件夹下,错误的样本保存成pkl格式,文件名字用随机数种子命名,包含所有任务的结果")
#分析badcase的参数
parser.add_argument("-a","--do_analysis", action="store_true", help='分析badcase')
parser.add_argument("-p","--analysis_path", type=str, default="wrong_sample/0811", help="分析保存预测错误的样本的文件夹,pkl格式")
parser.add_argument("-s","--analysis_tasks", type=str, default="accuracy,samplenum,badnum,totalbad,wrongnum,export,golden_class_num,class_wrong_num", help="分析保存预测错误的样本的文件夹,pkl格式")
args = parser.parse_args()
return args
def train_and_filter(seed, task, wrong_path):
# parse the seed list
seeds = seed.split(",")
# seeds as integers
seeds = list(map(int, seeds))
# resolve which tasks to filter
all_tasks = ["absa","dem8","purchase"]
if task == "all":
tasks = all_tasks
else:
assert task in all_tasks, "the given task is not one of the predefined tasks, please check"
tasks = [task]
# make sure the output directory exists
if not os.path.exists(wrong_path):
os.makedirs(wrong_path)
# back up the source data into the wrong_path directory
os.system(command=f"cp -a data_my/canonical_data/source_data {wrong_path}")
for sd in seeds:
wrong_sample_record = os.path.join(wrong_path, f"filter_seed_{sd}.pkl")
records = collections.defaultdict(dict)
records['seed'] = sd
# From the same source data, a different random seed yields a different
# train/dev/test split; do_prepro returns the sample ids of each split.
absa_ids, dems_ids, purchase_ids = do_prepro(root='data_my', use_pkl=True, seed=sd)
# note: these data_ids index into the source data and are globally unique
absa_train_data_id, absa_dev_data_id, absa_test_data_id = absa_ids
dem8_train_data_id, dem8_dev_data_id, dem8_test_data_id = dems_ids
purchase_train_data_id, purchase_dev_data_id, purchase_test_data_id = purchase_ids
# 第二步,源数据变token
code = os.system(command="/home/wac/johnson/anaconda3/envs/py38/bin/python prepro_std.py --model bert-base-chinese --root_dir data_my/canonical_data --task_def experiments/myexample/my_task_def.yml --do_lower_case")
assert code == 0, "数据处理不成功,请检查"
# 第三步,训练模型
model_output_dir = f"checkpoints/mtdnn_seed_{sd}"
train_options_list = {
            'data_dir': "--data_dir data_my/canonical_data/bert-base-chinese", # path to the tokenized data
            'init_checkpoint': "--init_checkpoint mt_dnn_models/bert_model_base_chinese.pt", # base model checkpoint
            "batch_size": "--batch_size 32",
            "task_def": "--task_def experiments/myexample/my_task_def.yml",
            'output_dir': f"--output_dir {model_output_dir}",
            'log_file': f"--log_file {model_output_dir}/log.log ",
            'answer_opt': "--answer_opt 1 ", # 0 or 1: whether to use the SANClassifier head or a plain linear head; 1 = SANClassifier, 0 = linear mapping
'optimizer': "--optimizer adamax ",
'epochs': "--epochs 5",
'train_datasets': "--train_datasets absa,dem8,purchase",
'test_datasets': "--test_datasets absa,dem8,purchase",
'grad_clipping': "--grad_clipping 0 ",
'global_grad_clipping': "--global_grad_clipping 1 ",
'learning_rate': "--learning_rate 5e-5",
}
train_options = " ".join(train_options_list.values())
command = f"/home/wac/johnson/anaconda3/envs/py38/bin/python train.py {train_options}"
code = os.system(command=command)
assert code == 0, "训练模型失败,请检查"
        # training done; predict with the model from the final epoch
model_path = os.path.join(model_output_dir, "model_final.pt")
tasks2id = {
"absa": 0,
"dem8": 1,
"purchase": 2
}
records['model_path'] = model_path
for task in tasks:
task_record = {}
if task == "absa":
task_record["train_data_id"] = absa_train_data_id
task_record["dev_data_id"] = absa_dev_data_id
task_record["test_data_id"] = absa_test_data_id
elif task == "dem8":
task_record["train_data_id"] = dem8_train_data_id
task_record["dev_data_id"] = dem8_dev_data_id
task_record["test_data_id"] = dem8_test_data_id
else:
task_record["train_data_id"] = purchase_train_data_id
task_record["dev_data_id"] = purchase_dev_data_id
task_record["test_data_id"] = purchase_test_data_id
task_id = tasks2id[task]
            # run prediction on the train, dev and test splits
            # for every selected task
datasets = ["train", "dev", "test"]
for dataset in datasets:
prep_input = f"data_my/canonical_data/bert-base-chinese/{task}_{dataset}.json"
                # prediction results
test_metrics, predict_labels, scores, gold_labels, _ = do_predict(task, task_def="experiments/myexample/my_task_def.yml", task_id=task_id, prep_input=prep_input, with_label=True, score="predict_score.txt", max_seq_len=512, batch_size_eval=32, checkpoint=model_path,
cuda=True, do_collection=False, collection_file=None)
task_record[f"{dataset}_metrics"] = test_metrics
task_record[f"{dataset}_predict_labels"] = predict_labels
task_record[f"{dataset}_scores"] = scores
task_record[f"{dataset}_gold_labels"] = gold_labels
records[task] = task_record
        # save this seed's results (written as JSON even though the file name ends in .pkl)
        with open(wrong_sample_record, 'w') as f:
            json.dump(records, f)
def do_analysis(analysis_path, analysis_tasks):
"""
分析badcase
:param analysis_tasks 绘哪个图
:return:
:rtype:
"""
tasks = ["absa", "dem8", "purchase"]
assert os.path.exists(analysis_path), f"给定的分析的路径不存在:{analysis_path},请检查目录是否正确"
files = os.listdir(analysis_path)
assert "source_data" in files, "原始数据目录不在里面,请检查"
absa_src_data = os.path.join(analysis_path, "source_data", "absa.pkl")
dem8_src_data = os.path.join(analysis_path, "source_data", "dem8.pkl")
purchase_src_data = os.path.join(analysis_path, "source_data", "purchase.pkl")
    assert os.path.exists(absa_src_data), "the absa source data file does not exist, please check"
    assert os.path.exists(dem8_src_data), "the dem8 source data file does not exist, please check"
    assert os.path.exists(purchase_src_data), "the purchase source data file does not exist, please check"
    # results of one training run per random seed
    seed_pkl = [f for f in files if f.endswith('.pkl')]
    # load the result of every run
    seeds_result = []
    for sd_file in seed_pkl:
        # read each record file
        sd_file_path = os.path.join(analysis_path, sd_file)
        with open(sd_file_path, 'rb') as f:
            # result of a single run
            sd_res = json.load(f)
        # collect the per-seed records; the statistics for plotting are computed below
seeds_result.append(sd_res)
if "accuracy" in analysis_tasks:
        # plot the accuracies
        analysis_acc(seeds_result)
    # plot the train/dev/test sample counts
    if "samplenum" in analysis_tasks:
        analysis_sample_num(seeds_result)
    if "badnum" in analysis_tasks:
        # plot only the number of mispredicted samples per seed
        analysis_bad_sample_num(seeds_result)
    if "totalbad" in analysis_tasks:
        # analyze all mispredicted samples: those wrong repeatedly vs. those wrong only once
        total_bad_sample_num(seeds_result)
    if "wrongnum" in analysis_tasks:
        # histogram of misprediction counts: x samples wrong once, y samples wrong twice, z samples wrong three times, ...
        total_bad_sample_bar(seeds_result)
    if "export" in analysis_tasks:
        export_wrong_data(absa_src_data, dem8_src_data, purchase_src_data, seeds_result)
    # analyze the true (gold) count of every class in the samples
    if "golden_class_num" in analysis_tasks:
        analysis_every_class_num(seeds_result)
    if "class_wrong_num" in analysis_tasks:
        # per-class error analysis for every task: accuracy per label
analysis_every_class_wrong(seeds_result)
def analysis_every_class_num(seeds_result):
def collect_value(sd_res,task):
        # all gold labels across the three splits
        class_names = sd_res[task]['train_gold_labels'] + sd_res[task]['dev_gold_labels'] + sd_res[task]['test_gold_labels']
        # count and share of each label
class_count = collections.Counter(class_names)
class_name = list(class_count.keys())
class_num = list(class_count.values())
total_num = len(class_names)
class_percent = [num/total_num for num in class_num]
return class_name, class_num, class_percent
sd_res = seeds_result[0]
class_name, class_num, class_percent = collect_value(sd_res,"absa")
    simple_bar_plot(x=class_name, y=class_num, title="gold-label count per class for task absa", xname="label", yname="gold-label count")
    simple_bar_plot(x=class_name, y=class_percent, title="gold-label share per class for task absa", xname="label", yname="gold-label share")
    class_name, class_num, class_percent = collect_value(sd_res,"dem8")
    simple_bar_plot(x=class_name, y=class_num, title="gold-label count per class for task dem8", xname="label", yname="gold-label count")
    simple_bar_plot(x=class_name, y=class_percent, title="gold-label share per class for task dem8", xname="label", yname="gold-label share")
    class_name, class_num, class_percent = collect_value(sd_res,"purchase")
    simple_bar_plot(x=class_name, y=class_num, title="gold-label count per class for task purchase", xname="label", yname="gold-label count")
    simple_bar_plot(x=class_name, y=class_percent, title="gold-label share per class for task purchase", xname="label", yname="gold-label share")
def analysis_every_class_wrong(seeds_result):
"""
分析每个任务的每个类别的正确率
:param seeds_result:
:type seeds_result:
:return:
:rtype:
"""
def compaire(a, b, cnt_wrong):
"""
:param a: 预测的标签
:type a:
:param b: 真实的标签
:type b:
:return:
:rtype:
"""
assert len(a) == len(b), "a和b的数量应该相等"
for predict, gold in zip(a, b):
if predict != gold:
cnt_wrong[gold] +=1
return cnt_wrong
def collect_value(sd_res,task):
        # all gold labels across the three splits
        class_names = sd_res[task]['train_gold_labels'] + sd_res[task]['dev_gold_labels'] + sd_res[task]['test_gold_labels']
        # count of each label
        class_count = collections.Counter(class_names)
        # the label names
        class_name = []
        # total count per class
        class_num = []
        # number of mispredictions per class
        wrong_num = []
        # misprediction percentage per class
        wrong_percent = []
        # count the labels whose predictions do not match the gold labels
cnt_wrong = collections.Counter()
cnt_wrong = compaire(sd_res[task]['train_predict_labels'], sd_res[task]['train_gold_labels'],cnt_wrong)
cnt_wrong = compaire(sd_res[task]['dev_predict_labels'], sd_res[task]['dev_gold_labels'],cnt_wrong)
cnt_wrong = compaire(sd_res[task]['test_predict_labels'], sd_res[task]['test_gold_labels'], cnt_wrong)
for name, clsnum in class_count.items():
wnum = cnt_wrong.get(name,0)
class_name.append(name)
class_num.append(clsnum)
wrong_num.append(wnum)
wrong_percent.append(wnum/clsnum)
return class_name, class_num,wrong_num, wrong_percent
    for sd_res in seeds_result[:2]:
        # only plot the first two seeds
seed = sd_res['seed']
for task in ["absa","dem8","purchase"]:
class_name, class_num, wrong_num, wrong_percent = collect_value(sd_res,task)
# yvalue = list(zip(class_num, wrong_num, wrong_percent))
            simple_bar_plot(x=class_name, y=class_num, title=f"total predictions per class for task {task}, seed {seed}", xname="class name", yname="total samples per class")
            simple_bar_plot(x=class_name, y=wrong_num, title=f"mispredictions per class for task {task}, seed {seed}", xname="class name", yname="mispredictions per class")
            simple_bar_plot(x=class_name, y=wrong_percent, title=f"misprediction percentage per class for task {task}, seed {seed}", xname="class name", yname="misprediction percentage per class")
def export_wrong_data(absa_src_data, dem8_src_data, purchase_src_data, seeds_result):
"""
导出预测错误的样本
:return:
:rtype:
"""
def compaire(predict, gold, sample_id):
diff_id = []
for p, g, s in zip(predict, gold, sample_id):
if p != g:
diff_id.append(s)
return diff_id
    def collect_value(sd_res, task):
        # return the mispredicted ids; each id is an index into the source data and is globally unique
        t_id = compaire(sd_res[task]['train_predict_labels'], sd_res[task]['train_gold_labels'],
                        sd_res[task]['train_data_id'])
        d_id = compaire(sd_res[task]['dev_predict_labels'], sd_res[task]['dev_gold_labels'],
                        sd_res[task]['dev_data_id'])
        s_id = compaire(sd_res[task]['test_predict_labels'], sd_res[task]['test_gold_labels'],
                        sd_res[task]['test_data_id'])
merge_id = t_id + d_id + s_id
return merge_id
absa_counter = collections.Counter()
dem8_counter = collections.Counter()
purchase_counter = collections.Counter()
for sd_res in seeds_result:
absa_bad_id = collect_value(sd_res, "absa")
absa_counter.update(absa_bad_id)
dem8_bad_id = collect_value(sd_res, "dem8")
dem8_counter.update(dem8_bad_id)
purchase_bad_id = collect_value(sd_res, "purchase")
purchase_counter.update(purchase_bad_id)
    # sort the ids by how often they were mispredicted
sorted_absa = sorted(absa_counter.items(), key=lambda x: x[1],reverse=True)
sorted_dem8 = sorted(dem8_counter.items(), key=lambda x: x[1],reverse=True)
sorted_purchase = sorted(purchase_counter.items(), key=lambda x: x[1],reverse=True)
    # look up the original texts and export them to Excel
with open(absa_src_data, "rb") as f:
absa_data = pickle.load(f)
with open(dem8_src_data, "rb") as f:
dem8_data = pickle.load(f)
with open(purchase_src_data, "rb") as f:
purchase_data = pickle.load(f)
    # turn the data into index-keyed dicts for easy lookup
absa_data_dict = {idx:i for idx, i in enumerate(absa_data)}
dem8_data_dict = {idx:i for idx, i in enumerate(dem8_data)}
purchase_data_dict = {idx:i for idx, i in enumerate(purchase_data)}
    # names of the Excel files to save
absa_save_excel = "absa_wrong.xlsx"
dem8_save_excel = "dem8_wrong.xlsx"
purchase_save_excel = "purchase_wrong.xlsx"
def saved_data(sorted_data, saved_excel, src_data, columns):
col_data = []
for i in sorted_data:
id, wrong_num = i
data = src_data[id]
data = list(data)
data.append(wrong_num)
data.append(str(id))
col_data.append(data)
        # the id is included as a column as well
df = pd.DataFrame(col_data, columns=columns)
df.to_excel(saved_excel)
    # 'label' is the gold label column
saved_data(sorted_data=sorted_absa, saved_excel=absa_save_excel, src_data=absa_data_dict, columns=['text','keyword','start','end','label','channel','wordtype','md5','wrong_num','id'])
saved_data(sorted_data=sorted_dem8, saved_excel=dem8_save_excel, src_data=dem8_data_dict, columns=['text','keyword','start','end','label','channel','wordtype','md5','wrong_num','id'])
saved_data(sorted_data=sorted_purchase, saved_excel=purchase_save_excel, src_data=purchase_data_dict,columns=['text','title','keyword','start','end','label','md5','wrong_num','id'])
print(f"保存预测错误文件到{absa_save_excel}, {dem8_save_excel}, {purchase_save_excel}")
def simple_bar_plot(x, y, title, xname, yname):
"""
普通的柱状图
:param x: eg: [1, 2, 3, 4]
:type x: 是对应的x轴
:param y: eg: [1, 4, 9, 16]
:type y:
:param title: 绘图的标题
:param xname: "预测错误次数"
:type xname:
:param yname: "样本数量"
:type yname:
:return:
:rtype:
"""
fig, ax = plt.subplots()
rects1 = ax.bar(x, y, width=0.3)
ax.bar_label(rects1, padding=3)
ax.set_title(title)
plt.xlabel(xname)
plt.ylabel(yname)
plt.show()
def total_bad_sample_bar(seeds_result):
"""
统计几次seed中预测错误的样本的重复次数
:param seeds_result:
:type seeds_result:
:return:
:rtype:
"""
def compaire(predict,gold,sample_id):
diff_id = []
for p,g,s in zip(predict,gold,sample_id):
if p != g:
diff_id.append(s)
return diff_id
    def collect_value(sd_res,task):
        # return the mispredicted ids; each id is an index into the source data and is globally unique
        t_id = compaire(sd_res[task]['train_predict_labels'], sd_res[task]['train_gold_labels'],sd_res[task]['train_data_id'])
        d_id = compaire(sd_res[task]['dev_predict_labels'], sd_res[task]['dev_gold_labels'],sd_res[task]['dev_data_id'])
        s_id = compaire(sd_res[task]['test_predict_labels'], sd_res[task]['test_gold_labels'],sd_res[task]['test_data_id'])
merge_id = t_id + d_id + s_id
return merge_id
absa_counter = collections.Counter()
dem8_counter = collections.Counter()
purchase_counter = collections.Counter()
for sd_res in seeds_result:
absa_bad_id = collect_value(sd_res,"absa")
absa_counter.update(absa_bad_id)
dem8_bad_id = collect_value(sd_res, "dem8")
dem8_counter.update(dem8_bad_id)
purchase_bad_id = collect_value(sd_res, "purchase")
purchase_counter.update(purchase_bad_id)
    # tally and plot
    # histogram of misprediction counts: how many samples were wrong exactly k times
absa_wrong_count = collections.Counter([count for id, count in absa_counter.items()])
dem8_wrong_count = collections.Counter([count for id, count in dem8_counter.items()])
purchase_wrong_count = collections.Counter([count for id, count in purchase_counter.items()])
x_absa = list(absa_wrong_count.keys())
y_absa = list(absa_wrong_count.values())
x_dem8 = list(dem8_wrong_count.keys())
y_dem8 = list(dem8_wrong_count.values())
x_purchase = list(purchase_wrong_count.keys())
y_purchase = list(purchase_wrong_count.values())
    # number of samples mispredicted more than once
    absa_more1 = [v for k, v in absa_wrong_count.items() if k > 1]
    dem8_more1 = [v for k, v in dem8_wrong_count.items() if k > 1]
    purchase_more1 = [v for k, v in purchase_wrong_count.items() if k > 1]
    x1 = ["once", "n times"]
    y_absa1 = [sum(y_absa)-sum(absa_more1), sum(absa_more1)]
    y_dem81 = [sum(y_dem8)-sum(dem8_more1), sum(dem8_more1)]
    y_purchase1 = [sum(y_purchase)-sum(purchase_more1), sum(purchase_more1)]
    simple_bar_plot(x1, y_absa1, title="mispredicted samples of sentiment task absa: once vs. multiple times", xname="misprediction frequency", yname="sample count")
    simple_bar_plot(x1, y_dem81, title="mispredicted samples of attribute task dem8: once vs. multiple times", xname="misprediction frequency", yname="sample count")
    simple_bar_plot(x1, y_purchase1, title="mispredicted samples of purchase-intent task purchase: once vs. multiple times", xname="misprediction frequency", yname="sample count")
    simple_bar_plot(x_absa, y_absa, title="misprediction frequency of sentiment task absa", xname="misprediction frequency", yname="sample count")
    simple_bar_plot(x_dem8, y_dem8, title="misprediction frequency of attribute task dem8", xname="misprediction frequency", yname="sample count")
    simple_bar_plot(x_purchase, y_purchase, title="misprediction frequency of purchase-intent task purchase", xname="misprediction frequency", yname="sample count")
def total_bad_sample_num(seeds_result):
"""
几次seed预测后,总的预测错误的样本数量,总的出错数量,一个是重复出错的样本的数量,一个是几次seed后只有一次的出错的数量,会去重
:param seeds_result:
:type seeds_result:
:return:
:rtype:
"""
plot_x = ["1"] #没用到
absa_plot_acc_data = []
dem8_plot_acc_data = []
purchase_plot_acc_data = []
def compaire(predict,gold,sample_id):
diff_id = []
for p,g,s in zip(predict,gold,sample_id):
if p != g:
diff_id.append(s)
return diff_id
    def collect_value(sd_res,task):
        # return the mispredicted ids; each id is an index into the source data and is globally unique
        t_id = compaire(sd_res[task]['train_predict_labels'], sd_res[task]['train_gold_labels'],sd_res[task]['train_data_id'])
        d_id = compaire(sd_res[task]['dev_predict_labels'], sd_res[task]['dev_gold_labels'],sd_res[task]['dev_data_id'])
        s_id = compaire(sd_res[task]['test_predict_labels'], sd_res[task]['test_gold_labels'],sd_res[task]['test_data_id'])
merge_id = t_id + d_id + s_id
return merge_id
absa_counter = collections.Counter()
dem8_counter = collections.Counter()
purchase_counter = collections.Counter()
for sd_res in seeds_result:
absa_bad_id = collect_value(sd_res,"absa")
absa_counter.update(absa_bad_id)
dem8_bad_id = collect_value(sd_res, "dem8")
dem8_counter.update(dem8_bad_id)
purchase_bad_id = collect_value(sd_res, "purchase")
purchase_counter.update(purchase_bad_id)
    # tally and plot
    # samples mispredicted exactly once
    a1 = {x: count for x, count in absa_counter.items() if count == 1}
    d1 = {x: count for x, count in dem8_counter.items() if count == 1}
    p1 = {x: count for x, count in purchase_counter.items() if count == 1}
    # samples mispredicted more than once
ma = len(absa_counter) - len(a1)
md = len(dem8_counter) - len(d1)
mp = len(purchase_counter) - len(p1)
absa_plot_acc_data.append([len(absa_counter),len(a1),ma])
dem8_plot_acc_data.append([len(dem8_counter), len(d1), md])
purchase_plot_acc_data.append([len(purchase_counter), len(p1), mp])
plot_bar(title="所有seed情感任务absa的汇总预测错误",yname="样本数",seeds=plot_x, yvalue=absa_plot_acc_data,xname="汇总预测错误",bar_group_labels=["总错误数","错误一次数","错误n次数"])
plot_bar(title="所有seed属性判断dem8的汇总预测错误",yname="样本数",seeds=plot_x, yvalue=dem8_plot_acc_data,xname="汇总预测错误",bar_group_labels=["总错误数","错误一次数","错误n次数"])
plot_bar(title="所有seed购买意向purchase的汇总预测错误",yname="样本数",seeds=plot_x, yvalue=purchase_plot_acc_data,xname="汇总预测错误",bar_group_labels=["总错误数","错误一次数","错误n次数"])
def analysis_bad_sample_num(seeds_result):
"""
读取每个seed种子的结果,绘制错误的样本的数量
:param seeds_result:
:type seeds_result:
:return:
:rtype:
"""
def compaire(a, b):
assert len(a) == len(b), "a和b的数量应该相等"
same_sample = [i for i, j in zip(a, b) if i == j]
diff_num = len(a) - len(same_sample)
return diff_num, len(same_sample)
plot_seeds = []
absa_plot_acc_data = []
dem8_plot_acc_data = []
purchase_plot_acc_data = []
def collect_value(sd_res,task):
a, _ = compaire(sd_res[task]['train_predict_labels'], sd_res[task]['train_gold_labels'])
b, _ = compaire(sd_res[task]['dev_predict_labels'], sd_res[task]['dev_gold_labels'])
c, _ = compaire(sd_res[task]['test_predict_labels'], sd_res[task]['test_gold_labels'])
return a,b,c
for sd_res in seeds_result:
seed = sd_res['seed']
plot_seeds.append(seed)
a,b,c = collect_value(sd_res,"absa")
absa_plot_acc_data.append([a,b,c])
        # collect the dem8 counts
a, b, c = collect_value(sd_res, "dem8")
dem8_plot_acc_data.append([a, b, c])
# purchase
a, b, c = collect_value(sd_res, "purchase")
purchase_plot_acc_data.append([a, b, c])
plot_bar(title="情感任务absa的预测错误样本数",yname="样本数",seeds=plot_seeds, yvalue=absa_plot_acc_data)
plot_bar(title="属性判断dem8的预测错误样本数",yname="样本数",seeds=plot_seeds, yvalue=dem8_plot_acc_data)
plot_bar(title="购买意向purchase的预测错误样本数",yname="样本数",seeds=plot_seeds, yvalue=purchase_plot_acc_data)
def analysis_sample_num(seeds_result):
"""
读取每个seed种子的结果,绘制样本数量,样本数量基本是一样的
:param seeds_result:
:type seeds_result:
:return:
:rtype:
"""
plot_seeds = []
absa_plot_acc_data = []
dem8_plot_acc_data = []
purchase_plot_acc_data = []
for sd_res in seeds_result:
seed = sd_res['seed']
plot_seeds.append(seed)
absa_train_acc = len(sd_res['absa']['train_data_id'])
absa_dev_acc = len(sd_res['absa']['dev_data_id'])
absa_test_acc = len(sd_res['absa']['test_data_id'])
absa_plot_acc_data.append([absa_train_acc,absa_dev_acc,absa_test_acc])
        # collect the dem8 sample counts
dem8_train_acc = len(sd_res['dem8']['train_data_id'])
dem8_dev_acc = len(sd_res['dem8']['dev_data_id'])
dem8_test_acc = len(sd_res['dem8']['test_data_id'])
dem8_plot_acc_data.append([dem8_train_acc,dem8_dev_acc,dem8_test_acc])
# purchase
purchase_train_acc = len(sd_res['purchase']['train_data_id'])
purchase_dev_acc = len(sd_res['purchase']['dev_data_id'])
purchase_test_acc = len(sd_res['purchase']['test_data_id'])
purchase_plot_acc_data.append([purchase_train_acc, purchase_dev_acc, purchase_test_acc])
plot_bar(title="情感任务absa的总样本数",yname="样本数",seeds=plot_seeds, yvalue=absa_plot_acc_data)
plot_bar(title="属性判断dem8的总样本数",yname="样本数",seeds=plot_seeds, yvalue=dem8_plot_acc_data)
plot_bar(title="购买意向purchase的总样本数",yname="样本数",seeds=plot_seeds, yvalue=purchase_plot_acc_data)
def analysis_acc(seeds_result):
"""
读取每个seed种子的结果,绘图准确率
:param seeds_result:
:type seeds_result:
:return:
:rtype:
"""
plot_seeds = []
absa_plot_acc_data = []
dem8_plot_acc_data = []
purchase_plot_acc_data = []
for sd_res in seeds_result:
seed = sd_res['seed']
plot_seeds.append(seed)
absa_train_acc = sd_res['absa']['train_metrics']['ACC']
absa_dev_acc = sd_res['absa']['dev_metrics']['ACC']
absa_test_acc = sd_res['absa']['test_metrics']['ACC']
absa_plot_acc_data.append([absa_train_acc,absa_dev_acc,absa_test_acc])
        # collect the dem8 accuracies
dem8_train_acc = sd_res['dem8']['train_metrics']['ACC']
dem8_dev_acc = sd_res['dem8']['dev_metrics']['ACC']
dem8_test_acc = sd_res['dem8']['test_metrics']['ACC']
dem8_plot_acc_data.append([dem8_train_acc,dem8_dev_acc,dem8_test_acc])
# purchase
purchase_train_acc = sd_res['purchase']['train_metrics']['ACC']
purchase_dev_acc = sd_res['purchase']['dev_metrics']['ACC']
purchase_test_acc = sd_res['purchase']['test_metrics']['ACC']
purchase_plot_acc_data.append([purchase_train_acc, purchase_dev_acc, purchase_test_acc])
    # average accuracies
    def average_acc(acc_data):
        # append the averages at the end
avg_train = sum([i[0] for i in acc_data])/len(acc_data)
avg_dev = sum([i[1] for i in acc_data])/len(acc_data)
avg_test = sum([i[2] for i in acc_data])/len(acc_data)
acc_data.append([avg_train,avg_dev,avg_test])
return acc_data
absa_plot_acc_data = average_acc(absa_plot_acc_data)
dem8_plot_acc_data = average_acc(dem8_plot_acc_data)
purchase_plot_acc_data = average_acc(purchase_plot_acc_data)
    # 999 stands for the average
    plot_seeds.append(999)
    plot_bar(title="accuracy of sentiment task absa", yname="accuracy", seeds=plot_seeds, yvalue=absa_plot_acc_data, ylimit=[0, 100])
    plot_bar(title="accuracy of attribute task dem8", yname="accuracy", seeds=plot_seeds, yvalue=dem8_plot_acc_data, ylimit=[0, 100])
    plot_bar(title="accuracy of purchase-intent task purchase", yname="accuracy", seeds=plot_seeds, yvalue=purchase_plot_acc_data, ylimit=[0, 100])
    print(f"accuracy of sentiment task absa: {absa_plot_acc_data}")
    print(f"accuracy of attribute task dem8: {dem8_plot_acc_data}")
    print(f"accuracy of purchase-intent task purchase: {purchase_plot_acc_data}")
def plot_bar(title, yname, seeds, yvalue, ylimit=None, xname="random seed", bar_group_labels=["train", "dev", "test"]):
    """
    Grouped bar chart, e.g. of accuracies.
    :param title: title shown on the plot
    :type title:
    :param seeds: list of random seeds, used only as x tick labels
    :type seeds:
    :param yvalue: the y values, e.g. a nested list of accuracies where each inner list holds the [train, dev, test] results
    :type yvalue:
    :param ylimit: range of the y axis
    :param xname: name of the x axis
    :param bar_group_labels: name of each bar within a group, matching the order of the yvalue entries
    :return:
    :rtype:
    """
    ## requires matplotlib >= 3.4.2 (for ax.bar_label)
    # x positions
    # assign a position and width to every bar
    x = np.arange(len(seeds))  # the label locations
    width = 0.6/3  # width of each bar
    yvalue = np.array(yvalue).T
    fig, ax = plt.subplots()
    # draw the three bars of every group left-to-right in label order
    rects1 = ax.bar(x - width, yvalue[0], width, label=bar_group_labels[0])
    rects2 = ax.bar(x, yvalue[1], width, label=bar_group_labels[1])
    rects3 = ax.bar(x + width, yvalue[2], width, label=bar_group_labels[2])
    # set the y-axis range
    if ylimit:
        ax.set_ylim(ylimit)
    # configure the x and y axes
    ax.set_ylabel(yname)
    ax.set_title(title)
    ax.set_xlabel(xname)
    ax.set_xticks(x)
    ax.set_xticklabels(seeds)
    # show the legend mapping bar colors to labels
    ax.legend()
    # print the value above every bar; padding is the distance above the bar
    ax.bar_label(rects1, padding=3, color='blue')
    ax.bar_label(rects2, padding=15, color='orange')
    ax.bar_label(rects3, padding=30, color='green')
    # tight layout makes the plot area larger
    fig.tight_layout()
plt.show()
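# Example call (values are illustrative): two seeds, each with [train, dev, test] accuracies.
#   plot_bar(title="absa accuracy", yname="ACC", seeds=[1, 2],
#            yvalue=[[90.1, 88.3, 87.5], [91.0, 88.9, 86.7]])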
if __name__ == '__main__':
args = got_args()
if args.do_train_filter:
train_and_filter(seed=args.seed, task=args.task ,wrong_path=args.wrong_path)
else:
        # analysis
plot_tasks = args.analysis_tasks.split(',')
do_analysis(analysis_path=args.analysis_path, analysis_tasks=plot_tasks)
|
{"hexsha": "7337e522a80c417c587adda707c51c76773b4b27", "size": 28595, "ext": "py", "lang": "Python", "max_stars_repo_path": "filter_wrong.py", "max_stars_repo_name": "johnson7788/mt-dnn", "max_stars_repo_head_hexsha": "26e5c4a5bfdbf1a1dd1c903e606db1c070568237", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "filter_wrong.py", "max_issues_repo_name": "johnson7788/mt-dnn", "max_issues_repo_head_hexsha": "26e5c4a5bfdbf1a1dd1c903e606db1c070568237", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "filter_wrong.py", "max_forks_repo_name": "johnson7788/mt-dnn", "max_forks_repo_head_hexsha": "26e5c4a5bfdbf1a1dd1c903e606db1c070568237", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-14T08:57:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-14T08:57:20.000Z", "avg_line_length": 45.8253205128, "max_line_length": 281, "alphanum_fraction": 0.6693128169, "include": true, "reason": "import numpy", "num_tokens": 9156}
|
! Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
! See https://llvm.org/LICENSE.txt for license information.
! SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
!** Test that the -msve-vector-bits flag is passed through correctly
! REQUIRES: aarch64-registered-target
! REQUIRES: llvm-13
! RUN: %flang -S -emit-llvm -target aarch64-linux-gnu -march=armv8-a+sve -msve-vector-bits=128 %s -o - | FileCheck %s -check-prefix=ATTRS-SVE-128
! RUN: %flang -S -emit-llvm -target aarch64-linux-gnu -march=armv8-a+sve -msve-vector-bits=256 %s -o - | FileCheck %s -check-prefix=ATTRS-SVE-256
! RUN: %flang -S -emit-llvm -target aarch64-linux-gnu -march=armv8-a+sve2 -msve-vector-bits=512 %s -o - | FileCheck %s -check-prefix=ATTRS-SVE2-512
! RUN: %flang -S -emit-llvm -target aarch64-linux-gnu -march=armv8-a+sve2-sha3 -msve-vector-bits=2048 %s -o - | FileCheck %s -check-prefix=ATTRS-SVE2SHA3-2048
! RUN: %flang -S -emit-llvm -target aarch64-linux-gnu -march=armv8-a+sve2 -msve-vector-bits=scalable %s -o - | FileCheck %s -check-prefix=ATTRS-SVE2-SCALABLE
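! For -msve-vector-bits=N the checks below expect vscale_range(N/128, N/128),
! since the SVE granule is 128 bits; "scalable" allows the full range (1,16).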
program tz
integer :: i
integer :: acc(100)
do i = 1, 100
acc(i) = 5
end do
print *, acc(100)
end program
! ATTRS-SVE-128: attributes #{{[0-9]+}}
! ATTRS-SVE-128-DAG: "target-features"="+neon,+sve"
! ATTRS-SVE-128-DAG: vscale_range(1,1)
! ATTRS-SVE-256: attributes #{{[0-9]+}}
! ATTRS-SVE-256-DAG: "target-features"="+neon,+sve"
! ATTRS-SVE-256-DAG: vscale_range(2,2)
! ATTRS-SVE2-512: attributes #{{[0-9]+}}
! ATTRS-SVE2-512-DAG: "target-features"="+neon,+sve2,+sve"
! ATTRS-SVE2-512-DAG: vscale_range(4,4)
! ATTRS-SVE2SHA3-2048: attributes #{{[0-9]+}}
! ATTRS-SVE2SHA3-2048-DAG: "target-features"="+neon,+sve2-sha3,+sve,+sve2"
! ATTRS-SVE2SHA3-2048-DAG: vscale_range(16,16)
! ATTRS-SVE2-SCALABLE: attributes #{{[0-9]+}}
! ATTRS-SVE2-SCALABLE-DAG: "target-features"="+neon,+sve2,+sve"
! ATTRS-SVE2-SCALABLE-DAG: vscale_range(1,16)
|
{"hexsha": "fe1c2bd59643a9ded6ab7ba85ae142fabf6e3b95", "size": 1961, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/llvm_ir_correct/vscale-mbits.f90", "max_stars_repo_name": "vigbalu/flang", "max_stars_repo_head_hexsha": "d47f3a626687453df7d3dede01021f5891c2dacd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/llvm_ir_correct/vscale-mbits.f90", "max_issues_repo_name": "vigbalu/flang", "max_issues_repo_head_hexsha": "d47f3a626687453df7d3dede01021f5891c2dacd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/llvm_ir_correct/vscale-mbits.f90", "max_forks_repo_name": "vigbalu/flang", "max_forks_repo_head_hexsha": "d47f3a626687453df7d3dede01021f5891c2dacd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.0, "max_line_length": 158, "alphanum_fraction": 0.684854666, "num_tokens": 761}
|
import sys
import PyQt5
from PyQt5.QtWidgets import QMainWindow, QApplication, QToolBar, QFileDialog, QMessageBox, QColorDialog
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QImage, QPixmap, QColor
from labelseg.mainwindow import Ui_MainWindow
import cv2 as cv
from enum import Enum
import os
from pathlib import Path
import labelseg.util as util
from PyQt5.Qt import QMouseEvent
from labelseg.app_dialog import PixelRangeDialog
import logging
import numpy as np
class OPEN_MODE(Enum):
OPEN_FILE = 1
OPEN_DIR = 2
class STATE(Enum):
NORMAL = 1
DRAW_POLYGON = 2
DRAW_RECTANGLE = 3
DRAW_ELLIPSE = 4
class AppWindow(QMainWindow):
def __init__(self):
super(AppWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.img_area.setScaledContents(False)
self.ui.img_area.setStyleSheet('border: 1px solid red')
self.add_tool_bar()
self.bind_actions()
self.init_vars()
def init_vars(self):
self.open_mode = OPEN_MODE.OPEN_FILE
self.opened_files = []
self.cur_img = None
        self.panel_pic = None # canvas the rectangles and other shapes are drawn on
        self.origin_img = None # keeps the image at its original size
self.labelimg = None
self.scale = 1
self.cur_rect_list = []
self.cur_label_list = []
self.histroty = []
self.fill_color = QColor('green')
self.pixel_range = [-5, 5]
self.state = STATE.NORMAL
self.start_point = None
self.end_point = None
self.polygon_points = None
self.ellipse_points = None
self.modified = False
self.saved = False
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s %(funcName)s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
def refresh_vars(self):
self.cur_img = None
        self.panel_pic = None # canvas the rectangles and other shapes are drawn on
        self.origin_img = None # keeps the image at its original size
self.labelimg = None
self.scale = 1
self.cur_rect_list = []
self.cur_label_list = []
self.histroty = []
self.state = STATE.NORMAL
self.ui.actionRectangle.setChecked(False)
self.ui.actionPolygon.setChecked(False)
self.ui.actionEllipse.setChecked(False)
self.start_point = None
self.end_point = None
self.polygon_points = None
self.ellipse_points = None
self.modified = False
self.saved = False
def bind_actions(self):
self.ui.actionOpen_File.triggered.connect(self.open_file)
self.ui.actionOpen_Dir.triggered.connect(self.open_dir)
self.ui.file_list.itemClicked.connect(self.file_list_item_changed)
self.ui.actionZoom_In.triggered.connect(self.zoom_in_pic)
self.ui.actionZoom_Out.triggered.connect(self.zoom_out_pic)
self.ui.img_area.mouse_pressed.connect(self.mouse_pressed)
self.ui.img_area.mouse_move.connect(self.mouse_move)
self.ui.img_area.mouse_release.connect(self.mouse_release)
self.ui.actionfill_color.triggered.connect(self.set_fill_color)
self.ui.actionSet_Pixel_Range.triggered.connect(self.set_pixel_range)
self.ui.actionUndo.triggered.connect(self.undo)
self.ui.actionSave.triggered.connect(self.save)
self.ui.actionPolygon.triggered.connect(self.create_polygon)
self.ui.actionRectangle.triggered.connect(self.create_rectangle)
self.ui.actionEllipse.triggered.connect(self.create_ellipse)
def save(self):
name = str(self.selected_filename.absolute())
items = name.split('.')
output_filename = '.'.join(items[:-1]) + '.npy'
np.save(output_filename, self.labelimg)
self.saved = True
self.modified = False
def undo(self, checked):
if self.state == STATE.DRAW_POLYGON and self.polygon_points is not None and self.polygon_points[
'finish'] is False:
if len(self.polygon_points['points']) == 1:
self.polygon_points = None
elif len(self.polygon_points['points']) > 1:
self.polygon_points['points'].pop(-1)
self.draw_lines(self.polygon_points['points'], False)
if self.labelimg is not None:
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
else:
self.show_pic(file_name=None, content=self.panel_pic)
else:
if len(self.histroty) == 0:
return
x, y = self.histroty.pop(-1)
self.labelimg[x, y] = 0
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
self.modified = True
def set_fill_color(self, checked):
color = QColorDialog.getColor()
if color.isValid():
self.fill_color = color
def set_pixel_range(self, checked):
dialog = PixelRangeDialog()
if dialog.exec_():
v1, v2 = dialog.get_result()
self.pixel_range = [v1, v2]
self.logger.debug(self.pixel_range)
def create_polygon(self, checked):
if checked:
self.state = STATE.DRAW_POLYGON
self.ui.actionEllipse.setChecked(False)
self.ui.actionRectangle.setChecked(False)
else:
self.state = STATE.NORMAL
def create_rectangle(self, checked):
if checked:
self.state = STATE.DRAW_RECTANGLE
self.ui.actionEllipse.setChecked(False)
self.ui.actionPolygon.setChecked(False)
else:
self.state = STATE.NORMAL
def create_ellipse(self, checked):
if checked:
self.state = STATE.DRAW_ELLIPSE
self.ui.actionPolygon.setChecked(False)
self.ui.actionRectangle.setChecked(False)
else:
self.state = STATE.NORMAL
def pos_in_img_area(self, pos: PyQt5.QtCore.QPoint):
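        # The pixmap is centered inside the label widget, so the image's top-left
        # corner sits at ((widget_w - img_w) // 2, (widget_h - img_h) // 2);
        # subtracting that offset converts a widget position into image coordinates.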
if self.cur_img is None:
return False, None
width = self.ui.img_area.width()
height = self.ui.img_area.height()
img_height, img_width, _ = self.cur_img.shape
w = (width - img_width) // 2
h = (height - img_height) // 2
x_valid = w <= pos.x() <= w + img_width
y_valid = h <= pos.y() <= h + img_height
valid = x_valid and y_valid
if valid:
return True, (pos.x() - w, pos.y() - h)
else:
return False, None
def mouse_pressed(self, ev: QMouseEvent):
if self.state == STATE.NORMAL:
return
if ev.button() == Qt.LeftButton:
pos = ev.pos()
valid, relative_pos = self.pos_in_img_area(pos)
self.logger.debug(valid)
if valid:
self.logger.debug(relative_pos)
point = (
int(relative_pos[0] / self.scale), int(relative_pos[1] / self.scale))
if self.state == STATE.DRAW_RECTANGLE:
self.start_point = point
self.end_point = None
self.logger.debug(self.start_point)
elif self.state == STATE.DRAW_POLYGON:
if self.polygon_points is None or self.polygon_points['finish'] is True:
self.polygon_points = {
'finish': False, 'points': [point]}
else:
p1 = np.array(
[
self.polygon_points['points'][0][0] *
self.scale,
self.polygon_points['points'][0][1] *
self.scale])
p2 = np.array(
[point[0] * self.scale, point[1] * self.scale])
dis = np.linalg.norm(p1 - p2, 2)
if dis < 10:
self.polygon_points['finish'] = True
self.draw_lines(
self.polygon_points['points'], True)
else:
self.polygon_points['points'].append(point)
self.draw_lines(
self.polygon_points['points'], False)
if self.labelimg is not None:
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
else:
self.show_pic(
file_name=None, content=self.panel_pic)
elif self.state == STATE.DRAW_ELLIPSE:
if self.ellipse_points is None:
self.ellipse_points = {'points': [point], 'info': None}
elif len(self.ellipse_points['points']) == 1:
self.ellipse_points['points'].append(point)
self.draw_lines(self.ellipse_points['points'], False)
if self.labelimg is not None:
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
else:
self.show_pic(
file_name=None, content=self.panel_pic)
elif len(self.ellipse_points['points']) == 2:
self.ellipse_points['points'].append(point)
self.ellipse_points['info'] = self.draw_ellipse(
self.ellipse_points['points'])
if self.labelimg is not None:
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
else:
self.show_pic(
file_name=None, content=self.panel_pic)
else:
self.ellipse_points = {'points': [point], 'info': None}
else:
raise NotImplementedError()
elif ev.button() == Qt.RightButton:
pass
def mouse_move(self, ev: QMouseEvent):
if self.state == STATE.NORMAL:
return
pos = ev.pos()
valid, relative_pos = self.pos_in_img_area(pos)
if valid:
relative_pos = (
int(relative_pos[0] / self.scale), int(relative_pos[1] / self.scale))
if self.state == STATE.DRAW_RECTANGLE:
self.draw_rect(self.start_point, relative_pos)
if self.labelimg is not None:
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
else:
self.show_pic(file_name=None, content=self.panel_pic)
elif self.state == STATE.DRAW_POLYGON:
pass
elif self.state == STATE.DRAW_ELLIPSE:
pass
else:
raise NotImplementedError()
def draw_ellipse(self, points):
color_r, color_g, color_b = self.fill_color.red(
), self.fill_color.green(), self.fill_color.blue()
points = list(map(lambda item: (
int(item[0] * self.scale), int(item[1] * self.scale)), points))
p1 = np.array(points[0])
p2 = np.array(points[1])
if p1[0] > p2[0]:
p1, p2 = p2, p1
p3 = np.array(points[2])
center = tuple((p1 + p2) // 2)
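        # r1 is half the distance |p1 p2|, i.e. the first semi-axis; a*x + b*y + c = 0
        # is the line through p1 and p2, and r2 (below) is the perpendicular distance
        # from p3 to that line, i.e. the second semi-axis.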
r1 = int(np.linalg.norm(p1 - p2, 2) / 2)
a = p2[1] - p1[1]
b = p1[0] - p2[0]
c = p2[0] * p1[1] - p1[0] * p2[1]
r2 = int(np.abs(a * p3[0] + b * p3[1] + c) / np.sqrt(a ** 2 + b ** 2))
if r1 > r2:
theta = -1 * (np.arctan(a / b) * 180) / np.pi
self.panel_pic = cv.ellipse(self.cur_img.copy(
), center, (r1, r2), theta, 0, 360, (color_r, color_g, color_b), 1)
return ((int(center[0] / self.scale), int(center[1] / self.scale)), (int(r1 / self.scale), int(r2 / self.scale)), theta)
else:
theta = (np.arctan(a / b) * 180) / np.pi
if theta > 0:
theta = 90 - theta
else:
theta = -90 - theta
self.panel_pic = cv.ellipse(self.cur_img.copy(), center, (r2, r1), theta, 0, 360,
(color_r, color_g, color_b), 1)
return ((int(center[0] / self.scale), int(center[1] / self.scale)), (int(r2 / self.scale), int(r1 / self.scale)), theta)
def draw_lines(self, points, end=False):
b, g, r = self.fill_color.blue(), self.fill_color.green(), self.fill_color.red()
temp_img = self.cur_img.copy()
points = list(map(lambda item: (
int(item[0] * self.scale), int(item[1] * self.scale)), points))
for i in range(len(points) - 1):
temp_img = cv.line(
temp_img, points[i], points[i + 1], (r, g, b), 1)
if end:
temp_img = cv.line(temp_img, points[-1], points[0], (r, g, b), 1)
self.panel_pic = temp_img
def draw_rect(self, start_point, end_point):
b, g, r = self.fill_color.blue(), self.fill_color.green(), self.fill_color.red()
start_point = (int(start_point[0] * self.scale),
int(start_point[1] * self.scale))
end_point = (int(end_point[0] * self.scale),
int(end_point[1] * self.scale))
self.panel_pic = cv.rectangle(
self.cur_img.copy(), start_point, end_point, (r, g, b), 1)
def draw_points(self):
if self.labelimg is None:
return
b, g, r = self.fill_color.blue(), self.fill_color.green(), self.fill_color.red()
x, y = np.where(self.labelimg == 1)
temp = self.panel_pic.copy()
for i in range(x.shape[0]):
cv.circle(temp, (int(y[i] * self.scale),
int(x[i] * self.scale)), 1, (r, g, b), 1)
return temp
def mouse_release(self, ev: QMouseEvent):
if self.state == STATE.NORMAL:
return
if ev.button() == Qt.LeftButton:
if self.state == STATE.DRAW_RECTANGLE:
pos = ev.pos()
valid, relative_pos = self.pos_in_img_area(pos)
self.logger.debug(valid)
if valid:
self.logger.debug(relative_pos)
self.end_point = (
int(relative_pos[0] / self.scale), int(relative_pos[1] / self.scale))
elif ev.button() == Qt.RightButton:
if self.state == STATE.NORMAL:
return
if self.state == STATE.DRAW_RECTANGLE:
if self.start_point is None or self.end_point is None:
return
pos = ev.pos()
valid, relative_pos = self.pos_in_img_area(pos)
if valid:
click_pos = (
int(relative_pos[0] / self.scale), int(relative_pos[1] / self.scale))
min_x, max_x = min(
self.start_point[0], self.end_point[0]), max(
self.start_point[0], self.end_point[0])
min_y, max_y = min(
self.start_point[1], self.end_point[1]), max(
self.start_point[1], self.end_point[1])
self.logger.debug(click_pos)
if min_x <= click_pos[0] <= max_x and min_y <= click_pos[1] <= max_y:
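                        # Region selection: take the gray value at the clicked pixel,
                        # widen it by self.pixel_range into [low, high], and label every
                        # pixel inside the rectangle whose value falls in that range.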
pixel_value = self.origin_img[click_pos[1],
click_pos[0], 0]
low, high = pixel_value + \
self.pixel_range[0], pixel_value + \
self.pixel_range[1]
selected_area = self.origin_img[min_y: max_y +
1, min_x: max_x + 1, 0]
x, y = np.where((selected_area >= low) *
(selected_area <= high))
x = x + min_y
y = y + min_x
self.labelimg[x, y] = 1
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
self.histroty.append((x, y))
self.modified = True
elif self.state == STATE.DRAW_POLYGON:
if self.polygon_points is None or self.polygon_points['finish'] is False:
return
pos = ev.pos()
valid, relative_pos = self.pos_in_img_area(pos)
if valid:
click_pos = (
int(relative_pos[0] / self.scale), int(relative_pos[1] / self.scale))
self.logger.debug(click_pos)
x_pos = np.array([item[0]
for item in self.polygon_points['points']])
y_pos = np.array([item[1]
for item in self.polygon_points['points']])
min_x, max_x = x_pos.min(), x_pos.max()
min_y, max_y = y_pos.min(), y_pos.max()
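                # Rasterize the polygon into a binary mask with cv.fillPoly (shifted to the
                # bounding-box origin), then apply the same pixel-range test restricted to the mask.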
p = np.array(self.polygon_points['points']).reshape(
[1, len(self.polygon_points['points']), 2])
p[0, :, 0] -= min_x
p[0, :, 1] -= min_y
mask = np.zeros(
[max_y - min_y + 1, max_x - min_x + 1], np.uint8)
mask = cv.fillPoly(mask, p, 1)
pixel_value = self.origin_img[click_pos[1],
click_pos[0], 0]
low, high = pixel_value + \
self.pixel_range[0], pixel_value + self.pixel_range[1]
selected_img = self.origin_img[min_y: max_y +
1, min_x: max_x + 1, 0]
x, y = np.where((selected_img >= low) *
(selected_img <= high) * mask)
x = x + min_y
y = y + min_x
self.labelimg[x, y] = 1
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
self.histroty.append((x, y))
self.modified = True
elif self.state == STATE.DRAW_ELLIPSE:
if self.ellipse_points is None or len(
self.ellipse_points['points']) < 3 or self.ellipse_points['info'] is None:
return
pos = ev.pos()
valid, relative_pos = self.pos_in_img_area(pos)
if valid:
click_pos = (
int(relative_pos[0] / self.scale), int(relative_pos[1] / self.scale))
self.logger.debug(click_pos)
center, r, theta = self.ellipse_points['info']
theta = int(np.around(theta))
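                # cv.ellipse2Poly approximates the ellipse outline as a polygon, which is
                # filled into a binary mask exactly like the hand-drawn polygon case above.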
p = cv.ellipse2Poly(center, r, theta, 0, 360, 1)
num_p = p.shape[0]
p = p.reshape((1, num_p, 2))
min_x, max_x = np.min(p[0, :, 0]), np.max(p[0, :, 0])
min_y, max_y = np.min(p[0, :, 1]), np.max(p[0, :, 1])
p[0, :, 0] -= min_x
p[0, :, 1] -= min_y
mask = np.zeros(
(max_y - min_y + 1, max_x - min_x + 1), np.uint8)
mask = cv.fillPoly(mask, p, 1)
pixel_value = self.origin_img[click_pos[1],
click_pos[0], 0]
low, high = pixel_value + \
self.pixel_range[0], pixel_value + self.pixel_range[1]
selected_img = self.origin_img[min_y: max_y +
1, min_x: max_x + 1, 0]
x, y = np.where((selected_img >= low) *
(selected_img <= high) * mask)
x = x + min_y
y = y + min_x
self.labelimg[x, y] = 1
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
self.histroty.append((x, y))
self.modified = True
self.saved = False
def zoom_in_pic(self, checked):
self.zoom_pic(True)
def zoom_out_pic(self, checked):
self.zoom_pic(False)
def zoom_pic(self, zoom_in):
if self.cur_img is None:
return
if zoom_in:
if self.scale >= 5:
pass
elif self.scale < 1:
self.scale += 0.1
else:
self.scale += 1
else:
if self.scale == 0:
pass
elif self.scale <= 1:
self.scale -= 0.1
else:
self.scale -= 1
self.logger.debug(self.scale)
height, width, _ = self.origin_img.shape
        self.cur_img = cv.resize(self.origin_img, (int(
            width * self.scale), int(height * self.scale)), interpolation=cv.INTER_LINEAR)
if self.state == STATE.DRAW_RECTANGLE:
if self.start_point is not None and self.end_point is not None:
self.draw_rect(self.start_point, self.end_point)
if self.labelimg is not None:
pic = self.draw_points()
self.show_pic(content=pic)
else:
self.show_pic(file_name=None, content=self.panel_pic)
elif self.state == STATE.DRAW_POLYGON:
if self.polygon_points is not None and self.polygon_points['finish'] is True:
self.draw_lines(self.polygon_points['points'], True)
if self.labelimg is not None:
pic = self.draw_points()
self.show_pic(content=pic)
else:
self.show_pic(file_name=None, content=self.panel_pic)
elif self.state == STATE.DRAW_ELLIPSE:
if self.ellipse_points is not None and len(
self.ellipse_points['points']) == 3:
self.draw_ellipse(self.ellipse_points['points'])
if self.labelimg is not None:
pic = self.draw_points()
self.show_pic(content=pic)
else:
self.show_pic(file_name=None, content=self.panel_pic)
elif self.labelimg is not None:
self.panel_pic = self.cur_img.copy()
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
else:
self.show_pic(file_name=None)
def file_list_item_changed(self, item):
if self.modified and not self.saved:
res = QMessageBox.warning(
self,
'warning',
'Changes not saved, do you want to save?',
QMessageBox.Ok | QMessageBox.No,
QMessageBox.Ok)
if res == QMessageBox.Ok:
self.save()
base_pic_name = item.text()
for index, filename in enumerate(self.opened_files):
if filename.name == base_pic_name:
selected_filename = filename
break
self.selected_filename = selected_filename
self.refresh_vars()
self.open(self.selected_filename)
def add_tool_bar(self):
self.toolbar = QToolBar()
self.addToolBar(Qt.TopToolBarArea, self.toolbar)
self.toolbar.addAction(self.ui.actionOpen_File)
self.toolbar.addAction(self.ui.actionOpen_Dir)
self.toolbar.addAction(self.ui.actionSave)
self.toolbar.addAction(self.ui.actionZoom_In)
self.toolbar.addAction(self.ui.actionZoom_Out)
self.toolbar.addAction(self.ui.actionfill_color)
self.toolbar.addAction(self.ui.actionSet_Pixel_Range)
self.toolbar.addAction(self.ui.actionRectangle)
self.toolbar.addAction(self.ui.actionEllipse)
self.toolbar.addAction(self.ui.actionPolygon)
self.toolbar.addAction(self.ui.actionUndo)
def show_pic(self, file_name=None, content=None):
if file_name is not None:
file_name = str(file_name.absolute())
img = cv.imdecode(np.fromfile(file_name, dtype=np.uint8), -1)
assert img is not None
if len(img.shape) == 2:
img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
height, width, channel = img.shape
self.cur_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
self.origin_img = self.cur_img
            self.labelimg = np.zeros(self.origin_img.shape[:2], dtype=int)
qimg = QImage(
self.cur_img.data,
width,
height,
width * channel,
QImage.Format_RGB888)
self.ui.img_area.setAlignment(Qt.AlignCenter | Qt.AlignHCenter)
self.ui.img_area.setPixmap(QPixmap.fromImage(qimg))
self.ui.img_area.adjustSize()
elif content is not None:
height, width, channel = content.shape
qimg = QImage(
content.data,
width,
height,
width * channel,
QImage.Format_RGB888)
self.ui.img_area.setAlignment(Qt.AlignCenter | Qt.AlignHCenter)
self.ui.img_area.setPixmap(QPixmap.fromImage(qimg))
self.ui.img_area.adjustSize()
else:
height, width, channel = self.cur_img.shape
qimg = QImage(
self.cur_img.data,
width,
height,
width * channel,
QImage.Format_RGB888)
self.ui.img_area.setAlignment(Qt.AlignCenter | Qt.AlignHCenter)
self.ui.img_area.setPixmap(QPixmap.fromImage(qimg))
self.ui.img_area.adjustSize()
def open_file(self, checked):
if self.modified and not self.saved:
res = QMessageBox.warning(
self,
'warning',
'Changes not saved, do you want to save?',
QMessageBox.Ok | QMessageBox.No,
QMessageBox.Ok)
if res == QMessageBox.Ok:
self.save()
filename, filetype = QFileDialog.getOpenFileName(
            self, 'Select File', '.', 'PNG Files(*.png);;JPG Files(*.jpg)')
if filename == '':
return
filename = Path(filename)
self.open_mode = OPEN_MODE.OPEN_FILE
self.opened_files = [filename]
self.selected_filename = filename
self.open(self.selected_filename)
self.refresh_list()
def open_dir(self, checked):
if self.modified and not self.saved:
res = QMessageBox.warning(
self,
'warning',
'Changes not saved, do you want to save?',
QMessageBox.Ok | QMessageBox.No,
QMessageBox.Ok)
if res == QMessageBox.Ok:
self.save()
        opened_dir = QFileDialog.getExistingDirectory(self, 'Open Directory', '.')
if opened_dir == '':
return
opened_dir = Path(opened_dir)
self.open_mode = OPEN_MODE.OPEN_DIR
self.opened_files = list(opened_dir.iterdir())
self.opened_files = [
item for item in self.opened_files if util.is_pic(
item)]
if len(self.opened_files) == 0:
QMessageBox.warning(self, 'warning', 'No image found')
else:
self.selected_filename = self.opened_files[0]
self.open(self.selected_filename)
self.refresh_list()
def refresh_list(self):
self.ui.file_list.clear()
for file_name in self.opened_files:
base_name = file_name.name
self.ui.file_list.addItem(base_name)
def open(self, filename: Path):
self.setWindowTitle(filename.name)
self.show_pic(file_name=filename)
temp = str(filename).split('.')
npy_filename = '.'.join(temp[:-1]) + '.npy'
if os.path.exists(npy_filename):
self.labelimg = np.load(npy_filename)
self.panel_pic = self.cur_img.copy()
pic = self.draw_points()
self.show_pic(file_name=None, content=pic)
def main():
app = QApplication(sys.argv)
window = AppWindow()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
{"hexsha": "d42b79fff4f4bc3370b43b40ef4a12f91fc99ef7", "size": 29072, "ext": "py", "lang": "Python", "max_stars_repo_path": "labelseg/app.py", "max_stars_repo_name": "1010098686/labelseg", "max_stars_repo_head_hexsha": "8602c55bd2dadc60fb9da5d193ecdf26dc24d398", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "labelseg/app.py", "max_issues_repo_name": "1010098686/labelseg", "max_issues_repo_head_hexsha": "8602c55bd2dadc60fb9da5d193ecdf26dc24d398", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "labelseg/app.py", "max_forks_repo_name": "1010098686/labelseg", "max_forks_repo_head_hexsha": "8602c55bd2dadc60fb9da5d193ecdf26dc24d398", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7529411765, "max_line_length": 132, "alphanum_fraction": 0.5182993946, "include": true, "reason": "import numpy", "num_tokens": 6180}
|
[STATEMENT]
lemma has_integral_cexp_minus_to_infinity:
fixes a::complex\<comment>\<open>TODO: generalize\<close>
assumes a: "0 < Re a"
shows "((\<lambda>x. exp (x *\<^sub>R - a)) has_integral exp (c *\<^sub>R - a) / a) {c..}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. exp (x *\<^sub>R - a)) has_integral exp (c *\<^sub>R - a) / a) {c..}
[PROOF STEP]
using integral_cexp_minus_to_infinity[OF assms]
integrable_on_cexp_minus_to_infinity[OF assms]
[PROOF STATE]
proof (prove)
using this:
integral {?c..} (\<lambda>x. exp (x *\<^sub>R - a)) = exp (?c *\<^sub>R - a) / a
(\<lambda>x. exp (x *\<^sub>R - a)) integrable_on {?c..}
goal (1 subgoal):
1. ((\<lambda>x. exp (x *\<^sub>R - a)) has_integral exp (c *\<^sub>R - a) / a) {c..}
[PROOF STEP]
using has_integral_integrable_integral
[PROOF STATE]
proof (prove)
using this:
integral {?c..} (\<lambda>x. exp (x *\<^sub>R - a)) = exp (?c *\<^sub>R - a) / a
(\<lambda>x. exp (x *\<^sub>R - a)) integrable_on {?c..}
(?f has_integral ?i) ?s = (?f integrable_on ?s \<and> integral ?s ?f = ?i)
goal (1 subgoal):
1. ((\<lambda>x. exp (x *\<^sub>R - a)) has_integral exp (c *\<^sub>R - a) / a) {c..}
[PROOF STEP]
by blast
|
{"llama_tokens": 542, "file": "Laplace_Transform_Existence", "length": 3}
|
import cv2
import PIL.Image
import time
import numpy
import random
import string
from typing import Tuple
import os
import multiprocessing
import multiprocessing.synchronize
from scriptorium.ocr import OCR
class CameraManager(multiprocessing.Process):
title = "Press any key to scan - close window to exit"
def __init__(
self,
cam_opts: Tuple[int, int],
word_queue: multiprocessing.Queue,
snapshot_event: multiprocessing.synchronize.Event,
workdir: str,
binarize: bool = False,
):
video_source, fps = cam_opts
vid = cv2.VideoCapture(video_source)
if not vid.isOpened():
raise ValueError("Unable to open video source", video_source)
self.vid = vid
self.delay_ms = int((1.0 / fps) * 1000.0)
self.workdir = workdir
self.word_queue = word_queue
self.snapshot_event = snapshot_event
self.ocr = OCR(binarize)
self.alive = multiprocessing.Event()
self.alive.set()
super().__init__(target=self.run)
def _get_frame(self) -> Tuple[bool, numpy.ndarray]:
if self.vid.isOpened():
ret, frame = self.vid.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return (True, frame)
return (False, None)
def snapshot(self, frame: numpy.ndarray) -> None:
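        # Run OCR on the frame; every recognized word is pushed onto the shared
        # queue together with the path of the saved annotated frame ("" when the
        # save failed), then the snapshot event wakes up the consumer process.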
pil_im = PIL.Image.fromarray(frame)
words, im = self.ocr.analyze(pil_im)
filepath = ""
if self.workdir and len(words) > 0:
filepath = os.path.join(
self.workdir,
"frame-{0}-{1}.jpg".format(
time.strftime("%d-%m-%Y-%H-%M-%S"),
"".join(
random.choices(string.ascii_uppercase + string.digits, k=6)
),
),
)
if not os.path.isdir(self.workdir) or not cv2.imwrite(
filepath, numpy.array(im)
):
print(
"[CameraManager] Couldn't write image to path {0}, are you sure the workdir exists and is writeable?".format(
filepath
)
)
filepath = ""
for word in words:
self.word_queue.put((word, filepath))
self.snapshot_event.set()
def run(self) -> None:
cv2.namedWindow(CameraManager.title)
ret, frame = self._get_frame()
while self.alive.is_set():
cv2.imshow(CameraManager.title, frame)
ret, frame = self._get_frame()
key = cv2.waitKey(self.delay_ms)
if key != -1: # any keypress, snapshot
if ret:
self.snapshot(frame)
                frame = numpy.zeros_like(frame)  # 'flash' a black frame to indicate a capture
elif (
cv2.getWindowProperty(CameraManager.title, cv2.WND_PROP_VISIBLE) == 0
            ):  # window was closed
break
return
def shutdown(self) -> None:
self.alive.clear()
time.sleep(2.0 * self.delay_ms / 1000.0)
# cv2.waitKey(0)
try:
cv2.destroyWindow(CameraManager.title)
except cv2.error as e:
pass
except Exception as e:
raise e
cv2.destroyAllWindows()
if self.vid.isOpened():
self.vid.release()
|
{"hexsha": "dd3978beb25a52840fd74a18aa26a15fa2d92a73", "size": 3455, "ext": "py", "lang": "Python", "max_stars_repo_path": "scriptorium/camera.py", "max_stars_repo_name": "sevagh/Scriptorium", "max_stars_repo_head_hexsha": "26c1e59e2d96936e2f68bc66ce46a2805fde4bd4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-22T07:24:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-22T07:24:36.000Z", "max_issues_repo_path": "scriptorium/camera.py", "max_issues_repo_name": "sevagh/Scriptorium", "max_issues_repo_head_hexsha": "26c1e59e2d96936e2f68bc66ce46a2805fde4bd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scriptorium/camera.py", "max_forks_repo_name": "sevagh/Scriptorium", "max_forks_repo_head_hexsha": "26c1e59e2d96936e2f68bc66ce46a2805fde4bd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6972477064, "max_line_length": 129, "alphanum_fraction": 0.5458755427, "include": true, "reason": "import numpy", "num_tokens": 758}
|
(* memory_model.thy *)
(* William Mansky *)
(* Memory model locales for PTRANS. *)
theory memory_model
imports "$AFP/List-Infinite/ListInfinite" AxiomaticModel
begin
(*
print_locale "ord"
instantiation option :: (ord) ord
begin
fun less_eq_option where
"(None \<le> None) = True"
| "(None \<le> (Some _ )) = True"
| "((Some _) \<le> None) = False"
| "((Some x) \<le> (Some y)) = (x \<le> y)"
fun less_option where
"(None < None) = False"
| "(None < (Some _)) = True"
| "((Some _) < None) = False"
| "((Some x) < (Some y)) = (x < y)"
instance proof qed
end
*)
lemma map_add_dom_upd [simp]: "dom m' = {k} \<Longrightarrow> (m ++ m')(k \<mapsto> v) = m(k \<mapsto> v)"
by (auto intro!: ext simp add: map_add_def split: option.splits)
lemma dud_set [simp]: "{(l, v). False} = {}"
by simp
(* Extra utility function: enumerate the elements of a set in arbitrary order.
Useful for memory models. Could conceivably be replaced by Eps over finite_distinct_list. *)
thm finite_distinct_list
function list_of_set where
"list_of_set S = (if infinite S \<or> S = {} then [] else let a = SOME a. a \<in> S in a # list_of_set (S - {a}))"
by pat_completeness auto
termination
apply (relation "measure (\<lambda>S. card S)", auto)
apply (frule card_Diff_singleton, rule someI, simp)
apply (case_tac "card S", simp_all)
done
lemma list_of_set_empty [simp]: "list_of_set {} = []"
by simp
lemma list_of_set_inf [simp]: "infinite S \<Longrightarrow> list_of_set S = []"
by simp
lemma list_of_set_card [simp]: "(list_of_set S \<noteq> []) = (card S \<noteq> 0)"
by (auto simp add: Let_def)
declare list_of_set.simps [simp del]
lemma set_some [simp]: "S \<noteq> {} \<Longrightarrow> insert (SOME a. a \<in> S) S = S"
by (metis insert_absorb not_ex_in_conv someI)
lemma set_some2 [simp]: "S \<noteq> {} \<Longrightarrow> (SOME a. a \<in> S) \<in> S"
by (metis not_ex_in_conv someI)
lemma list_of_set_set [simp]: "finite S \<Longrightarrow> set (list_of_set S) = S"
apply (induct "card S" arbitrary: S, simp_all)
apply (rule trans, simp add: list_of_set.simps, simp add: Let_def)
done
corollary list_of_set_nth: "\<lbrakk>list_of_set S ! i = x; i < length (list_of_set S)\<rbrakk> \<Longrightarrow> x \<in> S"
apply (subgoal_tac "finite S", subgoal_tac "x \<in> set (list_of_set S)", simp,
simp add: set_conv_nth, force)
apply (auto simp add: list_of_set.simps split: if_splits)
done
lemma list_of_set_distinct [simp]: "distinct (list_of_set S)"
apply (induct "card S" arbitrary: S, clarsimp simp add: list_of_set.simps)
apply (rule_tac P=distinct in list_of_set.simps [THEN sym [THEN subst]], clarsimp simp add: Let_def)
done
datatype ('thread, 'loc, 'val) access = Read 'thread 'loc 'val | Write 'thread 'loc 'val
| ARW 'thread 'loc 'val 'val | Alloc 'thread 'loc | Free 'thread 'loc
primrec get_thread where
"get_thread (Read t _ _) = t" |
"get_thread (Write t _ _) = t" |
"get_thread (ARW t _ _ _) = t" |
"get_thread (Alloc t _) = t" |
"get_thread (Free t _) = t"
primrec get_loc where
"get_loc (Read _ l _) = l" |
"get_loc (Write _ l _) = l" |
"get_loc (ARW _ l _ _) = l" |
"get_loc (Alloc _ l) = l" |
"get_loc (Free _ l) = l"
primrec set_thread where
"set_thread t' (Read t l v) = Read t' l v" |
"set_thread t' (Write t l v) = Write t' l v" |
"set_thread t' (ARW t l v v') = ARW t' l v v'" |
"set_thread t' (Alloc t l) = Alloc t' l" |
"set_thread t' (Free t l) = Free t' l"
lemma set_get_thread [simp]: "set_thread (get_thread a) a = a"
by (case_tac a, auto)
lemma get_set_thread [simp]: "get_thread (set_thread t a) = t"
by (case_tac a, auto)
lemma set_thread_frees [simp]: "set_thread t' ` Free t ` S = Free t' ` S"
by (auto simp add: image_def)
abbreviation "get_ptrs ops \<equiv> get_loc ` ops"
type_synonym ('block, 'os) pointer = "'block * 'os"
type_synonym ('add, 'size) block_structure = "'add * 'size"
datatype ('add,'val,'size,'os) raw_block =
RawBlk "('add, 'size) block_structure" "('os \<rightharpoonup> 'val)"
type_synonym ('add, 'val, 'size) allocation = "(('add, 'size) block_structure) set"
type_synonym ('add,'val,'size,'os) block_allocation = "('add,'val,'size,'os) raw_block set"
fun get_addr_s where
"get_addr_s ((addr,_)::(('add,'size) block_structure)) = addr"
fun get_size_s where
"get_size_s ((_,s)::(('add,'size) block_structure)) = s"
fun get_addr where "get_addr ( RawBlk (addr,_) _) = addr"
fun get_size where "get_size (RawBlk(_,s) _) = s"
fun block_to_struct where "block_to_struct (RawBlk blk _) = blk"
declare[[show_types = true]]
declare[[show_sorts = true]]
(* locale for reasoning about blocks and the mapping of block offsets to memory locations *)
(* FUNCTIONS *)
(* block_s_start: partial function giving the first memory location of a
block_structure (if it exists)
block_exists: see assumptions for block existence conditions
block_start_os: gives the starting offset of a block; passing that offset to
block_s_ptr returns the same memory location as block_s_start
block_s_ptr: partial function mapping offsets into a block to memory locations
size_to_offset: conversion between abstract types *)
(* ASSUMPTIONS *)
(* block_firstlast_offset: if a block exists, its start offset must be \<le> the offset
returned by applying size_to_offset to the block size (which corresponds to the last
memory location of the block).
pointer_inbounds_exists: for any offset into a block between the start and end
offsets (inclusive), there must be a valid mapping from that offset to a memory
location.
block_s_same_add_same_loc: two blocks with the same starting address must also have
the same starting memory location.
block_s_ptr_monotonic: block_s_ptr is a monotonic function.
block_s_ptr_one_to_one: block_s_ptr is a one-to-one function.
block_s_start_offset: there exists an offset at which block_s_ptr agrees with
block_s_start (even if only because both return None).
block_s_start_defin: block_s_start is block_s_ptr applied at the block's starting
offset, block_start_os. *)
locale block_structure =
fixes block_s_start::"('add, 'size::ord) block_structure \<rightharpoonup> ('loc::ord)"
and block_exists::"('add, 'size::ord) block_structure \<Rightarrow> bool"
and block_start_os :: "('add, 'size::ord) block_structure \<Rightarrow> ('os::{ord,plus})"
and block_s_ptr::"('add, 'size) block_structure * 'os \<rightharpoonup> ('loc::ord)"
and size_to_offset::"'size \<Rightarrow> 'os"
and struct_to_rb::"('add,'size) block_structure \<Rightarrow> ('add,'val,'size,'os) raw_block"
assumes block_exists_defin: "block_exists blk = (\<exists>x. block_s_start(blk) = (Some x))"
and block_firstlast_offset:
"block_exists (addr,size1) \<Longrightarrow>
(block_start_os (addr,size1)) \<le> (size_to_offset size1)"
and pointer_inbounds_exists:
"\<lbrakk>block_exists (addr,size1); os \<ge> block_start_os(addr,size1);
os \<le> size_to_offset(size1)\<rbrakk> \<Longrightarrow>
\<exists> l. (block_s_ptr ((addr,size1),os) = (Some l))"
and block_s_same_add_same_loc:
"\<lbrakk>block_exists (add, size1); block_exists (add, size2)\<rbrakk> \<Longrightarrow>
block_s_start (add,size1) = block_s_start(add, size2)"
and block_s_ptr_monotonic: "\<lbrakk>os1 < os2; block_s_ptr(blk,os1) = Some x;
block_s_ptr(blk,os2) = Some y\<rbrakk> \<Longrightarrow> x < y"
and block_s_ptr_one_to_one: "\<lbrakk>block_s_ptr(blk,os1) = Some x;
block_s_ptr(blk,os2) = Some y;
(x = y)\<rbrakk> \<Longrightarrow> (os1 = os2)"
and block_s_start_offset: "\<exists>os_1. block_s_ptr(blk,os_1) = block_s_start(blk)"
and block_s_start_defin: "(block_s_start blk = block_s_ptr(blk,(block_start_os blk)))"
context block_structure
begin
fun good_block_s_ptr where
"good_block_s_ptr ((start,len), os) = ((block_exists (start,len)) \<and>
((block_start_os (start,len)) \<le> os) \<and> (os \<le> (size_to_offset len)))"
fun good_rb_s_pair where
"good_rb_s_pair blk1 blk2 =
((\<exists> os_1. (\<exists> os_2. ((good_block_s_ptr(blk1,os_1)) \<and>
(good_block_s_ptr(blk2,os_2)) \<and>
((good_block_s_ptr(blk1,os_1)) = (good_block_s_ptr(blk2,os_2)))))) \<longrightarrow>
(blk1 = blk2))"
fun good_allocation where
"good_allocation (alloc::('add,'val,'size) allocation) =
(\<forall> rb1 \<in> alloc. (\<forall> rb2 \<in> alloc. (good_rb_s_pair rb1 rb2)))"
fun good_block_allocation where
"good_block_allocation (alloc::('add,'val,'size,'os) block_allocation) =
good_allocation (block_to_struct ` alloc)"
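(* A consequence worth recording (a hedged sketch): a "good" block pointer is always
defined, as an instance of pointer_inbounds_exists. *)
lemma good_block_s_ptr_defined: "good_block_s_ptr ((start, len), os) \<Longrightarrow>
\<exists>l. block_s_ptr ((start, len), os) = Some l"
by (auto intro: pointer_inbounds_exists)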
lemma good_rb_s_pair_refl [simp]: "good_rb_s_pair a a"
apply auto
done
lemma good_rb_s_pair_symm [simp]: "good_rb_s_pair a b = good_rb_s_pair b a"
apply auto
done
lemma good_rb_s_pair_trans [simp]: "((good_allocation alloc) \<and>
(a \<in> alloc) \<and> (b \<in> alloc) \<and> (c \<in> alloc) \<and>
(good_rb_s_pair a b) \<and> (good_rb_s_pair b c) \<longrightarrow> good_rb_s_pair a c)"
apply auto
done
end
type_synonym ('block,'size, 'os) access_region = "('block * 'size * 'os)"
type_synonym ('add,'val,'size,'os) block_access_region =
"(('add,'val,'size,'os) raw_block,'size,'os) access_region"
type_synonym ('add,'val,'size,'os) block_s_access_region =
"(('add,'size) block_structure,'size,'os) access_region"
fun region_get_block where
"region_get_block ((b,s,os)::('block,'size, 'os) access_region) = b"
context block_structure
begin
(* Define when a pointer or offset lies "in" a region *)
fun os_in_region where
"os_in_region ((b,s,os1)::('add,'val,'size,'os) block_access_region) (os2::'os) =
((((block_start_os (block_to_struct b))+os1) \<le> os2) \<and>
(((block_start_os (block_to_struct b))+os1+(size_to_offset s)) \<ge> os2))"
fun region_eq where
"region_eq ((b1, s1, os1)::('add,'val,'size,'os) block_access_region)
((b2, s2, os2)::('add,'val,'size,'os) block_access_region) =
((block_s_ptr((block_to_struct b1),os1) = block_s_ptr((block_to_struct b2),os2))
\<and> (block_s_ptr((block_to_struct b1),(os1+(size_to_offset s1))) =
block_s_ptr((block_to_struct b2),(os2+(size_to_offset s2)))))"
lemma region_eq_all_ptr:
"region_eq (b1,s1,os1) (b2, s2, os2) \<Longrightarrow> (\<forall> (os_a::'os). \<exists> (os_b::'os).
(((os_in_region (b1,s1,os1) os_a) \<and> (os_in_region (b2,s2,os2) os_b))
\<and> (block_s_ptr((block_to_struct b1),os_a)) = block_s_ptr((block_to_struct b2),os_b)))"
oops
fun region_overlap where
"region_overlap ((b1, s1, os1)::('add,'val,'size,'os) block_access_region)
((b2, s2, os2)::(('add,'val,'size,'os) block_access_region)) = (\<exists> (os_a::'os).
(\<exists> (os_b::'os). ((os_a \<ge> os1) \<and> (os_b \<ge> os2) \<and> (os_a \<le> (os1+(size_to_offset s1))) \<and>
(os_b \<le> (os2+(size_to_offset s2))) \<and> (block_s_ptr((block_to_struct b1),os_a)) =
block_s_ptr((block_to_struct b2),os_b))))"
fun region_inbounds where
"region_inbounds ((b, s, os)::('add,'val,'size,'os) block_access_region)
= (((os + (size_to_offset s)) \<le> (size_to_offset (get_size b))) \<and>
(block_start_os((block_to_struct b)) \<le> os))"
(* Tests whether a region describes an entire block *)
fun region_block_eq where
"region_block_eq ((b, s, os)::('add,'val,'size,'os) block_access_region)
= ((block_start_os((block_to_struct b)) = os) \<and> ((get_size_s(block_to_struct b)) = s))"
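(* A small example property (a hedged sketch; the generated variable names in the case
splits may need adjusting): region_eq is reflexive by definition. *)
lemma region_eq_refl [simp]: "region_eq r r"
apply (case_tac r, case_tac b, auto)
done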
end
declare[[show_types = true]]
declare[[show_sorts = true]]
*)
context axiomaticModel begin
term observeMem
end
locale memory_model = axiomaticModel
where actions = "actions :: (aid_type, 'val, loc_type, 'lock, 'name, 'callID) action set"
and locations = "locations :: loc_type set"
and actionIDs = "actionIDs :: aid_type set"
and times = "times :: time_type set"
and threads = "threads :: 'tid set"
and locks = "locks :: 'lock set"
and names = "names :: 'name set"
and callIDs = "callIDs :: 'callID set"
for actions locations actionIDs times threads locks names callIDs +
fixes free_set::"(('tid \<times> time_type \<times> loc_type)
\<Rightarrow> (aid_type \<times> (aid_type, 'val, loc_type, 'lock, 'name, 'callID) action) option)
\<Rightarrow> time_type \<Rightarrow> loc_type set"
and can_read::"(('tid \<times> time_type \<times> loc_type)
\<Rightarrow> (aid_type \<times> (aid_type, 'val, loc_type, 'lock, 'name, 'callID) action) option)
\<Rightarrow>'tid \<Rightarrow> time_type \<Rightarrow> loc_type \<Rightarrow> 'val set"
and update_mem::"(('tid \<times> time_type \<times> loc_type)
\<Rightarrow> (aid_type \<times> (aid_type, 'val, loc_type, 'lock, 'name, 'callID) action) option)
\<Rightarrow> time_type \<Rightarrow> ('tid \<times> aid_type \<times>
(aid_type, 'val, loc_type, 'lock, 'name, 'callID) action) set
\<Rightarrow> (('tid \<times> time_type \<times> loc_type)
\<Rightarrow> (aid_type \<times> (aid_type, 'val, loc_type, 'lock, 'name, 'callID) action) option) \<Rightarrow> bool"
and start_mem:: "(('tid \<times> time_type \<times> loc_type)
\<Rightarrow> (aid_type \<times> (aid_type, 'val, loc_type, 'lock, 'name, 'callID) action) option)"
assumes alloc_not_free: "\<lbrakk>update_mem mem time ops mem'; (tid,aid,Create l) \<in> ops\<rbrakk> \<Longrightarrow>
l \<notin> free_set mem' time"
and stays_not_free: "\<lbrakk>update_mem mem time ops mem'; l \<notin> free_set mem time\<rbrakk> \<Longrightarrow>
l \<notin> free_set mem' time"
(*
datatype ('thread, 'add, 'size,'os) block_access =
bRead 'thread "('add,'size,'os) block_s_access_region" 'val
| bWrite 'thread "('add,'size,'os) block_s_access_region" 'val
| bARW 'thread "('add,'size,'os) block_s_access_region" 'val 'val
| bAlloc 'thread "('add,'size) block_structure"
| bFree 'thread "('add,'size) block_structure"
datatype ('thread, 'block, 'region,'val) block_access =
bRead 'thread 'region 'val
| bWrite 'thread 'region 'val
| bARW 'thread 'region 'val 'val
| bAlloc 'thread 'block
| bFree 'thread 'block
*)
(*
locale block_structur =
fixes good_block ::"'block \<Rightarrow> bool"
and good_region :: "'region \<Rightarrow> bool"
and block_overlap::"'block \<Rightarrow> 'block \<Rightarrow> bool"
and region_overlap::"'region \<Rightarrow> 'region \<Rightarrow> bool"
and subblock::"'block \<Rightarrow> 'block \<Rightarrow> bool"
and subregion :: "'region \<Rightarrow> 'region \<Rightarrow> bool"
and region_get_block ::"'region \<Rightarrow> 'block"
and block_as_region ::"'block \<Rightarrow> 'region"
and value_fits_region :: "'val \<Rightarrow> 'region \<Rightarrow> bool"
and region_fits_block ::"'region \<Rightarrow> 'block \<Rightarrow> bool"
(* define does_not_modify later *)
and does_not_modify :: "('thread, 'block, 'region,'val) block_access
\<Rightarrow> 'region \<Rightarrow> bool"
(* and get_access_region:: "('thread, 'block, 'region,'val) block_access \<Rightarrow> 'region"*)
assumes value_fits_bigger_region :
"\<lbrakk> subregion r1 r2; value_fits_region v r1 \<rbrakk> \<Longrightarrow> value_fits_region v r2"
and subblock_good: "\<lbrakk>good_block b ; subblock b' b\<rbrakk> \<Longrightarrow> good_block b'"
and region_overlap_symm: "region_overlap r r' \<Longrightarrow> region_overlap r' r"
(* and dummy: "\<exists> b. block_as_region b = block_as_region b" *)
fun (in block_structur) get_access_region::"('thread, 'block, 'region,'val) block_access \<Rightarrow> 'region" where
"get_access_region (bRead _ r _) = r" |
"get_access_region (bWrite _ r _) = r" |
"get_access_region (bARW _ r _ _) = r" |
"get_access_region (bAlloc _ b) = (block_as_region b)" |
"get_access_region (bFree _ b) = (block_as_region b)"
thm block_structur.get_access_region.simps
thm block_structur_def
locale basic_can_do = block_structur
where good_block = "good_block :: 'block \<Rightarrow> bool"
and good_region = "good_region :: 'region \<Rightarrow> bool"
for good_block
and good_region +
fixes can_do::"('thread, 'block, 'region,'val) block_access list \<Rightarrow>
('thread, 'block, 'region,'val) block_access \<Rightarrow> bool"
assumes base_allows_alloc: "good_block b \<Longrightarrow> (can_do [] (bAlloc t b))"
and free_allows_alloc: "\<lbrakk>good_block b; can_do m (bFree t b); subblock b' b\<rbrakk>
\<Longrightarrow> (can_do ((bFree t b)#m) (bAlloc t' b'))"
and alloc_allows_free: "\<lbrakk>good_block b; can_do m (bAlloc t b)\<rbrakk> \<Longrightarrow>
(can_do ((bAlloc t b)#m) (bFree t b))"
(* and alloc_allows_write: "\<lbrakk>good_block b; can_do m (bAlloc t b);
region_fits_block r b\<rbrakk> \<Longrightarrow>
(can_do ((bAlloc t b)#m) (bWrite t' r v))"*)
and alloc_allows_write_same_thread: "\<lbrakk>good_block b; can_do m (bAlloc t b);
region_fits_block r b\<rbrakk> \<Longrightarrow>
(can_do ((bAlloc t b)#m) (bWrite t r v))"
and write_any_value_same_thread: "\<lbrakk>value_fits_region v' r;
(can_do m (bWrite t r v))\<rbrakk> \<Longrightarrow> (can_do m (bWrite t r v'))"
and not_mod_write_drop: "\<lbrakk>can_do m opr; does_not_modify opr r;
can_do (opr#m) (bWrite t r v)\<rbrakk>
\<Longrightarrow> can_do m (bWrite t r v)"
and not_mod_write_add: "\<lbrakk>can_do m opr; does_not_modify opr r;
can_do m (bWrite t r v)\<rbrakk> \<Longrightarrow> can_do (opr#m) (bWrite t r v)"
and write_not_read_drop: "\<lbrakk>can_do m (bWrite t r v);
\<forall>r' v' .(region_overlap r r') \<longrightarrow> (opr \<noteq> (bRead t' r' v'));
can_do ((bWrite t r v)#m) opr\<rbrakk> \<Longrightarrow> can_do m opr"
and write_not_read_add: "\<lbrakk>can_do m (bWrite t r v);
\<forall>r' v' .(region_overlap r r') \<longrightarrow> (opr \<noteq> (bRead t' r' v'));
can_do m opr\<rbrakk> \<Longrightarrow> can_do ((bWrite t r v)#m) opr"
and read_only_written: "\<lbrakk>can_do m (bWrite t r v);
can_do ((bWrite t r v)#m) (bRead t' r v')\<rbrakk> \<Longrightarrow> v = v'"
and read_written: "can_do m (bWrite t r v) \<Longrightarrow>
can_do ((bWrite t r v)#m) (bRead t' r v)"
and read_noop_drop: "\<lbrakk>can_do m (bRead t r v); can_do ((bRead t r v)#m) opr\<rbrakk>
\<Longrightarrow> can_do m opr"
and read_noop_add: "\<lbrakk>can_do m (bRead t r v); can_do m opr\<rbrakk>
\<Longrightarrow> can_do ((bRead t r v)#m) opr"
and reg_drop: "\<lbrakk>\<not>(region_overlap (get_access_region opr) (get_access_region opr'));
can_do m opr; can_do (opr#m) opr'\<rbrakk> \<Longrightarrow> can_do m opr'"
and reg_add: "\<lbrakk>\<not>(region_overlap (get_access_region opr) (get_access_region opr'));
can_do m opr; can_do m opr'\<rbrakk> \<Longrightarrow> can_do (opr#m) opr'"
(*and reg_comm: "\<lbrakk>\<not>(region_overlap (get_access_region opr) (get_access_region opr'));
can_do (opr#m) opr'\<rbrakk> \<Longrightarrow> can_do (opr'#m) opr"*)
and prefix_closed: "\<lbrakk>can_do (opr#m) opr'\<rbrakk> \<Longrightarrow> can_do m opr"
lemma (in basic_can_do) reg_comm: "\<lbrakk>\<not>(region_overlap (get_access_region opr) (get_access_region opr'));
can_do (opr#m) opr'\<rbrakk> \<Longrightarrow> can_do (opr'#m) opr"
apply (rule reg_add)
apply (erule contrapos_nn)
apply (erule_tac r = "(get_access_region opr')" and r' = "(get_access_region opr)"
in region_overlap_symm)
prefer 2
apply (erule prefix_closed)
apply (erule reg_drop)
apply (erule prefix_closed)
apply auto
done
(* Commented out to the end
context block_structure
begin
primrec block_get_thread where
"block_get_thread (bRead t _ _) = t" |
"block_get_thread (bWrite t _ _) = t" |
"block_get_thread (bARW t _ _ _) = t" |
"block_get_thread (bAlloc t _) = t" |
"block_get_thread (bFree t _) = t"
fun block_get_region where
"block_get_region (bRead _ r _) = r" |
"block_get_region (bWrite _ r _) = r" |
"block_get_region (bARW _ r _ _) = r"|
"block_get_region (bAlloc _ b) = ((struct_to_rb b),(get_size_s b),(block_start_os(b)))" |
"block_get_region (bFree _ b) = ((struct_to_rb b),(get_size_s b),(block_start_os(b)))"
primrec block_get_block where
"block_get_block (bRead _ r _) = (region_get_block r)" |
"block_get_block (bWrite _ r _) = (region_get_block r)" |
"block_get_block (bARW _ r _ _) = (region_get_block r)" |
"block_get_block (bAlloc _ b) = (struct_to_rb b)" |
"block_get_block (bFree _ b) = (struct_to_rb b)"
primrec block_set_thread where
"block_set_thread t' (bRead t r v) = bRead t' r v" |
"block_set_thread t' (bWrite t r v) = bWrite t' r v" |
"block_set_thread t' (bARW t r v v') = bARW t' r v v'" |
"block_set_thread t' (bAlloc t l) = bAlloc t' l" |
"block_set_thread t' (bFree t l) = bFree t' l"
end
locale block_seq_can_do = fixes can_do::"('thread,'add,'val,'size,'os)
block_access list \<Rightarrow> ('thread,'add,'val,'size,'os) block_access \<Rightarrow> bool"
assumes base_allows_alloc: "(can_do [] (bAlloc t b))"
and free_allows_alloc: "\<lbrakk>can_do m (bFree t b); b=b'\<rbrakk>
\<Longrightarrow> (\<not>(can_do ((bFree t b)#m) (bFree t' b')))"
and alloc_allows_write: "\<lbrakk>can_do m (bAlloc t b);
region_inbounds ((struct_to_block b),s,os)\<rbrakk> \<Longrightarrow>
(can_do ((bAlloc t b)#m)
(bWrite t' ((struct_to_block b),s,os) v))"
and alloc_allows_alloc: "\<lbrakk>can_do m (bAlloc t b); b=b'\<rbrakk> \<Longrightarrow>
(\<not>(can_do ((bAlloc t b)#m) (bAlloc t' b')))"
and alloc_allows_free: "\<lbrakk>can_do m (bAlloc t b); b=b'\<rbrakk> \<Longrightarrow>
(can_do ((bAlloc t b)#m) (bFree t' b'))"
and write_any_value: "(can_do m (bWrite t r v)) = (can_do m (bWrite t' r v'))"(*
and not_mod_write: "\<lbrakk>can_do m (bRead t' r' v'); can_do m (bWrite t' r' v');
can_do m (bAlloc t' b); can_do m (bFree t' b); region_block_eq(b,s,os);
\<not>(region_overlap r r'); \<not>(region_overlap r (b,s,os))\<rbrakk> \<Longrightarrow>
((can_do ((bRead t' r' v')#m) (bWrite t r v)) =
(can_do (m) (bWrite t r v))) \<and>
((can_do ((bWrite t' r' v')#m) (bWrite t r v)) =
(can_do (m) (bWrite t r v))) \<and>
((can_do ((bAlloc t' b)#m) (bWrite t r v)) =
(can_do (m) (bWrite t r v))) \<and>
((can_do ((bFree t' b)#m) (bWrite t r v)) =
(can_do (m) (bWrite t r v)))"
and write_not_read: "\<lbrakk>can_do m (bWrite t r v); opr \<noteq> (bRead t' r v')\<rbrakk> \<Longrightarrow>
(can_do ((bWrite t r v)#m) opr) = (can_do m opr)"
and read_written: "\<lbrakk>can_do m (bWrite t r v)\<rbrakk> \<Longrightarrow> ((can_do ((bWrite t r v)#m)
(bRead t' r v')) = (v = v'))"
and read_noop: "\<lbrakk>can_do m (bRead t r v)\<rbrakk> \<Longrightarrow> (can_do ((bRead t r v)#m) opr =
can_do m opr)"*)(*
and loc_drop: "\<lbrakk>\<not>region_overlap((block_get_region opr) (block_get_region opr'));
(can_do m opr)\<rbrakk> \<Longrightarrow> (can_do (opr#m) opr') = (can_do m opr')"
and loc_comm: "\<lbrakk>\<not>region_overlap((block_get_region opr) (block_get_region opr'))\<rbrakk>
\<Longrightarrow> (can_do (opr#m) opr') = (can_do (opr'#m) opr)"*)
locale block_seq_can_do_extend = block_seq_can_do +
assumes base_disallows_free:"(\<not>(can_do [] (bFree t b)))"
and free_dissallows_free:"\<lbrakk>can_do m (bFree t b); b=b'\<rbrakk>
\<Longrightarrow> (\<not>(can_do ((bFree t b)#m) (bFree t' b')))"
locale seq_can_do = fixes can_do::"('thread, 'loc, 'val) access list \<Rightarrow>
('thread, 'loc, 'val) access \<Rightarrow> bool"
assumes base_allows: "(\<not>(can_do [] (Read t l v))) \<and> (can_do [] (Alloc t l))
\<and>(\<not>(can_do [] (Free t l)))"
and free_allows: "\<lbrakk>can_do m (Free t l)\<rbrakk> \<Longrightarrow> (\<not>(can_do ((Free t l)#m) (Read t' l v)))
\<and> (can_do ((Free t l)#m) (Alloc t' l)) \<and> (\<not>(can_do ((Free t l)#m) (Free t' l)))"
and alloc_allows: "\<lbrakk>can_do m (Alloc t l)\<rbrakk> \<Longrightarrow> (can_do ((Alloc t l)#m) (Write t l v))
\<and> (\<not>(can_do ((Alloc t l)#m) (Alloc t' l))) \<and> (can_do ((Alloc t l)#m) (Free t l))"
and write_any_value: "(can_do m (Write t l v)) = (can_do m (Write t l v'))"
and not_mod_write: "\<lbrakk>can_do m (Read t l v); can_do m (Write t l' v);
can_do m (Alloc t l'); can_do m (Free t l'); l \<noteq> l'\<rbrakk> \<Longrightarrow> ((can_do ((Read t l v)#m)
(Write t' l v)) = (can_do (m) (Write t' l v))) \<and> ((can_do ((Write t l' v)#m)
(Write t' l v)) = (can_do (m) (Write t' l v))) \<and> ((can_do ((Alloc t l')#m)
(Write t' l v)) = (can_do (m) (Write t' l v))) \<and> ((can_do ((Free t l')#m)
(Write t' l v)) = (can_do (m) (Write t' l v)))"
and write_not_read: "\<lbrakk>can_do m (Write t l v); opr \<noteq> (Read t' l v')\<rbrakk> \<Longrightarrow>
(can_do ((Write t l v)#m) opr) = (can_do m opr)"
and read_written: "\<lbrakk>can_do m (Write t l v)\<rbrakk> \<Longrightarrow> ((can_do ((Write t l v)#m)
(Read t' l v')) = (v = v'))"
and read_noop: "\<lbrakk>can_do m (Read t l v)\<rbrakk> \<Longrightarrow> (can_do ((Read t l v)#m) a = can_do m a)"
and loc_drop: "\<lbrakk>get_loc (opr) \<noteq> get_loc(opr'); (can_do m opr)\<rbrakk> \<Longrightarrow> (can_do (opr#m) opr')
= (can_do m opr')"
and loc_comm: "\<lbrakk>get_loc (opr) \<noteq> get_loc(opr')\<rbrakk> \<Longrightarrow> (can_do (opr#m) opr') =
(can_do (opr'#m) opr)"
(* How can we define a memory so that it only contains a "good" allocation? *)
locale block_memory_model = fixes free_set::"'memory \<Rightarrow> 'a word set"
and can_read::"'memory \<Rightarrow> 'thread \<Rightarrow> 'loc \<Rightarrow> 'block set"
and update_mem::"'memory \<Rightarrow> ('thread, 'loc, 'val) block_access set \<Rightarrow> 'memory \<Rightarrow> bool"
and allocation::"'loc allocation"
and start_mem::'memory
(*and memory_max_word:: "'loc::len word" *) (*largest address *) (* note: we may want to condense these into one *)
and memory_max_int:: int
and memory_min:: int (* smallest address *)
(*assumes good_allocations: "\<lbrakk>update_mem mem ops mem'; good_allocation *)
(*assumes good_allocation_word: "good_allocation memory_max_word allocation"*)
assumes good_allocation_int: "good_allocation (word_of_int memory_max_int) allocation"
(* TSO memory model *)
locale TSO = fixes undef::'val begin
(* This isn't really a TSO-specific locale, but I might have other assumptions later.
More to the point, it's convenient to have a separate locale for each memory model, even if
they don't actually rely on separate assumptions. *)
abbreviation "free_set mem \<equiv> UNIV - dom (fst mem)"
definition "can_read mem t l \<equiv> case List.find (\<lambda>(l', v). l' = l) ((snd mem) t) of
Some (l, v) \<Rightarrow> {v} | None \<Rightarrow> {v. fst mem l = Some v}"
definition "can_read2 mem t l \<equiv> case mem of (mem_map, bufs) \<Rightarrow> (case List.find (\<lambda>(l', v). l' = l) (bufs t) of
Some (l, v) \<Rightarrow> {v} | None \<Rightarrow> {v. mem_map l = Some v})"
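(* Two hedged sanity checks (sketches, unfolding can_read_def): with an empty buffer a
thread reads straight from main memory, and since new writes are prepended to buffers,
the first buffered entry for a location shadows memory. *)
lemma can_read_no_buf: "bufs t = [] \<Longrightarrow> can_read (mem, bufs) t l = {v. mem l = Some v}"
by (simp add: can_read_def)
lemma can_read_buf_hit: "bufs t = [(l, v)] \<Longrightarrow> can_read (mem, bufs) t l = {v}"
by (simp add: can_read_def)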
(* Switch to the inductive approach... sometime. *)
inductive update_mem where
no_atomic [intro]: "\<lbrakk>\<And>t l v v'. ARW t l v v' \<notin> ops; \<And>t. \<exists>up. bufs' t = up @ bufs t \<and>
set up = {(l, v) | l v. Write t l v \<in> ops} \<and> distinct up\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) ops (mem |` (UNIV - {l. \<exists>t. Free t l \<in> ops}) ++
(\<lambda>l. if \<exists>t. Alloc t l \<in> ops then Some undef else None), bufs')" |
update [intro]: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs'); bufs' t = buf @ [(l, v)]\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) ops (mem'(l \<mapsto> v), bufs'(t := buf))" |
atomic [intro!]: "bufs t = [] \<Longrightarrow> update_mem (mem, bufs) {ARW t l v v'} (mem(l \<mapsto> v'), bufs)"
abbreviation "start_mem \<equiv> (empty, \<lambda>t. [])"
lemma alloc_not_free: "\<lbrakk>update_mem mem ops mem'; Alloc t l \<in> ops; \<forall>t. Free t l \<notin> ops\<rbrakk> \<Longrightarrow>
l \<notin> free_set mem'"
by (induct rule: update_mem.induct, auto split: if_splits)
lemma stays_not_free: "\<lbrakk>update_mem mem ops mem'; l \<notin> free_set mem; \<forall>t. Free t l \<notin> ops\<rbrakk> \<Longrightarrow>
l \<notin> free_set mem'"
by (induct rule: update_mem.induct, auto split: if_splits)
end
sublocale TSO \<subseteq> memory_model free_set can_read update_mem start_mem
by (unfold_locales, metis alloc_not_free, metis stays_not_free)
context TSO begin
lemma update_none [intro!, simp]: "update_mem C {} C"
by (cases C, cut_tac ops="{}" and mem=a and bufs=b in no_atomic, auto simp add: restrict_map_def)
lemma can_read_thread: "\<lbrakk>v \<in> can_read (mem, b) t l; b t = b' t\<rbrakk> \<Longrightarrow>
v \<in> can_read (mem, b') t l"
by (auto simp add: can_read_def split: option.splits)
lemma first_entry: "\<lbrakk>update_mem (mem, bufs) {Write t l v} (mem', bufs');
bufs' t = a # rest\<rbrakk> \<Longrightarrow> a = (l, v)"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). \<forall>a rest. ops = {Write t l v} \<and>
bufs' t = a # rest \<longrightarrow> a = (l, v)" in update_mem.induct, simp_all, clarsimp)
apply (subgoal_tac "\<exists>up. bufs'a t = up @ bufs t \<and> set up = {ab. t = t \<and> (case ab of (la, va) \<Rightarrow>
la = l \<and> va = v)} \<and> distinct up", clarify, simp+)
apply (case_tac up, simp, simp)
apply (thin_tac "bufs'a t = ((aa, b) # resta)", force)
apply auto
apply (cases a, auto)
done
lemma update_map: "update_mem (mem, bufs) {} (mem', bufs') \<Longrightarrow>
\<exists>map. \<forall>mem2. update_mem (mem2, bufs) {} (mem2 ++ map, bufs')"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). ops = {} \<longrightarrow>
(\<exists>map. \<forall>mem2. update_mem (mem2, bufs) {} (mem2 ++ map, bufs'))" in update_mem.induct, auto)
apply (subgoal_tac "bufs' = bufs", rule_tac x=empty in exI, simp, rule ext)
apply (subgoal_tac "\<exists>up. bufs' x = (up @ bufs x) \<and> set up = {(l, v). False} \<and> distinct up", clarify, auto)
apply (rule_tac x="map(l \<mapsto> v)" in exI, auto)
done
lemma update_trans_rev: "\<lbrakk>update_mem (mem', bufs') {} (mem'', bufs'');
update_mem (mem, bufs) ops (mem', bufs')\<rbrakk> \<Longrightarrow> update_mem (mem, bufs) ops (mem'', bufs'')"
apply (drule_tac P="\<lambda>(mem', bufs') ops' (mem'', bufs''). ops' = {} \<and> update_mem (mem, bufs) ops (mem', bufs') \<longrightarrow>
update_mem (mem, bufs) ops (mem'', bufs'')" in update_mem.induct, auto simp add: restrict_map_def)
apply (subgoal_tac "bufs'a = bufsa", simp, rule ext)
apply (subgoal_tac "\<exists>up. bufs'a x = up @ bufsa x \<and> set up = {(l, v). False} \<and> distinct up",
clarify, auto)
done
lemma update_trans [trans]: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs');
update_mem (mem', bufs') {} (mem'', bufs'')\<rbrakk> \<Longrightarrow> update_mem (mem, bufs) ops (mem'', bufs'')"
by (erule update_trans_rev, simp)
lemma update_canonical: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs');
\<And>t l v v'. ARW t l v v' \<notin> ops\<rbrakk> \<Longrightarrow>
\<exists>writes bufs''. (\<forall>t. bufs'' t = writes t @ bufs t \<and> set (writes t) = {(l, v) | l v. Write t l v \<in> ops} \<and> distinct (writes t)) \<and>
update_mem (mem, bufs) ops (mem |` (UNIV - {l. \<exists>t. Free t l \<in> ops}) ++
(\<lambda>l. if \<exists>t. Alloc t l \<in> ops then Some undef else None), bufs'') \<and>
update_mem (mem |` (UNIV - {l. \<exists>t. Free t l \<in> ops}) ++
(\<lambda>l. if \<exists>t. Alloc t l \<in> ops then Some undef else None), bufs'') {} (mem', bufs')"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). (\<forall>t l v v'. ARW t l v v' \<notin> ops) \<longrightarrow>
(\<exists>writes bufs''. (\<forall>t. bufs'' t = writes t @ bufs t \<and>
set (writes t) = {(l, v) | l v. Write t l v \<in> ops} \<and> distinct (writes t)) \<and> update_mem (mem, bufs) ops
(mem |` (UNIV - {l. \<exists>t. Free t l \<in> ops}) ++ (\<lambda>l. if \<exists>t. Alloc t l \<in> ops then Some undef else None), bufs'') \<and>
update_mem (mem |` (UNIV - {l. \<exists>t. Free t l \<in> ops}) ++
(\<lambda>l. if \<exists>t. Alloc t l \<in> ops then Some undef else None), bufs'') {} (mem', bufs'))" in update_mem.induct, auto)
apply (rule_tac x="\<lambda>t. SOME up. bufs' t = up @ bufs t \<and> set up = {(l, v). Write t l v \<in> opsa} \<and>
distinct up" in exI)
apply (rule_tac x=bufs' in exI)
apply (rule conjI, clarsimp)
apply (rule someI_ex, auto)
done
corollary update_write: "\<lbrakk>update_mem (mem, bufs) {Write t l v} (mem', bufs')\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs(t := (l, v) # bufs t)) {} (mem', bufs')"
apply (drule update_canonical, auto)
apply (subgoal_tac "bufs'' = bufs(t := (l, v) # bufs t)", simp add: restrict_map_def, rule ext,
clarsimp)
apply (erule_tac x=x in allE, rule conjI, clarsimp)
apply (case_tac "writes t", simp+, case_tac list, simp+)
apply clarsimp
done
lemma update_later: "\<lbrakk>update_mem (mem, bufs) {} (mem', bufs')\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) {Write t l v} (mem', bufs'(t := (l, v) # bufs' t))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). ops = {} \<longrightarrow>
update_mem (mem, bufs) {Write t l v} (mem', bufs'(t := (l, v) # bufs' t))" in update_mem.induct, auto)
apply (cut_tac ops="{Write t l v}" in no_atomic, auto)
apply (drule_tac ops="{Write t l v}" in update, auto)
apply (drule_tac ops="{Write t l v}" and t=ta in update, auto)
by (metis (hide_lams, no_types) fun_upd_twist)
lemma update_later2: "\<lbrakk>update_mem (mem, bufs) {} (mem', bufs'(t := buf)); bufs' t = (l, v) # buf\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) {Write t l v} (mem', bufs')"
by (smt fun_upd_idem_iff fun_upd_upd list.inject update_later)
lemma update_past: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs'); t \<notin> get_thread ` ops\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs(t := b @ bufs t)) ops (mem', bufs'(t := b @ bufs' t))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). t \<notin> get_thread ` ops \<longrightarrow>
update_mem (mem, bufs(t := b @ bufs t)) ops (mem', bufs'(t := b @ bufs' t))"
in update_mem.induct, auto)
apply (rule no_atomic, auto)
apply (rule_tac x="[]" in exI, simp)
apply (subgoal_tac "\<forall>a b. Write t a b \<notin> opsa", subgoal_tac "\<exists>up. bufs' t = up @ bufs t
\<and> set up = {(l, v). Write t l v \<in> opsa} \<and> distinct up", clarify, simp, metis, auto)
apply (metis get_thread.simps(2) imageI)
apply (drule_tac bufs="bufs(t := b @ bufs t)" in update, auto)
apply (drule_tac bufs="bufs(t := b @ bufs t)" and t=ta in update, auto)
apply (simp add: fun_upd_twist)
done
lemma update_past2: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs');
\<And>l v. Write t l v \<notin> ops; \<And>l v v'. ARW t l v v' \<notin> ops\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs(t := b @ bufs t)) ops (mem', bufs'(t := b @ bufs' t))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). (\<forall>l v. Write t l v \<notin> ops) \<and>
(\<forall>l v v'. ARW t l v v' \<notin> ops) \<longrightarrow> update_mem (mem, bufs(t := b @ bufs t)) ops (mem', bufs'(t := b @ bufs' t))"
in update_mem.induct, auto)
apply (rule no_atomic, auto)
apply (subgoal_tac "\<exists>up. bufs' t = up @ bufs t \<and> set up = {(l, v). Write t l v \<in> opsa} \<and>
distinct up", simp)
apply (metis (full_types))
apply (drule_tac bufs="bufs(t := b @ bufs t)" in update, auto)
apply (drule_tac bufs="bufs(t := b @ bufs t)" and t=ta in update, auto simp add: fun_upd_twist)
done
lemma process_buffer: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs'); bufs' t = a @ b\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) ops (mem' ++ map_of b, bufs'(t := a))"
apply (induct b arbitrary: mem' bufs' rule: rev_induct, auto simp add: map_upd_triv)
apply (drule_tac t=t in update, auto)
apply force
done
end
(* assorted useful list lemmas*)
lemma map_upt_zip_Suc [simp]: "l' = map (\<lambda>(x, n). (x, Suc n)) l \<Longrightarrow> map (\<lambda>((l, v), n). (l, v) # map (Pair l) (f n)) l' =
map (\<lambda>((l, v), n). (l, v) # map (Pair l) (f (Suc n))) l"
by auto
declare map_Suc_upt [simp]
lemma zip_Suc [simp]: "zip l [Suc i..<Suc j] = map (\<lambda>(x, n). (x, Suc n)) (zip l [i..<j])"
by (simp only: zip_map2 [THEN sym], simp)
(* The redundant store produces buffers with redundant elements.
add_red is a general characterization of such buffers. *)
definition "add_red l f =
concat (map (\<lambda>((l, v), n). (l, v) # map (\<lambda>v. (l, v)) (f n)) (zip l [0..<length l]))"
lemma add_red_nil [simp]: "add_red [] f = []"
by (simp add: add_red_def)
lemma add_red_cons [simp]: "add_red (x # l) f = x # map (\<lambda>v. (fst x, v)) (f 0) @ add_red l (\<lambda>n. f (Suc n))"
apply (auto simp add: add_red_def)
apply (case_tac "map (\<lambda>((l, v), n). (l, v) # map (Pair l) (f n)) (zip (x # l) ([0..<length l] @ [length l]))", auto)
apply (case_tac "[0..<length l] @ [length l]", auto)
apply (case_tac "[0..<length l] @ [length l]", auto)
apply (case_tac "[0..<length l] @ [length l]", auto)
apply (cut_tac i=0 and j="Suc (length l)" and x=ba and xs=list in upt_eq_Cons_conv, auto)
apply (rule_tac f=concat in arg_cong)
apply (rule map_upt_zip_Suc)
by (metis upt_Suc_append zip_Suc)
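(* A concrete instance (a hedged sketch, unfolded via add_red_nil and add_red_cons):
one buffered write plus one redundant copy of the same value. *)
lemma "add_red [(l, v)] (\<lambda>n. [v]) = [(l, v), (l, v)]"
by simp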
lemma add_red_app [simp]: "add_red (l @ l') f = add_red l f @ add_red l' (\<lambda>n. f (n + length l))"
by (induct l arbitrary: f, auto)
lemma add_red_id [simp]: "(\<And>n. n < length l \<Longrightarrow> f n = []) \<Longrightarrow> add_red l f = l"
by (induct l arbitrary: f, auto)
lemma find_append: "(\<And>x. x \<in> set l \<Longrightarrow> \<not>P x) \<Longrightarrow> List.find P (l @ l') = List.find P l'"
by (induct l, auto)
lemma find_append2 [simp]: "List.find P l = None \<Longrightarrow> List.find P (l @ l') = List.find P l'"
by (induct l, auto)
lemma find_append3 [simp]: "List.find P l = Some x \<Longrightarrow> List.find P (l @ l') = Some x"
by (induct l, auto)
lemma add_red_find [simp]: "List.find (\<lambda>(l, v). l = x) (add_red buf f) =
List.find (\<lambda>(l, v). l = x) buf"
apply (induct buf arbitrary: f, auto)
apply (rule trans, rule find_append, auto)
done
context TSO begin
lemma update_red: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs'); \<And>t. red t = add_red (bufs t) (f t)\<rbrakk> \<Longrightarrow>
\<exists>red' f'. update_mem (mem, red) ops (mem', red') \<and> (\<forall>t. red' t = add_red (bufs' t) (f' t))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). (\<forall>t. red t = add_red (bufs t) (f t)) \<longrightarrow>
(\<exists>red' f'. update_mem (mem, red) ops (mem', red') \<and> (\<forall>t. red' t = add_red (bufs' t) (f' t)))"
in update_mem.induct, auto)
apply (rule_tac x="\<lambda>t. (SOME up. bufs' t = up @ bufsa t \<and> set up = {(l, v). Write t l v \<in> ops} \<and>
distinct up) @ red t" in exI, auto)
apply (rule no_atomic, simp_all)
apply (cut_tac P="\<lambda>up. bufs' t = up @ bufsa t \<and> set up = {(l, v). Write t l v \<in> ops} \<and>
distinct up" in someI_ex, simp+)
apply (rule_tac x="\<lambda>t n. if n < length (SOME up. bufs' t = up @ bufsa t \<and>
set up = {(l, v). Write t l v \<in> ops} \<and> distinct up) then [] else f t (n - length (SOME up.
bufs' t = up @ bufsa t \<and> set up = {(l, v). Write t l v \<in> ops} \<and> distinct up))" in exI, clarsimp)
apply (subgoal_tac "\<exists>up. bufs' t = up @ bufsa t \<and> set up = {(l, v). Write t l v \<in> ops} \<and> distinct up",
clarify, clarsimp)
apply (cut_tac P="\<lambda>upa. up = upa \<and> set upa = {(l, v). Write t l v \<in> ops} \<and> distinct upa" in someI,
force, clarsimp)
apply metis
apply (drule_tac bufs=red and t=t and a="add_red buf (f' t)" in process_buffer, simp, clarsimp)
apply (subgoal_tac "dom (map_of (map (Pair l) (f' t (length buf)))) = {l} \<or> f' t (length buf) = []",
erule disjE, simp+)
apply (rule_tac x="red'(t := add_red buf (f' t))" in exI, clarsimp)
apply (rule_tac x=f' in exI, simp)
apply (rule_tac x="red'(t := add_red buf (f' t))" in exI, clarsimp)
apply (rule_tac x=f' in exI, simp)
apply (auto simp add: dom_map_of_conv_image_fst intro!: set_eqI)
apply (case_tac "f' t (length buf)", simp+)
apply (rule_tac x=red in exI, auto)+
done
lemma update_red1: "update_mem (mem, bufs) ops (mem', bufs') \<Longrightarrow>
\<exists>f'. update_mem (mem, bufs(t := add_red (bufs t) f)) ops (mem', bufs'(t := add_red (bufs' t) f'))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). \<exists>f'. update_mem (mem, bufs(t := add_red (bufs t) f))
ops (mem', bufs'(t := add_red (bufs' t) f'))" in update_mem.induct, auto)
apply (rule_tac x="\<lambda>n. if n < length (SOME x. bufs' t = x @ bufs t \<and> set x = {(l, v). Write t l v \<in> ops} \<and> distinct x)
then [] else f (n - length (SOME x. bufs' t = x @ bufs t \<and> set x = {(l, v). Write t l v \<in> ops} \<and> distinct x))"
in exI, rule no_atomic, auto)
apply (cut_tac P="\<lambda>up. bufs' t = up @ bufs t \<and> set up = {(l, v). Write t l v \<in> ops} \<and>
distinct up" in someI_ex, force, clarsimp)
apply (rule_tac x="SOME x. bufs' t = x @ bufs t \<and> set x = {(l, v). Write t l v \<in> ops} \<and> distinct x"
in exI, clarsimp)
apply (rule_tac s="add_red ((SOME x. bufs' t = x @ bufs t \<and> set x = {(l, v). Write t l v \<in> ops} \<and> distinct x) @ bufs t)
(\<lambda>n. if n < length (SOME x. bufs' t = x @ bufs t \<and> set x = {(l, v). Write t l v \<in> ops} \<and> distinct x) then []
else f (n - length (SOME x. bufs' t = x @ bufs t \<and> set x = {(l, v). Write t l v \<in> ops} \<and> distinct x)))"
in trans, simp, simp (no_asm))
apply (drule_tac bufs="bufs(t := add_red (bufs t) f)" and t=t and a="add_red buf f'" in process_buffer,
simp, clarsimp)
apply (subgoal_tac "dom (map_of (map (Pair l) (f' (length buf)))) = {l} \<or> f' (length buf) = []",
auto simp add: dom_map_of_conv_image_fst intro!: set_eqI)
apply (metis (full_types) append_Nil fst_conv imageI in_set_conv_decomp list.exhaust)
apply (drule_tac bufs="bufs(t := add_red (bufs t) f)" in update, simp, force,
force simp add: fun_upd_twist)
apply force
done
lemma can_read_red: "b' t = add_red (b t) f \<Longrightarrow> can_read (mem, b') t = can_read (mem, b) t"
by (clarsimp intro!: ext simp add: can_read_def split: option.splits)
lemma can_read_red_loc: "\<lbrakk>b' t = (l', v) # add_red (b t) f; l' \<noteq> l\<rbrakk> \<Longrightarrow>
can_read (mem, b') t l = can_read (mem, b) t l"
by (clarsimp intro!: ext simp add: can_read_def split: option.splits)
end
(* Sequential consistency memory model *)
locale SC = fixes undef::'val begin
abbreviation "free_set mem \<equiv> UNIV - dom mem"
abbreviation "can_read mem t l \<equiv> {v. mem l = Some v}"
inductive update_mem where
no_atomic [intro]: "\<lbrakk>\<And>t l v v'. ARW t l v v' \<notin> ops; \<And>l. (\<forall>t v. Write t l v \<notin> ops) \<Longrightarrow> writes l = None;
\<And>t l v. Write t l v \<in> ops \<Longrightarrow> \<exists>t v. Write t l v \<in> ops \<and> writes l = Some v; finite ops\<rbrakk> \<Longrightarrow>
update_mem mem ops (mem |` (UNIV - {l. \<exists>t. Free t l \<in> ops}) ++
(\<lambda>l. if \<exists>t. Alloc t l \<in> ops then Some undef else None) ++ writes)" |
atomic [intro!]: "update_mem mem {ARW t l v v'} (mem(l \<mapsto> v'))"
abbreviation "start_mem \<equiv> empty"
lemma update_threads: "\<lbrakk>update_mem mem ops mem'; \<forall>a'\<in>ops'. \<exists>a\<in>ops. \<exists>t. a' = set_thread (t::'t)
(a::('t, 'l, 'val) access)\<rbrakk> \<Longrightarrow> update_mem mem ops' mem'"
apply (drule_tac P="\<lambda>mem ops mem'. \<forall>ops'. (\<forall>a'\<in>ops'. \<exists>a\<in>ops. \<exists>t. a' = set_thread t a) \<longrightarrow>
update_mem mem ops' mem'" in update_mem.induct, auto)
apply (subgoal_tac "{l. \<exists>t. Free t l \<in> opsa} = {l. \<exists>t. Free t l \<in> ops'a} \<and> (\<lambda>l. if \<exists>t. Alloc t l \<in>
opsa then Some undef else None) = (\<lambda>l. if \<exists>t. Alloc t l \<in> ops'a then Some undef else None)", simp)
apply (rule no_atomic, clarsimp)
apply (thin_tac "\<forall>a'\<in>ops'. \<exists>a\<in>ops. \<exists>t. a' = set_thread t a")
apply (erule_tac x="ARW t l v v'" in ballE, simp_all, clarsimp)
apply (case_tac a, simp_all)
apply (subgoal_tac "\<forall>t v. Write t l v \<notin> opsa", simp, clarsimp)
oops
lemma update_threads: "update_mem mem ops mem' \<Longrightarrow> update_mem mem (set_thread t ` ops) mem'"
apply (drule_tac P="\<lambda>mem ops mem'. update_mem mem (set_thread t ` ops) mem'" in update_mem.induct, auto)
apply (subgoal_tac "{l. \<exists>t. Free t l \<in> ops} = {l. \<exists>ta. Free ta l \<in> set_thread t ` ops} \<and> (\<lambda>l. if
\<exists>t. Alloc t l \<in> ops then Some undef else None) = (\<lambda>l. if \<exists>ta. Alloc ta l \<in> set_thread t ` ops
then Some undef else None)", simp)
apply (rule no_atomic, clarsimp)
apply (case_tac x, simp_all)
apply (subgoal_tac "\<forall>t v. Write t l v \<notin> ops", simp, clarsimp)
apply (erule_tac x=t in allE, erule_tac x=v in allE, force simp add: image_def)
apply (subgoal_tac "\<exists>t v. Write t l v \<in> ops \<and> writes l = Some v", clarsimp simp add: image_def)
apply (rule_tac x=t in exI, rule_tac x="Write tb l va" in bexI, simp+)
apply (clarsimp simp add: image_def)
apply (case_tac x, simp_all)
apply (auto simp add: image_def)
apply (rule_tac x=t in exI, rule bexI, simp_all)
apply (case_tac xa, simp_all, metis)
apply (rule ext, auto)
apply (rule_tac x=t in exI, rule bexI, simp_all)
apply (case_tac x, simp_all)
done
lemma stays_not_free: "\<lbrakk>update_mem mem ops mem'; l \<notin> free_set mem; \<forall>t. Free t l \<notin> ops\<rbrakk> \<Longrightarrow>
l \<notin> free_set mem'"
by (induct rule: update_mem.induct, auto split: if_splits)
lemma alloc_not_free: "\<lbrakk>update_mem mem ops mem'; Alloc t l \<in> ops; Free t l \<notin> ops\<rbrakk> \<Longrightarrow>
l \<notin> free_set mem'"
by (induct rule: update_mem.induct, auto split: if_splits)
end
sublocale SC \<subseteq> memory_model free_set can_read update_mem start_mem
by (unfold_locales, metis alloc_not_free, metis stays_not_free)
context SC begin
lemma update_none [intro!, simp]: "update_mem C {} C"
by (cut_tac no_atomic, auto simp add: restrict_map_def)
lemma update_none_only [simp, dest!]: "update_mem mem {} mem' \<Longrightarrow> mem' = mem"
by (erule update_mem.cases, auto simp add: restrict_map_def map_add_def)
lemma update_one_write [intro!, simp]: "mem' = mem(l \<mapsto> v) \<Longrightarrow>
update_mem mem {Write t l v} mem'"
by (cut_tac ops="{Write t l v}" and writes="[l \<mapsto> v]" in no_atomic, auto simp add: restrict_map_def)
lemma update_one_writeD [dest!, simp]: "update_mem mem {Write t l v} mem' \<Longrightarrow> mem' = mem(l \<mapsto> v)"
by (erule update_mem.cases, auto intro!: ext simp add: map_add_def split: option.splits)
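(* A hedged corollary sketch: composing two single-write steps yields the expected
final memory, by the destruction rules above. *)
corollary update_two_writes: "\<lbrakk>update_mem mem {Write t l v} mem';
update_mem mem' {Write t' l' v'} mem''\<rbrakk> \<Longrightarrow> mem'' = mem(l \<mapsto> v, l' \<mapsto> v')"
by auto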
lemma update_one_read [intro, simp]: "update_mem mem {Read t l v} mem"
by (cut_tac ops="{Read t l v}" in no_atomic, auto simp add: restrict_map_def)
lemma update_one_readD [dest!, simp]: "update_mem mem {Read t l v} mem' \<Longrightarrow> mem' = mem"
by (erule update_mem.cases, auto intro!: ext simp add: restrict_map_def map_add_def)
lemma update_one_alloc [intro!, simp]: "mem' = mem(l \<mapsto> undef) \<Longrightarrow>
update_mem mem {Alloc t l} mem'"
apply (cut_tac ops="{Alloc t l}" and mem=mem in no_atomic, auto simp add: restrict_map_def)
apply (subgoal_tac "mem ++ (\<lambda>la. if la = l then Some undef else None) = mem(l \<mapsto> undef)", simp,
auto intro!: ext simp add: map_add_def)
done
lemma update_one_allocD [dest!]: "update_mem mem {Alloc t l} mem' \<Longrightarrow> mem' = mem(l \<mapsto> undef)"
by (erule update_mem.cases, auto simp add: restrict_map_def map_add_def)
lemma update_frees [intro!, simp]: "\<lbrakk>mem' = mem |` (UNIV - S); finite S\<rbrakk> \<Longrightarrow>
update_mem mem (Free t ` S) mem'"
apply (cut_tac ops="Free t ` S" and mem=mem in no_atomic, auto)
apply (rule subst, rule_tac f="update_mem mem (Free t ` S)" in arg_cong, simp_all)
apply (auto intro!: ext simp add: map_add_def restrict_map_def, force)
done
lemma update_freesD [dest!, simp]: "update_mem mem (Free t ` S) mem' \<Longrightarrow>
mem' = mem |` (UNIV - S)"
apply (erule update_mem.cases, auto intro!: ext simp add: map_add_def restrict_map_def
split: option.splits)
apply (metis image_eqI)
apply (metis access.distinct(13) image_iff option.distinct(1))
apply (metis image_eqI)
by (metis access.distinct(13) image_iff option.distinct(1))
lemma update_ARW [intro!]: "mem' = mem(l \<mapsto> v') \<Longrightarrow> update_mem mem {ARW t l v v'} mem'"
by clarsimp
lemma update_ARWD [dest!]: "update_mem mem {ARW t l v v'} mem' \<Longrightarrow> mem' = mem(l \<mapsto> v')"
by (erule update_mem.cases, auto)
lemma update_past: "\<lbrakk>update_mem mem ops mem'; \<And>t v. Write t l v \<notin> ops;
\<And>t v v'. ARW t l v v' \<notin> ops; \<And>t. Free t l \<notin> ops; \<And>t. Alloc t l \<notin> ops\<rbrakk> \<Longrightarrow>
update_mem (mem(l := v)) ops (mem'(l := v))"
apply (induct rule: update_mem.induct, auto)
apply (cut_tac ops=ops and mem="mem(l := v)" and writes=writes in no_atomic, auto)
apply (rule_tac f1="update_mem (mem(l := v))" in arg_cong2 [THEN subst],
auto intro!: ext simp add: map_add_def restrict_map_def split: option.splits)
by (metis atomic fun_upd_twist)
end
locale undef = fixes undef::'val
sublocale undef \<subseteq> TSO: TSO .
sublocale undef \<subseteq> SC: SC .
context undef begin
(* SC can be modeled by TSO. *)
(*
lemma process_all_buffers [rule_format]: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs');
\<forall>t. bufs' t = ma t @ mb t; finite {t. mb t \<noteq> []}\<rbrakk> \<Longrightarrow>
\<exists>m'. update_mem (mem, bufs) ops (mem' ++ m', (\<lambda>t. ma t)) \<and>
(\<forall>l v. ((m' l = Some v) \<longrightarrow> (\<exists>t. map_of (mb t) l = Some v)) \<and>
(m' l = None \<longrightarrow> (\<forall>t. map_of (mb t) l = None)))"
apply (drule_tac P="\<lambda>S. \<forall>mem' bufs' ma mb. (update_mem (mem, bufs) ops (mem', bufs') \<and>
(\<forall>t. bufs' t = ma t @ mb t)) \<and> S = {t. mb t \<noteq> []} \<longrightarrow>
(\<exists>m'. update_mem (mem, bufs) ops (mem' ++ m', (\<lambda>t. ma t)) \<and>
(\<forall>l v. ((m' l = Some v) \<longrightarrow> (\<exists>t. map_of (mb t) l = Some v)) \<and>
(m' l = None \<longrightarrow> (\<forall>t. map_of (mb t) l = None))))" in finite_induct, auto)
apply (rule_tac x=empty in exI, simp)
apply (subgoal_tac "bufs'a = maa", simp)
apply (auto intro!: ext split: option.split)
apply (cut_tac a=x and B=F in insertI1, clarsimp)
apply (thin_tac "\<forall>t. bufs' t = ma t @ mb t")
apply (drule_tac bufs'=bufs'a and t=x and a="ma x" in process_buffer, auto)
apply (erule_tac x="mem'a ++ map_of (mb x)" in allE, erule_tac x="bufs'a(x := ma x)" in allE,
erule_tac x=ma in allE, erule_tac x="mb(x := [])" in allE, clarsimp, erule impE, force, clarsimp)
apply (rule_tac x="map_of (mb x) ++ m'" in exI, auto split: if_splits, metis, metis)
apply (erule_tac x=mem' in allE, erule_tac x=bufs' in allE, simp)
done
lemma SC_lt_update [simp, intro]: "update_mem_SC mem ops mem' \<Longrightarrow>
update_mem (mem, \<lambda>t. []) ops (mem', \<lambda>t. [])"
apply (erule update_mem_SC.cases, auto split: if_splits)
apply (cut_tac ops=ops and mem=mem and bufs="\<lambda>t. []" and
bufs'="\<lambda>t. list_of_set {(l, v) | l v. Write t l v \<in> ops \<and> writes l = Some v} @
list_of_set {(l, v) | l v. Write t l v \<in> ops \<and> writes l \<noteq> Some v}" in no_atomic, simp_all)
apply (subgoal_tac "finite {(l, v). Write t l v \<in> ops}", auto)
apply (rule finite_vimageI, auto simp add: inj_on_def)
apply (drule_tac ma="\<lambda>t. list_of_set {(l, v) | l v. Write t l v \<in> ops \<and> writes l = Some v}" and
mb="\<lambda>t. list_of_set {(l, v) | l v. Write t l v \<in> ops \<and> writes l \<noteq> Some v}" in process_all_buffers)
apply (simp split: if_splits)
apply clarsimp
apply (rule_tac B="{t. \<exists>a \<in> ops. get_thread a = t}" in finite_subset, clarsimp)
apply (clarsimp simp add: card_gt_0_iff, rule_tac x="Write x a b" in bexI, simp+)
apply clarsimp
apply (drule_tac ma="\<lambda>t. []" and
mb="\<lambda>t. list_of_set {(l, v) | l v. Write t l v \<in> ops \<and> writes l = Some v}" in process_all_buffers)
apply (simp split: if_splits)
apply clarsimp
apply (rule_tac B="{t. \<exists>a \<in> ops. get_thread a = t}" in finite_subset, clarsimp)
apply (clarsimp simp add: card_gt_0_iff, rule_tac x="Write x a b" in bexI, simp+)
apply clarsimp
apply (rule_tac x1="(mem |` (UNIV - {l. \<exists>t. Free t l \<in> ops}) ++
(\<lambda>l. if \<exists>t. Alloc t l \<in> ops then Some undef else None) ++ m' ++ m'a, \<lambda>t. case case if t \<in> dom threads
then Some (list_of_set {(l, v) |l v. Write t l v \<in> ops \<and> writes l = Some v} @
list_of_set {(l, v) |l v. Write t l v \<in> ops \<and> writes l \<noteq> Some v})
else None of
None \<Rightarrow> None | Some x \<Rightarrow> Some (list_of_set {(l, v) |l v. Write t l v \<in> ops \<and> writes l = Some v}) of
None \<Rightarrow> None | Some x \<Rightarrow> Some [])" in cong [THEN subst], simp, auto)
apply (subgoal_tac "m' ++ m'a = writes")
apply (metis map_add_assoc)
apply (rule ext, (erule_tac x=x in allE)+, clarsimp simp add: map_add_def split: option.split)
apply (case_tac "writes x", simp_all)
apply (subgoal_tac "\<forall>t v. Write t x v \<notin> ops", rule ccontr, clarsimp, erule disjE,
clarsimp split: if_splits)
apply (drule map_of_is_SomeD)
apply (subgoal_tac "finite {(l, v). Write t l v \<in> ops}", simp+)
apply (rule finite_vimageI, simp, simp add: inj_on_def)
apply (clarsimp split: if_splits)
apply (drule map_of_is_SomeD)
apply (subgoal_tac "finite {(l, v). Write t l v \<in> ops}", simp+)
apply (rule finite_vimageI, simp, simp add: inj_on_def)
apply (metis not_Some_eq)
apply (subgoal_tac "\<exists>t. Write t x a \<in> ops", clarsimp)
apply (rule conjI, clarsimp)
apply (erule_tac x=t in allE)
apply (clarsimp simp add: map_of_eq_None_iff)
apply (subgoal_tac "finite {(l, v). Write t l v \<in> ops}", simp+)
apply (erule notE, rule_tac x="(x, a)" in image_eqI, simp+)
apply (rule finite_vimageI, simp, simp add: inj_on_def)
apply clarsimp
apply (drule map_of_is_SomeD)
apply (subgoal_tac "finite {(l, v). Write ta l v \<in> ops}", simp+)
apply (rule finite_vimageI, simp, simp add: inj_on_def)
by (metis (hide_lams, full_types) not_Some_eq option.inject)
lemma make_bufs_can_read [simp]: "can_read (mem, \<lambda>t. []) t = can_read_SC mem t"
by (rule ext, simp add: can_read_def)
(* Because can_read and update_mem are the only interfaces to the memory provided by memory_model,
this implies that SC refines TSO for any language. Is there a way to make this explicit? *)
*)
end
locale PSO = fixes undef::'val begin
(* PSO memory model *)
abbreviation "free_set mem \<equiv> UNIV - dom (fst mem)"
definition "can_read mem t l \<equiv> case snd mem t l of v # buf \<Rightarrow> {v}
| [] \<Rightarrow> {v. fst mem l = Some v}"
inductive update_mem where
no_atomic [intro]: "\<lbrakk>\<And>t l v v'. ARW t l v v' \<notin> ops; \<And>t l. \<exists>up. bufs' t l = up @ bufs t l \<and>
set up = {v. Write t l v \<in> ops} \<and> distinct up\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) ops (mem |` (UNIV - {l. \<exists>t. Free t l \<in> ops}) ++
(\<lambda>l. if \<exists>t. Alloc t l \<in> ops then Some undef else None), bufs')" |
update [intro]: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs'); bufs' t l = buf @ [v]\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) ops (mem'(l \<mapsto> v), bufs'(t := (bufs' t)(l := buf)))" |
atomic [intro!]: "bufs t l = [] \<Longrightarrow>
update_mem (mem, bufs) {ARW t l v v'} (mem(l \<mapsto> v'), bufs)"
abbreviation "start_mem \<equiv> (empty, \<lambda>t l. [])"
lemma alloc_not_free: "\<lbrakk>update_mem mem ops mem'; Alloc t l \<in> ops; \<forall>t. Free t l \<notin> ops\<rbrakk> \<Longrightarrow>
l \<notin> free_set mem'"
by (induct rule: update_mem.induct, auto split: if_splits)
lemma stays_not_free: "\<lbrakk>update_mem mem ops mem'; l \<notin> free_set mem; \<forall>t. Free t l \<notin> ops\<rbrakk> \<Longrightarrow>
l \<notin> free_set mem'"
by (induct rule: update_mem.induct, auto split: if_splits)
end
sublocale PSO \<subseteq> memory_model free_set can_read update_mem start_mem
by (unfold_locales, metis alloc_not_free, metis stays_not_free)
context PSO begin
lemma update_none [intro!, simp]: "update_mem C {} C"
by (cases C, cut_tac ops="{}" and mem=a and bufs=b in no_atomic, auto simp add: restrict_map_def)
lemma process_buffer: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs'); bufs' t l = a @ v # b\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) ops (mem'(l \<mapsto> v), bufs'(t := (bufs' t)(l := a)))"
apply (induct b arbitrary: mem' bufs' v rule: rev_induct, auto simp add: map_upd_triv)
apply (drule_tac t=t and l=l in update, simp, force)
done
lemma update_later: "\<lbrakk>update_mem (mem, bufs) {} (mem', bufs')\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs) {Write t l v} (mem', bufs'(t := (bufs' t)(l := v # bufs' t l)))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). ops = {} \<longrightarrow>
update_mem (mem, bufs) {Write t l v} (mem', bufs'(t := (bufs' t)(l := v # bufs' t l)))"
in update_mem.induct, auto)
apply (cut_tac ops="{Write t l v}" in no_atomic, auto)
apply (drule_tac ops="{Write t l v}" in update, auto)
apply (drule_tac ops="{Write t l v}" and t=ta in update, auto simp add: fun_upd_twist)
apply (drule_tac ops="{Write t l v}" and l=la in update, auto simp add: fun_upd_twist)
apply (drule_tac ops="{Write t l v}" and t=ta in update, auto simp add: fun_upd_twist)
done
lemma update_later2: "\<lbrakk>update_mem (mem, bufs) {} (mem', bufs'(t := (bufs' t)(l := buf)));
bufs' t l = v # buf\<rbrakk> \<Longrightarrow> update_mem (mem, bufs) {Write t l v} (mem', bufs')"
by (smt fun_upd_idem_iff fun_upd_upd list.inject update_later)
lemma update_past2: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs');
\<And>v. Write t l v \<notin> ops; \<And>l v v'. ARW t l v v' \<notin> ops\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs(t := (bufs t)(l := b @ bufs t l))) ops
(mem', bufs'(t := (bufs' t)(l := b @ bufs' t l)))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). (\<forall>v. Write t l v \<notin> ops) \<and>
(\<forall>l v v'. ARW t l v v' \<notin> ops) \<longrightarrow> update_mem (mem, bufs(t := (bufs t)(l := b @ bufs t l))) ops
(mem', bufs'(t := (bufs' t)(l := b @ bufs' t l)))" in update_mem.induct, auto)
apply (rule no_atomic, auto)
apply (subgoal_tac "\<exists>up. bufs' t l = up @ bufs t l \<and> set up = {v. Write t l v \<in> opsa} \<and>
distinct up", simp)
apply (metis (full_types))
apply (drule_tac bufs="bufs(t := (bufs t)(l := b @ bufs t l))" in update, auto)
apply (drule_tac bufs="bufs(t := (bufs t)(l := b @ bufs t l))" and t=ta in update,
auto simp add: fun_upd_twist)
apply (drule_tac bufs="bufs(t := (bufs t)(l := b @ bufs t l))" in update,
auto simp add: fun_upd_twist)
apply (drule_tac bufs="bufs(t := (bufs t)(l := b @ bufs t l))" and t=ta in update,
auto simp add: fun_upd_twist)
done
lemma update_write: "\<lbrakk>update_mem (mem, bufs) {Write t l v} (mem', bufs')\<rbrakk> \<Longrightarrow>
update_mem (mem, bufs(t := (bufs t)(l := v # bufs t l))) {} (mem', bufs')"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). ops = {Write t l v} \<longrightarrow>
update_mem (mem, bufs(t := (bufs t)(l := v # bufs t l))) {} (mem', bufs')"
in update_mem.induct, auto)
apply (cut_tac ops="{}" and bufs="bufs(t := (bufs t)(l := v # bufs t l))" and
bufs'=bufs' in no_atomic, simp_all)
apply (subgoal_tac "\<exists>up. bufs' ta la = up @ bufs ta la \<and> set up = {va. va = v \<and> ta = t \<and> la = l} \<and>
distinct up", clarify, auto)
apply (case_tac up, auto, case_tac list, auto)
done
lemma update_trans_rev: "\<lbrakk>update_mem (mem', bufs') {} (mem'', bufs'');
update_mem (mem, bufs) ops (mem', bufs')\<rbrakk> \<Longrightarrow> update_mem (mem, bufs) ops (mem'', bufs'')"
apply (drule_tac P="\<lambda>(mem', bufs') ops' (mem'', bufs''). ops' = {} \<and> update_mem (mem, bufs) ops (mem', bufs') \<longrightarrow>
update_mem (mem, bufs) ops (mem'', bufs'')" in update_mem.induct, auto simp add: restrict_map_def)
apply (subgoal_tac "bufs'a = bufsa", auto)
done
end
*)
(* In PSO, redundant elements are added to the buffers for individual locations. *)
definition "add_red2 l f = concat (map (\<lambda>(v, n). v # f n) (zip l [0..<length l]))"
lemma add_red2_nil [simp]: "add_red2 [] f = []"
by (simp add: add_red2_def)
lemma upt_0 [simp]: "j > i \<Longrightarrow> [i..<j] ! 0 = i"
by (induct i, auto)
lemma add_red2_nil2 [simp]: "(add_red2 l f = []) = (l = [])"
apply (auto simp add: add_red2_def)
apply (case_tac l, auto)
apply (erule_tac x="(a, 0)" in ballE, auto simp add: set_conv_nth)
apply (erule_tac x=0 in allE, auto)
by (metis gr_implies_not0 le0 upt_0 upt_Suc zero_less_Suc)
lemma map_upt_zip_Suc2 [simp]: "l' = map (\<lambda>(x, n). (x, Suc n)) l \<Longrightarrow> map (\<lambda>(v, n). v # f n) l' =
map (\<lambda>(v, n). v # f (Suc n)) l"
by auto
lemma add_red2_cons [simp]: "add_red2 (x # l) f = x # f 0 @ add_red2 l (\<lambda>n. f (Suc n))"
apply (auto simp add: add_red2_def)
apply (case_tac "map (\<lambda>(v, n). v # f n) (zip (x # l) ([0..<length l] @ [length l]))", auto)
apply (case_tac "[0..<length l] @ [length l]", auto)
apply (case_tac "[0..<length l] @ [length l]", auto)
apply (case_tac "[0..<length l] @ [length l]", auto)
apply (cut_tac i=0 and j="Suc (length l)" and x=b and xs=list in upt_eq_Cons_conv, auto)
apply (rule_tac f=concat in arg_cong)
apply (rule map_upt_zip_Suc2)
by (metis upt_Suc_append zip_Suc)
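(* A concrete instance (a hedged sketch, by add_red2_nil and add_red2_cons): one
buffered value plus one redundant copy of it. *)
lemma "add_red2 [v] (\<lambda>n. [v]) = [v, v]"
by simp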
lemma add_red2_app [simp]: "add_red2 (l @ l') f = add_red2 l f @ add_red2 l' (\<lambda>n. f (n + length l))"
by (induct l arbitrary: f, auto)
lemma add_red2_id [simp]: "(\<And>n. n < length l \<Longrightarrow> f n = []) \<Longrightarrow> add_red2 l f = l"
by (induct l arbitrary: f, auto)
context PSO begin
lemma update_red2: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs');
\<And>t l. red t l = add_red2 (bufs t l) (f t l)\<rbrakk> \<Longrightarrow>
\<exists>red' f'. update_mem (mem, red) ops (mem', red') \<and>
(\<forall>t l. red' t l = add_red2 (bufs' t l) (f' t l))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). (\<forall>t l. red t l = add_red2 (bufs t l) (f t l)) \<longrightarrow>
(\<exists>red' f'. update_mem (mem, red) ops (mem', red') \<and> (\<forall>t l. red' t l = add_red2 (bufs' t l) (f' t l)))"
in update_mem.induct, auto)
apply (rule_tac x="\<lambda>t l. (SOME up. bufs' t l = up @ (bufsa t l) \<and> set up = {v. Write t l v \<in> ops} \<and>
distinct up) @ red t l" in exI, auto)
apply (rule no_atomic, simp_all)
apply (cut_tac P="\<lambda>up. bufs' t l = up @ bufsa t l \<and> set up = {v. Write t l v \<in> ops} \<and>
distinct up" in someI_ex, simp+)
apply (rule_tac x="\<lambda>t l n. if n < length (SOME up. bufs' t l = up @ bufsa t l \<and> set up =
{v. Write t l v \<in> ops} \<and> distinct up) then [] else f t l (n - length (SOME up. bufs' t l =
up @ bufsa t l \<and> set up = {v. Write t l v \<in> ops} \<and> distinct up))" in exI, clarsimp)
apply (subgoal_tac "\<exists>up. bufs' t l = up @ bufsa t l \<and> set up = {v. Write t l v \<in> ops} \<and> distinct up",
clarify, clarsimp)
apply (cut_tac P="\<lambda>upa. up = upa \<and> set upa = {v. Write t l v \<in> ops} \<and> distinct upa" in someI,
force, clarsimp)
apply metis
apply (drule_tac bufs=red and t=t and l=l and a="add_red2 buf (f' t l)" in process_buffer, force)
apply (rule_tac x="red'(t := (red' t)(l := add_red2 buf (f' t l)))" in exI, clarsimp, metis)
apply (rule_tac x=red in exI, auto)
done
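(* As update_red2, but with redundant entries in thread t's buffers only; all other
   threads' buffers coincide with the real ones. *)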
lemma update_red2_one_buf: "\<lbrakk>update_mem (mem, bufs) ops (mem', bufs');
\<And>t'. t' \<noteq> t \<Longrightarrow> red t' = bufs t'; \<And>l. red t l = add_red2 (bufs t l) (f l)\<rbrakk> \<Longrightarrow>
\<exists>red' f'. update_mem (mem, red) ops (mem', red') \<and> (\<forall>t'. t' \<noteq> t \<longrightarrow> red' t' = bufs' t') \<and>
(\<forall>l. red' t l = add_red2 (bufs' t l) (f' l))"
apply (drule_tac P="\<lambda>(mem, bufs) ops (mem', bufs'). ((\<forall>t'. t' \<noteq> t \<longrightarrow> red t' = bufs t') \<and>
(\<forall>l. red t l = add_red2 (bufs t l) (f l))) \<longrightarrow> (\<exists>red' f'. update_mem (mem, red) ops (mem', red') \<and>
(\<forall>t'. t' \<noteq> t \<longrightarrow> red' t' = bufs' t') \<and> (\<forall>l. red' t l = add_red2 (bufs' t l) (f' l)))"
in update_mem.induct, simp_all, clarsimp)
apply (rule_tac x="bufs'(t := \<lambda>l. (SOME up. bufs' t l = up @ (bufsa t l) \<and> set up = {v. Write t l v \<in> ops} \<and>
distinct up) @ red t l)" in exI, clarsimp)
apply (rule conjI, rule no_atomic, simp_all)
apply (cut_tac P="\<lambda>up. bufs' t l = up @ bufsa t l \<and> set up = {v. Write t l v \<in> ops} \<and>
distinct up" in someI_ex, simp+)
apply (rule_tac x="\<lambda>l n. if n < length (SOME up. bufs' t l = up @ bufsa t l \<and> set up =
{v. Write t l v \<in> ops} \<and> distinct up) then [] else f l (n - length (SOME up. bufs' t l =
up @ bufsa t l \<and> set up = {v. Write t l v \<in> ops} \<and> distinct up))" in exI, clarsimp)
apply (subgoal_tac "\<exists>up. bufs' t l = up @ bufsa t l \<and> set up = {v. Write t l v \<in> ops} \<and> distinct up",
clarify, clarsimp)
apply (cut_tac P="\<lambda>upa. up = upa \<and> set upa = {v. Write t l v \<in> ops} \<and> distinct upa" in someI,
force, clarsimp)
apply metis
apply auto
apply (drule_tac bufs=red and t=t and l=l in process_buffer, force)
apply (rule_tac x="red'(t := (red' t)(l := add_red2 buf (f' l)))" in exI, clarsimp, metis)
apply (drule_tac bufs=red and t=ta and l=l in process_buffer, force)
apply (rule_tac x="red'(ta := (red' ta)(l := buf))" in exI, clarsimp, metis)
apply (rule_tac x=red in exI, auto)
apply (case_tac "ta = t", auto)
done
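(* Padding a buffer with redundant entries does not change the values a thread can read. *)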
lemma can_read_red [simp]: "b' t l = add_red2 (b t l) f \<Longrightarrow>
can_read (mem, b') t l = can_read (mem, b) t l"
by (clarsimp intro!: ext simp add: can_read_def split: list.splits)
lemma can_read_loc: "b' t l = b t l \<Longrightarrow>
can_read (mem, b') t l = can_read (mem, b) t l"
by (clarsimp intro!: ext simp add: can_read_def split: list.splits)
lemma two_part_buf [simp]: "bufs t l = buf \<Longrightarrow> bufs(t := (bufs t)(l := buf)) = bufs"
by (clarsimp intro!: ext)
end
*)
end
|
{"author": "liyili2", "repo": "timed-relaxed-memory-model", "sha": "6d85bc75d8b04228b3e581b945e3f672395f0c66", "save_path": "github-repos/isabelle/liyili2-timed-relaxed-memory-model", "path": "github-repos/isabelle/liyili2-timed-relaxed-memory-model/timed-relaxed-memory-model-6d85bc75d8b04228b3e581b945e3f672395f0c66/memory_model.thy"}
|
\section{Model specification}
\label{sec:model_spec}
The model specification for all the experiments in Section~\ref{sec:experiments} is shown in Table~\ref{tab:model_specification}.
The CIFAR10 ResNet uses regular ResNet units, while the CIFAR100 ResNet uses bottleneck units. Only the convolutional layers are shown, with filter size, number of filters, and the repeat count of each unit; the layer counts for the ResNets also include the batch normalization and ReLU layers. The LSTM models likewise differ across tasks in vocabulary size, word-embedding dimension, and number of layers.
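For concreteness, the CIFAR10 ResNet column of Table~\ref{tab:model_specification} corresponds to the sketch below (a minimal, illustrative Keras construction; the layer names, input shape, and omitted downsampling strides between stages are our assumptions rather than part of the released code):
\begin{verbatim}
# Illustrative sketch of the CIFAR10 ResNet body; not the released code.
from tensorflow import keras
from tensorflow.keras import layers

def residual_unit(x, filters):
    # Two 3x3 convolutions with batch normalization and ReLU,
    # plus an identity (or 1x1-projected) shortcut.
    shortcut = x
    y = layers.Conv2D(filters, 3, padding='same')(x)
    y = layers.BatchNormalization()(y)
    y = layers.Activation('relu')(y)
    y = layers.Conv2D(filters, 3, padding='same')(y)
    y = layers.BatchNormalization()(y)
    if shortcut.shape[-1] != filters:
        shortcut = layers.Conv2D(filters, 1, padding='same')(shortcut)
    return layers.Activation('relu')(layers.add([y, shortcut]))

inputs = keras.Input(shape=(32, 32, 3))
x = layers.Conv2D(4, 3, padding='same')(inputs)  # Conv 0
for filters in (4, 8, 16):                       # Unit 1s, 2s, 3s
    for _ in range(6):
        x = residual_unit(x, filters)
\end{verbatim}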
\begin{table}
\vspace{1em}
\begin{small}
\centering
\begin{tabular}{c@{\hskip 0.1in} c@{\hskip 0.1in} c@{\hskip 0.1in} c@{\hskip 0.1in} c@{\hskip 0.1in} c}
\toprule
network & \# layers & Conv 0 & Unit 1s & Unit 2s & Unit 3s \\
%\midrule
\midrule
CIFAR10 ResNet & 110
& $\left[\begin{array}{c c} 3 \times 3, & 4 \end{array} \right] $
& $\left[\begin{array}{c c} 3 \times 3, & 4 \\ 3\times 3, & 4\end{array} \right]\times 6 $
& $\left[\begin{array}{c c} 3 \times 3, & 8 \\ 3\times 3, & 8\end{array} \right]\times 6 $
& $\left[\begin{array}{c c} 3 \times 3, & 16 \\ 3\times 3, & 16\end{array} \right]\times 6 $
\\
\midrule
CIFAR100 ResNet & 164
& $\left[\begin{array}{c c} 3 \times 3, & 4 \end{array} \right] $
& $\left[\begin{array}{c c} 1 \times 1, & 16 \\ 3\times 3, & 16 \\ 1 \times 1, & 64 \end{array} \right]\times 6 $
& $\left[\begin{array}{c c} 1 \times 1, & 32 \\ 3\times 3, & 32 \\ 1 \times 1, & 128 \end{array} \right]\times 6 $
& $\left[\begin{array}{c c} 1 \times 1, & 64 \\ 3\times 3, & 64 \\ 1 \times 1, & 256 \end{array} \right]\times 6 $
\\
% \midrule
% CIFAR100 ResNext & 164
% & $\left[\begin{array}{c c} 3 \times 3, & 4 \end{array} \right] $
% & $\left[\begin{array}{c c} 1 \times 1, & 16 \\ 3\times 3, & 16 \\ 1 \times 1, & 64 \end{array} \right]\times 6 $
% & $\left[\begin{array}{c c} 1 \times 1, & 32 \\ 3\times 3, & 32 \\ 1 \times 1, & 128 \end{array} \right]\times 6 $
% & $\left[\begin{array}{c c} 1 \times 1, & 64 \\ 3\times 3, & 64 \\ 1 \times 1, & 256 \end{array} \right]\times 6 $
% \\
\midrule
\midrule
network & \# layers & Word Embed. & Layer 1 & Layer 2 & Layer 3 \\
\midrule
TS LSTM & 2 & [65 vocab, 128 dim] & 128 hidden units & 128 hidden units & -- \\
\midrule
PTB LSTM & 2 & [10000 vocab, 200 dim] & 200 hidden units & 200 hidden units & -- \\
\midrule
WSJ LSTM & 3 & [6922 vocab, 500 dim] & 500 hidden units & 500 hidden units & 500 hidden units\\
% \midrule
% PTB Tied LSTM & 2 & [10000 vocab, 650 dim] & 650 hidden units & 650 hidden units & \\
\bottomrule
\end{tabular}
\end{small}
\caption{Specification of ResNet and LSTM model architectures.}
\label{tab:model_specification}
\end{table}
|
{"hexsha": "f050e14d1c648e91fa9db6eac5f6c117ee67634e", "size": 2808, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "model_spec.tex", "max_stars_repo_name": "mitliagkas/dshs", "max_stars_repo_head_hexsha": "6d5262af72288dd06544c2d5831d0c198db251bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model_spec.tex", "max_issues_repo_name": "mitliagkas/dshs", "max_issues_repo_head_hexsha": "6d5262af72288dd06544c2d5831d0c198db251bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_spec.tex", "max_forks_repo_name": "mitliagkas/dshs", "max_forks_repo_head_hexsha": "6d5262af72288dd06544c2d5831d0c198db251bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.0, "max_line_length": 430, "alphanum_fraction": 0.6381766382, "num_tokens": 1123}
|
/*
Copyright 2010 Intel Corporation
Use, modification and distribution are subject to the Boost Software License,
Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt).
*/
//layout_database.hpp
#ifndef BOOST_POLYGON_TUTORIAL_LAYOUT_DATABASE_HPP
#define BOOST_POLYGON_TUTORIAL_LAYOUT_DATABASE_HPP
#include <boost/polygon/polygon.hpp>
#include <map>
#include "layout_rectangle.hpp"
typedef std::map<std::string, boost::polygon::polygon_90_set_data<int> > layout_database;
//map the layout rectangle data type to the boost::polygon::rectangle_concept
namespace boost { namespace polygon{
template <>
struct rectangle_traits<layout_rectangle> {
typedef int coordinate_type;
typedef interval_data<int> interval_type;
static inline interval_type get(const layout_rectangle& rectangle, orientation_2d orient) {
if(orient == HORIZONTAL)
return interval_type(rectangle.xl, rectangle.xh);
return interval_type(rectangle.yl, rectangle.yh);
}
};
template <>
struct geometry_concept<layout_rectangle> { typedef rectangle_concept type; };
}}
//insert layout rectangles into a layout database
inline void populate_layout_database(layout_database& layout, std::vector<layout_rectangle>& rects) {
for(std::size_t i = 0; i < rects.size(); ++i) {
layout[rects[i].layer].insert(rects[i]);
}
}
#endif
|
{"hexsha": "d3cc2b36b36f29c07a75c3374525fadd81964934", "size": 1387, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/libs/polygon/doc/tutorial/layout_database.hpp", "max_stars_repo_name": "randolphwong/mcsema", "max_stars_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1155.0, "max_stars_repo_stars_event_min_datetime": "2015-01-10T19:04:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:30:30.000Z", "max_issues_repo_path": "boost/libs/polygon/doc/tutorial/layout_database.hpp", "max_issues_repo_name": "randolphwong/mcsema", "max_issues_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 618.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T01:39:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T15:18:40.000Z", "max_forks_repo_path": "boost/libs/polygon/doc/tutorial/layout_database.hpp", "max_forks_repo_name": "randolphwong/mcsema", "max_forks_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 228.0, "max_forks_repo_forks_event_min_datetime": "2015-01-13T12:55:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:11:05.000Z", "avg_line_length": 33.0238095238, "max_line_length": 101, "alphanum_fraction": 0.7642393655, "num_tokens": 319}
|
/********************************************************************************
* Copyright 2009 The Robotics Group, The Maersk Mc-Kinney Moller Institute,
* Faculty of Engineering, University of Southern Denmark
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************/
#include "Message.hpp"
#include <boost/filesystem.hpp>
using namespace rw::core;
Message::Message (const std::string& file, int line, const std::string& message) :
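// Boost.Filesystem v2's filename() yields a string directly, while v3 yields a path,
// hence the extra string() call in the v3 branch.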
#if (BOOST_FILESYSTEM_VERSION == 2)
_file (boost::filesystem::path (file.c_str ()).filename ()),
#else
_file (boost::filesystem::path (file.c_str ()).filename ().string ()),
#endif
_line (line), _message (message)
{}
std::ostream& rw::core::operator<< (std::ostream& out, const Message& msg)
{
out << msg.getFile () << ":" << msg.getLine () << " " << msg.getText () << std::endl;
return out;
}
|
{"hexsha": "228c147c195642865d9e77b8c47cf0aff1b35c98", "size": 1441, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "RobWork/src/rw/core/Message.cpp", "max_stars_repo_name": "ZLW07/RobWork", "max_stars_repo_head_hexsha": "e713881f809d866b9a0749eeb15f6763e64044b3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-12-29T14:16:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T14:16:27.000Z", "max_issues_repo_path": "RobWork/src/rw/core/Message.cpp", "max_issues_repo_name": "ZLW07/RobWork", "max_issues_repo_head_hexsha": "e713881f809d866b9a0749eeb15f6763e64044b3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RobWork/src/rw/core/Message.cpp", "max_forks_repo_name": "ZLW07/RobWork", "max_forks_repo_head_hexsha": "e713881f809d866b9a0749eeb15f6763e64044b3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9210526316, "max_line_length": 89, "alphanum_fraction": 0.6210964608, "num_tokens": 311}
|