#include <chrono>
#include <climits>
#include <csignal>
#include <cstdlib>
#include <cstring>
#include <thread>
#include <glog/logging.h>
#include <boost/program_options.hpp>
#include <restinio/all.hpp>
#include <kspp/kspp.h>
#include <kspp/sources/mem_stream_source.h>
#include <kspp/processors/flat_map.h>
#include <kspp/metrics/prometheus_pushgateway_reporter.h>
#include <kspp/utils/env.h>
#include <bb_monitor_client_utils/pb_json_parsing.h>
#include <bb_monitor_client_utils/prometheus_utils.h>
#include <bb_monitor_client_utils/bb_metric_sink.h>
#include <bb_monitor_client_utils/bb_intake_sink.h>
#include <bb_monitor_utils/compression.h>
#define SERVICE_NAME "dd_api_srv"
#define DEFAULT_PORT "8080"
//#define DEFAULT_DD_API_KEY "2cc3d365-c765-4d3e-933e-f1bb9abd4419"
/* Exit flag for main loop */
static bool run = true;
static void sigterm(int sig) {
run = false;
}
//static std::string s_dd_api_key = DEFAULT_DD_API_KEY;
/*
* bool authorize(mg_connection *conn){
const char* api_key = mg_get_header(conn, "Dd-Api-Key");
if (!api_key) {
send_status(conn, 403); // Unauthorized
mg_printf(conn, "\r\n");
return false;
}
if (s_dd_api_key != api_key) {
send_status(conn, 403); // Unauthorized
mg_printf(conn, "\r\n");
return false;
}
return true;
}
*/
std::string get_decompressed_body(restinio::request_handle_t req){
if (req->header().has_field(restinio::http_field::content_encoding)) {
auto encoding = req->header().get_field(restinio::http_field::content_encoding);
if (encoding == "deflate") {
return decompress_deflate(req->body());
} else if (encoding == "snappy") {
return decompress_snappy(req->body());
} else if (encoding == "gzip") {
return decompress_gzip(req->body());
} else {
LOG(ERROR) << "unknown content-encoding: " << encoding;
return req->body();
}
}
return req->body();
}
using router_t = restinio::router::express_router_t<>;
auto create_request_handler(std::shared_ptr<kspp::mem_stream_source<void, bb_monitor::Metric>> metrics_source, std::shared_ptr<kspp::mem_stream_source<void, bb_monitor::Intake>> intake_stream)
{
auto router = std::make_unique<router_t>();
router->http_post(
"/api/v1/series",
[metrics_source]( auto req, auto ){
auto body = get_decompressed_body(req);
//LOG(INFO) << body;
auto v = parse_datadog_series2pb(body);
for(auto m : v)
insert(*metrics_source, m);
req->create_response(restinio::status_created())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
router->http_post(
"/api/v1/check_run",
[metrics_source]( auto req, auto ){
auto body = get_decompressed_body(req);
//LOG(INFO) << body;// remove me
auto v = parse_datadog_check_run2pb(body);
for(auto m : v)
insert(*metrics_source, m);
req->create_response(restinio::status_created())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
router->http_post(
"/intake/",
[intake_stream]( auto req, auto ){
auto body = get_decompressed_body(req);
//LOG(INFO) << body; // remove me
bb_monitor::Intake intake;
intake.set_agent("datadog");
intake.set_data(body);
intake.set_timestamp(kspp::milliseconds_since_epoch());
insert(*intake_stream, intake);
req->create_response(restinio::status_created())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
// not implemented
router->http_post(
"/api/v1/validate",
[]( auto req, auto ){
LOG(INFO) << "not implemented";
req->create_response(restinio::status_created())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
router->http_post(
"/api/v1/comments",
[]( auto req, auto ){
LOG(INFO) << "not implemented";
req->create_response(restinio::status_created())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
router->http_post(
"/api/v1/events",
[]( auto req, auto ){
LOG(INFO) << "not implemented";
req->create_response(restinio::status_created())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
router->http_post(
"/api/v1/tags/hosts",
[]( auto req, auto ){
LOG(INFO) << "not implemented";
req->create_response(restinio::status_created())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
router->http_post(
"/api/v1/container",
[]( auto req, auto ){
LOG(INFO) << "not implemented";
req->create_response(restinio::status_created())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
router->http_get(
"/healthz",
[]( auto req, auto ){
LOG(INFO) << "healthz called";
req->create_response(restinio::status_ok())
.append_header( restinio::http_field::content_type, "text/plain; charset=utf-8" )
.set_body("\r\n")
.done();
return restinio::request_accepted();
} );
return router;
}
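// Example of exercising the proxy once it is running (a sketch: the JSON body
// shape follows the public Datadog v1 series API, which this file assumes but
// does not define):
//
//   curl -X POST http://localhost:8080/api/v1/series \
//        -H 'Content-Type: application/json' \
//        -d '{"series":[{"metric":"example.metric","points":[[1577836800,1.0]]}]}'
//
// Agents that compress their payload set Content-Encoding (deflate, snappy or
// gzip), which get_decompressed_body() above unpacks before parsing.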
using namespace std::chrono_literals;
using namespace kspp;
int
main(int argc, char *argv[])
{
FLAGS_logtostderr = 1;
google::InitGoogleLogging(argv[0]);
boost::program_options::options_description desc("options");
desc.add_options()
("help", "produce help message")
("app_realm", boost::program_options::value<std::string>()->default_value(get_env_and_log("APP_REALM", "DEV")), "app_realm")
("port", boost::program_options::value<std::string>()->default_value(get_env_and_log("PORT", DEFAULT_PORT)), "port")
//("dd_api_key", boost::program_options::value<std::string>()->default_value(get_env_and_log("DD_API_KEY", DEFAULT_DD_API_KEY)), "dd_api_key")
("monitor_api_key", boost::program_options::value<std::string>()->default_value(get_env_and_log("MONITOR_API_KEY", "")), "monitor_api_key")
("monitor_secret_access_key", boost::program_options::value<std::string>()->default_value(get_env_and_log("MONITOR_SECRET_ACCESS_KEY", "")),"monitor_secret_access_key")
("monitor_uri", boost::program_options::value<std::string>()->default_value(get_env_and_log("MONITOR_URI", "lb.bitbouncer.com:30111")),"monitor_uri")
("max_queue", boost::program_options::value<std::string>()->default_value(get_env_and_log("MAX_QUEUE", "100000")),"max_queue")
;
boost::program_options::variables_map vm;
boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), vm);
boost::program_options::notify(vm);
if (vm.count("help")) {
std::cout << desc << std::endl;
return 0;
}
std::string app_realm;
if (vm.count("app_realm")) {
app_realm = vm["app_realm"].as<std::string>();
}
std::string port;
if (vm.count("port")) {
port = vm["port"].as<std::string>();
}
/*if (vm.count("dd_api_key")) {
s_dd_api_key = vm["dd_api_key"].as<std::string>();
}
*/
std::string monitor_uri;
if (vm.count("monitor_uri")) {
monitor_uri = vm["monitor_uri"].as<std::string>();
}
std::string monitor_api_key;
if (vm.count("monitor_api_key")) {
monitor_api_key = vm["monitor_api_key"].as<std::string>();
}
if (monitor_api_key.size()==0){
std::cerr << "monitor_api_key must be defined - exiting" << std::endl;
return -1;
}
std::string monitor_secret_access_key;
if (vm.count("monitor_secret_access_key")) {
monitor_secret_access_key = vm["monitor_secret_access_key"].as<std::string>();
}
int64_t max_queue=0;
if (vm.count("max_queue")) {
auto s = vm["max_queue"].as<std::string>();
max_queue = atoll(s.c_str());
}
if (max_queue<=0)
max_queue = LLONG_MAX;
std::string consumer_group(SERVICE_NAME);
auto config = std::make_shared<kspp::cluster_config>(consumer_group, kspp::cluster_config::PUSHGATEWAY);
config->load_config_from_env();
//std::string src_topic = dd::raw_metrics_topic_name(tenant_id);
//std::string dst_topic = dd::prometheus_metrics_topic_name(tenant_id);
LOG(INFO) << "port : " << port;
//LOG(INFO) << "dd_api_key : " << s_dd_api_key;
LOG(INFO) << "monitor_uri : " << monitor_uri;
LOG(INFO) << "monitor_api_key : " << monitor_api_key;
if (monitor_secret_access_key.size()>0)
LOG(INFO) << "monitor_secret_access_key : " << "[hidden]";
LOG(INFO) << "max_queue : " << max_queue;
LOG(INFO) << "discovering facts...";
kspp::topology_builder builder(config);
auto topology = builder.create_topology();
auto metrics_source = topology->create_processor<mem_stream_source<void, bb_monitor::Metric>>(0);
std::shared_ptr<grpc::Channel> metrics_channel;
{
grpc::ChannelArguments channelArgs;
auto channel_creds = grpc::SslCredentials(grpc::SslCredentialsOptions());
metrics_channel = grpc::CreateCustomChannel(monitor_uri, channel_creds, channelArgs);
}
auto sink = topology->create_sink<bb_metric_sink>(metrics_source, metrics_channel, monitor_api_key, monitor_secret_access_key, max_queue);
auto intake_source = topology->create_processor<mem_stream_source<void, bb_monitor::Intake>>(0);
auto intake_sink = topology->create_sink<bb_intake_sink>(intake_source, metrics_channel, monitor_api_key, monitor_secret_access_key);
std::signal(SIGINT, sigterm);
std::signal(SIGTERM, sigterm);
std::signal(SIGPIPE, SIG_IGN);
topology->add_labels( {
{ "app_name", SERVICE_NAME },
{ "app_realm", app_realm },
{ "hostname", default_hostname() }
});
topology->start(kspp::OFFSET_END); // the offset has to be something; since events are fed in from the web it is irrelevant here
std::thread t([topology]() {
while (run) {
if (topology->process(kspp::milliseconds_since_epoch()) == 0) {
std::this_thread::sleep_for(5000ms);
topology->commit(false);
}
}
LOG(INFO) << "flushing events..";
topology->flush(true, 10000); // 10sec max
LOG(INFO) << "flushing events done";
});
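// Design note: kspp topology processing runs on this background thread while
// restinio serves HTTP on the main thread below; the mem_stream_sources act as
// the hand-off queues between the HTTP handlers and the gRPC sinks.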
LOG(INFO) << "status is up";
try {
using traits_t =
restinio::traits_t<
restinio::asio_timer_manager_t,
restinio::null_logger_t,
router_t >;
restinio::run(
restinio::on_this_thread< traits_t >()
.port(atoi(port.c_str()))
.address("0.0.0.0")
.request_handler(create_request_handler(metrics_source, intake_source))
.read_next_http_message_timelimit( 10s )
.write_http_response_timelimit( 5s )
.handle_request_timeout( 2s )
);
}
catch (const std::exception &ex) {
LOG(ERROR) << "exception: " << ex.what();
run = false;
}
LOG(INFO) << "status is down";
topology->commit(true);
topology->close();
return 0;
}
import numpy as np
import tensorflow as tf
import pickle
from models.model import Model
class doc2vecForCombiner(Model):
"""
Model only used to load pre-trained doc2vec model.
(It is NOT the doc2vec model itself!)
"""
def __init__(self, path_to_d2v, **kwargs):
super(doc2vecForCombiner, self).__init__(**kwargs)
self.doc2vec_cosine = tf.placeholder(dtype=tf.float32,
shape=(None, 1), name="doc2vec_cosine")
self.trained_d2v = pickle.load(open(path_to_d2v, "rb"))
def get_features(self):
return [self.doc2vec_cosine]
def get_feed_dict(self, contexts, endings, **kwargs):
"""
Using template from feature model...
"""
# Run prediction only for its side effect of computing the doc2vec cosine features
_ = self.trained_d2v.predict(contexts, endings)
feed_dict = {self.doc2vec_cosine: self.trained_d2v.get_features()}
return feed_dict
def predict(self, contexts, endings, **kwargs):
raise NotImplementedError("Should not be used for prediction")
def train_step(self, context_batch, end_batch, single=True, labels=None, **kwargs):
raise NotImplementedError("Model can not be trained")
[STATEMENT]
lemma CHAR_pos_iff: "CHAR > 0 \<longleftrightarrow> (\<exists>n>0. of_nat n = (0::'a))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (0 < CHAR) = (\<exists>n>0. of_nat n = (0::'a))
[PROOF STEP]
using CHAR_eq0_iff neq0_conv
[PROOF STATE]
proof (prove)
using this:
(CHAR = 0) = (\<forall>n>0. of_nat n \<noteq> (0::'a))
(?n \<noteq> 0) = (0 < ?n)
goal (1 subgoal):
1. (0 < CHAR) = (\<exists>n>0. of_nat n = (0::'a))
[PROOF STEP]
by blast
################Class which build the fully convolutional neural net###########################################################
import inspect
import os
from . import TensorflowUtils as utils
import numpy as np
import tensorflow as tf
VGG_MEAN = [103.939, 116.779, 123.68] # Mean pixel values in B, G, R order
#========================Class for building the FCN neural network based on VGG16==================================================================================
class BUILD_NET_VGG16:
def __init__(self, vgg16_npy_path=None):
# if vgg16_npy_path is None:
# path = inspect.getfile(BUILD_NET_VGG16)
# path = os.path.abspath(os.path.join(path, os.pardir))
# path = os.path.join(path, "vgg16.npy")
# vgg16_npy_path = path
#
# print(path)
self.data_dict = np.load(vgg16_npy_path, encoding='latin1', allow_pickle=True).item() #Load weights of trained VGG16 for encoder
print("npy file loaded")
########################################Build Net#####################################################################################################################
def build(self, rgb, NUM_CLASSES, keep_prob): # Build the fully convolutional network (FCN); the encoder reuses the weights of the trained VGG16 network
"""
load variable from npy to build the VGG
:param rgb: rgb image [batch, height, width, 3] values 0-255
"""
self.SumWeights = tf.constant(0.0, name="SumFiltersWeights") #Sum of weights of all filters for weight decay loss
print("build model started")
# rgb_scaled = rgb * 255.0
# Convert RGB to BGR and subtract the per-channel pixel means
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb)
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
#-----------------------------Build network encoder based on VGG16 network and load the trained VGG16 weights-----------------------------------------
#Layer 1
self.conv1_1 = self.conv_layer(bgr, "conv1_1") #Build Convolution layer and load weights
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")#Build Convolution layer +Relu and load weights
self.pool1 = self.max_pool(self.conv1_2, 'pool1') #Max Pooling
# Layer 2
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
# Layer 3
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
# Layer 4
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
# Layer 5
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
##-----------------------Build fully convolutional layers------------------------------------------------------------------------------------
self.W6 = utils.weight_variable([7, 7, 512, 4096], name="W6") # Create tf weights for the new layer, initialized from a normal distribution with mean zero and std 0.02
self.b6 = utils.bias_variable([4096], name="b6") # Create tf bias for the new layer, initialized to 0
self.conv6 = utils.conv2d_basic(self.pool5, self.W6, self.b6) # TODO: check this layer's output size - is it the input size or 1x1?
self.relu6 = tf.nn.relu(self.conv6, name="relu6")
# if FLAGS.debug: utils.add_activation_summary(relu6)
self.relu_dropout6 = tf.nn.dropout(self.relu6, keep_prob=keep_prob) # Apply dropout (should be active only during training)
W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7") # 1x1 convolution
b7 = utils.bias_variable([4096], name="b7")
self.conv7 = utils.conv2d_basic(self.relu_dropout6, W7, b7) # 1x1 convolution
self.relu7 = tf.nn.relu(self.conv7, name="relu7")
# if FLAGS.debug: utils.add_activation_summary(relu7)
self.relu_dropout7 = tf.nn.dropout(self.relu7, keep_prob=keep_prob) # Another dropout (training only)
W8 = utils.weight_variable([1, 1, 4096, NUM_CLASSES], name="W8") # Output depth equals the number of classes, so conv8 is already the per-class prediction map; the class count is flexible, though in MultiNet two classes gave good results
b8 = utils.bias_variable([NUM_CLASSES], name="b8")
self.conv8 = utils.conv2d_basic(self.relu_dropout7, W8, b8)
# annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
#-------------------------------------Build Decoder --------------------------------------------------------------------------------------------------
# now to upscale to actual image size
deconv_shape1 = self.pool4.get_shape() # Output shape for the transpose convolution; only the depth matters, since the output must match pool4's depth for the element-wise addition below
W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_CLASSES], name="W_t1") # 4x4 deconvolution/transpose filter; its input depth is NUM_CLASSES (the depth of conv8)
b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
self.conv_t1 = utils.conv2d_transpose_strided(self.conv8, W_t1, b_t1, output_shape=tf.shape(self.pool4)) # Strided transpose convolution doubles the spatial size (the depth matches pool4 for the element-wise addition)
self.fuse_1 = tf.add(self.conv_t1, self.pool4, name="fuse_1") # Element-wise addition with the pool4 layer from the encoder
deconv_shape2 = self.pool3.get_shape()
W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
self.conv_t2 = utils.conv2d_transpose_strided(self.fuse_1, W_t2, b_t2, output_shape=tf.shape(self.pool3))
self.fuse_2 = tf.add(self.conv_t2, self.pool3, name="fuse_2")
shape = tf.shape(rgb)
W_t3 = utils.weight_variable([16, 16, NUM_CLASSES, deconv_shape2[3].value], name="W_t3")
b_t3 = utils.bias_variable([NUM_CLASSES], name="b_t3")
self.Prob = utils.conv2d_transpose_strided(self.fuse_2, W_t3, b_t3, output_shape=[shape[0], shape[1], shape[2], NUM_CLASSES], stride=8)
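# Shape bookkeeping (the classic FCN-8s skip pattern): pool5 is H/32 x W/32,
# conv_t1 doubles that to H/16 to match pool4, conv_t2 doubles again to H/8 to
# match pool3, and the final stride-8 transpose convolution restores H x W.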
#--------------------Transform probability vectors to label maps-----------------------------------------------------------------
self.Pred = tf.argmax(self.Prob, axis=3, name="Pred")
print("FCN model built")
#####################################################################################################################################################
def max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
############################################################################################################################################################
def conv_layer(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
############################################################################################################################################################
def conv_layer_NoRelu(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
return bias
#########################################Build fully convolutional layer##############################################################################################################
def fc_layer(self, bottom, name):
with tf.variable_scope(name):
shape = bottom.get_shape().as_list()
dim = 1
for d in shape[1:]:
dim *= d
x = tf.reshape(bottom, [-1, dim])
weights = self.get_fc_weight(name)
biases = self.get_bias(name)
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
return fc
######################################Get VGG filter ############################################################################################################
def get_conv_filter(self, name):
var=tf.Variable(self.data_dict[name][0], name="filter_" + name)
self.SumWeights+=tf.nn.l2_loss(var)
return var
##################################################################################################################################################
def get_bias(self, name):
return tf.Variable(self.data_dict[name][1], name="biases_"+name)
#############################################################################################################################################
def get_fc_weight(self, name):
return tf.Variable(self.data_dict[name][0], name="weights_"+name)
#include <albert/bt/peer_connection.hpp>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <random>
#include <set>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/placeholders.hpp>
#include <boost/bind.hpp>
#include <albert/bencode/bencoding.hpp>
#include <albert/bt/bt.hpp>
#include <albert/bt/peer.hpp>
#include <albert/log/log.hpp>
#include <albert/u160/u160.hpp>
#include <albert/utils/utils.hpp>
using boost::asio::ip::tcp;
using boost::asio::ip::udp;
namespace {
using bdict = std::map<std::string, std::shared_ptr<albert::bencoding::Node>>;
auto newdict(const bdict &dic) {
return std::make_shared<albert::bencoding::DictNode>(dic);
};
auto newint(int64_t i) {
return std::make_shared<albert::bencoding::IntNode>(i);
};
}
namespace albert::bt::peer {
static std::string get_string_or_throw(
const std::map<std::string, std::shared_ptr<bencoding::Node>> &dict, const std::string &key, const std::string &context) {
if (dict.find(key) == dict.end()) {
throw InvalidPeerMessage(context + ", '" + key + "' not found");
}
auto node = std::dynamic_pointer_cast<bencoding::StringNode>(dict.at(key));
if (!node) {
throw InvalidPeerMessage(context + ", '" + key + "' is not a string");
}
return *node;
}
static int64_t get_int64_or_throw(
const std::map<std::string, std::shared_ptr<bencoding::Node>> &dict, const std::string &key, const std::string &context) {
if (dict.find(key) == dict.end()) {
throw InvalidPeerMessage(context + ", '" + key + "' not found");
}
auto node = std::dynamic_pointer_cast<bencoding::IntNode>(dict.at(key));
if (!node) {
throw InvalidPeerMessage(context + ", '" + key + "' is not an int");
}
return *node;
}
static std::map<std::string, std::shared_ptr<bencoding::Node>> get_dict_or_throw(
const std::map<std::string, std::shared_ptr<bencoding::Node>> &dict, const std::string &key, const std::string &context) {
if (dict.find(key) == dict.end()) {
throw InvalidPeerMessage(context + ", '" + key + "' not found");
}
auto node = std::dynamic_pointer_cast<bencoding::DictNode>(dict.at(key));
if (!node) {
throw InvalidPeerMessage(context + ", '" + key + "' is not a dict");
}
return node->dict();
}
namespace {
std::string make_message(uint8_t type, const uint8_t *data, size_t size) {
std::stringstream ss;
auto data_size = size + 1;
ss.put((uint8_t)((data_size >> 24u) & 0xff));
ss.put((uint8_t)((data_size >> 16u) & 0xff));
ss.put((uint8_t)((data_size >> 8u) & 0xff));
ss.put((uint8_t)((data_size >> 0u) & 0xff));
ss.put(type);
if (data) {
ss.write((const char*)data, size);
}
return ss.str();
}
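// BitTorrent wire framing (BEP 3): each message is a 4-byte big-endian length
// prefix that counts the type byte plus payload, then the 1-byte type, then the
// payload. For example, a bare "choke" (type 0, no payload) serializes to
// 00 00 00 01 00, which is exactly what make_message(MessageTypeChoke, nullptr, 0)
// above produces.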
std::string make_extended(const bencoding::Node &payload, uint8_t extended_id) {
std::stringstream ss;
ss.put(extended_id);
payload.encode(ss);
auto s = ss.str();
return make_message(MessageTypeExtended, (const uint8_t*)s.data(), s.size());
}
std::string make_empty_message(uint8_t message_type) {
std::stringstream ss;
ss.put(message_type);
auto s = ss.str();
return make_message(MessageTypeExtended, (const uint8_t*)s.data(), s.size());
}
}
std::mutex PeerConnection::pointers_lock_{};
std::set<PeerConnection*> PeerConnection::pointers_{};
PeerConnection::PeerConnection(
boost::asio::io_context &io_context,
const u160::U160 &self,
const u160::U160 &target,
uint32_t bind_ip,
uint16_t bind_port,
uint32_t ip,
uint16_t port,
bool use_utp)
: self_(self),
target_(target),
peer_(std::make_unique<Peer>(ip, port)) {
{
std::unique_lock _(pointers_lock_);
pointers_.insert(this);
}
if (use_utp) {
socket_ = std::make_shared<transport::UTPSocket>(io_context, udp::endpoint(boost::asio::ip::address_v4(bind_ip), bind_port));
} else {
socket_ = std::make_unique<transport::TCPSocket>(io_context, tcp::endpoint(boost::asio::ip::address_v4(bind_ip), bind_port));
}
}
PeerConnection::~PeerConnection() {
{
std::unique_lock _(pointers_lock_);
pointers_.erase(this);
}
}
void PeerConnection::connect(
std::function<void(const boost::system::error_code &)> connect_handler,
std::function<void(int, size_t)> extended_handshake_handler) {
// Start the asynchronous connect operation.
connect_handler_ = std::move(connect_handler);
extended_handshake_handler_ = std::move(extended_handshake_handler);
LOG(debug) << "PeerConnection::connect, connecting to " << peer_->to_string();
// Capture weak_from_this() so the pending callback does not extend this object's lifetime; see https://stackoverflow.com/a/35469759
socket_->async_connect(
boost::asio::ip::address_v4(peer_->ip()),
peer_->port(),
[pc = weak_from_this()](const boost::system::error_code &e) {
if (!pc.expired()) {
pc.lock()->handle_connect(e);
}
});
}
void PeerConnection::handle_connect(
const boost::system::error_code &ec) {
if (!socket_->is_open()) {
LOG(debug) << "Connect timed out " << peer_->to_string();
connection_status_ = ConnectionStatus::Disconnected;
failed_reason_ = "timeout";
connect_handler_(boost::asio::error::timed_out);
connect_handler_ = nullptr;
} else if (ec) {
LOG(debug) << "Connect error: " << peer_->to_string() << " " << ec.message();
close();
connection_status_ = ConnectionStatus::Disconnected;
failed_reason_ = "connect_error: " + ec.message();
connect_handler_(ec);
connect_handler_ = nullptr;
} else {
connection_status_ = ConnectionStatus::Connected;
LOG(info) << "PeerConnection: connected to " << this->peer_->to_string();
continue_receive();
send_handshake();
}
}
void PeerConnection::send_handshake() {
{
std::stringstream ss;
self_.encode(ss);
auto s = ss.str();
if (s.size() != u160::U160Length) {
throw std::runtime_error("self_ Invalid node id length, s.size() != NodeIDLength");
}
memcpy(sent_handshake_.sender_id, s.data(), u160::U160Length);
}
{
std::stringstream ss;
target_.encode(ss);
auto s = ss.str();
if (s.size() != u160::U160Length) {
throw std::runtime_error("target_ Invalid node id length, s.size() != NodeIDLength");
}
memcpy(sent_handshake_.info_hash, s.data(), u160::U160Length);
}
size_t write_size = sizeof(sent_handshake_);
std::copy(
(char*)&sent_handshake_,
(char*)&sent_handshake_ + sizeof(sent_handshake_),
write_buffer_.begin());
socket_->async_send(
boost::asio::buffer(write_buffer_.data(), write_size),
[pc_weak = weak_from_this()](const boost::system::error_code &err, size_t bytes_transferred) {
if (err) {
auto pc = pc_weak.lock();
if (pc) {
pc->connection_status_ = ConnectionStatus::Disconnected;
pc->failed_reason_ = "failed_to_handshake: " + err.message();
pc->close();
LOG(error) << "Failed to send handshake " + err.message();
}
}
});
auto m = bdict();
for (auto &item : extended_message_id_) {
m[item.second] = std::make_shared<bencoding::IntNode>(item.first);
}
// extended handshake
bencoding::DictNode node(
bdict({
{
"m", newdict(m)
},
{"p", newint(6881)},
{"reqq", newint(500)},
{"v", std::make_shared<bencoding::StringNode>("wtf/0.0")}
}));
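// Per BEP 10, the "m" dictionary advertises the extension names we understand,
// mapped to the local message ids the peer should use when sending them to us;
// "p" is our listen port, "reqq" the number of outstanding requests we accept,
// and "v" a free-form client version string.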
socket_->async_send(
boost::asio::buffer(make_extended(node, 0)),
[pc_weak = weak_from_this()](const boost::system::error_code &err, size_t bytes_transferred) {
if (err) {
auto pc = pc_weak.lock();
if (pc) {
pc->connection_status_ = ConnectionStatus::Disconnected;
pc->failed_reason_ = "failed_to_extended_handshake: " + err.message();
pc->close();
LOG(error) << "Failed to send extended handshake " + err.message();
}
}
});
}
void PeerConnection::handle_message(uint8_t type, gsl::span<uint8_t> data) {
if (type == MessageTypeChoke) {
peer_choke_ = true;
} else if (type == MessageTypeUnchoke) {
if (peer_choke_) {
LOG(debug) << "peer " << peer_->to_string() << " unchoke";
peer_choke_ = false;
if (unchoke_handler_) {
unchoke_handler_();
}
}
} else if (type == MessageTypeInterested) {
LOG(debug) << "peer " << peer_->to_string() << " interested";
peer_interested_ = true;
} else if (type == MessageTypeNotInterested) {
LOG(debug) << "peer " << peer_->to_string() << " interested";
peer_interested_ = false;
} else if (type == MessageTypeBitfield) {
LOG(debug) << "Bitfield: " << utils::hexdump(data.data(), data.size(), true);
peer_bitfield_ = std::vector(data.begin(), data.end());
} else if (type == MessageTypeHave) {
if (data.size() != sizeof(uint32_t)) {
LOG(error) << "invalid have message, data length != " << sizeof(uint32_t);
} else {
auto piece = utils::network_to_host(*(uint32_t*)data.data());
set_peer_has_piece(piece);
}
} else if (type == MessageTypeRequest) {
LOG(info) << "Request ";
} else if (type == MessageTypePiece) {
std::stringstream ss(std::string((const char*)data.data(), (const char*)data.data()+2*sizeof(uint32_t)));
uint32_t index = 0, begin = 0;
ss.read((char*)&index, sizeof(index));
index = utils::network_to_host(index);
ss.read((char*)&begin, sizeof(begin));
begin = utils::network_to_host(begin);
size_t block_size = data.size() - 2*sizeof(uint32_t);
if (block_handler_) {
block_handler_(index, begin, gsl::span<uint8_t>(data.data() + 2*sizeof(uint32_t), block_size));
}
} else if (type == MessageTypeExtended) {
if (data.size() > 0) {
uint8_t extended_id = data[0];
auto content_size = data.size() - 1;
std::stringstream ss(std::string((const char*)data.data() + 1, content_size));
auto node = bencoding::Node::decode(ss);
std::istreambuf_iterator<char> eos;
std::string rest(std::istreambuf_iterator<char>(ss), eos);
std::vector<uint8_t> appended_data(rest.size());
std::copy(rest.begin(), rest.end(), appended_data.begin());
if (auto dict = std::dynamic_pointer_cast<bencoding::DictNode>(node); dict) {
handle_extended_message(extended_id, dict, appended_data);
} else {
LOG(error) << "Invalid extended message, root node is not a dict. Closing connection";
close();
}
} else {
LOG(error) << "PeerConnection: Invalid extended message, expected size";
close();
}
} else {
LOG(debug) << "PeerConnection: Unknown message type ignored " << (int)type;
}
}
void PeerConnection::handle_extended_message(
uint8_t extended_id,
std::shared_ptr<bencoding::DictNode> msg,
const std::vector<uint8_t> &appended_data
) {
auto &dict = msg->dict();
if (extended_id == 0) {
// extended handshake
std::stringstream ss;
msg->encode(ss, bencoding::EncodeMode::JSON);
extended_handshake_ = msg;
auto total_size = get_int64_or_throw(dict, "metadata_size", "ut_metadata");
m_dict_ = get_dict_or_throw(dict, "m", "ut_metadata");
piece_count_ = ceil(double(total_size) / MetadataPieceSize);
if (piece_count_ <= 0) {
throw InvalidPeerMessage("piece count cannot <= zero");
}
if (extended_handshake_handler_) {
extended_handshake_handler_(piece_count_, total_size);
extended_handshake_handler_ = nullptr;
}
LOG(debug) << "Extended handshake: from " << peer_->to_string() << std::endl
<< "total pieces: " << piece_count_
<< "data: " << ss.str();
} else {
if (extended_message_id_.find(extended_id) == extended_message_id_.end()) {
LOG(error) << "Invalid extended message, unknown exteneded id " << extended_id;
} else {
auto message_type = extended_message_id_[extended_id];
if (message_type == "ut_metadata") {
auto msg_type = get_int64_or_throw(dict, "msg_type", "ut_metadata");
if (msg_type == ExtendedMessageTypeRequest) {
LOG(error) << "msg_type request not implemented";
} else if (msg_type == ExtendedMessageTypeData) {
auto piece = get_int64_or_throw(dict, "piece", "ut_metadata");
// auto total_size = get_int64_or_throw(dict, "total_size", "ut_metadata");
piece_data_handler_(piece, appended_data);
} else if (msg_type == ExtendedMessageTypeReject) {
LOG(error) << "msg_type reject not implemented";
} else {
LOG(error) << "unknown msg_type";
}
} else {
LOG(error) << "Invalid extended message, unknown message type " << message_type;
}
}
}
}
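// Note on ut_metadata (BEP 9): the torrent metadata (info dictionary) is
// transferred in 16 KiB pieces; msg_type 0 requests a piece, 1 carries piece
// data appended after the bencoded header (which is why appended_data is split
// off above), and 2 rejects a request. MetadataPieceSize is assumed to be that
// 16 KiB piece-size constant.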
void PeerConnection::handle_receive(const boost::system::error_code &err, size_t bytes_transferred) {
// we may arrive here if the torrent download completes before handle_receive() is called
if (status() == ConnectionStatus::Disconnected) {
return;
}
// LOG(info) << "PeerConnection: received " << bytes_transferred << std::endl
// << utils::hexdump(read_buffer_.data(), bytes_transferred, true);
if (err == boost::asio::error::eof) {
connection_status_ = ConnectionStatus::Disconnected;
failed_reason_ = "eof";
return;
} else if (err == boost::asio::error::connection_reset) {
LOG(warning) << "Peer reset the connection " << peer_->to_string() << ", id " << peer_id_.to_string();
connection_status_ = ConnectionStatus::Disconnected;
failed_reason_ = "reset";
return;
} else if (err) {
connection_status_ = ConnectionStatus::Disconnected;
LOG(error) << "PeerConnection: unhandled error when reading to from socket " + err.message();
failed_reason_ = "error: " + err.message();
return;
}
if (connection_status_ == ConnectionStatus::Connecting) {
connection_status_ = ConnectionStatus::Connected;
}
read_ring_.appended(bytes_transferred);
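// Messages can arrive split across reads, so bytes accumulate in the ring
// buffer and a handshake or length-prefixed message is only popped once it is
// fully buffered; a partially received message sets the segmentation state and
// we wait for the next receive.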
try {
while (read_ring_.data_size() > 0) {
// Handle handshake
if (!handshake_completed_) {
if (read_ring_.has_data(sizeof(Handshake))) {
// pop front sizeof(Handshake)
read_ring_.pop_data(&received_handshake_, sizeof(Handshake));
std::stringstream ss(
std::string((char *) &received_handshake_.sender_id,
sizeof(u160::U160)));
peer_id_ = u160::U160::decode(ss);
handshake_completed_ = true;
auto ss1 = std::stringstream(std::string((char *) &received_handshake_.info_hash, sizeof(u160::U160)));
auto received_info_hash = u160::U160::decode(ss1);
// if (received_info_hash != target_) {
// LOG(error) << "Peer info_hash not matched, closing connection. target: " << target_.to_string() << ", peer: " << received_info_hash.to_string();
// close();
// return;
// } else {
if (connect_handler_) {
connect_handler_(boost::system::error_code());
connect_handler_ = nullptr;
}
// }
} else {
LOG(debug) << "handshake not complete, segmented " << peer_->to_string();
break;
}
} else {
uint32_t message_size = 0;
if (message_segmented) {
message_size = last_message_size_;
} else {
if (read_ring_.has_data(sizeof(uint32_t))) {
read_ring_.pop_data(&message_size, sizeof(uint32_t));
message_size = utils::network_to_host<uint32_t>(message_size);
} else {
LOG(debug) << "message size not complete, segmented " << peer_->to_string();
break;
}
}
if (message_size == 0) {
// This is a keep alive message
handle_keep_alive();
} else {
if (read_ring_.has_data(message_size)) {
uint8_t message_type;
read_ring_.pop_data(&message_type, sizeof(message_type));
auto content_size = message_size - sizeof(message_type);
auto buf = read_ring_.use_data(content_size);
handle_message(message_type, buf);
read_ring_.skip_data(content_size);
message_segmented = false;
} else {
last_message_size_ = message_size;
message_segmented = true;
LOG(debug) << "message content not complete, segmented " << peer_->to_string() << " " << read_ring_.data_size() << "/" << message_size;
break;
}
}
}
}
if (read_ring_.data_size() == 0) {
LOG(debug) << "handle_receive complete because read_ring_ is empty";
}
} catch (const bencoding::InvalidBencoding &e) {
LOG(error) << "parse BT handshake: Invalid bencoding: " << e.what();
failed_reason_ = std::string("close: InvalidBencoding, ") + e.what();
close();
} catch (const InvalidPeerMessage &e){
LOG(error) << "Invalid peer message " << e.what();
failed_reason_ = std::string("close: InvalidPeerMessage, ") + e.what();
close();
}
continue_receive();
}
void PeerConnection::send_metadata_request(int64_t piece) {
if (!extended_handshake_) {
throw InvalidStatus("Cannot send metadata request before receiving extended handshake");
} else {
if (!has_peer_extended_message(MetadataMessage)) {
throw InvalidStatus("Peer(" + peer_->to_string() + ") does not support metadata message");
} else {
auto extended_id = get_peer_extended_message_id(MetadataMessage);
// extended message
bencoding::DictNode node(
bdict({
{
"msg_type",
newint(ExtendedMessageTypeRequest),
},
{
"piece",
newint(piece),
}
}));
socket_->async_send(
boost::asio::buffer(make_extended(node, extended_id)),
[pc_weak = weak_from_this()](const boost::system::error_code &err, size_t bytes_transferred) {
if (err) {
auto pc = pc_weak.lock();
if (pc) {
pc->connection_status_ = ConnectionStatus::Disconnected;
pc->failed_reason_ = "failed_to_send: " + err.message();
pc->close();
LOG(error) << "Failed to send metadata request " + err.message();
}
}
});
}
}
}
uint8_t PeerConnection::get_peer_extended_message_id(const std::string &message_name) {
return get_int64_or_throw(m_dict_, message_name, "PeerConnection::get_peer_extended_message_id");
}
void PeerConnection::close() {
socket_->close();
connection_status_ = ConnectionStatus::Disconnected;
}
uint8_t PeerConnection::has_peer_extended_message(const std::string &message_name) const {
// Extension names advertised by the peer live in the "m" dictionary of its
// extended handshake (m_dict_); "supported" means the name is present.
return m_dict_.find(message_name) != m_dict_.end();
}
void PeerConnection::start_metadata_transfer(
std::function<void(
int piece,
const std::vector<uint8_t> &piece_data
)> piece_data_handler) {
piece_data_handler_ = std::move(piece_data_handler);
// start metadata pieces transfer, but in a random order, to increase concurrency
std::vector<int> piece_ids;
for (int i = 0; i < piece_count_; i++) {
piece_ids.push_back(i);
}
std::shuffle(piece_ids.begin(), piece_ids.end(), std::random_device{});
for (auto i : piece_ids) {
LOG(debug) << "sending metadata request to " << peer_->to_string() << ", " << i;
send_metadata_request(i);
}
}
void PeerConnection::send_peer_message(uint8_t type, std::vector<uint8_t> data) {
uint32_t packet_size = data.size() + sizeof(uint8_t);
uint32_t total_size = packet_size + sizeof(uint32_t);
std::stringstream ss;
auto send_write_size = utils::host_to_network(packet_size);
ss.write((const char*)&send_write_size, sizeof(send_write_size));
ss.put(type);
ss.write((const char*)data.data(), data.size());
assert(total_size < write_buffer_.size());
auto s = ss.str();
std::copy(
s.begin(),
s.end(),
write_buffer_.begin());
socket_->async_send(
boost::asio::buffer(write_buffer_.data(), total_size),
[pc_weak = weak_from_this()](const boost::system::error_code &err, size_t bytes_transferred) {
if (err) {
auto pc = pc_weak.lock();
if (pc) {
pc->connection_status_ = ConnectionStatus::Disconnected;
pc->failed_reason_ = "failed_to_send: " + err.message();
pc->close();
LOG(error) << "Failed to send peer message " + err.message();
}
}
});
}
void PeerConnection::interest(std::function<void()> unchoke_handler) {
unchoke_handler_ = std::move(unchoke_handler);
send_peer_message(MessageTypeInterested, {});
if (!peer_choke_) {
unchoke_handler_();
}
}
void PeerConnection::request(size_t index, size_t begin, size_t length) {
std::stringstream ss;
auto index_n = utils::host_to_network<uint32_t>(index);
ss.write((const char*)&index_n, sizeof(index_n));
auto begin_n = utils::host_to_network<uint32_t>(begin);
ss.write((const char*)&begin_n, sizeof(begin_n));
auto length_n = utils::host_to_network<uint32_t>(length);
ss.write((const char*)&length_n, sizeof(length_n));
auto s = ss.str();
std::vector<uint8_t> data((const uint8_t*)s.data(), (const uint8_t*)s.data() + s.size());
LOG(debug) << "requesting piece " << index << " " << begin << " " << length;
send_peer_message(MessageTypeRequest, data);
}
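// Bitfield layout (BEP 3): byte piece/8 holds eight pieces with the
// lowest-numbered piece in the most significant bit, so piece i maps to bit
// 7 - i % 8 of byte i / 8. Worked example: piece 10 lives in byte 1, bit 5.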
void PeerConnection::set_peer_has_piece(size_t piece) {
size_t byte = piece / 8;
auto bit = 7u - (piece % 8u);
if (byte < peer_bitfield_.size()) {
peer_bitfield_[byte] |= 1u << bit;
} else {
LOG(error) << "cannot set piece " << piece << ", out of range: " << (peer_bitfield_.size()*8);
}
}
bool PeerConnection::has_piece(size_t piece) const {
size_t byte = piece / 8u;
auto bit = 7u - (piece % 8u);
if (byte < peer_bitfield_.size()) {
return (peer_bitfield_[byte] >> bit) & 1u;
} else {
throw std::invalid_argument("cannot get piece " + std::to_string(piece) + ", out of range: " + std::to_string(peer_bitfield_.size()*8));
}
}
size_t PeerConnection::next_valid_piece(size_t piece) const {
for (size_t i = piece; i < piece_count_; i++) {
if (has_piece(i)) {
return i;
}
}
return piece_count_;
}
void PeerConnection::handle_keep_alive() {
LOG(info) << "Peer keep alive " << peer_->to_string();
}
void PeerConnection::continue_receive() {
auto buf = read_ring_.use_for_append(MCU);
socket_->async_receive(
boost::asio::buffer(buf.data(), buf.size()),
[pc = weak_from_this()](const boost::system::error_code &e, size_t bytes_transfered) {
if (!pc.expired()) {
pc.lock()->handle_receive(e, bytes_transfered);
}
});
}
size_t PeerConnection::memory_size() const {
size_t ret = sizeof(*this);
ret += peer_bitfield_.size() * sizeof(peer_bitfield_[0]);
return ret;
}
}
import time
from datasets import create_dataset
from modules import create_model
from utils.visdom.visualizer import Visualizer
from utils import startup
import os
import utils.tools as util
import numpy as np
import evaluation
def train(config):
dataset = create_dataset(config)
model = create_model(config)
model.setup(config)
dataset_size = len(dataset) # get the size of dataset
print('The number of training images = %d' % dataset_size)
test_config = config.copy()
test_config['status'] = 'test'
test_config['num_threads'] = 1
test_dataset = create_dataset(test_config)
test_dataset_size = len(test_dataset)
print('The number of testing images = %d' % test_dataset_size)
visualizer = Visualizer(config) # create visualizer to show/save images
total_iters = 0 # total number of training iterations across all epochs
t_data = 0
if int(config['resume_epoch']) > 0:
print("\n resume traing from rpoch " + str(int(config['resume_epoch']))+" ...")
model.resume_scheduler(int(config['resume_epoch']))
model.load_networks(config['resume_epoch'])
model.load_optimizers(config['resume_epoch'])
# outer loop over epochs; we save the model via <epoch_count> and <epoch_count>+<save_latest_freq> options
for epoch in range(int(config['resume_epoch'])+1, int(config['epoch']) +1):
epoch_start_time = time.time() # note the starting time for current epoch
iter_data_time = time.time() # note the starting time for datasets iteration
epoch_iter = 0 # iteration times for current epoch, reset to 0 for each epoch
# inner loop over a single epoch
for i, data in enumerate(dataset):
iter_start_time = time.time() # note the starting time for the current iteration
if total_iters % int(config['print_freq']) == 0: # record data-loading time every <print_freq> iterations
t_data = iter_start_time - iter_data_time
visualizer.reset()
total_iters = total_iters + int(config['train_batch_size'])
epoch_iter = epoch_iter + int(config['train_batch_size'])
model.set_input(data) # feed the loaded batch to the model
model.optimize_parameters() # compute loss and gradients, then update model parameters
if total_iters % int(config['display_freq']) == 0: # show running results in visdom every <display_freq> iterations
save_result = total_iters % int(config['update_html_freq']) == 0 # save running results to html every <update_html_freq> iterations
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % int(config['print_freq']) == 0: # print training losses to console every <print_freq> iterations
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / int(config['train_batch_size'])
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if int(config['display_id']) > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if epoch % int(config['save_epoch_freq']) == 0: # save the model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks(epoch)
model.save_optimizers(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, int(config['epoch']), time.time() - epoch_start_time))
val(config=test_config, epoch=epoch, dataset=test_dataset,model=model)
# update learning rate after each epoch
model.update_learning_rate()
def val(config,epoch,dataset,model):
result_root_path = os.path.join(config['checkpoints_dir'], config['name'], config['results_dir'],"epoch"+str(epoch))
util.mkdir(result_root_path)
model.eval()
save_npy = np.ndarray(shape=(len(dataset) + 1, 2), dtype=float)
save_npy[0][0], save_npy[0][1] = -1, -1
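# Layout of the saved array: row 0 holds a (-1, -1) sentinel, and row k stores
# the predicted (x, y) regression point for the sample whose "PATH" index is k.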
print("Start evaluating epoch "+str(epoch)+"...")
for i, data in enumerate(dataset):
model.set_input(data) # feed the test batch to the model
model.test() # run the forward pass
datapoints = (model.test_result[0][1]).cpu().data.numpy()
index = data["PATH"].cpu().data.numpy()[0]
save_npy[index][0], save_npy[index][1] = datapoints[0][0], datapoints[0][1]
dist_img = model.test_result[1][1]
util.save_image(util.tensor2im(dist_img),os.path.join(result_root_path,str(index)+".png"))
model.train()
np.save(os.path.join(result_root_path, 'regression.npy'), save_npy)
l2_dist,easy_dist,hard_dist = evaluation.evaluate_detailed(save_npy)
text = open(os.path.join(config['checkpoints_dir'], config['name'], config['results_dir'],"evaluation.txt"),"a+")
text.writelines("EPOCH "+str(epoch)+": "+str(round(l2_dist,4))+" "+str(round(easy_dist,4)) + ' '+str(round(hard_dist,4))+"\n")
text.close()
print("Testing npy result have been saved! Evaluation distance: "+str(round(l2_dist))+"("+str(round(easy_dist))+","+str(round(hard_dist))+")")
if __name__ == '__main__':
configs_stage1 = startup.SetupConfigs(config_path='configs/TCLNET_STAGE1.yaml')
configs_stage2 = startup.SetupConfigs(config_path='configs/TCLNET_STAGE2.yaml')
configs_stage1 = configs_stage1.setup()
configs_stage2 = configs_stage2.setup()
if (configs_stage1['status'] == "train") and (configs_stage2['status'] == 'train'):
train(configs_stage1)
train(configs_stage2)
import math
from typing import Optional
import numpy
from sklearn.decomposition import SparseCoder
from aydin.util.crop.rep_crop import representative_crop
from aydin.util.dictionary.dictionary import (
fixed_dictionary,
extract_normalised_vectorised_patches,
)
from aydin.util.j_invariance.j_invariant_classic import calibrate_denoiser_classic
from aydin.util.j_invariance.j_invariant_smart import calibrate_denoiser_smart
from aydin.util.log.log import lsection, lprint
from aydin.util.patch_size.patch_size import default_patch_size
from aydin.util.patch_transform.patch_transform import reconstruct_from_nd_patches
def calibrate_denoise_dictionary_fixed(
image,
patch_size: int = None,
try_omp: bool = True,
try_lasso_lars: bool = False,
try_lasso_cd: bool = False,
try_lars: bool = False,
try_threshold: bool = False,
num_sparsity_values_to_try: int = 6,
dictionaries: str = 'dct',
crop_size_in_voxels: Optional[int] = None,
max_num_evaluations: int = 256,
display_dictionary: bool = False,
display_images: bool = False,
**other_fixed_parameters,
):
"""
Calibrates the dictionary-based denoiser for the given image and returns the
optimal parameters obtained using the N2S loss.
Parameters
----------
image : ArrayLike
Image to calibrate denoiser for.
patch_size : int
Patch size. Common parameter to both 'learned',
or 'fixed' dictionary types.
(advanced)
try_omp: bool
Whether OMP should be tried as a sparse coding
algorithm during calibration.
try_lasso_lars: bool
Whether LASSO-LARS should be tried as a sparse
coding algorithm during calibration.
try_lasso_cd: bool
Whether LASSO-CD should be tried as a sparse
coding algorithm during calibration.
try_lars: bool
Whether LARS should be tried as a sparse coding
algorithm during calibration.
try_threshold: bool
Whether 'threshold' should be tried as a sparse
coding algorithm during calibration.
num_sparsity_values_to_try: int
Maximum number of sparsity values to try during calibration
(advanced)
dictionaries: str
Fixed dictionaries to be included. Can be: 'dct',
'dst'.
crop_size_in_voxels: int or None for default
Number of voxels for crop used to calibrate
denoiser.
(advanced)
max_num_evaluations: int
Maximum number of evaluations for finding the
optimal parameters.
(advanced)
display_dictionary: bool
If True displays dictionary with napari -- for
debug purposes.
display_images: bool
When True the denoised images encountered during
optimisation are shown.
other_fixed_parameters: dict
Any other fixed parameters
Returns
-------
Denoising function, dictionary containing optimal parameters,
and free memory needed in bytes for computation.
"""
# Convert image to float if needed:
image = image.astype(dtype=numpy.float32, copy=False)
# Normalise patch size:
patch_size = default_patch_size(image, patch_size, odd=True)
# obtain representative crop, to speed things up...
crop = representative_crop(image, crop_size=crop_size_in_voxels)
# Partial function:
def _denoise_dictionary(
image, max_freq: float = 0.5, coding_mode: str = 'omp', **parameters
):
dictionary = fixed_dictionary(
image, patch_size=patch_size, dictionaries=dictionaries, max_freq=max_freq
)
denoised_image = denoise_dictionary_fixed(
image, dictionary=dictionary, coding_mode=coding_mode, **parameters
)
return denoised_image
# coding modes to try:
coding_modes = []
if try_omp:
coding_modes.append('omp')
if try_lasso_lars:
coding_modes.append('lasso_lars')
if try_lasso_cd:
coding_modes.append('lasso_cd')
if try_lars:
coding_modes.append('lars')
if try_threshold:
coding_modes.append('threshold')
# Parameters to test when calibrating the denoising algorithm
parameter_ranges = {
'max_freq': (0.01, 1.3),
# numpy.arange(0.05, 1.4, 0.05), # (0.01, 1.3),
'coding_mode': coding_modes,
# 'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'
}
# Calibrate denoiser:
best_parameters = calibrate_denoiser_smart(
crop,
_denoise_dictionary,
denoise_parameters=parameter_ranges,
max_num_evaluations=max_num_evaluations,
)
lprint(f"Best parameters: {best_parameters}")
# Parameters to test when calibrating the denoising algorithm
parameter_ranges = {
'sparsity': [1, 2, 3, 4, 8, 16][:num_sparsity_values_to_try],
# 'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'
}
# Calibrate denoiser:
best_parameters = (
calibrate_denoiser_classic(
crop,
_denoise_dictionary,
denoise_parameters=parameter_ranges,
other_fixed_parameters=best_parameters | other_fixed_parameters,
display_images=display_images,
)
| best_parameters
| other_fixed_parameters
)
# Cleaning up a bit:
best_parameters.pop('other_fixed_parameters')
lprint(f"Final best parameters: {best_parameters}")
# we need to replace the max freq argument with the actual dictionary
# because that's what our client facing denoise function expects:
max_freq = best_parameters.pop('max_freq')
# Dictionary to use based on fixed and best parameters:
dictionary = fixed_dictionary(
image, patch_size=patch_size, dictionaries=dictionaries, max_freq=max_freq
)
best_parameters = best_parameters | {'dictionary': dictionary}
if display_dictionary:
import napari
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(
dictionary.reshape(len(dictionary), *patch_size), name='dictionary'
)
# Memory needed:
memory_needed = 2 * image.nbytes + 6 * image.nbytes * math.prod(patch_size)
return denoise_dictionary_fixed, best_parameters, memory_needed
def denoise_dictionary_fixed(
image,
dictionary=None,
coding_mode: str = 'omp',
sparsity: int = 1,
gamma: float = 0.001,
multi_core: bool = True,
**kwargs,
):
"""
Denoises the given image using sparse-coding over a fixed
dictionary of nD image patches. The dictionary learning and
patch sparse coding uses scikit-learn's Batch-OMP implementation.
Parameters
----------
image: ArrayLike
nD image to be denoised
dictionary: ArrayLike
        Dictionary to use for denoising the image via sparse coding.
By default (None) a fixed dictionary is used.
coding_mode: str
Type of sparse coding, can be: 'lasso_lars', 'lasso_cd', 'lars', 'omp',
or 'threshold'
sparsity: int
How many atoms are used to represent each patch after denoising.
gamma: float
        How much the periphery of the patches contributes to the final denoised
image. Larger gamma means that we keep more of the central pixels of the
patches, smaller values lead to a more uniform contribution.
A value of 1 corresponds to the default blackman window.
multi_core: bool
By default we use as many cores as possible, in some cases, for small
(test) images, it might be faster to run on a single core instead of
starting the whole parallelization machinery.
Returns
-------
Denoised image
"""
# Convert image to float if needed:
image = image.astype(dtype=numpy.float32, copy=False)
if dictionary is None:
# learn dictionary with all defaults:
dictionary = fixed_dictionary(image)
# we can infer patch shape from dictionary:
patch_size = dictionary.shape[1:]
with lsection(f"Denoise image of shape {image.shape} and dtype {image.dtype}"):
# vectorise dictionary:
vectorised_dictionary = dictionary.reshape(len(dictionary), -1)
# setup sparse coder:
coder = SparseCoder(
vectorised_dictionary,
transform_algorithm=coding_mode,
transform_n_nonzero_coefs=sparsity,
n_jobs=-1 if multi_core else 1,
)
# First we extract _all_ patches from the image, without any normalisation:
with lsection("Extract all patches from image..."):
patches, patch_means, _ = extract_normalised_vectorised_patches(
image,
patch_size=patch_size,
max_patches=None,
normalise_means=True,
normalise_stds=False,
output_norm_values=True,
)
with lsection("Obtain sparse codes for each patch..."):
code = coder.transform(patches)
with lsection("Reconstruct patches from codes..."):
denoised_patches = numpy.dot(code, vectorised_dictionary)
# Add back means:
denoised_patches += patch_means
with lsection("Reshape to patches..."):
denoised_patches = denoised_patches.reshape(len(patches), *patch_size)
with lsection("Reconstructing image from patches..."):
# Reconstructs image from denoised patches:
denoised_image = reconstruct_from_nd_patches(
patches=denoised_patches, image_shape=image.shape, gamma=gamma
)
return denoised_image
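# Usage sketch (hypothetical image and noise level, kept as comments so the
# module has no import-time side effects):
#
#   from skimage.data import camera
#   noisy = camera().astype(numpy.float32)
#   noisy += numpy.random.normal(scale=16, size=noisy.shape).astype(numpy.float32)
#   denoise_fun, params, mem = calibrate_denoise_dictionary_fixed(noisy)
#   denoised = denoise_fun(noisy, **params)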
|
{"hexsha": "d2f76b4a17b595541432f54d6382748cea20371f", "size": 9664, "ext": "py", "lang": "Python", "max_stars_repo_path": "aydin/it/classic_denoisers/dictionary_fixed.py", "max_stars_repo_name": "royerloic/aydin", "max_stars_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aydin/it/classic_denoisers/dictionary_fixed.py", "max_issues_repo_name": "royerloic/aydin", "max_issues_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aydin/it/classic_denoisers/dictionary_fixed.py", "max_forks_repo_name": "royerloic/aydin", "max_forks_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0, "max_line_length": 86, "alphanum_fraction": 0.6685637417, "include": true, "reason": "import numpy", "num_tokens": 2181}
|
\chapter{Record Examples}
\label{cha:record-examples}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "../../copatterns-thesis"
%%% End:
|
{"hexsha": "2d9f149c905b74339d0bb0d2940af980db7708d8", "size": 142, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sections/appendix/example_records.tex", "max_stars_repo_name": "sualitu/thesis", "max_stars_repo_head_hexsha": "22d2cb4f21dc7c2dab011da5bb560c003650a2bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sections/appendix/example_records.tex", "max_issues_repo_name": "sualitu/thesis", "max_issues_repo_head_hexsha": "22d2cb4f21dc7c2dab011da5bb560c003650a2bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sections/appendix/example_records.tex", "max_forks_repo_name": "sualitu/thesis", "max_forks_repo_head_hexsha": "22d2cb4f21dc7c2dab011da5bb560c003650a2bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.2857142857, "max_line_length": 41, "alphanum_fraction": 0.661971831, "num_tokens": 39}
|
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
import abc
import numpy
from xData import ancestry as ancestryModule
from PoPs.quantities.quantity import double
"""
Defines incident-energy-dependent functions representing the contribution to the resolved region cross section
from resonances external to the evaluated set.
The external R-Matrix is added to the R-Matrix diagonal during resonance reconstruction.
"""
class externalRMatrix(ancestryModule.ancestry, metaclass=abc.ABCMeta):
"""
Abstract base class inherited by the Froehner and SAMMY classes.
"""
moniker = 'externalRMatrix'
def __init__(self, **kwargs):
super().__init__()
provided_terms = set(kwargs.keys())
required_terms = {'singularityEnergyBelow', 'singularityEnergyAbove'}
if not required_terms.issubset(provided_terms):
missing = required_terms.difference(provided_terms)
raise AttributeError("%s external R-Matrix is missing required terms: %s" % (self.type, ", ".join(missing)))
extra = provided_terms.difference(self.ancestryMembers)
if extra:
raise AttributeError("%s external R-Matrix received unexpected terms: %s" % (self.type, ", ".join(extra)))
self._terms = kwargs
@property
@abc.abstractmethod
def type(self): pass
@property
@abc.abstractmethod
def terms(self): pass
@abc.abstractmethod
def evaluate(self, energies): pass
def getTerm(self, key, unit):
result = self.terms.get(key)
if result is None:
return 0
return result.float(unit)
def toXMLList(self, indent='', **kwargs):
indent2 = indent + ' '
xmlString = ['%s<%s type="%s">' % (indent, self.moniker, self.type)]
for key in self.ancestryMembers:
term = self.terms.get(key)
if term is not None:
xmlString += term.toXMLList(indent=indent2, **kwargs)
xmlString[-1] += ('</%s>' % self.moniker)
return xmlString
@staticmethod
def parseXMLNode(element, xPath, linkData, **kwargs):
xPath.append(element.tag)
terms = {term.get("label"): double.parseXMLNodeAsClass(term, xPath, linkData)
for term in element.findall("double")}
class_ = {
'Froehner': Froehner,
'SAMMY': SAMMY
}[element.get("type")]
result = class_(**terms)
xPath.pop()
return result
class Froehner(externalRMatrix):
"""
Froehner's external R-Matrix parametrization.
"""
ancestryMembers = ('averageRadiationWidth', 'constantExternalR', 'poleStrength', 'singularityEnergyBelow',
'singularityEnergyAbove')
def __init__(self, **kwargs):
super().__init__(**kwargs)
@property
def type(self):
return "Froehner"
@property
def terms(self):
return self._terms
def evaluate(self, energies):
"""
Evaluate Fröhner's external R-Matrix parametrization at the given energy or energies.
@param energies: single energy or numpy array of energies
@return: tuple(real part, imaginary part)
"""
R0 = self.getTerm('constantExternalR', '')
sc = self.getTerm('poleStrength', '')
Gamma = self.getTerm('averageRadiationWidth', 'eV')
Edown = self.getTerm('singularityEnergyBelow', 'eV')
Eup = self.getTerm('singularityEnergyAbove', 'eV')
Ebar = (Eup + Edown) / 2
I = Eup - Edown
realTerm = R0 + 2 * sc * numpy.arctan2(energies - Ebar, I/2)
imaginaryTerm = (Gamma * I / 4) / (I**2 / 4 - (energies - Ebar)**2)
return realTerm, imaginaryTerm
class SAMMY(externalRMatrix):
"""
External R-Matrix parametrization from SAMMY
"""
ancestryMembers = ('constantExternalR', 'linearExternalR', 'quadraticExternalR',
'constantLogarithmicCoefficient', 'linearLogarithmicCoefficient',
'singularityEnergyBelow', 'singularityEnergyAbove')
def __init__(self, **kwargs):
super().__init__(**kwargs)
@property
def type(self):
return "SAMMY"
@property
def terms(self):
return self._terms
def evaluate(self, energies):
"""
Evaluate the SAMMY external R-Matrix parametrization at the given energy or energies.
@param energies: single energy or numpy array of energies
@return: tuple(real part, imaginary part)
"""
Rcon = self.getTerm('constantExternalR', '')
Rlin = self.getTerm('linearExternalR', '1/eV')
Rquad = self.getTerm('quadraticExternalR', '1/eV**2')
scon = self.getTerm('constantLogarithmicCoefficient', '')
slin = self.getTerm('linearLogarithmicCoefficient', '1/eV')
Edown = self.getTerm('singularityEnergyBelow', 'eV')
Eup = self.getTerm('singularityEnergyAbove', 'eV')
logTerm = numpy.log((Eup - energies) / (energies - Edown))
realTerm = Rcon + Rlin * energies + Rquad * energies**2 - slin * (Eup - Edown) - (scon + slin * energies) * logTerm
return realTerm, 0
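# Domain note (a sketch with hypothetical numbers): the SAMMY logTerm is only
# real for Edown < E < Eup, e.g. with Edown = 0 eV, Eup = 100 eV, E = 25 eV:
#   numpy.log((100 - 25) / (25 - 0)) = numpy.log(3) ~ 1.0986
# Outside that window the log argument turns negative, so these external
# R-matrix terms are intended to be evaluated inside the resolved-resonance
# window bounded by the two singularity energies.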
|
{"hexsha": "9cb5d2d7642d448b8b1aa6d032632ecbd0a22162", "size": 5280, "ext": "py", "lang": "Python", "max_stars_repo_path": "fudge/resonances/externalRMatrix.py", "max_stars_repo_name": "brown170/fudge", "max_stars_repo_head_hexsha": "4f818b0e0b0de52bc127dd77285b20ce3568c97a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-08-29T23:46:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T10:16:25.000Z", "max_issues_repo_path": "fudge/resonances/externalRMatrix.py", "max_issues_repo_name": "brown170/fudge", "max_issues_repo_head_hexsha": "4f818b0e0b0de52bc127dd77285b20ce3568c97a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-04T16:14:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-01T01:54:34.000Z", "max_forks_repo_path": "fudge/resonances/externalRMatrix.py", "max_forks_repo_name": "brown170/fudge", "max_forks_repo_head_hexsha": "4f818b0e0b0de52bc127dd77285b20ce3568c97a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-03T22:41:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T22:54:43.000Z", "avg_line_length": 34.064516129, "max_line_length": 123, "alphanum_fraction": 0.6356060606, "include": true, "reason": "import numpy", "num_tokens": 1298}
|
# -*- coding: utf-8 -*-
"""
.. module:: perform_meta_analysis
:synopsis: module performing a meta-analysis
.. moduleauthor:: Aurore Bussalb <aurore.bussalb@mensiatech.com>
"""
import numpy as np
import scipy.stats as scp
import pandas as pd
import warnings
import matplotlib.pyplot as plt
def _effect_size_ppc(n_treatment, n_control, mean_post_test_treatment, mean_pre_test_treatment, mean_pre_test_control, mean_post_test_control,
std_pre_test_treatment, std_pre_test_control):
"""Computes the pre post control effect size (Scott B. Morris (2008), also called the effect size between "Estimating Effect Sizes From Pretest-Posttest Control Group Designs
and under a random effects model", Organizational Research Methods (Equation 8)).
Parameters
----------
n_treatment: int
Number of patients included in the treatment group.
n_control: int
Number of patients included in the control group.
mean_post_test_treatment: float
Mean score after the treatment.
mean_pre_test_treatment: float
Mean score before the treatment.
mean_pre_test_control: float
Mean score before the treatment in the control group.
mean_post_test_control: float
Mean score after the treatment in the control group.
std_pre_test_treatment: float
Standard deviation of the mean score before the treatment.
    std_pre_test_control: float
        Standard deviation of the mean score before the treatment in the control group.
Returns
-------
effect_size: float
Value estimating the efficacy of the treatment.
If it's negative, the result is in favor of the treatment.
"""
S_within = np.sqrt(((n_treatment - 1)*std_pre_test_treatment**2 + (n_control - 1)*std_pre_test_control**2)/
(n_treatment + n_control - 2))
d = ((mean_post_test_treatment - mean_pre_test_treatment) - (mean_post_test_control - mean_pre_test_control))/S_within
# Correction factor for small sample size. This correction factor is close to 1 unless the degree of freedom is very
    # small (<10), see Borenstein, Introduction to meta-analysis, 2009.
if (n_treatment + n_control - 2) < 10:
warnings.warn('Since the sample size is too small, a correction factor is applied to the effect size')
correction_factor = 1 - (3/(4*(n_treatment + n_control - 2) - 1))
effect_size = d*correction_factor
else:
effect_size = d
return effect_size
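# Worked example (hypothetical numbers): two groups of 20, the treatment group
# improves by 5 points more than the control group, and the pooled pre-test
# standard deviation is 10, so d = ((15 - 20) - (20 - 20)) / 10 = -0.5:
#
#   _effect_size_ppc(n_treatment=20, n_control=20,
#                    mean_post_test_treatment=15.0, mean_pre_test_treatment=20.0,
#                    mean_pre_test_control=20.0, mean_post_test_control=20.0,
#                    std_pre_test_treatment=10.0, std_pre_test_control=10.0)
#   # -> -0.5; no small-sample correction, since 20 + 20 - 2 = 38 >= 10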
def _standard_error_effect_size(n_treatment, n_control, effect_size, pre_post_correlation):
"""Scott B. Morris (2008) "Estimating Effect Sizes From Pretest-Posttest Control Group Designs and under
a random effects model", Organizational Research Methods (Equation 25).
Parameters
----------
n_treatment: int
Number of patients included in the treatment group.
n_control: int
Number of patients included in the control group.
effect_size: float
Value estimating the efficacy of the treatment.
If it's negative, the result is in favor of the treatment.
    pre_post_correlation: float
        Pearson correlation of the pre-test and post-test values (i.e. the pooled
        within-groups Pearson correlation).
Returns
-------
    standard_error_ES: float
        Standard error of the effect size (square root of its variance).
"""
# Correction factor for small sample size. This correction factor is close to 1 unless the degree of freedom is very
    # small (<10), see Borenstein, Introduction to meta-analysis, 2009.
if (n_treatment + n_control - 2) < 10:
correction_factor = 1 - (3/(4*(n_treatment + n_control - 2) - 1))
warnings.warn('Since the sample size is too small, a correction factor is applied to the variance of the effect size')
else:
correction_factor = 1
# Variance
variance_ES = (2*(correction_factor**2)*(1 - pre_post_correlation)*((n_treatment + n_control)/
(n_treatment*n_control))*((n_treatment + n_control - 2)/(n_treatment + n_control - 4))*
(1 + ((effect_size**2)/(2*(1 - pre_post_correlation)*((n_treatment + n_control)/
(n_treatment*n_control))))) - effect_size**2)
# Standard Error
standard_error_ES = np.sqrt(variance_ES)
return standard_error_ES
def run_meta_analysis(df, scale_to_reverse=[], pre_post_correlation=0.5):
"""Performs a meta analysis with the formulae described in Scott B. Morris (2008) "Estimating Effect Sizes From Pretest-
Posttest Control Group Designs and under a random effects model", *Organizational Research Methods* and in Borenstein (2009)
*Introduction to meta-analysis*. These formulae are the same as the ones used in Cortese et al., 2016.
A negative effect size favours the treatment.
Parameters
----------
df: pandas.DataFrame
Parents, teachers or clinicians ratings required to perform the meta-analysis.
This dataframe corresponds to one of those obtained with the ``import_csv_for_meta_analysis`` module.
        If you want to run the meta-analysis on parent assessments enter ``df_values_parents``, to run it on teacher assessments
        enter ``df_values_teachers``, and to run it on clinician assessments enter ``df_values_clinicians``.
Each row corresponds to a study, the disease symptoms are assessed by parents, teachers, or clinicians.
Columns are: mean_post_test_treatment, mean_post_test_control, mean_pre_test_treatment, mean_pre_test_control, n_treatment,
n_control, std_post_test_treatment, std_post_test_control, std_pre_test_treatment, std_pre_test_control, raters for each study.
scale_to_reverse: list of str, optional
        List of strings naming the clinical scales whose scores increase when a patient
        gets better; the sign of their effect sizes is flipped so that a negative effect
        size always favours the treatment.
pre_post_correlation: float, default = 0.5
        Pearson correlation of the pre-test and post-test values (i.e. the pooled within-groups Pearson correlation). Set to 0.5 by
default (see Cuijpers et al., 2016 and Balk et al., 2012 "Empirical Assessment of Within-Arm Correlation Imputation in Trials
of Continuous Outcomes").
Returns
-------
df_results_per_study: pandas.DataFrame
Results per study.
Rows of the dataframe correspond to the studies, columns correspond to the effect size of the study, its standard
error, its 95% confidence interval, and the weight of the study.
df_results: pandas.DataFrame
Global results.
It contains the summary effect, its 95% confidence interval, its variance, its standard error, its p-value,
the between studies variance (Tau²), the heterogeneity (I²), its p-value, and the Chi2 value.
Notes
-----
Effect sizes computed for each study correspond to the effect sizes between subjects. Thus, the studies included in the meta-analysis
must be controlled and provide pre and post scores for treatment and control groups.
"""
# Creation of the dataframe for total results
index = ['Results']
df_results = pd.DataFrame(index=index)
# Compute the effect size
df['effect_size'] = df[
['n_treatment', 'n_control', 'mean_post_test_treatment',
'mean_pre_test_treatment', 'mean_pre_test_control',
'mean_post_test_control', 'std_pre_test_treatment', 'std_pre_test_control']
].apply(lambda row:_effect_size_ppc(**row), axis=1)
# Compute the standard error of the effect size
df['standard_error_ES'] = df[
['n_treatment', 'n_control', 'effect_size']
].apply(lambda row:_standard_error_effect_size(row['n_treatment'], row['n_control'],
row['effect_size'], pre_post_correlation), axis=1)
    # Make sure all the scales measure the disease severity the same way (high score = more symptoms) and homogenize
    for scale_name in scale_to_reverse:
        df.loc[df['score_name'] == scale_name, 'effect_size'] *= -1
# All the following equations come from M. Borenstein and L. Hedges (2009) Introduction to Meta-Analysis
# 95% Confidence interval (Equations 8.3 and 8.4)
df['confidence_interval_of_the_ES'] = df[
['effect_size', 'standard_error_ES']].apply(lambda row: (
row['effect_size'] - 1.96*row['standard_error_ES'],
row['effect_size'] + 1.96*row['standard_error_ES']), axis=1)
# Compute the inverse of the variance = weight under a fixed effect model (Equation 11.2)
df['weight_fixed_model'] = 1/(df['standard_error_ES']**2)
# Computation of Tau²: between studies variance
## Compute degrees of freedom (Equation 12.4)
degrees_of_freedom = len(df.index) - 1
## Compute Q (Equation 12.3)
Q = (df['weight_fixed_model']*df['effect_size']**2).sum() - ((df['weight_fixed_model']*df['effect_size']).sum())**2/df['weight_fixed_model'].sum()
df_results['Chi2'] = Q
## P value of the heterogeneity
# To know if heterogeneity is statistically significant, we can use Q and degrees of freedom
# Null hypothesis: all studies share a common effect size
# Under the null hypothesis, Q will follow a central chi-squared distribution
df_results['p-value Heterogeneity'] = 1 - scp.chi2.cdf(Q, degrees_of_freedom)
## Compute C (Equation 12.5)
C = df['weight_fixed_model'].sum() - ((df['weight_fixed_model']**2).sum()/df['weight_fixed_model'].sum())
## Tau² (Equation 12.2)
# When Tau2 is negative, we put it at zero (this negative value is due to sampling issues,
# when the observed dispersion is less than we would expect by chance, see Borenstein)
Tau2 = (Q - degrees_of_freedom)/C
if Tau2 < 0:
Tau2 = 0
df_results['Tau2'] = Tau2
# Compute the weight of each study under a random effects model
## Compute the weights (Equation 12.6)
df['weight'] = 1/(df['standard_error_ES']**2 + Tau2)
## In percentage
df['percentage_weight'] = (df['weight']*100)/df['weight'].sum()
# Summary effect (Equation 12.7)
df_results['Summary Effect'] = (df['effect_size']*df['percentage_weight']).sum()/df['percentage_weight'].sum()
# Variance and SE of the summary effect (Equations 12.8 and 12.9)
df_results['Variance Summary Effect'] = 1/df['weight'].sum()
df_results['Standard Error Summary Effect'] = np.sqrt(df_results['Variance Summary Effect'])
# 95% Confidence interval (Equations 12.10 and 12.11)
df_results['95% Confidence Interval of the Summary Effect'] = df_results[
['Summary Effect', 'Standard Error Summary Effect']].apply(lambda row: (
row['Summary Effect'] - 1.96*row['Standard Error Summary Effect'],
row['Summary Effect'] + 1.96*row['Standard Error Summary Effect']), axis=1)
# P value for the summary effect (Equations 12.12 and 12.14)
# Null hypothesis: control group and treatment group have no different effect
z = df_results['Summary Effect']/df_results['Standard Error Summary Effect']
df_results['p-value'] = 2*(1 - scp.norm.cdf(abs(z)))
# Heterogeneity (Equation 16.9)
    I2 = ((Q - degrees_of_freedom) / Q) * 100
if I2 < 0:
I2 = 0
df_results['Heterogeneity'] = I2
# Creation of the dataframe with results by studies
df_results_per_study = pd.DataFrame({'Year': df['year'],
'Effect size': df['effect_size'],
'Standard Error of the ES': df['standard_error_ES'],
'95% Confidence interval of the ES': df['confidence_interval_of_the_ES'],
'Weight': df['percentage_weight']},
index=df.index)
return df_results_per_study, df_results, df['effect_size']
if __name__ == '__main__':
    # Hypothetical smoke test: assumes the CSV already has the columns documented above.
    df = pd.read_csv('values_total_meta_analysis.csv')
    run_meta_analysis(df)
def forest_plot(df_results_per_study, df_results):
"""Creates a forest plot.
Parameters
----------
df_results_per_study: pandas.DataFrame
Results per study.
Dataframe obtained after performing the meta-analysis with ``run_meta_analysis``.
Rows of the dataframe correspond to the studies, columns correspond to the effect size of the study, its standard
error, its 95% confidence interval, and the weight of the study.
df_results: pandas.DataFrame
Global results.
It contains the summary effect, its 95% confidence interval, its variance, its standard error, its p-value,
the between studies variance (Tau²), the heterogeneity (I²), its p-value, and the Chi2 value.
Returns
-------
forest_plot: matplotlib.figure
Graphical representation of the meta-analysis' results.
Representation of the effect size and its 95% confidence interval for each study.
"""
# Sort data so that studies with bigger effect size are in the top of the forest plot
df_results_per_study = df_results_per_study.sort_values(df_results_per_study.columns[1], ascending=[True])
# Conversion to lists for the plotting
ES = df_results_per_study['Effect size'].tolist()
weight = df_results_per_study['Weight'].tolist()
names = df_results_per_study.index.tolist()
names = [i[0] for i in names]
# Preparing for the plotting
## Confidence Interval
lower_limit = []
upper_limit = []
for confidence_interval in df_results_per_study['95% Confidence interval of the ES']:
lower_limit.append(confidence_interval[0])
upper_limit.append(confidence_interval[1])
    lower_limit_summary = df_results['95% Confidence Interval of the Summary Effect'].iloc[0][0]
    upper_limit_summary = df_results['95% Confidence Interval of the Summary Effect'].iloc[0][1]
# Add the confidence interval of the summary effect to others
lower_limit.extend([lower_limit_summary])
lower_limit.reverse() # the summary effect must be at the bottom
upper_limit.extend([upper_limit_summary])
upper_limit.reverse()
    # Add the summary effect to the other effect sizes
names.append('Summary Effect')
names.reverse()
ES.extend(df_results['Summary Effect'])
ES.reverse()
# Make the effect size representation more visible (squares are bigger)
    weight = [i * 5 for i in weight]
    weight.reverse()  # align with ES and names, which were reversed above
# Graphic
y = np.array(range(1,len(names)+1))
forest_plot = plt.figure()
plt.yticks(y, names)
# Vertical line in zero
plt.axvline(0, color = 'k')
# Plot Confidence Interval
for i in range(0,len(names)):
plt.plot([lower_limit[i], upper_limit[i]], [y[i],y[i]], color = 'g')
# Plot effect sizes
    plt.scatter(ES[1:len(names)], y[1:len(names)], s=weight,
                marker = 's', color = 'b')
plt.scatter(ES[0], y[0], s=100, marker = 'D', color = 'b')
plt.xlabel('Effect size')
plt.title('Standard Mean Difference, 95% Confidence Interval', fontweight = "bold")
return forest_plot
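# Usage sketch (hypothetical numbers; column names follow the docstring of
# run_meta_analysis, and the (study, year) MultiIndex mimics the one produced
# by import_csv_for_meta_analysis, which forest_plot expects):
def _demo_meta_analysis():
    index = pd.MultiIndex.from_tuples([('Study A', 2010), ('Study B', 2014)])
    df = pd.DataFrame({
        'mean_pre_test_treatment': [20.0, 22.0],
        'mean_post_test_treatment': [15.0, 16.0],
        'mean_pre_test_control': [20.0, 21.0],
        'mean_post_test_control': [19.0, 20.0],
        'std_pre_test_treatment': [10.0, 9.0],
        'std_pre_test_control': [10.0, 9.0],
        'std_post_test_treatment': [10.0, 9.0],
        'std_post_test_control': [10.0, 9.0],
        'n_treatment': [20, 25],
        'n_control': [20, 25],
        'year': [2010, 2014],
        'score_name': ['ADHD-RS', 'ADHD-RS'],
    }, index=index)
    df_per_study, df_results, _ = run_meta_analysis(df)
    forest_plot(df_per_study, df_results)
    plt.show()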
|
{"hexsha": "37debf4d640772ffe584d43c1063d445a1c80a92", "size": 15738, "ext": "py", "lang": "Python", "max_stars_repo_path": "source_assess_treatment_efficacy/meta_analysis/perform_meta_analysis.py", "max_stars_repo_name": "AuroreBussalb/meta-analysis-statistical-tools", "max_stars_repo_head_hexsha": "5ef8a285269ced605196946174e0e36609bf8a6e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-14T12:25:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-26T08:09:35.000Z", "max_issues_repo_path": "source_assess_treatment_efficacy/meta_analysis/perform_meta_analysis.py", "max_issues_repo_name": "AuroreBussalb/meta-analysis-statistical-tools", "max_issues_repo_head_hexsha": "5ef8a285269ced605196946174e0e36609bf8a6e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source_assess_treatment_efficacy/meta_analysis/perform_meta_analysis.py", "max_forks_repo_name": "AuroreBussalb/meta-analysis-statistical-tools", "max_forks_repo_head_hexsha": "5ef8a285269ced605196946174e0e36609bf8a6e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-14T08:59:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-14T08:59:57.000Z", "avg_line_length": 44.3323943662, "max_line_length": 179, "alphanum_fraction": 0.6577074597, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3630}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Karel Roots"
import os
import sys
import numpy as np
from EEGModels import get_models
from data_loader import load_data
from experiment import Experiment
from mcnemar import mcnemar_test
from tensorflow.keras import backend as K
from tensorflow.keras.utils import to_categorical
from training_testing import run_experiment
from run_type import RunType
"""
Required dependencies:
Python == 3.7.7, Tensorflow == 2.1.0, Numpy >= 1.18.1, scikit-learn >= 0.22.1, pyEDFlib >= 0.1.17
statsmodels >= 0.11.1, Gumpy (https://github.com/gumpy-bci/gumpy)
The program can be run from the CLI with the following required arguments:
1.) The number of subjects to be used from the dataset (int)
2.) The number of epochs the training of models should be done (int)
3.) The number of target classes in the classification (int)
4.) What type of trials should be extracted from the data; 1 => executed trials only; 2 => imagined trials only
5.) If CPU-only mode should be used (True / False)
Example: python run_experiments.py 109 100 2 1 True
"""
# Settings
if len(sys.argv) < 6:
    raise AttributeError("Input requires 5 arguments: number of subjects (int), number of training epochs (int), " +
                         "number of classes (int), trial type (1 or 2, int), if cpu mode should be used (boolean)")
print("Starting job with args:")
print(sys.argv)
nr_of_subj = int(sys.argv[1])
nr_of_epochs = int(sys.argv[2])
nb_classes = int(sys.argv[3])
trial_type = RunType.Executed if int(sys.argv[4]) == 1 else RunType.Imagined
use_cpu = True if sys.argv[5] == 'True' else False
# Settings for transfer learning
trials_per_subject = 3 * 15 * 8
subj_for_training = 100 # The number of subjects that should be used for pre-training the model for TL
subj_for_transfer_learning = 3 # The number of subject that should be used for individual evaluation of the TL model
# Loading data from files
X, y = load_data(nr_of_subj=nr_of_subj, trial_type=trial_type, chunk_data=True, chunks=8, cpu_format=use_cpu,
preprocessing=True, hp_freq=0.5, bp_low=2, bp_high=60, notch=True,
hp_filter=False, bp_filter=True, artifact_removal=True)
# methods for saving/loading the data to/from files
# file_name = 'executed.npz' if trial_type == RunType.Executed else 'imagined.npz'
# np.savez_compressed(file_name, data=X, labels=y)
# print("Loaded data from file %s" % (file_name))
# data = np.load(file_name, allow_pickle=True)
# X = data['data']
# y = data['labels']
# Data formatting
if use_cpu:
print("Using CPU")
K.set_image_data_format('channels_last')
samples = X.shape[1]
X = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)
else:
print("Using GPU")
K.set_image_data_format('channels_first')
samples = X.shape[2]
X = X.reshape(X.shape[0], 1, X.shape[1], X.shape[2])
y = to_categorical(y, nb_classes)
print("X shape: {}".format(X.shape))
print("y shape: {}".format(y.shape))
# Make directories for model binaries
DIR = ['./model', './history']
for directory in DIR:
if not os.path.exists(directory):
os.makedirs(directory)
# Perform experiments
experiments = []
# test models with all subjects
experiment_103sub = Experiment(trial_type, '103sub', get_models(trial_type, nb_classes, samples, use_cpu), nr_of_epochs,
0.125, 0.2)
experiments.append(run_experiment(X, y, experiment_103sub))
# Calculate Mcnemar's test statistic and p-value for all experiments
models = ['EEGNet', 'ShallowConvNet', 'DeepConvNet']
for experiment in experiments:
fusion_eqs = experiment.get_model('EEGNet_fusion').get_equals()
eqs_list = []
# evaluate EEGNet Fusion against the state-of-the-art models under evaluation
for model_name in models:
print("EEGNet Fusion vs {}".format(model_name))
model_eqs = experiment.get_model(model_name).get_equals()
mcnemar_test(fusion_eqs, model_eqs)
eqs_list.append(model_eqs)
# save equals lists in .npz file for future analysis
np.savez_compressed(experiment.get_exp_type() + '_' + experiment.get_trial_type().name + '_eq_values.npz',
fusion=fusion_eqs, eegnet=eqs_list[0], shallow=eqs_list[1], deep=eqs_list[2])
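# Later-analysis sketch (hypothetical file name, assembled from the experiment
# type and trial type exactly as in the savez_compressed call above):
#   data = np.load('103sub_Executed_eq_values.npz', allow_pickle=True)
#   mcnemar_test(data['fusion'], data['eegnet'])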
|
{"hexsha": "c782ae9885701c4df416d7115c812bf3028ba25b", "size": 4386, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_experiments.py", "max_stars_repo_name": "rootskar/EEGMotorImagery", "max_stars_repo_head_hexsha": "62bba0afc16cf102c77c1bda3a87bfc6fd3fb121", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-06-22T13:00:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T12:18:22.000Z", "max_issues_repo_path": "run_experiments.py", "max_issues_repo_name": "rootskar/EEGMotorImagery", "max_issues_repo_head_hexsha": "62bba0afc16cf102c77c1bda3a87bfc6fd3fb121", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-08-24T12:03:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:07:13.000Z", "max_forks_repo_path": "run_experiments.py", "max_forks_repo_name": "rootskar/EEGMotorImagery", "max_forks_repo_head_hexsha": "62bba0afc16cf102c77c1bda3a87bfc6fd3fb121", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-12-09T02:41:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T12:59:16.000Z", "avg_line_length": 39.5135135135, "max_line_length": 121, "alphanum_fraction": 0.6935704514, "include": true, "reason": "import numpy", "num_tokens": 1138}
|
\chapter*{Lijst van symbolen}
\addcontentsline{toc}{chapter}{Lijst van symbolen}
\begin{center}
\begin{tabularx}{0.8\textwidth}{p{1.5cm}X}
$\pi$ & het getal pi\\
$42$ & The Answer to the Ultimate Question of Life, the Universe, and Everything\cite{h2g2}
\end{tabularx}
\end{center}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "eindwerk_template"
%%% End:
|
{"hexsha": "5ea22100dac8eb65b1c6ebdbf84958fbe9417f46", "size": 381, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "report/final/symbolenlijst.tex", "max_stars_repo_name": "matthijsvk/convNets", "max_stars_repo_head_hexsha": "7e65db7857a4e6abfbcab264953eb7741319de6c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "report/final/symbolenlijst.tex", "max_issues_repo_name": "matthijsvk/convNets", "max_issues_repo_head_hexsha": "7e65db7857a4e6abfbcab264953eb7741319de6c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "report/final/symbolenlijst.tex", "max_forks_repo_name": "matthijsvk/convNets", "max_forks_repo_head_hexsha": "7e65db7857a4e6abfbcab264953eb7741319de6c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4, "max_line_length": 96, "alphanum_fraction": 0.6797900262, "num_tokens": 134}
|
import numpy as np
import cv2, random
from os.path import join
class WiderFaceDataset:
def __init__(self, data_dir):
self.data_dir = data_dir
self._train_ls = self.load_file("wider_face_train_bbx_gt.txt")
self._val_ls = self.load_file("wider_face_val_bbx_gt.txt")
self._test_ls = self.load_file("wider_face_test_filelist.txt")
    def load_file(self, file_name):
        result = []
        file_path = join(self.data_dir, "wider_face_split", file_name)
        # The test split is a plain list of image paths, one per line
        if file_name == "wider_face_test_filelist.txt":
            return [line.strip() for line in open(file_path, "r").readlines()]
        # Train/val annotations follow a simple state machine:
        # status 0 -> image path, status 1 -> number of boxes,
        # status 2 -> one bounding-box line per face
        with open(file_path, "r") as f:
            status = 0
            for line in f.readlines():
                line = line.strip()
                if status == 0:
                    record = [line, []]
                    status = 1
                    continue
                if status == 1:
                    count = int(line)
                    status = 2
                    continue
                if status == 2:
                    record[1].append([int(s) for s in line.split(" ")])
                    count -= 1
                    if count <= 0:
                        result.append(record)
                        status = 0
                    continue
        return result
def load_image(self, image_path):
return cv2.imread(join(self.data_dir, image_path))
    def preprocess(self, img, rois):
        # Scale the image so its longest side fits in 1024, then pad to 1024x1024
        h, w, _ = img.shape
        scale = min(1024 / h, 1024 / w)
        img = cv2.resize(img, None, fx=scale, fy=scale)
        rois = rois * scale
        h, w, _ = img.shape
        canvas = np.zeros((1024, 1024, 3))
        canvas[:h, :w, :] = img
        return canvas, rois.astype(int)  # np.int was removed in NumPy 1.24
def train_data(self):
random.shuffle(self._train_ls)
for image_name, data in self._train_ls:
image_path = join("WIDER_train", "images", image_name)
img = self.load_image(image_path)
rois = np.array(data)[:, :4]
yield self.preprocess(img, rois)
def val_data(self):
for image_name, data in self._val_ls:
image_path = join("WIDER_val", "images", image_name)
img = self.load_image(image_path)
rois = np.array(data)[:, :4]
yield self.preprocess(img, rois)
def test_data(self):
for image_name, data in self._test_ls:
image_path = join("WIDER_test", "images", image_name)
yield self.load_image(image_path)
if __name__ == '__main__':
dataset = WiderFaceDataset("/home/killf/data/数据集/wider_face")
for img, rois in dataset.train_data():
for roi in rois:
cv2.rectangle(img, (roi[0], roi[1]), (roi[0] + roi[2], roi[1] + roi[3]), (255, 255, 0))
cv2.imwrite("0.jpg", img)
print(dataset)
|
{"hexsha": "a6c84c812691e8eafa5e72db666b18c1a491f621", "size": 2856, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset/wider_face.py", "max_stars_repo_name": "killf/FaceDetection", "max_stars_repo_head_hexsha": "4698921f8a6e8a33e6effe5a489353b82a03b653", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dataset/wider_face.py", "max_issues_repo_name": "killf/FaceDetection", "max_issues_repo_head_hexsha": "4698921f8a6e8a33e6effe5a489353b82a03b653", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataset/wider_face.py", "max_forks_repo_name": "killf/FaceDetection", "max_forks_repo_head_hexsha": "4698921f8a6e8a33e6effe5a489353b82a03b653", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7333333333, "max_line_length": 99, "alphanum_fraction": 0.5381652661, "include": true, "reason": "import numpy", "num_tokens": 710}
|
using ArcadeLearningEnvironment
using CartesianGeneticProgramming
using IICGP
using Test
using Statistics
# Global test parameters
GAME_NAMES = ["freeway", "centipede", "pong"]
N_OUT_ENCO = 2
N_STEPS = 3
function enco_cont_from_reducer(r::AbstractReducer, game_name::String)
# Temporarily open a game to retrieve parameters
game = Game(game_name, 0)
n_in = 3 # RGB images
n_out = length(getMinimalActionSet(game.ale)) # One output per legal action
rgb = get_rgb(game)
img_size = size(rgb[1])
d_fitness = 1
close!(game)
# Encoder
enco_nodes = [
Node(1, 1, IICGP.CGPFunctions.f_dilate, [0.5], false)
]
enco_outputs = convert(
Array{Int16},
ceil.((n_in + length(enco_nodes)) * rand(N_OUT_ENCO))
)
enco_cfg = cfg_from_info(enco_nodes, n_in, enco_outputs, IICGP.CGPFunctions,
d_fitness)
enco = IPCGPInd(enco_nodes, enco_cfg, enco_outputs, img_size)
# Forward pass to retrieve the number of input of the controller
enco_out = CartesianGeneticProgramming.process(enco, rgb)
features = r.reduct(enco_out, r.parameters)
features_flatten = collect(Iterators.flatten(Iterators.flatten(features)))
# Controller
cont_nodes = [
Node(1, 2, IICGP.CGPFunctions.f_subtract, [0.5], false),
Node(1, 2, IICGP.CGPFunctions.f_add, [0.5], false),
Node(3, 3, IICGP.CGPFunctions.f_cos, [0.6], false)
]
cont_n_in = length(features_flatten)
cont_outputs = convert(
Array{Int16},
ceil.((cont_n_in + length(cont_nodes)) * rand(n_out))
)
cont_cfg = cfg_from_info(cont_nodes, cont_n_in, cont_outputs,
IICGP.CGPFunctions, d_fitness)
cont = CGPInd(cont_nodes, cont_cfg, cont_outputs)
enco, cont, img_size
end
@testset "Processing function" begin
for game_name in GAME_NAMES
# Pooling reducer
features_size = 5
r = PoolingReducer(Statistics.mean, features_size)
enco, cont, img_size = enco_cont_from_reducer(r, game_name)
game = Game(game_name, 0)
n_out = length(getMinimalActionSet(game.ale))
for step in 1:N_STEPS
rgb = get_rgb(game)
features, out = IICGP.process_f(enco, r, cont, rgb)
@test length(features) == N_OUT_ENCO
for i in eachindex(features)
@test typeof(features[i]) == Array{Float64, 2}
@test size(features[i]) == (features_size, features_size)
@test all(f -> (0.0 <= f <= 1.0), features[i])
@test all(f -> (!isnan(f)), features[i])
end
@test length(out) == n_out
end
close!(game)
# Centroid reducer
n_centroids = 20
r = CentroidReducer(n_centroids, N_OUT_ENCO, img_size)
enco, cont, img_size = enco_cont_from_reducer(r, game_name)
game = Game(game_name, 0)
for step in 1:N_STEPS
rgb = get_rgb(game)
features, out = IICGP.process_f(enco, r, cont, rgb)
            action = game.actions[argmax(out)]
act(game.ale, action)
@test length(features) == N_OUT_ENCO
for i in eachindex(features)
@test typeof(features[i]) == Array{Tuple{Float64,Float64},1}
@test length(features[i]) == n_centroids
@test all(f -> ((0.0, 0.0) <= f <= (1.0, 1.0)), features[i])
@test all(f -> (!isnan(f[1]) && !isnan(f[2])), features[i])
end
@test length(out) == n_out
end
close!(game)
end
end
cfg_filename = string(@__DIR__, "/dualcgp_test.yaml")
rom = "assault"
@testset "CGP controller with constant input values" begin
mcfg, ecfg, ccfg, reducer, _ = IICGP.dualcgp_config(cfg_filename, rom)
game = Game(rom, 0)
enco = IICGP.IPCGPInd(ecfg)
cont = CGPInd(ccfg)
cstes = collect(0.0:1.0/(ccfg.n_cst_inputs-1):1.0)
for t in 1:10
s = get_state(game, true, true)
output = IICGP.process(enco, reducer, cont, ccfg, s)
@test cont.buffer[ccfg.n_in-ccfg.n_cst_inputs+1:ccfg.n_in] == cstes
end
close!(game)
end
|
{"hexsha": "733d415ed51d58c821157a14c88f992a9c23fd45", "size": 4185, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/process.jl", "max_stars_repo_name": "erwanlecarpentier/IICGP.jl", "max_stars_repo_head_hexsha": "65cecc8210e02f3f479a4e2b50b70f1006afb6f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-06T17:39:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T17:39:28.000Z", "max_issues_repo_path": "test/process.jl", "max_issues_repo_name": "erwanlecarpentier/IICGP.jl", "max_issues_repo_head_hexsha": "65cecc8210e02f3f479a4e2b50b70f1006afb6f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/process.jl", "max_forks_repo_name": "erwanlecarpentier/IICGP.jl", "max_forks_repo_head_hexsha": "65cecc8210e02f3f479a4e2b50b70f1006afb6f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-06T17:39:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-06T17:39:29.000Z", "avg_line_length": 35.4661016949, "max_line_length": 80, "alphanum_fraction": 0.6112305854, "num_tokens": 1189}
|
__version__ = '1.0'
__all__ = ['formatPoly', 'latex_matrix', '__version__']
__author__ = u'Rahul Gupta'
__license__ = 'MIT'
__copyright__ = 'Copyright 2021 Rahul Gupta'
# Source for numpyrett
# Some code is inspired from StackExchange , namely
# https://stackoverflow.com/questions/3862310/
# https://stackoverflow.com/questions/17129290/
# https://stackoverflow.com/questions/1911281/
import numpy as np
import fractions
from IPython.display import display, Markdown , Latex
def frac_formatter(coeff , format_mode = False , format_string = '{:0.2f}' , poly_mode = True)->str:
vals = str(fractions.Fraction(coeff).limit_denominator()).split('/')
if(not format_mode):
if(len(vals)==2):
if(not format_mode):
frac = str('\\frac{%d}{%d}' % (abs(int(vals[0])) , abs(int(vals[1]))))
if(coeff < 0):
frac = "-" + frac
return frac
if(len(vals)==1):
if(not format_mode):
if(vals[0] == '1' and poly_mode):
return " "
elif(vals[0] == '-1' and poly_mode):
return "-"
return vals[0]
else:
raise ValueError("Make sure you have typed a valid fraction")
else:
return format_string.format(coeff)
def formatPoly(poly , format_mode = False , format_string = '{:0.2f}' ,var_change = False , variable = 'x')->str:
if (isinstance(poly,np.poly1d)) :
degree = poly.order
var = poly.variable if not var_change else variable
coeff = poly.coef[::-1]
elif (isinstance(poly,np.polynomial.polynomial.Polynomial)):
degree = poly.degree()
var = variable
coeff = poly.coef[::-1]
if(coeff[-1] == 0.0):
coeff = np.resize(coeff, coeff.size - 1)
degree-=1
else :
raise TypeError("You can only pretty print numpy.poly1d or numpy.polynomial.Polynomial")
pstr =" \\displaystyle "
deg = degree
while(deg>-1):
if(coeff[deg]!=0):
if(deg == degree):
pstr += frac_formatter(coeff[deg] , format_mode , format_string ) + var + "^{"+ str(deg) + "}"
else:
prefix = " " if ("-" in str(coeff[deg])) else " + "
if(deg>1):
pstr += prefix + frac_formatter(coeff[deg] , format_mode , format_string) + var + "^"+ str(deg)
elif(deg == 1):
pstr += prefix + frac_formatter(coeff[deg] , format_mode , format_string) + var + ""
elif(deg == 0):
pstr += prefix + frac_formatter(coeff[deg] , format_mode , format_string)
deg = deg - 1
# pstr+='$'
display(Latex(pstr))
def latex_matrix(array , mat_type = 'bmatrix' , format_mode = False , format_string = '{:0.2f}' ):
if isinstance(array,np.matrix):
array = np.squeeze(np.asarray(array))
if len(array.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
temp_array = np.empty(array.shape)
if len(array.shape) == 1:
temp_array = np.array(list(map(lambda x: frac_formatter(x,format_mode,format_string,False) , array)))
if len(array.shape) == 2:
for row in array:
row = np.array(list(map(lambda x: frac_formatter(x,format_mode,format_string,False) , row)))
temp_array = np.vstack([temp_array,row])
temp_array = temp_array[array.shape[0]:]
lines = str(temp_array).replace('[', '').replace(']', '').replace('\'', '').replace('\\\\', '\\').splitlines()
rv = [r'\begin{' + mat_type + '}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{' + mat_type + '}']
display(Latex('\n'.join(rv)))
def pretty_list(lst,index_colour_pair_dict= {0:'red'}):
array_str = ""
for i in range(len(lst)):
if i in index_colour_pair_dict.keys():
array_str+=fr"\fcolorbox{{{index_colour_pair_dict.get(i)}}}{{{index_colour_pair_dict.get(i)}}}{{{lst[i]}}}"
else:
array_str+=fr"\fbox{{{lst[i]}}}"
display(Latex (array_str))
def get_all_subclasses(obj):
all_subclasses = []
for subclass in obj.__subclasses__():
all_subclasses.append(subclass.__name__)
all_subclasses.extend(get_all_subclasses(subclass))
return set(all_subclasses)
def get_all_superclasses(obj):
all_superclasses = []
for superclass in obj.__bases__:
all_superclasses.append(superclass.__name__)
all_superclasses.extend(get_all_superclasses(superclass))
return set(all_superclasses)
def get_all_fields(obj):
return set(obj.__dict__.keys())
def get_all_methods(obj):
return set([func for func in dir(obj.__class__) if callable(getattr(obj.__class__, func)) and not func.startswith("__")])
def class_info(obj):
display(Markdown(fr'# Class {obj.__class__.__name__}'))
display(Markdown(fr'## Super Classes : { get_all_superclasses(obj.__class__) }'))
display(Markdown(fr'## Sub Classes : { get_all_subclasses(obj.__class__) }'))
display(Markdown(fr'## Fields : { get_all_fields(obj) }'))
display(Markdown(fr'## Methods : { get_all_methods(obj) }'))
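# Usage sketch (in a Jupyter/IPython session, where display() can render LaTeX):
#   formatPoly(np.poly1d([1, 2, 3]))               # renders  x^{2} + 2x + 3
#   latex_matrix(np.array([[1, 0.5], [0.25, 2]]))  # bmatrix with \frac{1}{2} etc.
#   pretty_list([3, 1, 4], {1: 'yellow'})          # boxed list, index 1 highlighted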
|
{"hexsha": "4631789f23142d56af0c3b85e1813a520fefe6dd", "size": 5016, "ext": "py", "lang": "Python", "max_stars_repo_path": "NumPyrett/__init__.py", "max_stars_repo_name": "argoopjmc/NumPyrett", "max_stars_repo_head_hexsha": "4ce7a8312b62b47ad1b70973c640a539948c2134", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-26T06:44:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-26T06:44:34.000Z", "max_issues_repo_path": "NumPyrett/__init__.py", "max_issues_repo_name": "argoopjmc/NumPyrett", "max_issues_repo_head_hexsha": "4ce7a8312b62b47ad1b70973c640a539948c2134", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NumPyrett/__init__.py", "max_forks_repo_name": "argoopjmc/NumPyrett", "max_forks_repo_head_hexsha": "4ce7a8312b62b47ad1b70973c640a539948c2134", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0, "max_line_length": 123, "alphanum_fraction": 0.6226076555, "include": true, "reason": "import numpy", "num_tokens": 1299}
|
(******************************************************************************)
(* PipeCheck: Specifying and Verifying Microarchitectural *)
(* Enforcement of Memory Consistency Models *)
(* *)
(* Copyright (c) 2014 Daniel Lustig, Princeton University *)
(* All rights reserved. *)
(* *)
(* This library is free software; you can redistribute it and/or *)
(* modify it under the terms of the GNU Lesser General Public *)
(* License as published by the Free Software Foundation; either *)
(* version 2.1 of the License, or (at your option) any later version. *)
(* *)
(* This library is distributed in the hope that it will be useful, *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *)
(* Lesser General Public License for more details. *)
(* *)
(* You should have received a copy of the GNU Lesser General Public *)
(* License along with this library; if not, write to the Free Software *)
(* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 *)
(* USA *)
(******************************************************************************)
Require Import Ensembles.
Require Import Arith.
Require Import Bool.
Require Import List.
Import ListNotations.
Require Import wmm.
Require Import util2.
Require Import dot.
Require Import topsort.
Require Import stages.
(** * Traditional Five-Stage RISC Pipeline *)
(** ** Local Reorderings at different stages *)
(** [FIFO]: any ordering guaranteed at the input is also guaranteed at the
output. This is the common case. *)
Definition FIFO : LocalReordering :=
fun _ ordering => ordering.
(** [NoOrderGuarantees]: Operations can leave the stage in any order; nothing
is guaranteed. *)
Definition NoOrderGuarantees : LocalReordering :=
fun _ _ => [].
(** ** Special Edge Maps *)
(** In most cases, we don't need to add any special edges *)
Definition NoSpecialEdges : SpecialEdgeMap :=
fun _ _ _ => [].
(** The store buffer only allows one outstanding unacknowledged store at a
time. *)
Fixpoint StoreBufferSpecialEdges
(c n : nat)
(e_before : list Event)
(e : Event)
(e_after : list Event)
: GlobalGraph :=
match e_after with
| h::t =>
match dirn h with
| R => StoreBufferSpecialEdges c n e_before e t
| W => [((6 * n, eiid e), (5 + 6 * c, eiid h), "StoreBuffer")]
end
| _ => []
end.
(** * Pipeline Definition *)
(** ** Pipeline Stages *)
(** Each pipeline stage is defined by a name, a number, a [LocalReordering],
and a function adding any special edges (in this case, only at the store
buffer). *)
Definition RISC_PipelineStages n c := [
mkStage "Fetch" FIFO NoSpecialEdges;
mkStage "Decode" FIFO NoSpecialEdges;
mkStage "Execute" FIFO NoSpecialEdges;
mkStage "Memory" FIFO NoSpecialEdges;
mkStage "Writeback" FIFO NoSpecialEdges;
mkStage "StoreBuffer" FIFO (StoreBufferSpecialEdges c n)
].
Definition RISC_SharedStages := [
mkStage "MainMemory" NoOrderGuarantees NoSpecialEdges;
mkStage "Retire" FIFO NoSpecialEdges
].
Definition RISC_AllStages (n : nat) :=
fold_left (app (A:=_)) (map (RISC_PipelineStages n) [0 ... n-1]) []
++ RISC_SharedStages.
Definition StagesOfCore
(c : nat)
(l : list nat)
: list nat :=
map (fun x => x + 6 * c) l.
(** ** Pipeline Paths *)
Definition RISC_PathOptions
(n : nat)
(e : Event)
: PathOptions :=
let c := proc (iiid e) in
match dirn e with
| R => [
mkPathOption (String.append "Read" (stringOfNat (loc e))) e
(StagesOfCore c [0 ... 4])
[mkPerformStages (3 + 6 * c) [0 ... n-1] [0 ... n-1] None true]
NoSpecialEdges;
mkPathOption (String.append "STBFwd" (stringOfNat (loc e))) e
(StagesOfCore c [0 ... 4])
[mkPerformStages (3 + 6 * c) [0 ... n-1] [c] None false]
NoSpecialEdges
]
| W => [
mkPathOption (String.append "Write" (stringOfNat (loc e))) e
(StagesOfCore c [0 ... 5] ++ StagesOfCore n [0; 1])
[mkPerformStages (3 + 6 * c) [c] [c] None false;
mkPerformStages (6 * n) [0 ... n-1] [0 ... n-1] None true]
NoSpecialEdges
]
end.
(** ** Pipeline Definition *)
Definition RISC_Pipeline (n : nat) :=
mkPipeline
"RISC"
(RISC_AllStages n)
(RISC_PathOptions n).
|
{"author": "daniellustig", "repo": "pipecheck", "sha": "7b70b585be8c0a946869e991f459c57c29f73c9b", "save_path": "github-repos/coq/daniellustig-pipecheck", "path": "github-repos/coq/daniellustig-pipecheck/pipecheck-7b70b585be8c0a946869e991f459c57c29f73c9b/risc.v"}
|
#include "orphandownloader.h"
#include <univalue.h>
#include "rpcipfs.h"
#include "guiutil.h"
#include "rpcpog.h"
#include "timedata.h"
#include <QUrl>
#include <boost/algorithm/string/case_conv.hpp>
#include <QDir>
#include <QTimer>
#include <QString>
OrphanDownloader::OrphanDownloader(QString xURL, QString xDestName, int xTimeout) : sURL(xURL), sDestName(xDestName), iTimeout(xTimeout)
{
}
void OrphanDownloader::Get()
{
if (sURL == "" || sDestName == "") return;
int64_t nFileSize = GetFileSize(GUIUtil::FROMQS(sDestName));
bDownloadFinished = false;
if (nFileSize > 0)
{
return;
}
// We must call out using an HTTPS request here - to pull down the Orphan's picture locally (otherwise QT won't display the image)
manager = new QNetworkAccessManager;
QNetworkRequest request;
request.setUrl(QUrl(sURL));
reply = manager->get(request);
file = new QFile;
file->setFileName(sDestName);
file->open(QIODevice::WriteOnly);
connect(reply,SIGNAL(downloadProgress(qint64,qint64)), this, SLOT(onDownloadProgress(qint64,qint64)));
connect(reply,SIGNAL(readyRead()), this, SLOT(onReadyRead()));
connect(reply,SIGNAL(finished()), this, SLOT(onReplyFinished()));
connect(manager, SIGNAL(finished(QNetworkReply*)), this, SLOT(onFinished(QNetworkReply*)));
QTimer::singleShot(iTimeout, this, SLOT(DownloadFailure()));
}
void OrphanDownloader::BusyWait()
{
int iBroken = 0;
	while (true)
{
int64_t nFileSize = GetFileSize(GUIUtil::FROMQS(sDestName));
if (nFileSize > 0 || bDownloadFinished) break;
MilliSleep(250);
iBroken++;
if (iBroken > (iTimeout/500)) break;
}
}
void OrphanDownloader::DownloadFailure()
{
LogPrintf("Download failed\n");
bDownloadFinished = true;
}
void OrphanDownloader::onDownloadProgress(qint64 bytesRead,qint64 bytesTotal)
{
printf("OnDownloadProgress %f ", (double)bytesRead);
}
void OrphanDownloader::onFinished(QNetworkReply * reply)
{
switch(reply->error())
{
case QNetworkReply::NoError:
{
printf(" \n Downloaded successfully. ");
}
break;
default:
{
printf(" BioDownloadError %s ", GUIUtil::FROMQS(reply->errorString()).c_str());
};
}
if(file->isOpen())
{
file->close();
file->deleteLater();
}
bDownloadFinished = true;
}
void OrphanDownloader::onReadyRead()
{
file->write(reply->readAll());
}
void OrphanDownloader::onReplyFinished()
{
if(file->isOpen())
{
file->close();
file->deleteLater();
}
}
OrphanDownloader::~OrphanDownloader()
{
// Note - there is no UI to delete
}
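/* Usage sketch (hypothetical URL and file name; assumes a running Qt event
   loop so the QNetworkAccessManager signals can fire):

       OrphanDownloader dl(QStringLiteral("https://example.com/orphan.jpg"),
                           QStringLiteral("orphan.jpg"), 30000);
       dl.Get();       // start the asynchronous HTTPS download
       dl.BusyWait();  // poll until the file exists or the timeout elapses
*/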
|
{"hexsha": "0dd1418522f97a08080e378c45d316b27f9933c8", "size": 2610, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/qt/orphandownloader.cpp", "max_stars_repo_name": "Mart1250/biblepay", "max_stars_repo_head_hexsha": "d53d04f74242596b104d360187268a50b845b82e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/qt/orphandownloader.cpp", "max_issues_repo_name": "Mart1250/biblepay", "max_issues_repo_head_hexsha": "d53d04f74242596b104d360187268a50b845b82e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/qt/orphandownloader.cpp", "max_forks_repo_name": "Mart1250/biblepay", "max_forks_repo_head_hexsha": "d53d04f74242596b104d360187268a50b845b82e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0973451327, "max_line_length": 136, "alphanum_fraction": 0.6793103448, "num_tokens": 676}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 17 13:28:24 2022
@author: awatson
"""
import flask, json, zarr, os, ast
from flask import request, Response, send_file
import numpy as np
import dask.array as da
from bil_api.dataset_info import dataset_info
# from bil_api import config
from bil_api import utils
import tifffile as tf
import io
from bil_api import zarrLoader
import imaris_ims_file_reader as ims
from flaskAPI_gunicorn import app   # the routes below are registered on this app
from flask import render_template
fakePaths = ['c:/test/test/test.txt', 'c:/test/test/test2.txt']
# @app.route('/', defaults={'req_path': ''})
@app.route('/api/vfs', defaults={'req_path': ''})
@app.route('/api/vfs/<path:req_path>')
def dir_listing(req_path):
# Show directory contents
files = fakePaths
return render_template('vfs.html', files=files)
# <!doctype html>
# <ul>
# {% for file in files %}
# <li>
# <a href="{{ (request.path + '/' if request.path != '/' else '') + file }}">
# {{ (request.path + '/' if request.path != '/' else '') + file }}
# </a>
# </li>
# {% endfor %}
# </ul>
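# --- Illustrative sketch (not part of the original module): render the
# commented-out template above with render_template_string on a throwaway
# Flask app, using fakePaths in place of a real directory listing. The
# '/api/vfs-demo' route name is hypothetical. ---
if __name__ == '__main__':
    from flask import Flask, render_template_string
    _DEMO_TEMPLATE = """
    <!doctype html>
    <ul>
    {% for file in files %}
      <li><a href="{{ file }}">{{ file }}</a></li>
    {% endfor %}
    </ul>
    """
    demo = Flask(__name__)

    @demo.route('/api/vfs-demo')
    def demo_listing():
        return render_template_string(_DEMO_TEMPLATE, files=fakePaths)

    demo.run(debug=True)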
|
{"hexsha": "a9d81c94dbc8a16c89dcf6429c3105e3cda4bd47", "size": 1111, "ext": "py", "lang": "Python", "max_stars_repo_path": "BrAinPI/old/virtualFileSystem.py", "max_stars_repo_name": "CBI-PITT/bil_api", "max_stars_repo_head_hexsha": "5be7e9d84556dcadade944f4f0c536c4b5798cfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BrAinPI/old/virtualFileSystem.py", "max_issues_repo_name": "CBI-PITT/bil_api", "max_issues_repo_head_hexsha": "5be7e9d84556dcadade944f4f0c536c4b5798cfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BrAinPI/old/virtualFileSystem.py", "max_forks_repo_name": "CBI-PITT/bil_api", "max_forks_repo_head_hexsha": "5be7e9d84556dcadade944f4f0c536c4b5798cfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.3653846154, "max_line_length": 85, "alphanum_fraction": 0.6345634563, "include": true, "reason": "import numpy", "num_tokens": 301}
|
[STATEMENT]
lemma hn_monadic_FOREACH[sepref_comb_rules]:
assumes "INDEP Rk" "INDEP Rs" "INDEP R\<sigma>"
assumes FR: "P \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>"
assumes STL: "GEN_ALGO tsl (IS_TO_SORTED_LIST ordR Rs Rk)"
assumes c_ref: "\<And>\<sigma> \<sigma>'. hn_refine
(\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>)
(c \<sigma>)
(\<Gamma>c \<sigma>' \<sigma>)
bool_assn
(c' \<sigma>')"
assumes C_FR:
"\<And>\<sigma>' \<sigma>. TERM monadic_FOREACH \<Longrightarrow>
\<Gamma>c \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>"
assumes f_ref: "\<And>x' x \<sigma>' \<sigma>. hn_refine
(\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Rk x' x * hn_ctxt R\<sigma> \<sigma>' \<sigma>)
(f x \<sigma>)
(\<Gamma>f x' x \<sigma>' \<sigma>) R\<sigma>
(f' x' \<sigma>')"
assumes F_FR: "\<And>x' x \<sigma>' \<sigma>. TERM monadic_FOREACH \<Longrightarrow> \<Gamma>f x' x \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t
\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Pfx x' x * hn_ctxt Pf\<sigma> \<sigma>' \<sigma>"
shows "hn_refine
P
(imp_foreach tsl s c f \<sigma>)
(\<Gamma> * hn_ctxt Rs s' s * hn_invalid R\<sigma> \<sigma>' \<sigma>)
R\<sigma>
((PR_CONST (monadic_FOREACH ordR I))
$s'$(\<lambda>\<^sub>2\<sigma>'. c' \<sigma>')$(\<lambda>\<^sub>2x' \<sigma>'. f' x' \<sigma>')$\<sigma>'
)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hn_refine P (imp_foreach tsl s c f \<sigma>) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid R\<sigma> \<sigma>' \<sigma>) R\<sigma> (PR_CONST (monadic_FOREACH ordR I) $ s' $ (\<lambda>x. (#c' x#)) $ (\<lambda>x. (#\<lambda>xa. (#f' x xa#)#)) $ \<sigma>')
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. hn_refine P (imp_foreach tsl s c f \<sigma>) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid R\<sigma> \<sigma>' \<sigma>) R\<sigma> (PR_CONST (monadic_FOREACH ordR I) $ s' $ (\<lambda>x. (#c' x#)) $ (\<lambda>x. (#\<lambda>xa. (#f' x xa#)#)) $ \<sigma>')
[PROOF STEP]
from STL
[PROOF STATE]
proof (chain)
picking this:
GEN_ALGO tsl (IS_TO_SORTED_LIST ordR Rs Rk)
[PROOF STEP]
have STL: "(tsl,it_to_sorted_list ordR) \<in> (Rs)\<^sup>k \<rightarrow>\<^sub>a list_assn Rk"
[PROOF STATE]
proof (prove)
using this:
GEN_ALGO tsl (IS_TO_SORTED_LIST ordR Rs Rk)
goal (1 subgoal):
1. (tsl, it_to_sorted_list ordR) \<in> Rs\<^sup>k \<rightarrow>\<^sub>a list_assn Rk
[PROOF STEP]
unfolding GEN_ALGO_def IS_TO_SORTED_LIST_def
[PROOF STATE]
proof (prove)
using this:
(tsl, it_to_sorted_list ordR) \<in> Rs\<^sup>k \<rightarrow>\<^sub>a list_assn Rk
goal (1 subgoal):
1. (tsl, it_to_sorted_list ordR) \<in> Rs\<^sup>k \<rightarrow>\<^sub>a list_assn Rk
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(tsl, it_to_sorted_list ordR) \<in> Rs\<^sup>k \<rightarrow>\<^sub>a list_assn Rk
goal (1 subgoal):
1. hn_refine P (imp_foreach tsl s c f \<sigma>) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid R\<sigma> \<sigma>' \<sigma>) R\<sigma> (PR_CONST (monadic_FOREACH ordR I) $ s' $ (\<lambda>x. (#c' x#)) $ (\<lambda>x. (#\<lambda>xa. (#f' x xa#)#)) $ \<sigma>')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hn_refine P (imp_foreach tsl s c f \<sigma>) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid R\<sigma> \<sigma>' \<sigma>) R\<sigma> (PR_CONST (monadic_FOREACH ordR I) $ s' $ (\<lambda>x. (#c' x#)) $ (\<lambda>x. (#\<lambda>xa. (#f' x xa#)#)) $ \<sigma>')
[PROOF STEP]
apply (rule hn_refine_cons_pre[OF FR])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>) (imp_foreach tsl s c f \<sigma>) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid R\<sigma> \<sigma>' \<sigma>) R\<sigma> (PR_CONST (monadic_FOREACH ordR I) $ s' $ (\<lambda>x. (#c' x#)) $ (\<lambda>x. (#\<lambda>xa. (#f' x xa#)#)) $ \<sigma>')
[PROOF STEP]
apply weaken_hnr_post
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>) (imp_foreach tsl s c f \<sigma>) (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma>) R\<sigma> (PR_CONST (monadic_FOREACH ordR I) $ s' $ (\<lambda>x. (#c' x#)) $ (\<lambda>x. (#\<lambda>xa. (#f' x xa#)#)) $ \<sigma>')
[PROOF STEP]
unfolding APP_def PROTECT2_def PR_CONST_def imp_foreach_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>) (tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)) (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma>) R\<sigma> (monadic_FOREACH ordR I s' c' f' \<sigma>')
[PROOF STEP]
apply (rule hn_refine_ref[OF monadic_FOREACH_itsl])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>) (tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)) (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma>) R\<sigma> (it_to_sorted_list ordR s' \<bind> (\<lambda>l. monadic_nfoldli l c' f' \<sigma>'))
[PROOF STEP]
apply (rule hn_refine_guessI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>) ?f11 (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma>) R\<sigma> (it_to_sorted_list ordR s' \<bind> (\<lambda>l. monadic_nfoldli l c' f' \<sigma>'))
2. ?f11 = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule hnr_bind)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma>) ?m'14 ?\<Gamma>1.14 ?Rh14 (it_to_sorted_list ordR s')
2. \<And>x x'. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (?\<Gamma>1.14 * hn_ctxt ?Rh14 x x') (?f'14 x') (?\<Gamma>2.14 x x') R\<sigma> (monadic_nfoldli x c' f' \<sigma>')
3. \<And>x x'. ?\<Gamma>2.14 x x' \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
4. ?m'14 \<bind> ?f'14 = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule hn_refine_frame)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. hn_refine ?P'18 ?m'14 ?Q'18 ?Rh14 (it_to_sorted_list ordR s')
2. \<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t ?F18 * ?P'18
3. \<And>x x'. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (?F18 * ?Q'18 * hn_ctxt ?Rh14 x x') (?f'14 x') (?\<Gamma>2.14 x x') R\<sigma> (monadic_nfoldli x c' f' \<sigma>')
4. \<And>x x'. ?\<Gamma>2.14 x x' \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
5. ?m'14 \<bind> ?f'14 = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule STL[to_hnr, unfolded APP_def])
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t ?F18 * hn_ctxt Rs s' ?xi21
2. \<And>x x'. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (?F18 * hn_ctxt Rs s' ?xi21 * hn_ctxt (list_assn Rk) x x') (?f'14 x') (?\<Gamma>2.14 x x') R\<sigma> (monadic_nfoldli x c' f' \<sigma>')
3. \<And>x x'. ?\<Gamma>2.14 x x' \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
4. tsl ?xi21 \<bind> ?f'14 = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (tactic \<open>Sepref_Frame.frame_tac (K (K no_tac)) @{context} 1\<close>)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x x'. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt R\<sigma> \<sigma>' \<sigma> * hn_ctxt Rs s' s * hn_ctxt (list_assn Rk) x x') (?f'14 x') (?\<Gamma>2.14 x x') R\<sigma> (monadic_nfoldli x c' f' \<sigma>')
2. \<And>x x'. ?\<Gamma>2.14 x x' \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
3. tsl s \<bind> ?f'14 = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule hn_monadic_nfoldli[unfolded APP_def])
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>x x'. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> \<Gamma> * hn_ctxt R\<sigma> \<sigma>' \<sigma> * hn_ctxt Rs s' s * hn_ctxt (list_assn Rk) x x' \<Longrightarrow>\<^sub>t ?\<Gamma>29 x x' * hn_ctxt (list_assn (?Rl29 x x')) x (?l30 x') * hn_ctxt R\<sigma> \<sigma>' (?s33 x')
2. \<And>x x' s s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (?\<Gamma>29 x x' * hn_ctxt R\<sigma> s'a s) (?c31 x' s) (?\<Gamma>29 x x' * hn_ctxt R\<sigma> s'a s) bool_assn (c' s'a)
3. \<And>x x' xa x'a s s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (?\<Gamma>29 x x' * hn_ctxt (?Rl29 x x') x'a xa * hn_ctxt R\<sigma> s'a s) (?f32 x' xa s) (?\<Gamma>29 x x' * hn_invalid (?Rl29 x x') x'a xa * hn_invalid R\<sigma> s'a s) R\<sigma> (f' x'a s'a)
4. \<And>x x'. ?\<Gamma>29 x x' * hn_invalid (list_assn (?Rl29 x x')) x (?l30 x') * hn_invalid R\<sigma> \<sigma>' (?s33 x') \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
5. tsl s \<bind> (\<lambda>x'. imp_nfoldli (?l30 x') (?c31 x') (?f32 x') (?s33 x')) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (tactic \<open>Sepref_Frame.frame_tac (K (K no_tac)) @{context} 1\<close>)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x x' sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> s'a sa) (?c31 x' sa) (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> s'a sa) bool_assn (c' s'a)
2. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Rk x'a xa * hn_ctxt R\<sigma> s'a sa) (?f32 x' xa sa) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid Rk x'a xa * hn_invalid R\<sigma> s'a sa) R\<sigma> (f' x'a s'a)
3. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
4. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' (?c31 x') (?f32 x') \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule hn_refine_cons_post)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>x x' sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> s'a sa) (?c31 x' sa) (?Q41 x x' sa s'a) bool_assn (c' s'a)
2. \<And>x x' sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> ?Q41 x x' sa s'a \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> s'a sa
3. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Rk x'a xa * hn_ctxt R\<sigma> s'a sa) (?f32 x' xa sa) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid Rk x'a xa * hn_invalid R\<sigma> s'a sa) R\<sigma> (f' x'a s'a)
4. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
5. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' (?c31 x') (?f32 x') \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule c_ref[unfolded APP_def])
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x x' sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> \<Gamma>c s'a sa \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt R\<sigma> s'a sa
2. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Rk x'a xa * hn_ctxt R\<sigma> s'a sa) (?f32 x' xa sa) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid Rk x'a xa * hn_invalid R\<sigma> s'a sa) R\<sigma> (f' x'a s'a)
3. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
4. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c (?f32 x') \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule C_FR)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x x' s s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> TERM monadic_FOREACH
2. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Rk x'a xa * hn_ctxt R\<sigma> s'a sa) (?f32 x' xa sa) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid Rk x'a xa * hn_invalid R\<sigma> s'a sa) R\<sigma> (f' x'a s'a)
3. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
4. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c (?f32 x') \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule TERMI)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Rk x'a xa * hn_ctxt R\<sigma> s'a sa) (?f32 x' xa sa) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid Rk x'a xa * hn_invalid R\<sigma> s'a sa) R\<sigma> (f' x'a s'a)
2. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx14 x x'
3. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c (?f32 x') \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply weaken_hnr_post
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Rk x'a xa * hn_ctxt R\<sigma> s'a sa) (?fa10 x' xa sa) (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) x'a xa * hn_ctxt (\<lambda>_ _. true) s'a sa) R\<sigma> (f' x'a s'a)
2. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx10 x x'
3. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c (?fa10 x') \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule hn_refine_cons_post)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> hn_refine (\<Gamma> * hn_ctxt Rs s' s * hn_ctxt Rk x'a xa * hn_ctxt R\<sigma> s'a sa) (?fa10 x' xa sa) (?Q11 x x' xa x'a sa s'a) R\<sigma> (f' x'a s'a)
2. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> ?Q11 x x' xa x'a sa s'a \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) x'a xa * hn_ctxt (\<lambda>_ _. true) s'a sa
3. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx10 x x'
4. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c (?fa10 x') \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule f_ref[unfolded APP_def])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> \<Gamma>f x'a xa s'a sa \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) x'a xa * hn_ctxt (\<lambda>_ _. true) s'a sa
2. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx10 x x'
3. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c f \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule entt_trans[OF F_FR])
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x x' xa x'a s s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> TERM monadic_FOREACH
2. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> \<Gamma> * hn_ctxt Rs s' s * hn_ctxt Pfx x'a xa * hn_ctxt Pf\<sigma> s'a sa \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) x'a xa * hn_ctxt (\<lambda>_ _. true) s'a sa
3. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx10 x x'
4. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c f \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply (rule TERMI)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x x' xa x'a sa s'a. RETURN x \<le> it_to_sorted_list ordR s' \<Longrightarrow> \<Gamma> * hn_ctxt Rs s' s * hn_ctxt Pfx x'a xa * hn_ctxt Pf\<sigma> s'a sa \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) x'a xa * hn_ctxt (\<lambda>_ _. true) s'a sa
2. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx10 x x'
3. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c f \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
applyS (tactic \<open>Sepref_Frame.frame_tac (K (K no_tac)) @{context} 1\<close>)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x x'. \<Gamma> * hn_ctxt Rs s' s * hn_invalid (list_assn Rk) x x' * hn_invalid R\<sigma> \<sigma>' \<sigma> \<Longrightarrow>\<^sub>t \<Gamma> * hn_ctxt Rs s' s * hn_ctxt (\<lambda>_ _. true) \<sigma>' \<sigma> * hn_ctxt ?Rx10 x x'
2. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c f \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
applyS (tactic \<open>Sepref_Frame.frame_tac (K (K no_tac)) @{context} 1\<close>)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tsl s \<bind> (\<lambda>x'. imp_nfoldli x' c f \<sigma>) = tsl s \<bind> (\<lambda>l. imp_nfoldli l c f \<sigma>)
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
hn_refine P (imp_foreach tsl s c f \<sigma>) (\<Gamma> * hn_ctxt Rs s' s * hn_invalid R\<sigma> \<sigma>' \<sigma>) R\<sigma> (PR_CONST (monadic_FOREACH ordR I) $ s' $ (\<lambda>x. (#c' x#)) $ (\<lambda>x. (#\<lambda>xa. (#f' x xa#)#)) $ \<sigma>')
goal:
No subgoals!
[PROOF STEP]
qed
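(* Informally: given an implementation tsl of it_to_sorted_list (assumption STL)
   and refinements of the loop condition (c_ref) and body (f_ref), the
   imperative combinator imp_foreach -- tsl followed by imp_nfoldli -- refines
   monadic_FOREACH. The proof unfolds both sides to a bind of the sorted-list
   producer with the corresponding fold and discharges the remaining frame
   obligations with Sepref_Frame.frame_tac. *)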
|
{"llama_tokens": 9034, "file": "Refine_Imperative_HOL_Sepref_Foreach", "length": 31}
|
// Generated Files
${PROJ_DIR}/axi4-st/axi_st_d64/axi_st_d64_master_top.sv
${PROJ_DIR}/axi4-st/axi_st_d64/axi_st_d64_master_concat.sv
${PROJ_DIR}/axi4-st/axi_st_d64/axi_st_d64_master_name.sv
// Logic Link files
-f ${PROJ_DIR}/llink/rtl/llink.f
// Common Files
-f ${PROJ_DIR}/common/rtl/common.f
|
{"hexsha": "a4904a699b15491129b38c5937beec22ce548596", "size": 302, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "axi4-st/axi_st_d64/axi_st_d64_master.f", "max_stars_repo_name": "chipsalliance/aib-protocols", "max_stars_repo_head_hexsha": "98858e6707f30ed6ea714598e3e324d754d82be0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-09-01T19:48:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T16:13:59.000Z", "max_issues_repo_path": "axi4-st/axi_st_d64/axi_st_d64_master.f", "max_issues_repo_name": "chipsalliance/aib-protocols", "max_issues_repo_head_hexsha": "98858e6707f30ed6ea714598e3e324d754d82be0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 86, "max_issues_repo_issues_event_min_datetime": "2021-07-16T17:55:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T20:18:23.000Z", "max_forks_repo_path": "axi4-st/axi_st_d64/axi_st_d64_master.f", "max_forks_repo_name": "chipsalliance/aib-protocols", "max_forks_repo_head_hexsha": "98858e6707f30ed6ea714598e3e324d754d82be0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-09-18T03:59:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-30T09:14:37.000Z", "avg_line_length": 27.4545454545, "max_line_length": 58, "alphanum_fraction": 0.7682119205, "num_tokens": 118}
|
program pgm
integer :: a(10)
a(10) = 3
print *, a(10)
end
|
{"hexsha": "7fde59bfeb18922475b832dc5d8dbfc474a6a68f", "size": 58, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/mlir_out_tests/array1.f90", "max_stars_repo_name": "clementval/fc", "max_stars_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/mlir_out_tests/array1.f90", "max_issues_repo_name": "clementval/fc", "max_issues_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/mlir_out_tests/array1.f90", "max_forks_repo_name": "clementval/fc", "max_forks_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 9.6666666667, "max_line_length": 16, "alphanum_fraction": 0.6034482759, "num_tokens": 26}
|
[STATEMENT]
lemma mix_pmf_comp_with_dif_equiv:
assumes "\<alpha> \<in> {0..(1::real)}"
and "\<beta> \<in> {0..(1::real)}"
assumes "\<alpha> > \<beta>"
shows "mix_pmf (\<beta>/\<alpha>) (mix_pmf \<alpha> p q) q = mix_pmf \<beta> p q" (is "?l = ?r")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q = mix_pmf \<beta> p q
[PROOF STEP]
proof (rule pmf_equiv_intro1[symmetric])
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>e. e \<in> set_pmf (mix_pmf \<beta> p q) \<Longrightarrow> pmf (mix_pmf \<beta> p q) e = pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e
[PROOF STEP]
fix e
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>e. e \<in> set_pmf (mix_pmf \<beta> p q) \<Longrightarrow> pmf (mix_pmf \<beta> p q) e = pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e
[PROOF STEP]
assume a: "e \<in> set_pmf ?r"
[PROOF STATE]
proof (state)
this:
e \<in> set_pmf (mix_pmf \<beta> p q)
goal (1 subgoal):
1. \<And>e. e \<in> set_pmf (mix_pmf \<beta> p q) \<Longrightarrow> pmf (mix_pmf \<beta> p q) e = pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e
[PROOF STEP]
have "e \<in> set_pmf ?l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e \<in> set_pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q)
[PROOF STEP]
using a pmf_mix_deeper
[PROOF STATE]
proof (prove)
using this:
e \<in> set_pmf (mix_pmf \<beta> p q)
?a \<in> {0..1} \<Longrightarrow> pmf (mix_pmf ?a ?p ?q) ?x = ?a * pmf ?p ?x + pmf ?q ?x - ?a * pmf ?q ?x
goal (1 subgoal):
1. e \<in> set_pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q)
[PROOF STEP]
by (metis assms(1) assms(2) assms(3) mix_pmf_comp_left_div pmf_eq_0_set_pmf)
[PROOF STATE]
proof (state)
this:
e \<in> set_pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q)
goal (1 subgoal):
1. \<And>e. e \<in> set_pmf (mix_pmf \<beta> p q) \<Longrightarrow> pmf (mix_pmf \<beta> p q) e = pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
e \<in> set_pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q)
[PROOF STEP]
have "pmf ?l e = \<beta> * pmf p e - \<beta> * pmf q e + pmf q e"
[PROOF STATE]
proof (prove)
using this:
e \<in> set_pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q)
goal (1 subgoal):
1. pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e = \<beta> * pmf p e - \<beta> * pmf q e + pmf q e
[PROOF STEP]
using pmf_mix_deeper[of "\<beta>/\<alpha>" p q e] mix_pmf_comp_left_div[of \<alpha> \<beta> p q e] assms
[PROOF STATE]
proof (prove)
using this:
e \<in> set_pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q)
\<beta> / \<alpha> \<in> {0..1} \<Longrightarrow> pmf (mix_pmf (\<beta> / \<alpha>) p q) e = \<beta> / \<alpha> * pmf p e + pmf q e - \<beta> / \<alpha> * pmf q e
\<lbrakk>\<alpha> \<in> {0..1}; \<beta> \<in> {0..1}; \<beta> < \<alpha>\<rbrakk> \<Longrightarrow> pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e = \<beta> * pmf p e + pmf q e - \<beta> * pmf q e
\<alpha> \<in> {0..1}
\<beta> \<in> {0..1}
\<beta> < \<alpha>
goal (1 subgoal):
1. pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e = \<beta> * pmf p e - \<beta> * pmf q e + pmf q e
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e = \<beta> * pmf p e - \<beta> * pmf q e + pmf q e
goal (1 subgoal):
1. \<And>e. e \<in> set_pmf (mix_pmf \<beta> p q) \<Longrightarrow> pmf (mix_pmf \<beta> p q) e = pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e = \<beta> * pmf p e - \<beta> * pmf q e + pmf q e
[PROOF STEP]
show "pmf (mix_pmf \<beta> p q) e = pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e"
[PROOF STATE]
proof (prove)
using this:
pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e = \<beta> * pmf p e - \<beta> * pmf q e + pmf q e
goal (1 subgoal):
1. pmf (mix_pmf \<beta> p q) e = pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e
[PROOF STEP]
by (metis (full_types) assms(1) assms(2) assms(3) mix_pmf_comp_left_div pmf_mix_deeper)
[PROOF STATE]
proof (state)
this:
pmf (mix_pmf \<beta> p q) e = pmf (mix_pmf (\<beta> / \<alpha>) (mix_pmf \<alpha> p q) q) e
goal:
No subgoals!
[PROOF STEP]
qed
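(* Algebra behind the key step, using pmf_mix_deeper
   (pmf (mix_pmf a p q) x = a * pmf p x + pmf q x - a * pmf q x):
     pmf (mix_pmf (\<beta>/\<alpha>) (mix_pmf \<alpha> p q) q) e
       = (\<beta>/\<alpha>) * (\<alpha> * pmf p e + pmf q e - \<alpha> * pmf q e) + pmf q e - (\<beta>/\<alpha>) * pmf q e
       = \<beta> * pmf p e + pmf q e - \<beta> * pmf q e
       = pmf (mix_pmf \<beta> p q) e,
   which is exactly the equation established for each e above. *)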
|
{"llama_tokens": 2071, "file": "Neumann_Morgenstern_Utility_PMF_Composition", "length": 14}
|
import pandas as pd
import numpy as np
from utils import *
from metric import *
from multiprocessing import Pool
import cv2
from tqdm import tqdm
from functools import partial
from generator import *
from model import *
from keras.models import load_model
from keras import backend as K  # K.clear_session() is called in post_process_main below
import tensorflow as tf
import ast
from sklearn.model_selection import ParameterGrid
import gc
import os
import argparse  # used by the __main__ entry point below
# from predict import *
param_grid = {'proba' : np.round(np.array([0.25,0.5, 0.75],dtype =np.float64),2),
'pad' : np.array([0,5,10]),
'reduce_size' : np.array([0,5000,10000,20000,40000],dtype=np.int64),
'convex': [True,False]
}
# param_grid = {'proba' : np.round(np.array([0,0.25,0.50,0.75, 1.0],dtype =np.float64),2),
# 'pad' : np.array([0,3,5,10]),
# 'reduce_size' : np.arange(5000,20000,5000,dtype=np.int64),
# # 'convex': [True, False]
# }
cores = os.cpu_count()
parameter = list(ParameterGrid(param_grid))
print(len(parameter))  # number of post-processing configurations in the grid
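# For reference: ParameterGrid expands param_grid into the cartesian product of
# its value lists, so the grid above yields 3 (proba) * 3 (pad) * 5 (reduce_size)
# * 2 (convex) = 90 configurations, each a plain dict such as
# {'proba': 0.25, 'pad': 0, 'reduce_size': 5000, 'convex': True}.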
def predict_post_grid(msks, proba = 0.9, pad = False, pad_size = 10, reduce = False, reduce_size =10000, convex = False, resize_shape=[512,256] ):
# print(msks.shape)
resized_msks = np.zeros((msks.shape[0],resize_shape[0], resize_shape[1]))
for i in range(len(msks)):
# cls = msks[1][i]
# msk = msks[0][i,:,:] * cls
msk = msks[i,:,:].copy()
msk = np.array(msk >=proba, dtype = np.uint8)
# msk = cv2.resize(msk,(resize_shape[1],resize_shape[0]), interpolation = cv2.INTER_LINEAR)
        msk = mask2pad(msk, pad_size)
        msk = masks_reduce(msk, reduce_size)
        if convex:  # honor the grid's convex flag instead of always applying the hull
            msk = contour_convexHull(msk.astype(np.uint8))
resized_msks[i, : , :] = msk
return resized_msks
def parallelize_numpy(preds, func, proba=0.9, pad_size=10, reduce_size=10000, convex=False, resize_shape=[512, 256], cores=2):
np_split = np.array_split(preds, cores)
# np_split_cls = np.array_split(preds_cls, cores)
# value = list(zip(np_split, np_split_cls))
pool = Pool(cores)
res_np = np.concatenate(pool.map(partial(func, proba = proba,pad_size = pad_size, reduce_size = reduce_size , convex = convex, resize_shape = resize_shape), np_split ))
pool.close()
pool.join()
return res_np
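# Note: np.array_split followed by np.concatenate round-trips in order, e.g.
# >>> x = np.arange(10)
# >>> all(np.concatenate(np.array_split(x, 3)) == x)
# True
# so the pooled workers hand back masks in the original batch order.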
def post_optimi(valid_true, valid_pred, resize_shape=[512,256], labels =['fish', 'flower', 'gravel', 'sugar']):
val_dice_max = []
val_dice_max_np = []
for l_idx, label in enumerate(labels):
        val_dice_table = pd.DataFrame(columns = ['label','size', 'proba', 'reduce_size','pad', 'convex', 'dice'])
val_dice_max_np_temp = []
for p_idx,para in tqdm(enumerate(parameter)):
            valid_preds_pp = parallelize_numpy(valid_pred[:,:,:,l_idx],
predict_post_grid,
proba=para['proba'],
pad_size = para['pad'],
reduce_size = para['reduce_size'],
cores = cores,
convex=para['convex'],
resize_shape = resize_shape)
dice_score = dice_channel_label(valid_preds_pp, valid_true[:,:,:, l_idx])
            val_dice_table = val_dice_table.append({'label' : label,
'proba' : para['proba'],
'size' : str(resize_shape),
'reduce_size' : para['reduce_size'],
'pad' : para['pad'],
'convex': para['convex'],
'dice' : dice_score},
ignore_index=True)
val_dice_max_np_temp.append(valid_preds_pp)
gc.collect()
        label_max_table = val_dice_table[val_dice_table['label']==label]
print(label_max_table)
print(label_max_table[label_max_table['dice'] == label_max_table['dice'].max()])
label_max_table = label_max_table[label_max_table['dice'] == label_max_table['dice'].max()].iloc[0].to_frame().T
print(label_max_table)
label_max_idx = label_max_table.index[-1]
print(label_max_idx)
# sys.exit()
val_dice_max.append(label_max_table)
val_dice_max_np.append(np.expand_dims(val_dice_max_np_temp[label_max_idx],-1))
gc.collect()
val_dice_max = pd.concat(val_dice_max, axis=0)
val_dice_max_np = np.concatenate(val_dice_max_np, axis =-1)
print(val_dice_max.shape, val_dice_max_np.shape)
return val_dice_max, val_dice_max_np
def post_batch(valid_df,post_arr, label_names ):
batch_res_df = []
# for i in tqdm(range(0, test_df.shape[0], pred_batch_size)):
# batch_idx = list(range(i, min(test_df.shape[0], i + pred_batch_size)))
# batch_generator = SegmentDataGenerator( test_df.iloc[batch_idx], subset='test', batch_size = 1,
# shuffle=False, preprocess = img_preprocess,
# augmentation = None, resize_shape = FLAGS.resize_shape,
# test_path = FLAGS.img_path)
# batch_preds = model.predict_generator(batch_generator, verbose = 1)
# batch_preds_re = predict_resize(valid_arr.copy(),
# proba=proba,
# pad_size=pad_size,
# reduce_size = reduce_size,
# convex= convex,
# origin_img_size = (FLAGS.origin_shape[0],FLAGS.origin_shape[1])
# )
# # # np.save('../test.npy',batch_preds_re)
# # # sys.exit()
for j in tqdm(range(valid_df.shape[0])):
filename = valid_df['ImageId'].iloc[j]
image_df = valid_df[valid_df['ImageId'] == filename].copy()
# print(image_df)
preds_mask = post_arr[j, : ,:, :]
# size [2100,1400]
# print(image_df['size'])
# print(image_df)
# try:
# origin_shape = ast.literal_eval(image_df['size'].values)
# except ValueError:
# origin shape : [2100,1400]
origin_shape = ast.literal_eval(image_df['size'].values[0])
# print(origin_size)
# image_df['size'] = [[origin_size[0], origin_size[1]]]
# image_df['colors'] = [colors]
# print(image_df)
# sys.exit()
# print(origin_shape)
for l_idx, label in enumerate(label_names):
# (512, 256)
l_mask = preds_mask[:,:,l_idx].astype(np.uint8)
# (2100,1400)
l_mask = cv2.resize(l_mask, (origin_shape[1], origin_shape[0]))
# In : (2100,1400)
# Out : rle (2100, 1400), [2100,1400]
label_rle = mask2rle(l_mask)
# print(label_rle)
image_df[label] = label_rle['counts']
batch_res_df.append(image_df)
gc.collect()
# gc.collect()
batch_res_df = pd.concat(batch_res_df)
print(batch_res_df)
# sys.exit()
batch_res_df.replace(to_replace=[None], value = np.nan, inplace = True)
print(batch_res_df)
print(batch_res_df.shape)
print('Batch predict end, Images : {}, labels : {}'.format(batch_res_df.shape[0], batch_res_df.shape[1]-3))
return batch_res_df
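# The project's mask2rle lives in utils; for intuition only, a minimal
# run-length encoder over a binary mask could look like the sketch below.
# This is a hypothetical helper -- the real encoder's output format (the
# 'counts' dict used above) may differ.
def _rle_sketch(mask):
    """Return 1-based (start, length) pairs of 1-runs in the column-major
    flattening of a 2D {0,1} mask (the usual Kaggle RLE convention)."""
    pixels = mask.flatten(order='F')
    runs = []
    run_start = None
    for i, v in enumerate(pixels):
        if v and run_start is None:
            run_start = i                                # a run of ones begins
        elif not v and run_start is not None:
            runs.append((run_start + 1, i - run_start))  # close the run
            run_start = None
    if run_start is not None:                            # mask ends inside a run
        runs.append((run_start + 1, len(pixels) - run_start))
    return runs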
def post_process_main():
# print('/'.join(FLAGS.valid_true_batch_path.split('/')[:-1]))
# sys.exit()
K.clear_session()
# In : (512,256,3,4)
# Out : (512,256,3,4)
model_name = [ k for k in os.listdir(FLAGS.model_path) if k.split('.')[-1]=='h5'][0]
model_path = os.path.join(FLAGS.model_path,model_name)
loaded_model = load_model(model_path, custom_objects = {
'bce_dice_loss' : bce_dice_loss,
'dice_coef' : dice_coef})
valid_df = pd.read_csv(FLAGS.valid_true_batch_path)
labels = valid_df.columns.tolist()[3:]
print(valid_df)
print(labels)
# print(FLAGS.resize_shape[0])
# print(FLAGS.resize_shape[1])
colors = np.array(ast.literal_eval( valid_df.loc[0,'colors']))
# In : size(2100,1400) rle(2100,1400)
# out :
# Img - resize_size(512,256 ) - > resize(256,512) -> (512,256)
# Mask - resize_size(512,256 ) - > resize(256,512) -> (512,256)
valid_generator_true = SegmentDataGenerator(valid_df, batch_size = 1,
subset='train', shuffle=False,
preprocess = img_preprocess, augmentation = None,
resize_shape = (FLAGS.resize_shape[0],FLAGS.resize_shape[1]),train_path = FLAGS.valid_img_path)
# In : size(2100,1400) rle(2100,1400)
# out :
# Img - resize_size(512,256 ) - > resize(256,512) -> (512,256)
# Mask - resize_size(512,256 ) - > resize(256,512) -> (512,256)
valid_generator_pred = SegmentDataGenerator(valid_df, batch_size = 1,
subset='train', shuffle=False,
preprocess = img_preprocess, augmentation = None,
resize_shape = (FLAGS.resize_shape[0],FLAGS.resize_shape[1]),train_path = FLAGS.valid_img_path)
# print( valid_generator_true[0][1])
print(valid_generator_pred[0][0].shape)
print(valid_generator_pred[0][1].shape)
# print(valid_generator_pred[0][0][0,:,:,0])
# print('@@@@@')
# print(np.where(valid_generator_pred[0][1][0,:,:,0]==1))
# print(np.where(valid_generator_pred[0][1][0,:,:,1]==1))
# print(np.where(valid_generator_pred[0][1][0,:,:,2]==1))
# print(np.where(valid_generator_pred[0][1][0,:,:,3]==1))
# sys.exit()
# label shape = (40, 512,256,4)
valid_true_img_resize = np.zeros((valid_df.shape[0], FLAGS.resize_shape[0],FLAGS.resize_shape[1],len(labels)))
# print(valid_true_img_resize.shape)
# print(valid_generator_true[0][1].shape)
# print('!!!!')
for idx in tqdm(range(valid_df.shape[0])):
# print( valid_generator_true[idx][1])
valid_true_img_resize[idx, :, :, :] = valid_generator_true[idx][1]
print(valid_true_img_resize.shape)
print(valid_true_img_resize[0].shape)
print(valid_true_img_resize[0][:,:,0].shape)
print(valid_true_img_resize)
print(np.where(valid_true_img_resize==1))
# sys.exit()
# print('$$$$$')
# np.save('../valid_true.npy',valid_true_img_resize)
# print
# In : (512,256,3)
# Out : (512,256,4)
valid_pred_img = loaded_model.predict_generator(valid_generator_true, verbose=1)
# np.save('../valid_pred.npy',valid_pred_img)
### Need Check
valid_pred_img_proba = np.array(valid_pred_img >0.5,dtype = np.uint8)
print("valid_true : " + str(valid_true_img_resize.shape),
"valid_pred : " + str(valid_pred_img.shape))
print(" validation dice coefficient scores ")
valid_dice_scores = dice_channel_torch(valid_pred_img_proba, valid_true_img_resize)
# sys.exit
### resized mask & resized optimization parameter
# In : (40,512,256,4)
# Out : (40,512,256,4)
# Post Processing
post_matrix, post_nps = post_optimi(valid_true_img_resize,valid_pred_img, labels = labels, resize_shape = (FLAGS.resize_shape[0],FLAGS.resize_shape[1]) )
# np.save('../valid_pred_post.npy',post_nps)
print(" Post_processed validation dice coefficient scores ")
processed_valid_dice_scores = dice_channel_torch(post_nps, valid_true_img_resize)
# print("Improve {}, {} % ".format((processed_valid_dice_scores - valid_dice_scores) ,
# ( (processed_valid_dice_scores - valid_dice_scores)/valid_dice_scores*100) ))
# sys.exit()
print(" Score data frame save")
valid_dice_scores = np.append(False , valid_dice_scores)
processed_valid_dice_scores = np.append(True , processed_valid_dice_scores)
score_nps = np.vstack((valid_dice_scores, processed_valid_dice_scores))
score_label = ['post_processed'] + ['total_score']+ labels
score_df = pd.DataFrame(score_nps, columns= score_label)
score_df_path = os.path.join('/'.join(FLAGS.valid_true_batch_path.split('/')[:-1]), 'valid_score.csv')
score_df.to_csv(score_df_path, index= False)
print(" Score data frame save done !! ")
post_save_path = os.path.join('/'.join(FLAGS.valid_true_batch_path.split('/')[:-1]), 'valid_post_grid.csv')
post_matrix['colors'] = colors.reshape(-1,1)
post_matrix.to_csv(post_save_path, index=False)
print("Post processing matrix saved to {}".format(post_save_path))
# proba_max = post_matrix['proba'].tolist()
# reduce_size_max = post_matrix['reduce_size'].tolist()
# pad_max = post_matrix['pad'].tolist()
# convex_max = post_matrix['convex'].tolist()
print(valid_df)
print(post_nps)
print(post_nps.shape)
# In : (40, 512, 256, 4)
# Out : rle : (40,2100,1400,4 ) is same (40, original width, original height, 4)
post_rle_df =post_batch(valid_df= valid_df,post_arr = post_nps,label_names = labels )
rle_df_path = os.path.join('/'.join(FLAGS.valid_true_batch_path.split('/')[:-1]), 'valid_batch_pred.csv')
post_rle_df.to_csv(rle_df_path, index=False)
valid_path = '/'.join(model_path.split('/')[:-2]) + '/valid/'
print(valid_true_img_resize.shape)
# sys.exit()
for r_idx in tqdm(range(post_rle_df.shape[0])):
img = rle_mask2img(post_rle_df.iloc[r_idx], FLAGS.valid_img_path, origin_mask_array = valid_true_img_resize[r_idx])
# print(img.shape)
# sys.exit()
# print(valid_path)
# print( pred_test.iloc[r_idx]['ImageId'].split('.')[0])
# print(pred_test.iloc[r_idx]['ImageId'].split('.')[1])
# print('!!!!')
print(post_rle_df.iloc[r_idx]['ImageId'].split('.')[0] + '_segment.' + post_rle_df.iloc[r_idx]['ImageId'].split('.')[1])
save_path = os.path.join(valid_path, post_rle_df.iloc[r_idx]['ImageId'].split('.')[0] + '_segment.' + post_rle_df.iloc[r_idx]['ImageId'].split('.')[1])
img.save(save_path)
print(img.size)
print ('Segment {} done !!! '.format(post_rle_df.iloc[r_idx]['ImageId']))
return post_matrix
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'testing')
# parser.add_argument('--target', required=True, help = 'train or predict')
# parser.add_argument('--train_path')
# parser.add_argument('--test_path')
parser.add_argument('--valid_true_batch_path', required=True)
parser.add_argument('--model_path',required = True)
parser.add_argument('--valid_img_path', required = True)
# parser.add_argument('--origin_shape', nargs='+', type=int, default = [1400,2100])
parser.add_argument('--resize_shape', nargs='+', type=int, default = [256,256])
FLAGS, unparsed = parser.parse_known_args()
post_process_main()
# val_dice_table_ensemble.to_csv('val_dice_table_ensemble_2.csv')
|
{"hexsha": "a7b6c26c8516a1635104bf33fe47b7f99b65347f", "size": 15742, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/post_processing.py", "max_stars_repo_name": "JeongChanwoo/Deep-POC-2019", "max_stars_repo_head_hexsha": "cee8ede5994da4c7302b2cca31fca83480249f76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/post_processing.py", "max_issues_repo_name": "JeongChanwoo/Deep-POC-2019", "max_issues_repo_head_hexsha": "cee8ede5994da4c7302b2cca31fca83480249f76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/post_processing.py", "max_forks_repo_name": "JeongChanwoo/Deep-POC-2019", "max_forks_repo_head_hexsha": "cee8ede5994da4c7302b2cca31fca83480249f76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-10T02:34:14.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-10T02:34:14.000Z", "avg_line_length": 39.9543147208, "max_line_length": 172, "alphanum_fraction": 0.5795959853, "include": true, "reason": "import numpy", "num_tokens": 3794}
|
# coding=utf-8
"""
.. moduleauthor: Torbjörn Klatt <t.klatt@fz-juelich.de>
"""
import unittest
import numpy
from nose.tools import *
from pypint.integrators.integrator_base import IntegratorBase
from pypint.integrators import INTEGRATOR_PRESETS
def init_with_presets(preset):
integrator = IntegratorBase()
integrator.init(**preset)
assert_is_instance(integrator.nodes, numpy.ndarray)
assert_equal(integrator.nodes.size, preset["num_nodes"])
assert_is_instance(integrator.weights, numpy.ndarray)
assert_equal(integrator.weights.size, preset["num_nodes"])
def test_init_with_presets():
for preset in INTEGRATOR_PRESETS:
yield init_with_presets, INTEGRATOR_PRESETS[preset]
class IntegratorBaseTest(unittest.TestCase):
    def test_initialization(self):
        integrator = IntegratorBase()
        # bare construction should succeed and produce an IntegratorBase instance
        self.assertIsInstance(integrator, IntegratorBase)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "b4761487997bd4258d805798ec35dae5cc38ef6e", "size": 881, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/pypint/integrators_tests/integrator_base_test.py", "max_stars_repo_name": "DiMoser/PyPinT", "max_stars_repo_head_hexsha": "3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/pypint/integrators_tests/integrator_base_test.py", "max_issues_repo_name": "DiMoser/PyPinT", "max_issues_repo_head_hexsha": "3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/pypint/integrators_tests/integrator_base_test.py", "max_forks_repo_name": "DiMoser/PyPinT", "max_forks_repo_head_hexsha": "3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1714285714, "max_line_length": 62, "alphanum_fraction": 0.76276958, "include": true, "reason": "import numpy", "num_tokens": 210}
|
[STATEMENT]
lemma merge_coeffs_alt_def:
\<open>(RETURN o merge_coeffs) p =
REC\<^sub>T(\<lambda>f p.
(case p of
[] \<Rightarrow> RETURN []
| [_] => RETURN p
| ((xs, n) # (ys, m) # p) \<Rightarrow>
(if xs = ys
then if n + m \<noteq> 0 then f ((xs, n + m) # p) else f p
else do {p \<leftarrow> f ((ys, m) # p); RETURN ((xs, n) # p)})))
p\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (RETURN \<circ> merge_coeffs) p = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) p
[PROOF STEP]
apply (induction p rule: merge_coeffs.induct)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. (RETURN \<circ> merge_coeffs) [] = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) []
2. \<And>xs n. (RETURN \<circ> merge_coeffs) [(xs, n)] = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) [(xs, n)]
3. \<And>xs n ys m p. \<lbrakk>\<lbrakk>xs = ys; n + m \<noteq> 0\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((xs, n + m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((xs, n + m) # p); \<lbrakk>xs = ys; \<not> n + m \<noteq> 0\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) p = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) p; xs \<noteq> ys \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((ys, m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((ys, m) # p)\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((xs, n) # (ys, m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((xs, n) # (ys, m) # p)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (RETURN \<circ> merge_coeffs) [] = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) []
[PROOF STEP]
by (subst RECT_unfold, refine_mono) auto
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>xs n. (RETURN \<circ> merge_coeffs) [(xs, n)] = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) [(xs, n)]
2. \<And>xs n ys m p. \<lbrakk>\<lbrakk>xs = ys; n + m \<noteq> 0\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((xs, n + m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((xs, n + m) # p); \<lbrakk>xs = ys; \<not> n + m \<noteq> 0\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) p = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) p; xs \<noteq> ys \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((ys, m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((ys, m) # p)\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((xs, n) # (ys, m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((xs, n) # (ys, m) # p)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (RETURN \<circ> merge_coeffs) [(xs_, n_)] = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) [(xs_, n_)]
[PROOF STEP]
by (subst RECT_unfold, refine_mono) auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>xs n ys m p. \<lbrakk>\<lbrakk>xs = ys; n + m \<noteq> 0\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((xs, n + m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((xs, n + m) # p); \<lbrakk>xs = ys; \<not> n + m \<noteq> 0\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) p = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) p; xs \<noteq> ys \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((ys, m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((ys, m) # p)\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((xs, n) # (ys, m) # p) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((xs, n) # (ys, m) # p)
[PROOF STEP]
subgoal for x p y q
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>x = y; p + q \<noteq> 0\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((x, p + q) # p_) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((x, p + q) # p_); \<lbrakk>x = y; \<not> p + q \<noteq> 0\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) p_ = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) p_; x \<noteq> y \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((y, q) # p_) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((y, q) # p_)\<rbrakk> \<Longrightarrow> (RETURN \<circ> merge_coeffs) ((x, p) # (y, q) # p_) = REC\<^sub>T (\<lambda>f p. case p of [] \<Rightarrow> RETURN [] | [(aa, b)] \<Rightarrow> RETURN p | (aa, b) # (ys, m) # p \<Rightarrow> if aa = ys then if b + m \<noteq> 0 then f ((aa, b + m) # p) else f p else f ((ys, m) # p) \<bind> (\<lambda>p. RETURN ((aa, b) # p))) ((x, p) # (y, q) # p_)
[PROOF STEP]
by (subst RECT_unfold, refine_mono)
(smt case_prod_conv list.simps(5) merge_coeffs.simps(3) nres_monad1
push_in_let_conv(2))
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
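(* For intuition, the functional recursion mirrored by the REC\<^sub>T body,
   rendered as a hypothetical Python sketch (not part of the sources):

   def merge_coeffs(p):
       # p: list of (monomial, coefficient) pairs
       if len(p) <= 1:
           return list(p)
       (xs, n), (ys, m), *rest = p
       if xs == ys:
           if n + m != 0:
               return merge_coeffs([(xs, n + m)] + rest)
           return merge_coeffs(rest)
       return [(xs, n)] + merge_coeffs([(ys, m)] + rest)

   Adjacent pairs with equal keys are merged and zero sums dropped; the
   three cases of the induction above are exactly these three branches. *)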
|
{"llama_tokens": 3953, "file": "PAC_Checker_PAC_Checker_Init", "length": 8}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def mental_tests(path):
"""Six Mental Tests
These data are from the SAS manual and consist of six mental tests for
32 students, with some missing data. The three `x` variables are
intended to load on a verbal factor, and the three `y` variables on a
math factor. The data can be used to illustrate the estimation of a
confirmatory factor analysis model by multinormal full-information
maximum-likelihood in the presence of missing data.
A data frame with 32 observations on the following 6 variables.
`x1`
score on verbal test 1.
`x2`
score on verbal test 2.
`x3`
score on verbal test 3.
`y1`
score on math test 1.
`y2`
score on math test 2.
`y3`
score on math test 3.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `mental_tests.csv`.
Returns:
Tuple of np.ndarray `x_train` with 32 rows and 6 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'mental_tests.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/sem/Tests.csv'
maybe_download_and_extract(path, url,
save_file_name='mental_tests.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
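
# Illustrative usage (the path is a placeholder; the CSV is downloaded on
# first call):
# >>> x_train, metadata = mental_tests('~/data')
# >>> x_train.shape
# (32, 6)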
|
{"hexsha": "80bf8c51b03caa3c9a21fa22a1866c0825e48e1a", "size": 1853, "ext": "py", "lang": "Python", "max_stars_repo_path": "observations/r/mental_tests.py", "max_stars_repo_name": "hajime9652/observations", "max_stars_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 199, "max_stars_repo_stars_event_min_datetime": "2017-07-24T01:34:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T00:50:55.000Z", "max_issues_repo_path": "observations/r/mental_tests.py", "max_issues_repo_name": "hajime9652/observations", "max_issues_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2017-09-05T19:27:20.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-07T09:47:26.000Z", "max_forks_repo_path": "observations/r/mental_tests.py", "max_forks_repo_name": "hajime9652/observations", "max_forks_repo_head_hexsha": "2c8b1ac31025938cb17762e540f2f592e302d5de", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2017-07-26T00:10:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T20:44:59.000Z", "avg_line_length": 26.0985915493, "max_line_length": 72, "alphanum_fraction": 0.6853750675, "include": true, "reason": "import numpy", "num_tokens": 444}
|
from algorithms.network_alignment_model import NetworkAlignmentModel
from evaluation.metrics import get_statistics
from algorithms.NAME.embedding_model import NAME_MODEL, StableFactor, CombineModel, Combine2Model
from input.dataset import Dataset
from utils.graph_utils import load_gt
import torch.nn.functional as F
import torch.nn as nn
from algorithms.NAME.utils import *
from algorithms.NAME.losses import *
import torch
import numpy as np
import networkx as nx
import random
from time import time
import argparse
import os
import sys
from simple_classify.gumbel import gumbel_softmax
from simple_classify.models import Model
# IMPORT PALE
from algorithms.PALE.embedding_model import PaleEmbedding
from algorithms.PALE.mapping_model import PaleMappingLinear
from torch.autograd import Variable
from tqdm import tqdm
import copy
class NAME(NetworkAlignmentModel):
"""
NAME model for the network alignment task
"""
def __init__(self, source_dataset, target_dataset, args):
super(NAME, self).__init__(source_dataset, target_dataset)
self.source_dataset = source_dataset
self.target_dataset = target_dataset
self.alphas = [args.alpha0, args.alpha1, args.alpha2]
self.args = args
# full_dict is only used while investigating results and should be deleted later
self.full_dict = load_gt(args.groundtruth, source_dataset.id2idx, target_dataset.id2idx, 'dict')
self.gt_train = load_gt(args.train_dict, source_dataset.id2idx, target_dataset.id2idx, 'dict')
def align(self):
source_A_hat, target_A_hat, source_feats, target_feats = self.get_elements()
source_adj = self.source_dataset.get_adjacency_matrix()
target_adj = self.target_dataset.get_adjacency_matrix()
new_source_A_hat, new_source_feats, new_deg, new_edges, new_adj, id2idx_augment = self.graph_augmentation(self.source_dataset,target_adj)
new_adj = Variable(torch.FloatTensor(new_adj), requires_grad = False)
source_adj = Variable(torch.FloatTensor(source_adj), requires_grad = False)
target_adj = Variable(torch.FloatTensor(target_adj), requires_grad = False)
if self.args.cuda:
new_adj = new_adj.cuda()
source_adj = source_adj.cuda()
target_adj = target_adj.cuda()
new_source_info = {'num_nodes': len(new_source_feats), 'deg': new_deg, 'edges': new_edges, 'adj': new_adj}
source_info = {'num_nodes': len(source_feats), 'deg': self.source_dataset.get_nodes_degrees(), 'edges': self.source_dataset.get_edges(), 'adj': source_adj}
target_info = {'num_nodes': len(target_feats), 'deg': self.target_dataset.get_nodes_degrees(), 'edges': self.target_dataset.get_edges(), 'adj': target_adj}
test_dict = copy.deepcopy(self.full_dict)
train_dict = copy.deepcopy(self.gt_train)
self.full_dict = {u:v for (u,v) in id2idx_augment.items() if random.uniform(0,1) > 0.1}
self.gt_train = {u:v for (u,v) in id2idx_augment.items() if u not in list(self.full_dict.keys())}
S_NAME, S_pale, S_mincut = self.get_multi_align(source_A_hat, new_source_A_hat, source_feats, new_source_feats, source_info, new_source_info)
combine_model = CombineModel()
combine_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, combine_model.parameters()), lr=self.args.lr)
for epochs in tqdm(range(200)):
combine_optimizer.zero_grad()
loss = combine_model.loss(S_NAME, S_pale, S_mincut, id2idx_augment)
loss.backward()
combine_optimizer.step()
print("Loss: {:.4f}".format(loss.item()))
print(combine_model.thetas)
S_pale = S_pale.detach().cpu().numpy()
S_NAME = S_NAME.detach().cpu().numpy()
S_mincut = S_mincut.detach().cpu().numpy()
self.full_dict = test_dict
self.gt_train = train_dict
S_NAME, S_pale, S_mincut = self.get_multi_align(source_A_hat, target_A_hat, source_feats, target_feats, source_info, target_info)
S = combine_model(S_NAME, S_pale, S_mincut)
print('-'*100)
S = S.detach().cpu().numpy()
return S
def get_multi_align(self, source_A_hat, target_A_hat, source_feats, target_feats, source_info, target_info):
"""
step1: align by GCN
step2: find stables nodes
step3: run pale with stabels nodes
step4: run mincut with stabels nodes
"""
NAME_start = time()  # time was imported as the function itself
NAME = NAME_MODEL(
activate_function = self.args.act,
num_GCN_blocks = self.args.num_GCN_blocks,
input_dim = self.args.input_dim,
output_dim = self.args.embedding_dim,
num_source_nodes = len(source_A_hat),
num_target_nodes = len(target_A_hat),
source_feats = source_feats,
target_feats = target_feats
)
if self.args.cuda:
NAME = NAME.cuda()
NAME.train()
structural_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, NAME.parameters()), lr=self.args.lr)
NAME_S = self.train_NAME(NAME, source_A_hat, target_A_hat, structural_optimizer, self.args.threshold)
gt_train = self.gt_train
# print('NAME running time: {:.4f}'.format(time() - NAME_start))
# Step2: PALE
start = time()
# self.args.cuda = True
# source_info['adj'] = source_info['adj'].cuda()
# target_info['adj'] = target_info['adj'].cuda()
source_pale, target_pale = self.learn_pale_embs(source_info, target_info)
pale_map = PaleMappingLinear(
embedding_dim=self.args.embedding_dim,
source_embedding=source_pale,
target_embedding=target_pale,
)
pale_S = self.map_source_target_pale(pale_map, list(gt_train.keys()), gt_train, source_pale, target_pale)
print('pale running time: {:.4f}'.format(time() - start))
# Step3: Mincut
mincut_start = time()
source_adj = source_info['adj']
target_adj = target_info['adj']
source_mincut, target_mincut = self.learn_mincut_embs(source_adj, target_adj)
pale_map2 = PaleMappingLinear(
embedding_dim=self.args.num_parts,
source_embedding=source_mincut,
target_embedding=target_mincut,
)
mincut_S = self.map_source_target_pale(pale_map2, list(gt_train.keys()), gt_train, source_mincut, target_mincut)
print('mincut running time: {:.4f}'.format(time() - mincut_start))
NAME_S = torch.FloatTensor(NAME_S)
pale_S = torch.FloatTensor(pale_S)
mincut_S = torch.FloatTensor(mincut_S)
# self.args.cuda = False
return NAME_S, pale_S, mincut_S
def learn_mincut_embs(self, adj_source, adj_target):
source_mincut = self.learn_mincut(len(adj_source), adj_source)
target_mincut = self.learn_mincut(len(adj_target), adj_target)
return source_mincut, target_mincut
def learn_mincut(self, num_nodes, adj):
model = Model(num_nodes, self.args.num_parts)
if self.args.cuda:
model = model.cuda()
optimizer = torch.optim.Adam(filter(lambda p : p.requires_grad, model.parameters()), lr=self.args.mincut_lr)
temp=self.args.temp
for epoch in tqdm(range(self.args.mincut_epochs)):
model.zero_grad()
nodes = None
super_adj = model(adj, nodes, temp=temp, hard=self.args.hard, beta=self.args.beta)
loss, ncut_loss, balance_loss = model.loss(super_adj, nodes, balance_node=self.args.balance_node, lam=self.args.lam, w2v_lam = self.args.w2v_lam, new=self.args.new)
# print(loss, ncut_loss, balance_loss)
if loss != loss: import pdb; pdb.set_trace()  # NaN check (NaN != NaN)
total_loss = loss
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), self.args.clip)
optimizer.step()
if model.params.max() != model.params.max(): import pdb; pdb.set_trace()  # NaN check
if epoch % 500==0:
if self.args.anneal:
temp = max(self.args.min_temp, self.args.temp * np.exp(-0.00003*epoch))  # decay the temperature but keep it above min_temp
try:
print("loss:", loss.item())
print("ncut_loss: ", ncut_loss.item())
print("balance_loss: ", balance_loss.item())
except:
import pdb; pdb.set_trace()
params = model.params.max(dim=1)[1].cpu().detach().numpy()
embedding = gumbel_softmax(model.params, temp=10, hard=False, beta=1)
embedding_onehot = np.zeros(embedding.shape)
for i in range(len(embedding_onehot)):
embedding_onehot[i][params[i]] = 1
# embedding_onehot[:, params] = 1
embedding_onehot = torch.FloatTensor(embedding_onehot)
if self.args.cuda:
embedding_onehot = embedding_onehot.cuda()
return embedding_onehot
def map_source_target_pale(self, pale_map, source_train_nodes, gt_train, source_pale, target_pale):
"""
source_train_nodes: Numpy array
gt_train: dictionary
"""
if self.args.cuda:
pale_map = pale_map.cuda()
optimizer = torch.optim.Adam(filter(lambda p : p.requires_grad, pale_map.parameters()), lr=self.args.pale_map_lr)
pale_map_batchsize = len(source_train_nodes) // 4
n_iters = len(source_train_nodes) // pale_map_batchsize
assert n_iters > 0, "batch_size is too large"
if(len(source_train_nodes) % pale_map_batchsize > 0):
n_iters += 1
total_steps = 0
n_epochs = self.args.pale_map_epochs
for epoch in range(1, n_epochs + 1):
# for time evaluate
start = time()
print('Epoch: ', epoch)
np.random.shuffle(source_train_nodes)
for iter in range(n_iters):
source_batch = source_train_nodes[iter*pale_map_batchsize:(iter+1)*pale_map_batchsize]
target_batch = [gt_train[x] for x in source_batch]
source_batch = torch.LongTensor(source_batch)
target_batch = torch.LongTensor(target_batch)
if self.args.cuda:
source_batch = source_batch.cuda()
target_batch = target_batch.cuda()
optimizer.zero_grad()
loss = pale_map.loss(source_batch, target_batch)
loss.backward()
optimizer.step()
total_steps += 1
print("mapping_loss: {:.4f}".format(loss.item()))
self.mapping_epoch_time = time() - start
source_pale_map = pale_map(source_pale)
self.S_pale = torch.matmul(source_pale_map, target_pale.t())
self.S_pale = self.S_pale.detach().cpu().numpy()
return self.S_pale
def learn_pale_embs(self, source_info, target_info):
num_source_nodes = source_info['num_nodes']
source_deg = source_info['deg']
source_edges = source_info['edges']
num_target_nodes = target_info['num_nodes']
target_deg = target_info['deg']
target_edges = target_info['edges']
#source_edges, target_edges = self.extend_edge(source_edges, target_edges)
#print("Done extend edges")  # extend_edge above is disabled
source_pale = self.learn_pale(num_source_nodes, source_deg, source_edges) #, 's')
target_pale = self.learn_pale(num_target_nodes, target_deg, target_edges) #, 't')
return source_pale, target_pale
def learn_pale(self, num_nodes, deg, edges):
pale_model = PaleEmbedding(
n_nodes = num_nodes,
embedding_dim = self.args.embedding_dim,
deg= deg,
neg_sample_size = 10,
cuda = self.args.cuda,
)
if self.args.cuda:
pale_model = pale_model.cuda()
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, pale_model.parameters()), lr=self.args.pale_emb_lr)
embedding = self.train_pale_emb(pale_model, edges, optimizer)
return embedding
def train_pale_emb(self, embedding_model, edges, optimizer):
num_edges = len(edges)
n_iters = num_edges // self.args.pale_emb_batchsize
assert n_iters > 0, "batch_size is too large!"
if(num_edges % self.args.pale_emb_batchsize > 0):
n_iters += 1
print_every = int(n_iters/4) + 1
total_steps = 0
n_epochs = self.args.pale_emb_epochs
for epoch in range(1, n_epochs + 1):
# for time evaluate
# start = time()
if self.args.log:
print("Epoch {0}".format(epoch))
np.random.shuffle(edges)
loss, loss0, loss1 = 0, 0, 0
for iter in range(n_iters):
batch_edges = torch.LongTensor(edges[iter*self.args.pale_emb_batchsize:(iter+1)*self.args.pale_emb_batchsize])
if self.args.cuda:
batch_edges = batch_edges.cuda()
optimizer.zero_grad()
loss, loss0, loss1 = embedding_model.loss(batch_edges[:, 0], batch_edges[:,1])
loss.backward()
optimizer.step()
total_steps += 1
if self.args.log:
print(
"train_loss=", "{:.5f}".format(loss.item()),
"true_loss=", "{:.5f}".format(loss0.item()),
"neg_loss=", "{:.5f}".format(loss1.item())
# "time", "{:.5f}".format(time()-start)
)
# for time evaluate
# self.embedding_epoch_time = time() - start
embedding = embedding_model.get_embedding()
embedding = embedding.cpu().detach().numpy()
embedding = torch.FloatTensor(embedding)
if self.args.cuda:
embedding = embedding.cuda()
return embedding
def get_elements(self):
"""
Compute Normalized Laplacian matrix
Preprocessing nodes attribute
"""
source_A_hat, _ = Laplacian_graph(self.source_dataset.get_adjacency_matrix())
target_A_hat, _ = Laplacian_graph(self.target_dataset.get_adjacency_matrix())
if self.args.cuda:
source_A_hat = source_A_hat.cuda()
target_A_hat = target_A_hat.cuda()
source_feats = self.source_dataset.features
target_feats = self.target_dataset.features
if source_feats is None:
source_feats = np.zeros((len(self.source_dataset.G.nodes()), 1))
target_feats = np.zeros((len(self.target_dataset.G.nodes()), 1))
for i in range(len(source_feats)):
if source_feats[i].sum() == 0:
source_feats[i, -1] = 1
for i in range(len(target_feats)):
if target_feats[i].sum() == 0:
target_feats[i, -1] = 1
if source_feats is not None:
source_feats = torch.FloatTensor(source_feats)
target_feats = torch.FloatTensor(target_feats)
if self.args.cuda:
source_feats = source_feats.cuda()
target_feats = target_feats.cuda()
# Norm2 normalization
source_feats = F.normalize(source_feats)
target_feats = F.normalize(target_feats)
# features are okay; A_hat is okay too
return source_A_hat, target_A_hat, source_feats, target_feats
def graph_augmentation(self, dataset, target_adj, type_aug='remove_edges'):
"""
Generate small noisy graph from original graph
:params dataset: original graph
:params type_aug: type of noise added for generating new graph
"""
t_nodes = target_adj.shape[0]
t_edges = int(target_adj.sum()) // 2  # each undirected edge is counted twice in the adjacency sum
edges = dataset.get_edges()
adj = dataset.get_adjacency_matrix()
padder = t_nodes - adj.shape[0]
if padder > 0:
adj = nx.to_numpy_matrix(dataset.G)
edges = list(dataset.G.edges())
del_edges = random.sample(list(dataset.G.edges()), len(list(dataset.G.edges())) // 5)  # drop ~20% of the edges (random.sample needs an int count)
for edge in del_edges:
adj[dataset.id2idx[edge[0]], dataset.id2idx[edge[1]]] = 0
adj[dataset.id2idx[edge[1]], dataset.id2idx[edge[0]]] = 0
edges = [(dataset.id2idx[i[0]], dataset.id2idx[i[1]]) for i in edges if i not in del_edges]
nodes = list(dataset.G.nodes())
feats = np.copy(dataset.features)
feats = torch.FloatTensor(feats)
sidx2idx = {i:i for i in range(len(nodes))}
deg = adj.sum(axis=1).flatten()
new_adj_H, _ = Laplacian_graph(adj)
if self.args.cuda:
feats = feats.cuda()
new_adj_H = new_adj_H.cuda()
deg = np.asarray(deg)[0]
return new_adj_H, feats, deg, edges, adj, sidx2idx
else:
nodes = [list(dataset.G.nodes())[0]]
index = 0
count = 1
while count < t_nodes:
try:
nodes.extend([i for i in dataset.G.neighbors(nodes[index]) if i not in nodes])
count = len(nodes)
index += 1
except IndexError:  # the BFS frontier is exhausted before reaching t_nodes
break
if nx.number_connected_components(dataset.G) > 1:
nodes = random.sample(list(dataset.G.nodes()), t_nodes)
new_G = dataset.G.subgraph(nodes)
edges = list(new_G.edges())
source_idx = dataset.id2idx
sidx2idx = {source_idx[node]:i for i,node in enumerate(nodes)} # dict {index of source graph: index of augment graph}
id2idx = {node:i for i,node in enumerate(nodes)}
new_G = nx.Graph()
new_G.add_nodes_from(nodes)
new_G.add_edges_from(edges)
adj = nx.to_numpy_matrix(new_G)
feats = np.copy(dataset.features)
new_feats = np.zeros((len(nodes), feats.shape[1]))
s_idx2id = {u:v for v,u in source_idx.items()}
for i,feat in enumerate(feats):
if s_idx2id[i] in nodes:
new_feats[sidx2idx[i]] = feat
print(new_feats)
num_eds = adj.sum()/2 - t_edges
if num_eds > 0:
while num_eds > 1:
edge = random.choice(edges)
# if adj.sum(axis = 0)[0,id2idx[edge[0]]] > 1 and adj.sum(axis = 0)[0,id2idx[edge[1]]] > 1:
# new_adj = adj + 0
adj[id2idx[edge[0]], id2idx[edge[1]]] = 0
adj[id2idx[edge[1]], id2idx[edge[0]]] = 0
# adj = new_adj + 0
edges.remove(edge)
num_eds -= 1
new_feats = torch.FloatTensor(new_feats)
deg = adj.sum(axis=1).flatten()
new_adj_H, _ = Laplacian_graph(adj)
if self.args.cuda:
new_feats = new_feats.cuda()
new_adj_H = new_adj_H.cuda()
deg = np.asarray(deg)[0]
edges = [(id2idx[e[0]],id2idx[e[1]]) for e in edges]
print('augment nodes: {}, augment edges: {}'.format(adj.shape[0], adj.sum()/2))
return new_adj_H, new_feats, deg, edges, adj, sidx2idx
else:
stat = -2 * num_eds / (adj.shape[0]**2 - adj.sum())  # num_eds <= 0 in this branch, so negate it to get the probability of adding each absent edge
edges = [(id2idx[e[0]],id2idx[e[1]]) for e in edges]
for i in range(adj.shape[0]):
for j in range(i, adj.shape[0]):
if adj[i,j] == 0 and random.uniform(0,1) < stat:
adj[i,j] = 1
adj[j,i] = 1
edges.append((i,j))
new_feats = torch.FloatTensor(new_feats)
deg = adj.sum(axis=1).flatten()
new_adj_H, _ = Laplacian_graph(adj)
if self.args.cuda:
new_feats = new_feats.cuda()
new_adj_H = new_adj_H.cuda()
deg = np.asarray(deg)[0]
print('augment nodes: {}, augment edges: {}'.format(adj.shape[0], adj.sum()/2))
return new_adj_H, new_feats, deg, edges, adj, sidx2idx
def linkpred_loss(self, embedding, A):
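# Link-prediction loss: squared error between the row-normalized predicted
# adjacency (cosine similarities of the embeddings, clipped at 1) and A.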
pred_adj = torch.matmul(F.normalize(embedding), F.normalize(embedding).t())
if self.args.cuda:
pred_adj = F.normalize((torch.min(pred_adj, torch.Tensor([1]).cuda())), dim = 1)
else:
pred_adj = F.normalize((torch.min(pred_adj, torch.Tensor([1]))), dim = 1)
#linkpred_losss = (pred_adj - A[index]) ** 2
linkpred_losss = (pred_adj - A) ** 2
linkpred_losss = linkpred_losss.sum() / A.shape[1]
return linkpred_losss
def gcn_semisup_training(self, NAME, source_A_hat, target_A_hat, optimizer, gamma, k):
train = np.asarray([[x,y] for (x,y) in self.gt_train.items()])
for epoch in range(self.args.NAME_epochs):
optimizer.zero_grad()
if self.args.log:
print("GAlign learning epoch: {}".format(epoch))
source_outputs = NAME(source_A_hat, 's')
target_outputs = NAME(target_A_hat, 't')
# output = torch.cat((source_output, target_output), dim = 0)
left = train[:,0]
right = train[:,1]
left_x1 = source_outputs[-1][left]
right_x1 = target_outputs[-1][right]
sup_loss1 = (left_x1 - right_x1) ** 2
sup_loss1 = sup_loss1.mean()
left_x2 = source_outputs[-2][left]
right_x2 = target_outputs[-2][right]
sup_loss2 = (left_x2 - right_x2) ** 2
sup_loss2 = sup_loss2.mean()
sup_loss = sup_loss1 + sup_loss2
unsup_loss_source = self.linkpred_loss(source_outputs[-1], source_A_hat) + self.linkpred_loss(source_outputs[-2], source_A_hat)
unsup_loss_target = self.linkpred_loss(target_outputs[-1], target_A_hat) + self.linkpred_loss(target_outputs[-2], target_A_hat)
unsup_loss = unsup_loss_source + unsup_loss_target
loss = sup_loss + unsup_loss
loss.backward()
#loss = unsup_loss
print('recent loss: {:.4f}, sup: {:.4f}, unsup: {:.4f}'.format(loss.item(), sup_loss.item(), unsup_loss.item()))
optimizer.step()
NAME.eval()
return NAME
def refine(self, NAME, source_A_hat, target_A_hat, threshold):
refinement_model = StableFactor(len(source_A_hat), len(target_A_hat), self.args.cuda)
if self.args.cuda:
refinement_model = refinement_model.cuda()
S_max = None
source_outputs = NAME(refinement_model(source_A_hat, 's'), 's')
target_outputs = NAME(refinement_model(target_A_hat, 't'), 't')
acc, S = get_acc(source_outputs, target_outputs, self.full_dict, self.alphas)
self.NAME_S = S
source_edges = self.source_dataset.get_edges()
target_edges = self.target_dataset.get_edges()
edgess = [source_edges.tolist(), target_edges.tolist()]
# score = self.compute_score(edgess, S)
score = np.mean(S.max(axis = 1))
acc_max = 0
alpha_source_max = None
alpha_target_max = None
# if 1:
if score > refinement_model.score_max:
refinement_model.score_max = score
alpha_source_max = refinement_model.alpha_source
alpha_target_max = refinement_model.alpha_target
acc_max = acc
S_max = S
print("Acc: {}, score: {:.4f}".format(acc, score))
source_candidates, target_candidates = [], []
alpha_source_max = refinement_model.alpha_source + 0
alpha_target_max = refinement_model.alpha_target + 0
for epoch in range(self.args.refinement_epochs):
if self.args.log:
print("Refinement epoch: {}".format(epoch))
source_candidates, target_candidates, len_source_candidates, count_true_candidates = self.get_candidate(source_outputs, target_outputs, threshold)
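# Boost the influence of stable (confidently matched) nodes by 10% per epoch.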
refinement_model.alpha_source[source_candidates] *= 1.1
refinement_model.alpha_target[target_candidates] *= 1.1
source_outputs = NAME(refinement_model(source_A_hat, 's'), 's')
target_outputs = NAME(refinement_model(target_A_hat, 't'), 't')
acc, S = get_acc(source_outputs, target_outputs, self.full_dict, self.alphas)
# score = self.compute_score(edgess, S)
score = np.mean(S.max(axis = 1))
if score > refinement_model.score_max:
refinement_model.score_max = score
alpha_source_max = refinement_model.alpha_source + 0
alpha_target_max = refinement_model.alpha_target + 0
acc_max = acc
S_max = S
if self.args.log:
print("Acc: {}, score: {:.4f}, score_max {:.4f}".format(acc, score, refinement_model.score_max))
if epoch == self.args.refinement_epochs - 1:
print("Numcandidate: {}, num_true_candidate: {}".format(len_source_candidates, count_true_candidates))
print("Done refinement!")
print("Acc with max score: {:.4f} is : {}".format(refinement_model.score_max, acc_max))
refinement_model.alpha_source = alpha_source_max
refinement_model.alpha_target = alpha_target_max
self.NAME_S = S_max
self.log_and_evaluate(NAME, refinement_model, source_A_hat, target_A_hat)
source_candidates = source_candidates.detach().cpu().numpy()
target_candidates = target_candidates.detach().cpu().numpy()
# return source_candidates, target_candidates, self.NAME_S
return self.NAME_S
def train_NAME(self, NAME, source_A_hat, target_A_hat, structural_optimizer, threshold):
NAME = self.gcn_semisup_training(NAME, source_A_hat, target_A_hat, structural_optimizer, gamma = 3, k = 5)
print("Done structural training")
source_A_hat = source_A_hat.to_dense()
target_A_hat = target_A_hat.to_dense()
NAME_S = self.refine(NAME, source_A_hat, target_A_hat, threshold)
return NAME_S / 3
def get_similarity_matrices(self, source_outputs, target_outputs):
"""
Construct Similarity matrix in each layer
:params source_outputs: List of embedding at each layer of source graph
:params target_outputs: List of embedding at each layer of target graph
"""
list_S = []
for i in range(len(source_outputs)):
source_output_i = source_outputs[i]
target_output_i = target_outputs[i]
S = torch.mm(F.normalize(source_output_i), F.normalize(target_output_i).t())
list_S.append(S)
return list_S
def log_and_evaluate(self, embedding_model, refinement_model, source_A_hat, target_A_hat):
embedding_model.eval()
source_outputs = embedding_model(refinement_model(source_A_hat, 's'), 's')
target_outputs = embedding_model(refinement_model(target_A_hat, 't'), 't')
# print("-"* 100)
# source_care = torch.LongTensor(list(self.full_dict.keys()))
# target_care = torch.LongTensor(list(self.full_dict.values()))
# for i in range(len(source_outputs)):
# source_i = source_outputs[i][source_care].detach().cpu().numpy()
# target_i = target_outputs[i][target_care].detach().cpu().numpy()
# np.save("numpy_emb/source_layer{}".format(i), source_i)
# np.save("numpy_emb/target_layer{}".format(i), target_i)
return source_outputs, target_outputs
def get_candidate(self, source_outputs, target_outputs, threshold):
List_S = self.get_similarity_matrices(source_outputs, target_outputs)[1:]
source_candidates = []
target_candidates = []
count_true_candidates = 0
if len(List_S) < 2:
print("Refinement requires at least 2 GCN layers")
return torch.LongTensor(source_candidates), torch.LongTensor(target_candidates), 0, 0
num_source_nodes = len(self.source_dataset.G.nodes())
num_target_nodes = len(self.target_dataset.G.nodes())
for i in range(min(num_source_nodes, num_target_nodes)):
node_i_is_stable = True
for j in range(len(List_S)):
if List_S[j][i].argmax() != List_S[j-1][i].argmax():
node_i_is_stable = False
break
if node_i_is_stable:
tg_candi = List_S[-1][i].argmax()
source_candidates.append(i)
target_candidates.append(tg_candi)
try:
if self.full_dict[i] == tg_candi:
count_true_candidates += 1
except KeyError:  # node i has no ground-truth match
continue
return torch.LongTensor(source_candidates), torch.LongTensor(target_candidates), len(source_candidates), count_true_candidates
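# Hedged illustration (not part of the original file): judging by its learned
# `thetas`, CombineModel amounts to a weighted mix of the three similarity
# matrices. A minimal stand-in with made-up weights, assuming equal shapes:
def _combine_similarities_sketch(S_NAME, S_pale, S_mincut, thetas=(0.4, 0.3, 0.3)):
    """Toy substitute for CombineModel's forward pass; thetas are illustrative."""
    return thetas[0] * S_NAME + thetas[1] * S_pale + thetas[2] * S_mincut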
|
{"hexsha": "8cdd0ecb4628872e263d0dde13aac8357b477dc5", "size": 29710, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithms/NAME/NAME.py", "max_stars_repo_name": "thanhtrunghuynh93/holisticEmbeddingsNA", "max_stars_repo_head_hexsha": "d1bb58e879a9fb868729ea13c198e46c9c5f45c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "algorithms/NAME/NAME.py", "max_issues_repo_name": "thanhtrunghuynh93/holisticEmbeddingsNA", "max_issues_repo_head_hexsha": "d1bb58e879a9fb868729ea13c198e46c9c5f45c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithms/NAME/NAME.py", "max_forks_repo_name": "thanhtrunghuynh93/holisticEmbeddingsNA", "max_forks_repo_head_hexsha": "d1bb58e879a9fb868729ea13c198e46c9c5f45c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3432835821, "max_line_length": 176, "alphanum_fraction": 0.5991585325, "include": true, "reason": "import numpy,import networkx", "num_tokens": 6584}
|
import socket
import errno
import base64
import cv2
import numpy as np
from collections import OrderedDict
import atexit
from .server import get_server
def jpeg_encode(img):
# NOTE: despite the name, this encodes PNG (lossless); callers only need the bytes.
return cv2.imencode('.png', img)[1]
def get_free_port(rng, low=2000, high=10000):
in_use = True
while in_use:
port = rng.randint(high - low) + low
in_use = False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("0.0.0.0", port))
except socket.error as e:
if e.errno == errno.EADDRINUSE:  # port already in use (98 on Linux)
in_use = True
s.close()
return port
class Manager:
def __init__(self, img_encode_method=jpeg_encode, rng=None):
self._queue = OrderedDict()
self._server = None
self.img_encode_method = img_encode_method
if rng is None:
rng = np.random.RandomState(self.get_default_seed())
self.rng = rng
def get_default_seed(self):
return 0
def imshow(self, title, img):
data = self.img_encode_method(img)
data = base64.b64encode(data)
data = data.decode('utf8')
self._queue[title] = {
"dtype": "image", "caption": title, "content": data}
def clean(self):
if self._server is not None:
self._server.kill()
self._conn.close()  # clean() is only registered once _conn exists
@property
def conn(self):
if self._server is None:
self.port = get_free_port(self.rng)
self._server, self._conn = get_server(port=self.port)
atexit.register(self.clean)
return self._conn
def table_show(self, title, table):
self._queue[title] = {
"dtype": "table", "caption": title, "content": table}
def head_show(self, title, head):
self._queue[title] = {"dtype": "header", "content": head}
def send(self, delay=0):
self.conn.send([delay, list(self._queue.values())])
self._queue = OrderedDict()
return True
def waitKey(self, delay=0):
self.send(delay)  # flush the queued items, exactly as send() does
code = self.conn.recv()
print("socket code", code)
return code
global_manager = Manager()
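# Minimal usage sketch (not part of the original module; assumes the viewer
# process served by `.server.get_server` is reachable from a browser):
if __name__ == "__main__":
    demo = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    global_manager.imshow("demo", demo)
    global_manager.waitKey(0)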
|
{"hexsha": "c4f45f42a1764f4ec457049037f52c2f03ace4e9", "size": 2263, "ext": "py", "lang": "Python", "max_stars_repo_path": "webcv/manager.py", "max_stars_repo_name": "wanzysky/webcv", "max_stars_repo_head_hexsha": "6a0012f7464862cf1a1eca9f78d7b16b35d164ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "webcv/manager.py", "max_issues_repo_name": "wanzysky/webcv", "max_issues_repo_head_hexsha": "6a0012f7464862cf1a1eca9f78d7b16b35d164ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "webcv/manager.py", "max_forks_repo_name": "wanzysky/webcv", "max_forks_repo_head_hexsha": "6a0012f7464862cf1a1eca9f78d7b16b35d164ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.265060241, "max_line_length": 69, "alphanum_fraction": 0.5916924437, "include": true, "reason": "import numpy", "num_tokens": 549}
|
# Autogenerated wrapper script for OpenCVQt_jll for armv7l-linux-gnueabihf-cxx03
export libopencv_calib3d, libopencv_core, libopencv_dnn, libopencv_features2d, libopencv_flann, libopencv_gapi, libopencv_highgui, libopencv_imgcodecs, libopencv_imgproc, libopencv_ml, libopencv_objdetect, libopencv_photo, libopencv_stitching, libopencv_video, libopencv_videoio
using Qt_jll
using Libglvnd_jll
## Global variables
PATH = ""
LIBPATH = ""
LIBPATH_env = "LD_LIBRARY_PATH"
LIBPATH_default = ""
# Relative path to `libopencv_calib3d`
const libopencv_calib3d_splitpath = ["lib", "libopencv_calib3d.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_calib3d_path = ""
# libopencv_calib3d-specific global declaration
# This will be filled out by __init__()
libopencv_calib3d_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_calib3d = "libopencv_calib3d.so.4.5"
# Relative path to `libopencv_core`
const libopencv_core_splitpath = ["lib", "libopencv_core.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_core_path = ""
# libopencv_core-specific global declaration
# This will be filled out by __init__()
libopencv_core_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_core = "libopencv_core.so.4.5"
# Relative path to `libopencv_dnn`
const libopencv_dnn_splitpath = ["lib", "libopencv_dnn.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_dnn_path = ""
# libopencv_dnn-specific global declaration
# This will be filled out by __init__()
libopencv_dnn_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_dnn = "libopencv_dnn.so.4.5"
# Relative path to `libopencv_features2d`
const libopencv_features2d_splitpath = ["lib", "libopencv_features2d.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_features2d_path = ""
# libopencv_features2d-specific global declaration
# This will be filled out by __init__()
libopencv_features2d_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_features2d = "libopencv_features2d.so.4.5"
# Relative path to `libopencv_flann`
const libopencv_flann_splitpath = ["lib", "libopencv_flann.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_flann_path = ""
# libopencv_flann-specific global declaration
# This will be filled out by __init__()
libopencv_flann_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_flann = "libopencv_flann.so.4.5"
# Relative path to `libopencv_gapi`
const libopencv_gapi_splitpath = ["lib", "libopencv_gapi.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_gapi_path = ""
# libopencv_gapi-specific global declaration
# This will be filled out by __init__()
libopencv_gapi_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_gapi = "libopencv_gapi.so.4.5"
# Relative path to `libopencv_highgui`
const libopencv_highgui_splitpath = ["lib", "libopencv_highgui.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_highgui_path = ""
# libopencv_highgui-specific global declaration
# This will be filled out by __init__()
libopencv_highgui_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_highgui = "libopencv_highgui.so.4.5"
# Relative path to `libopencv_imgcodecs`
const libopencv_imgcodecs_splitpath = ["lib", "libopencv_imgcodecs.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_imgcodecs_path = ""
# libopencv_imgcodecs-specific global declaration
# This will be filled out by __init__()
libopencv_imgcodecs_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_imgcodecs = "libopencv_imgcodecs.so.4.5"
# Relative path to `libopencv_imgproc`
const libopencv_imgproc_splitpath = ["lib", "libopencv_imgproc.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_imgproc_path = ""
# libopencv_imgproc-specific global declaration
# This will be filled out by __init__()
libopencv_imgproc_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_imgproc = "libopencv_imgproc.so.4.5"
# Relative path to `libopencv_ml`
const libopencv_ml_splitpath = ["lib", "libopencv_ml.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_ml_path = ""
# libopencv_ml-specific global declaration
# This will be filled out by __init__()
libopencv_ml_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_ml = "libopencv_ml.so.4.5"
# Relative path to `libopencv_objdetect`
const libopencv_objdetect_splitpath = ["lib", "libopencv_objdetect.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_objdetect_path = ""
# libopencv_objdetect-specific global declaration
# This will be filled out by __init__()
libopencv_objdetect_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_objdetect = "libopencv_objdetect.so.4.5"
# Relative path to `libopencv_photo`
const libopencv_photo_splitpath = ["lib", "libopencv_photo.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_photo_path = ""
# libopencv_photo-specific global declaration
# This will be filled out by __init__()
libopencv_photo_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_photo = "libopencv_photo.so.4.5"
# Relative path to `libopencv_stitching`
const libopencv_stitching_splitpath = ["lib", "libopencv_stitching.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_stitching_path = ""
# libopencv_stitching-specific global declaration
# This will be filled out by __init__()
libopencv_stitching_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_stitching = "libopencv_stitching.so.4.5"
# Relative path to `libopencv_video`
const libopencv_video_splitpath = ["lib", "libopencv_video.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_video_path = ""
# libopencv_video-specific global declaration
# This will be filled out by __init__()
libopencv_video_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_video = "libopencv_video.so.4.5"
# Relative path to `libopencv_videoio`
const libopencv_videoio_splitpath = ["lib", "libopencv_videoio.so"]
# This will be filled out by __init__() for all products, as it must be done at runtime
libopencv_videoio_path = ""
# libopencv_videoio-specific global declaration
# This will be filled out by __init__()
libopencv_videoio_handle = C_NULL
# This must be `const` so that we can use it with `ccall()`
const libopencv_videoio = "libopencv_videoio.so.4.5"
# Inform that the wrapper is available for this platform
wrapper_available = true
"""
Open all libraries
"""
function __init__()
# This either calls `@artifact_str()`, or returns a constant string if we're overridden.
global artifact_dir = find_artifact_dir()
global PATH_list, LIBPATH_list
# Initialize PATH and LIBPATH environment variable listings
# From the list of our dependencies, generate a tuple of all the PATH and LIBPATH lists,
# then append them to our own.
foreach(p -> append!(PATH_list, p), (Qt_jll.PATH_list, Libglvnd_jll.PATH_list,))
foreach(p -> append!(LIBPATH_list, p), (Qt_jll.LIBPATH_list, Libglvnd_jll.LIBPATH_list,))
global libopencv_calib3d_path = normpath(joinpath(artifact_dir, libopencv_calib3d_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_calib3d_handle = dlopen(libopencv_calib3d_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_calib3d_path))
global libopencv_core_path = normpath(joinpath(artifact_dir, libopencv_core_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_core_handle = dlopen(libopencv_core_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_core_path))
global libopencv_dnn_path = normpath(joinpath(artifact_dir, libopencv_dnn_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_dnn_handle = dlopen(libopencv_dnn_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_dnn_path))
global libopencv_features2d_path = normpath(joinpath(artifact_dir, libopencv_features2d_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_features2d_handle = dlopen(libopencv_features2d_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_features2d_path))
global libopencv_flann_path = normpath(joinpath(artifact_dir, libopencv_flann_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_flann_handle = dlopen(libopencv_flann_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_flann_path))
global libopencv_gapi_path = normpath(joinpath(artifact_dir, libopencv_gapi_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_gapi_handle = dlopen(libopencv_gapi_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_gapi_path))
global libopencv_highgui_path = normpath(joinpath(artifact_dir, libopencv_highgui_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_highgui_handle = dlopen(libopencv_highgui_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_highgui_path))
global libopencv_imgcodecs_path = normpath(joinpath(artifact_dir, libopencv_imgcodecs_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_imgcodecs_handle = dlopen(libopencv_imgcodecs_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_imgcodecs_path))
global libopencv_imgproc_path = normpath(joinpath(artifact_dir, libopencv_imgproc_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_imgproc_handle = dlopen(libopencv_imgproc_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_imgproc_path))
global libopencv_ml_path = normpath(joinpath(artifact_dir, libopencv_ml_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_ml_handle = dlopen(libopencv_ml_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_ml_path))
global libopencv_objdetect_path = normpath(joinpath(artifact_dir, libopencv_objdetect_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_objdetect_handle = dlopen(libopencv_objdetect_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_objdetect_path))
global libopencv_photo_path = normpath(joinpath(artifact_dir, libopencv_photo_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_photo_handle = dlopen(libopencv_photo_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_photo_path))
global libopencv_stitching_path = normpath(joinpath(artifact_dir, libopencv_stitching_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_stitching_handle = dlopen(libopencv_stitching_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_stitching_path))
global libopencv_video_path = normpath(joinpath(artifact_dir, libopencv_video_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_video_handle = dlopen(libopencv_video_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_video_path))
global libopencv_videoio_path = normpath(joinpath(artifact_dir, libopencv_videoio_splitpath...))
# Manually `dlopen()` this right now so that future invocations
# of `ccall` with its `SONAME` will find this path immediately.
global libopencv_videoio_handle = dlopen(libopencv_videoio_path, RTLD_LAZY | RTLD_DEEPBIND)
push!(LIBPATH_list, dirname(libopencv_videoio_path))
# Filter out duplicate and empty entries in our PATH and LIBPATH entries
filter!(!isempty, unique!(PATH_list))
filter!(!isempty, unique!(LIBPATH_list))
global PATH = join(PATH_list, ':')
global LIBPATH = join(vcat(LIBPATH_list, [joinpath(Sys.BINDIR, Base.LIBDIR, "julia"), joinpath(Sys.BINDIR, Base.LIBDIR)]), ':')
end # __init__()
|
{"hexsha": "75b9d2519b277a31b2021fc220614d4bbd70e8c2", "size": 14195, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/armv7l-linux-gnueabihf-cxx03.jl", "max_stars_repo_name": "terasakisatoshi/OpenCVQt_jll.jl", "max_stars_repo_head_hexsha": "918d0031aa727683e0c324fde708cfaf9d200144", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-28T05:42:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-28T05:42:12.000Z", "max_issues_repo_path": "src/wrappers/armv7l-linux-gnueabihf-cxx03.jl", "max_issues_repo_name": "terasakisatoshi/OpenCVQt_jll.jl", "max_issues_repo_head_hexsha": "918d0031aa727683e0c324fde708cfaf9d200144", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/armv7l-linux-gnueabihf-cxx03.jl", "max_forks_repo_name": "terasakisatoshi/OpenCVQt_jll.jl", "max_forks_repo_head_hexsha": "918d0031aa727683e0c324fde708cfaf9d200144", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3267045455, "max_line_length": 278, "alphanum_fraction": 0.7689327228, "num_tokens": 3757}
|
import atexit
import subprocess
import time
from collections import OrderedDict
from io import StringIO
from subprocess import PIPE, Popen
from xml.etree.ElementTree import fromstring
import cpuinfo
import numpy as np
import pandas as pd
import psutil
import requests
from bs4 import BeautifulSoup
from experiment_impact_tracker.utils import *
from .exceptions import GPUAttributeAssertionError
_timer = getattr(time, "monotonic", time.time)
def is_nvidia_compatible(*args, **kwargs):
from shutil import which
if which("nvidia-smi") is None:
return False
# make sure that nvidia-smi doesn't just return no devices
p = Popen(["nvidia-smi"], stdout=PIPE)
stdout, stderror = p.communicate()
output = stdout.decode("UTF-8")
if "no devices" in output.lower():
return False
return True
def get_gpu_info(*args, **kwargs):
p = Popen(["nvidia-smi", "-q", "-x"], stdout=PIPE)
outs, errors = p.communicate()
xml = fromstring(outs)
datas = []
driver_version = xml.findall("driver_version")[0].text
cuda_version = xml.findall("cuda_version")[0].text
for gpu_id, gpu in enumerate(xml.iter("gpu")):  # Element.getiterator() was removed in Python 3.9
gpu_data = {}
name = next(gpu.iter("product_name")).text
memory_usage = gpu.findall("fb_memory_usage")[0]
total_memory = memory_usage.findall("total")[0].text
gpu_data["name"] = name
gpu_data["total_memory"] = total_memory
gpu_data["driver_version"] = driver_version
gpu_data["cuda_version"] = cuda_version
datas.append(gpu_data)
return datas
def assert_gpus_by_attributes(attributes_set):
"""Assert that you're running on GPUs with a certain set of attributes.
This helps when running jobs in a cluster setting with heterogeneous GPUs
to filter out sets of GPUs that you'd rather avoid. Current NVIDIA attributes,
include product_name (e.g., GeForce GTX TITAN X, Titan xp, Tesla k40m, etc.),
must be an exact match based on string seen in nvidia-smi -q -x.
Args:
attributes_set (dict): set of attribute key pairs
Raises:
GPUAttributeAssertionError on encountered asserted attribute mismatch
"""
gpu_info = get_gpu_info()
for gpu in gpu_info:
for attribute, value in attributes_set.items():
try:
if gpu[attribute] != value:
raise GPUAttributeAssertionError(
"Attribute {} asserted to be {}, but found {} instead.".format(
attribute, value, gpu[attribute]
)
)
except KeyError:
raise GPUAttributeAssertionError(
"Attribute {} does not exist. Available attributes: {}.".format(
attribute, ",".join(list(gpu.keys()))
)
)
def _stringify_performance_states(state_dict):
""" Stringifies performance states across multiple gpus
Args:
state_dict (dict(str)): a dictionary of gpu_id performance state values
Returns:
str: a stringified version of the dictionary with gpu_id::performance state|gpu_id2::performance_state2 format
"""
return "|".join("::".join(map(lambda x: str(x), z)) for z in state_dict.items())
def get_nvidia_gpu_power(pid_list, logger=None, **kwargs):
# Find per process per gpu usage info
sp = subprocess.Popen(
["nvidia-smi", "pmon", "-c", "5"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out_str = sp.communicate()
out_str_split = out_str[0].decode("utf-8").split("\n")
# with too many processes or too many GPUs on the machine, this command
# sometimes reprints its headers; removing duplicate lines compensates
out_str_split = list(OrderedDict.fromkeys(out_str_split))
out_str_pruned = [
x for x in out_str_split if "Idx" not in x
] # [out_str_split[0], ] + out_str_split[2:]
# For some weird reason the header position sometimes gets jumbled so we need to re-order it to the front
position = -1
for i, x in enumerate(out_str_pruned):
if "gpu" in x:
position = i
if position == -1:
raise ValueError("Problem with output of nvidia-smi pmon -c 5")
out_str_pruned.insert(0, out_str_pruned.pop(position))
out_str_final = "\n".join(out_str_pruned)
out_str_final = out_str_final.replace("-", "0")
out_str_final = out_str_final.replace("#", "")
df = pd.read_csv(StringIO(out_str_final), engine="python", delim_whitespace=True)
process_percentage_used_gpu = df.groupby(["gpu", "pid"]).mean().reset_index()
p = Popen(["nvidia-smi", "-q", "-x"], stdout=PIPE)
outs, errors = p.communicate()
xml = fromstring(outs)
num_gpus = int(xml.findall("attached_gpus")[0].text)
results = []
power = 0
per_gpu_absolute_percent_usage = {}
per_gpu_relative_percent_usage = {}
absolute_power = 0
per_gpu_performance_states = {}
for gpu_id, gpu in enumerate(xml.findall("gpu")):
gpu_data = {}
name = gpu.findall("product_name")[0].text
gpu_data["name"] = name
# get memory
memory_usage = gpu.findall("fb_memory_usage")[0]
total_memory = memory_usage.findall("total")[0].text
used_memory = memory_usage.findall("used")[0].text
free_memory = memory_usage.findall("free")[0].text
gpu_data["memory"] = {
"total": total_memory,
"used_memory": used_memory,
"free_memory": free_memory,
}
# get utilization
utilization = gpu.findall("utilization")[0]
gpu_util = utilization.findall("gpu_util")[0].text
memory_util = utilization.findall("memory_util")[0].text
gpu_data["utilization"] = {"gpu_util": gpu_util, "memory_util": memory_util}
# get power
power_readings = gpu.findall("power_readings")[0]
power_draw = power_readings.findall("power_draw")[0].text
gpu_data["power_readings"] = {"power_draw": power_draw}
absolute_power += float(power_draw.replace("W", ""))
# processes
processes = gpu.findall("processes")[0]
infos = []
# all the info for processes on this particular gpu that we're on
gpu_based_processes = process_percentage_used_gpu[
process_percentage_used_gpu["gpu"] == gpu_id
]
# what's the total absolute SM for this gpu across all accessible processes
percentage_of_gpu_used_by_all_processes = float(gpu_based_processes["sm"].sum())
per_gpu_power_draw = {}
for info in processes.findall("process_info"):
pid = info.findall("pid")[0].text
process_name = info.findall("process_name")[0].text
used_memory = info.findall("used_memory")[0].text
sm_absolute_percent = gpu_based_processes[
gpu_based_processes["pid"] == int(pid)
]["sm"].sum()
if percentage_of_gpu_used_by_all_processes == 0:
# avoid division by zero: when nothing is in use, treat 0/0 as 0
sm_relative_percent = 0
else:
sm_relative_percent = (
sm_absolute_percent / percentage_of_gpu_used_by_all_processes
)
infos.append(
{
"pid": pid,
"process_name": process_name,
"used_memory": used_memory,
"sm_relative_percent": sm_relative_percent,
"sm_absolute_percent": sm_absolute_percent,
}
)
if int(pid) in pid_list:
# only add a GPU to the list if one of our processes is using it:
# nvidia-smi sometimes lists every available GPU even when it is not used
# by our application, which is a problem in a Slurm setting
if gpu_id not in per_gpu_absolute_percent_usage:
# percentage_of_gpu_used_by_all_processes
per_gpu_absolute_percent_usage[gpu_id] = 0
if gpu_id not in per_gpu_relative_percent_usage:
# percentage_of_gpu_used_by_all_processes
per_gpu_relative_percent_usage[gpu_id] = 0
if gpu_id not in per_gpu_performance_states:
# we only log information for GPUs we are actually using; nvidia-smi can
# report all GPUs on a Slurm cluster even when they are not assigned to this worker
performance_state = gpu.findall("performance_state")[0].text
per_gpu_performance_states[gpu_id] = performance_state
power += sm_relative_percent * float(power_draw.replace("W", ""))
per_gpu_power_draw[gpu_id] = float(power_draw.replace("W", ""))
# want a proportion value rather than percentage
per_gpu_absolute_percent_usage[gpu_id] += sm_absolute_percent / 100.0
per_gpu_relative_percent_usage[gpu_id] += sm_relative_percent
gpu_data["processes"] = infos
results.append(gpu_data)
if len(per_gpu_absolute_percent_usage.values()) == 0:
average_gpu_utilization = 0
average_gpu_relative_utilization = 0
else:
average_gpu_utilization = np.mean(list(per_gpu_absolute_percent_usage.values()))
average_gpu_relative_utilization = np.mean(
list(per_gpu_relative_percent_usage.values())
)
data_return_values_with_headers = {
"nvidia_draw_absolute": absolute_power,
"nvidia_estimated_attributable_power_draw": power,
"average_gpu_estimated_utilization_absolute": average_gpu_utilization,
"per_gpu_average_estimated_utilization_absolute": process_percentage_used_gpu.set_index(
["gpu", "pid"]
).to_dict(
orient="index"
),
"average_gpu_estimated_utilization_relative": average_gpu_relative_utilization,
"per_gpu_performance_state": per_gpu_performance_states,
"per_gpu_power_draw": per_gpu_power_draw,
}
return data_return_values_with_headers
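# Minimal usage sketch (not part of the original module): query the power
# draw attributable to the current process; requires an NVIDIA driver.
if __name__ == "__main__":
    import os
    if is_nvidia_compatible():
        stats = get_nvidia_gpu_power(pid_list=[os.getpid()])
        print("total draw (W):", stats["nvidia_draw_absolute"])
        print("attributed draw (W):", stats["nvidia_estimated_attributable_power_draw"])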
|
{"hexsha": "42339de130cadf53fa7527c75b8032d3c20eaa62", "size": 10345, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiment_impact_tracker/gpu/nvidia.py", "max_stars_repo_name": "W4ngatang/experiment-impact-tracker", "max_stars_repo_head_hexsha": "cf486ebacae9b68ec4770de36fb537704105d6de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiment_impact_tracker/gpu/nvidia.py", "max_issues_repo_name": "W4ngatang/experiment-impact-tracker", "max_issues_repo_head_hexsha": "cf486ebacae9b68ec4770de36fb537704105d6de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment_impact_tracker/gpu/nvidia.py", "max_forks_repo_name": "W4ngatang/experiment-impact-tracker", "max_forks_repo_head_hexsha": "cf486ebacae9b68ec4770de36fb537704105d6de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3346007605, "max_line_length": 142, "alphanum_fraction": 0.6384726921, "include": true, "reason": "import numpy", "num_tokens": 2277}
|
#ifndef BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_SOLARIS_HPP_INCLUDED
#define BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_SOLARIS_HPP_INCLUDED
//
// detail/sp_counted_base_solaris.hpp
// based on: detail/sp_counted_base_w32.hpp
//
// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
// Copyright 2004-2005 Peter Dimov
// Copyright 2006 Michael van der Westhuizen
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
//
// Lock-free algorithm by Alexander Terekhov
//
// Thanks to Ben Hitchings for the #weak + (#shared != 0)
// formulation
//
#include <atomic.h>
#include <boost/config.hpp>
#include <boost/smart_ptr/detail/sp_typeinfo_.hpp>
namespace boost {
namespace detail {
class BOOST_SYMBOL_VISIBLE sp_counted_base {
private:
sp_counted_base(sp_counted_base const &);
sp_counted_base &operator=(sp_counted_base const &);
uint32_t use_count_; // #shared
uint32_t weak_count_; // #weak + (#shared != 0)
public:
sp_counted_base() : use_count_(1), weak_count_(1) {}
virtual ~sp_counted_base() // nothrow
{}
// dispose() is called when use_count_ drops to zero, to release
// the resources managed by *this.
virtual void dispose() = 0; // nothrow
// destroy() is called when weak_count_ drops to zero.
virtual void destroy() // nothrow
{
delete this;
}
virtual void *get_deleter(sp_typeinfo_ const &ti) = 0;
virtual void *get_local_deleter(sp_typeinfo_ const &ti) = 0;
virtual void *get_untyped_deleter() = 0;
void add_ref_copy() { atomic_inc_32(&use_count_); }
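// add_ref_lock() backs weak_ptr::lock(): it increments use_count_ only while
// the count is still non-zero. The CAS loop re-reads the counter, gives up if
// the object is already being disposed (count == 0), and retries on contention.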
bool add_ref_lock() // true on success
{
for (;;) {
uint32_t tmp = static_cast<uint32_t const volatile &>(use_count_);
if (tmp == 0)
return false;
if (atomic_cas_32(&use_count_, tmp, tmp + 1) == tmp)
return true;
}
}
void release() // nothrow
{
if (atomic_dec_32_nv(&use_count_) == 0) {
dispose();
weak_release();
}
}
void weak_add_ref() // nothrow
{
atomic_inc_32(&weak_count_);
}
void weak_release() // nothrow
{
if (atomic_dec_32_nv(&weak_count_) == 0) {
destroy();
}
}
long use_count() const // nothrow
{
return static_cast<long const volatile &>(use_count_);
}
};
} // namespace detail
} // namespace boost
#endif // #ifndef BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_SOLARIS_HPP_INCLUDED
|
{"hexsha": "8b3530fc59d8c424ed6d1bd871189a201c877d7e", "size": 2460, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libs/boost_1_72_0/boost/smart_ptr/detail/sp_counted_base_solaris.hpp", "max_stars_repo_name": "henrywarhurst/matrix", "max_stars_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libs/boost_1_72_0/boost/smart_ptr/detail/sp_counted_base_solaris.hpp", "max_issues_repo_name": "henrywarhurst/matrix", "max_issues_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/boost_1_72_0/boost/smart_ptr/detail/sp_counted_base_solaris.hpp", "max_forks_repo_name": "henrywarhurst/matrix", "max_forks_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4285714286, "max_line_length": 77, "alphanum_fraction": 0.687804878, "num_tokens": 698}
|
# train logistic regression on mnist dataest using lista
import numpy as np
import theano.tensor as T
import theano
import gzip, cPickle
from random import sample, seed
import os, sys
os.chdir('/home/dikai/PycharmProjects/sparse_lstm')
print(os.getcwd())
from sparse_lstm import Sparse_LSTM_wo_O_Gate_v2
from keras.models import Model, Sequential
from keras.layers import Input, Dense
from keras import regularizers
from keras.callbacks import Callback
from keras.engine import Layer
from keras.optimizers import Adadelta
import matplotlib.pyplot as plt
from osdfutils import crino
output_dir = __file__[:-3]
if not os.path.exists(output_dir):
os.mkdir(output_dir)
sys.setrecursionlimit(10000)
# load and normalize data
with gzip.open("data/mnist.pkl.gz",'rb') as f:
train_set_mnist, valid_set_mnist, test_set_mnist = cPickle.load(f)
train_set_mnist_img, train_set_mnist_label = train_set_mnist
test_set_mnist_img, test_set_mnist_label = test_set_mnist
print('Original train set of mnist: ' + str(train_set_mnist_img.shape))
print('Original test set of mnist: ' + str(test_set_mnist_img.shape))
train_set_mnist_mean = train_set_mnist_img.mean(axis=0)
train_set_mnist_std = train_set_mnist_img.std(axis=0)
train_set_mnist_img -= train_set_mnist_mean
train_set_mnist_img /= train_set_mnist_std + 1e-10
test_set_mnist_img -= train_set_mnist_mean
test_set_mnist_img /= train_set_mnist_std + 1e-10
# shuffle data
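# (reusing the same seed below applies the identical permutation to images and labels)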
total_img = np.vstack([train_set_mnist_img, test_set_mnist_img])
total_label = np.hstack([train_set_mnist_label, test_set_mnist_label])
np.random.seed(10023)
np.random.shuffle(total_img)
np.random.seed(10023)
np.random.shuffle(total_label)
n = total_img.shape[0]
train_set_mnist_img = total_img[:n//2]
train_set_mnist_label = total_label[:n//2]
test_set_mnist_img = total_img[n//2:]
test_set_mnist_label = total_label[n//2:]
# total # of epochs for training lista sparse encoder
epochs = 100
# batch size
btsz = 512
# learning rate
lr = 0.9
# momentum
momentum = 0.9
# learning rate decay
decay = 0.95
# number of batches per epoch
batches = train_set_mnist_img.shape[0] // btsz
# size of sparse vectors
sparse_shape = 14*14
# sparsity weight
lmbd = 0.1
L = 1.0
# number of iterations
layers = 10
def batch_generator_lista(images, batch_size, labels = None, yield_label=False):
while True:
s = np.array(sample(xrange(images.shape[0]), batch_size), dtype=np.int32)
if yield_label:
yield (images[s].copy(), labels[s].copy())
else:
yield images[s].copy()
print("LISTA -- Learned ISTA without ISTA")
print("Epochs", epochs)
print("Batches per epoch", batches)
Dinit_lista = {"shape": (sparse_shape, 28*28), "variant": "normal", "std": 0.1}
config_lista = {"D": Dinit_lista, "layers": layers, "L": L, "lambda": lmbd}
# normalize weights according to this config
norm_dic_lista = {"D": {"axis":1, "c": 1.}}
# threshold theta should be at least some value
thresh_dic_lista = {"theta": {"thresh": 1e-2}}
x_lista, params_lista, cost_lista, rec_lista, z_lista = crino.lista(config=config_lista, shrinkage=crino.sh)
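# Editorial note: crino.sh is presumably the standard ISTA soft-thresholding
# (shrinkage) operator. The sketch below is illustrative only -- an assumption
# about what crino.sh computes, not part of the crino API:
def soft_threshold_sketch(x, theta):
    # shrink x towards zero by theta: sign(x) * max(|x| - theta, 0)
    return np.sign(x) * np.maximum(np.abs(x) - theta, 0.0)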
grads_lista = T.grad(cost_lista, params_lista)
# training ...
settings_lista = {"lr": lr, "momentum": momentum, "decay": decay}
# ... with stochastic gradient + momentum
#updates = crino.momntm(params, grads, settings)#, **norm_dic)
updates_lista = crino.adadelta(params_lista, grads_lista, settings_lista)#, **norm_dic)
# ... normalize weights
updates_lista = crino.norm_updt(params_lista, updates_lista, todo=norm_dic_lista)
# ... make sure threshold is big enough
updates_lista = crino.max_updt(params_lista, updates_lista, todo=thresh_dic_lista)
train_lista = theano.function([x_lista], cost_lista, updates=updates_lista,
allow_input_downcast=True)
print('done.')
# Rerun this cell if another full number of epochs should be trained.
generator_lista = batch_generator_lista(train_set_mnist_img, btsz)
hist = []
for epoch in xrange(epochs):
cost = 0
sz = 0
for i in xrange(batches):
cost += btsz*train_lista(generator_lista.next())
sz += btsz
hist.append([epoch, cost/sz])
hist = np.array(hist)
plt.plot(hist[:,0], hist[:,1])
plt.show()
# function to get sparse coding based on trained lista
sparse_lista = theano.function([x_lista], z_lista, allow_input_downcast=True)
# function to get reconstructed image from original image
reconstruct_lista = theano.function([x_lista], rec_lista, allow_input_downcast=True)
# function to get reconstructed image from sparse code
reconstruct_lista_2 = theano.function([z_lista], T.dot(z_lista, L*params_lista[0]), allow_input_downcast=True)
# batch generator to get (sparse_vector, label) pairs
def batch_generator(encoder=None, batch_size=512, img=None, label=None, n_classes=10):
img = img.astype(np.float32)
n_total = img.shape[0]
while True:
index = np.array(sample(xrange(n_total), batch_size), dtype=np.int)
img_batch = img[index]
x = encoder(img_batch)
y = np.zeros((batch_size, n_classes), dtype=np.float32)
y[np.arange(batch_size), label[index]] = 1.0
yield x, y
# logistic classifier
def get_logistic_classifier(input_dim, output_dim, C=0.01):
model = Sequential()
model.add(Dense(output_dim, input_dim=input_dim, activation='softmax', W_regularizer=regularizers.l2(C)))
model.compile(optimizer='adadelta', loss='categorical_crossentropy',
metrics=['accuracy'])
return model
n_epoch = 50
n_classes = 10
encoder = sparse_lista
bg = batch_generator(encoder, img=train_set_mnist_img, label=train_set_mnist_label, n_classes=n_classes)
# get sparse vector for test set
train_set_mnist_sparse_vec = encoder(train_set_mnist_img)
test_set_mnist_sparse_vec = encoder(test_set_mnist_img)
def get_1hot_lab(label, n_classes):
y = np.zeros((label.shape[0], n_classes), dtype=np.float32)
y[np.arange(label.shape[0]), label] = 1.0
return y
train_set_mnist_label_1hot = get_1hot_lab(train_set_mnist_label, 10)
test_set_mnist_label_1hot = get_1hot_lab(test_set_mnist_label, 10)
class Eval_Callback(Callback):
def __init__(self):
super(Eval_Callback, self).__init__()
self.acc_history_train = []
self.acc_history_test = []
def on_epoch_end(self, epoch, logs={}):
print('on epoch %s end' % epoch)
out_train = self.model.evaluate(train_set_mnist_sparse_vec, train_set_mnist_label_1hot, verbose=0)
out_test = self.model.evaluate(test_set_mnist_sparse_vec, test_set_mnist_label_1hot, verbose=0)
print('Train => %f, test => %f' % (out_train[1], out_test[1]))
self.acc_history_train.append(out_train)
self.acc_history_test.append(out_test)
callback = Eval_Callback()
classifier = get_logistic_classifier(input_dim=14*14, output_dim=10)
# history = classifier.fit_generator(bg, samples_per_epoch=len(train_set_mnist_img), nb_epoch=n_epoch, verbose=1, callbacks=[callback])
history = classifier.fit(train_set_mnist_sparse_vec, train_set_mnist_label_1hot, batch_size=512, nb_epoch=n_epoch, verbose=1, callbacks=[callback])
history = history.history
# plot results
plt.figure()
plt.plot([x[1] for x in callback.acc_history_train])
plt.plot([x[1] for x in callback.acc_history_test])
plt.legend(['train', 'test'])
plt.xlabel('epoch number')
plt.ylabel('accuracy')
plt.savefig('lista_logistic_reg_mnist.png')
plt.show()
|
{"hexsha": "5677322fd12365381aaedfecf6a1a2f861be9d8f", "size": 7410, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/lista_reg_mnist.py", "max_stars_repo_name": "limit-scu/2018-AAAI-SC2Net", "max_stars_repo_head_hexsha": "dd113627dc8a5e12fd8bd9c7c2333fc9b7dc4b60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-26T01:54:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-26T01:54:16.000Z", "max_issues_repo_path": "code/lista_reg_mnist.py", "max_issues_repo_name": "limit-scu/2018-AAAI-SC2Net", "max_issues_repo_head_hexsha": "dd113627dc8a5e12fd8bd9c7c2333fc9b7dc4b60", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/lista_reg_mnist.py", "max_forks_repo_name": "limit-scu/2018-AAAI-SC2Net", "max_forks_repo_head_hexsha": "dd113627dc8a5e12fd8bd9c7c2333fc9b7dc4b60", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4545454545, "max_line_length": 147, "alphanum_fraction": 0.7466936572, "include": true, "reason": "import numpy,import theano", "num_tokens": 1947}
|
export KNNRegressor, KNNClassifier
import MLJBase: @mlj_model, metadata_model, metadata_pkg
using Distances
import NearestNeighbors
const NN = NearestNeighbors
const KNNRegressorDescription =
"""
K-Nearest Neighbors regressor: predicts the response associated with a new point
by taking an average of the response of the K-nearest points.
"""
const KNNClassifierDescription =
"""
K-Nearest Neighbors classifier: predicts the class associated with a new point
by taking a vote over the classes of the K-nearest points.
"""
const KNNFields =
"""
## Keywords
* `K=5` : number of neighbors
* `algorithm=:kdtree` : one of `(:kdtree, :brutetree, :balltree)`
* `metric=Euclidean()` : a `Metric` object for the distance between points
* `leafsize=10` : at what number of points to stop splitting the tree
* `reorder=true` : if true puts points close in distance close in memory
* `weights=:uniform` : one of `(:uniform, :distance)` if `:uniform` all neighbors are
considered as equally important, if `:distance`, closer neighbors
are proportionally more important.
See also the [package documentation](https://github.com/KristofferC/NearestNeighbors.jl).
"""
"""
    KNNRegressor(;kwargs...)
$KNNRegressorDescription
$KNNFields
"""
@mlj_model mutable struct KNNRegressor <: MLJBase.Deterministic
K::Int = 5::(_ > 0)
algorithm::Symbol = :kdtree::(_ in (:kdtree, :brutetree, :balltree))
metric::Metric = Euclidean()
leafsize::Int = 10::(_ ≥ 0)
reorder::Bool = true
weights::Symbol = :uniform::(_ in (:uniform, :distance))
end
"""
    KNNClassifier(;kwargs...)
$KNNClassifierDescription
$KNNFields
"""
@mlj_model mutable struct KNNClassifier <: MLJBase.Probabilistic
K::Int = 5::(_ > 0)
algorithm::Symbol = :kdtree::(_ in (:kdtree, :brutetree, :balltree))
metric::Metric = Euclidean()
leafsize::Int = 10::(_ ≥ 0)
reorder::Bool = true
weights::Symbol = :uniform::(_ in (:uniform, :distance))
end
const KNN = Union{KNNRegressor, KNNClassifier}
MMI.reformat(::KNN, X) = (MMI.matrix(X, transpose=true),)
MMI.reformat(::KNN, X, y) = (MMI.matrix(X, transpose=true), y)
MMI.reformat(::KNN, X, y, w) =
    error("Weights must be abstract vectors with `AbstractFloat` "*
"or `Integer` eltype, or be `nothing`. ")
MMI.reformat(::KNN, X, y, w::Union{Nothing,AbstractVector{<:AbstractFloat}}) =
(MMI.matrix(X, transpose=true), y, w)
MMI.reformat(::KNN, X, y, w::AbstractVector{<:Integer}) =
(MMI.matrix(X, transpose=true), y, float.(w))
MMI.selectrows(::KNN, I, Xmatrix) =
(view(Xmatrix, :, I),)
MMI.selectrows(::KNN, I, Xmatrix, y) =
(view(Xmatrix, :, I), view(y, I))
MMI.selectrows(::KNN, I, Xmatrix, y, w) =
(view(Xmatrix, :, I), view(y, I), view(w, I))
MMI.selectrows(::KNN, I, Xmatrix, y, ::Nothing) =
(view(Xmatrix, :, I), view(y, I), nothing)
function MLJBase.fit(m::KNN, verbosity::Int, Xmatrix, y, w=nothing)
if m.algorithm == :kdtree
tree = NN.KDTree(Xmatrix; leafsize=m.leafsize, reorder=m.reorder)
elseif m.algorithm == :balltree
tree = NN.BallTree(Xmatrix; leafsize=m.leafsize, reorder=m.reorder)
elseif m.algorithm == :brutetree
tree = NN.BruteTree(Xmatrix; leafsize=m.leafsize, reorder=m.reorder)
end
report = NamedTuple{}()
return (tree, y, w), nothing, report
end
MLJBase.fitted_params(model::KNN, (tree, _)) = (tree=tree,)
function MLJBase.predict(m::KNNClassifier, (tree, y, w), Xmatrix)
# for each entry, get the K closest training point + their distance
idxs, dists = NN.knn(tree, Xmatrix, m.K)
preds = Vector{MLJBase.UnivariateFinite}(undef, length(idxs))
classes = MLJBase.classes(y[1])
probas = zeros(length(classes))
w_ = ones(m.K)
# go over each test record, and for each go over the k nearest entries
for i in eachindex(idxs)
idxs_ = idxs[i]
dists_ = dists[i]
labels = y[idxs_]
if w !== nothing
w_ = w[idxs_]
end
probas .*= 0.0
if m.weights == :uniform
for (k, label) in enumerate(labels)
probas[classes .== label] .+= 1.0 / m.K * w_[k]
end
else
for (k, label) in enumerate(labels)
probas[classes .== label] .+= 1.0 / dists_[k] * w_[k]
end
end
# normalize so that sum to 1
probas ./= sum(probas)
preds[i] = MLJBase.UnivariateFinite(classes, probas)
end
return preds
end
function MLJBase.predict(m::KNNRegressor, (tree, y, w), Xmatrix)
idxs, dists = NN.knn(tree, Xmatrix, m.K)
preds = zeros(length(idxs))
w_ = ones(m.K)
for i in eachindex(idxs)
idxs_ = idxs[i]
dists_ = dists[i]
values = y[idxs_]
if w !== nothing
w_ = w[idxs_]
end
if m.weights == :uniform
preds[i] = sum(values .* w_) / sum(w_)
else
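            # distance weighting: each neighbour gets weight (1 - d/sum(d));
            # over K neighbours these factors sum to K-1, hence the
            # (sum(w_) - 1) normalisation (exact when w_ is uniform).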
preds[i] = sum(values .* w_ .* (1.0 .- dists_ ./ sum(dists_))) / (sum(w_) - 1)
end
end
return preds
end
# ====
metadata_pkg.((KNNRegressor, KNNClassifier),
name="NearestNeighbors",
uuid="b8a86587-4115-5ab1-83bc-aa920d37bbce",
url="https://github.com/KristofferC/NearestNeighbors.jl",
julia=true,
license="MIT",
is_wrapper=false
)
metadata_model(KNNRegressor,
input=MLJBase.Table(MLJBase.Continuous),
target=AbstractVector{MLJBase.Continuous},
weights=true,
descr=KNNRegressorDescription
)
metadata_model(KNNClassifier,
input=MLJBase.Table(MLJBase.Continuous),
target=AbstractVector{<:MLJBase.Finite},
weights=true,
descr=KNNClassifierDescription
)
|
{"hexsha": "7cc6e0c6fbe4b7522878f40cb94c108239216a48", "size": 5865, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/_models/NearestNeighbors.jl", "max_stars_repo_name": "adityasaini70/MLJBase.jl", "max_stars_repo_head_hexsha": "6dae5136d70dbc310b8876d727a442efebaa223d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/_models/NearestNeighbors.jl", "max_issues_repo_name": "adityasaini70/MLJBase.jl", "max_issues_repo_head_hexsha": "6dae5136d70dbc310b8876d727a442efebaa223d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/_models/NearestNeighbors.jl", "max_forks_repo_name": "adityasaini70/MLJBase.jl", "max_forks_repo_head_hexsha": "6dae5136d70dbc310b8876d727a442efebaa223d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5322580645, "max_line_length": 95, "alphanum_fraction": 0.6167092924, "num_tokens": 1734}
|
#include <boost/test/unit_test.hpp>
#include "algorithms/data_structures/sll/delete_k_to_last_elem_in_sll.hpp"
BOOST_AUTO_TEST_SUITE(DeleteKthToLastElementInSLL)
BOOST_AUTO_TEST_CASE(test_dktlesll_one_elem) {
NodeSLL<int>* sll = new NodeSLL<int>(10);
NodeSLL<int>* result =
Algo::DS::SLL::DeleteKthFromEndElement::Delete(sll, 1);
BOOST_CHECK(nullptr == result);
}
BOOST_AUTO_TEST_CASE(test_dktlesll_delete_first)
{
NodeSLL<int>* sll = CreateSLL<int>({1, 2, 3, 4});
NodeSLL<int>* result =
Algo::DS::SLL::DeleteKthFromEndElement::Delete(sll, 4);
std::vector<int> expectedValues = {2, 3, 4};
BOOST_CHECK(expectedValues == ValuesInSLL(&result));
DeleteSLL(&result);
}
BOOST_AUTO_TEST_CASE(test_dktlesll_delete_last)
{
NodeSLL<int>* sll = CreateSLL<int>({1, 2, 3, 4});
NodeSLL<int>* result =
Algo::DS::SLL::DeleteKthFromEndElement::Delete(sll, 1);
std::vector<int> expectedValues = {1, 2, 3};
BOOST_CHECK(expectedValues == ValuesInSLL(&result));
DeleteSLL(&result);
}
BOOST_AUTO_TEST_CASE(test_dktlesll_delete_one_before_last)
{
NodeSLL<int>* sll = CreateSLL<int>({1, 2, 3, 4});
NodeSLL<int>* result =
Algo::DS::SLL::DeleteKthFromEndElement::Delete(sll, 2);
std::vector<int> expectedValues = {1, 2, 4};
BOOST_CHECK(expectedValues == ValuesInSLL(&result));
DeleteSLL(&result);
}
BOOST_AUTO_TEST_CASE(test_dktlesll_delete_three_before_last)
{
NodeSLL<int>* sll = CreateSLL<int>({1, 2, 3, 4});
NodeSLL<int>* result =
Algo::DS::SLL::DeleteKthFromEndElement::Delete(sll, 3);
std::vector<int> expectedValues = {1, 3, 4};
BOOST_CHECK(expectedValues == ValuesInSLL(&result));
DeleteSLL(&result);
}
BOOST_AUTO_TEST_CASE(test_dktlesll_delete_invalid_element)
{
NodeSLL<int>* sll = CreateSLL<int>({1, 2, 3, 4});
NodeSLL<int>* result =
Algo::DS::SLL::DeleteKthFromEndElement::Delete(sll, 10);
std::vector<int> expectedValues = {1, 2, 3, 4};
BOOST_CHECK(expectedValues == ValuesInSLL(&result));
DeleteSLL(&result);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "bcbc6e76bf668bbe82f2a2137082cffc89d79edc", "size": 2125, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/algorithms/data_structures/sll/test_delete_k_to_last_elem_in_sll.cpp", "max_stars_repo_name": "iamantony/CppNotes", "max_stars_repo_head_hexsha": "2707db6560ad80b0e5e286a04b2d46e5c0280b3f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-07-31T14:13:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-03T09:51:43.000Z", "max_issues_repo_path": "test/algorithms/data_structures/sll/test_delete_k_to_last_elem_in_sll.cpp", "max_issues_repo_name": "iamantony/CppNotes", "max_issues_repo_head_hexsha": "2707db6560ad80b0e5e286a04b2d46e5c0280b3f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28.0, "max_issues_repo_issues_event_min_datetime": "2015-09-22T07:38:21.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-02T11:00:58.000Z", "max_forks_repo_path": "test/algorithms/data_structures/sll/test_delete_k_to_last_elem_in_sll.cpp", "max_forks_repo_name": "iamantony/CppNotes", "max_forks_repo_head_hexsha": "2707db6560ad80b0e5e286a04b2d46e5c0280b3f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2018-10-11T14:10:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-27T08:53:50.000Z", "avg_line_length": 30.3571428571, "max_line_length": 74, "alphanum_fraction": 0.6809411765, "num_tokens": 659}
|
#include "utils.hpp"
#include "edlib/Basis/Basis1DZ2.hpp"
#include "edlib/Basis/ToOriginalBasis.hpp"
#include "edlib/Hamiltonians/TIXXZ.hpp"
#include "edlib/Op/NodeMV.hpp"
#include "edlib/EDP/ConstructSparseMat.hpp"
#include "edlib/EDP/LocalHamiltonian.hpp"
#include <Eigen/Dense>
#include <Eigen/Eigenvalues>
#include <Eigen/Sparse>
#include <unsupported/Eigen/KroneckerProduct>
#include <Spectra/MatOp/SparseSymMatProd.h>
#include <Spectra/SymEigsSolver.h>
#include <catch2/catch.hpp>
#include <algorithm>
#include <cassert>
#include <iostream>
#include <random>
using namespace edlib;
template<typename T>
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>
twoQubitOp(uint32_t N, uint32_t pos1, uint32_t pos2, // NOLINT
const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>& v1,
const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>& v2)
{
using namespace Eigen;
// const uint32_t dim = (1U << N);
assert(pos1 < pos2);
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> res(1, 1);
res(0, 0) = 1.0;
for(uint32_t i = 0; i < pos1; i++)
{
res = Eigen::kroneckerProduct(MatrixXd::Identity(2, 2), res).eval();
}
res = Eigen::kroneckerProduct(v1, res).eval();
for(uint32_t i = pos1 + 1; i < pos2; i++)
{
res = Eigen::kroneckerProduct(MatrixXd::Identity(2, 2), res).eval();
}
res = Eigen::kroneckerProduct(v2, res).eval();
for(uint32_t i = pos2 + 1; i < N; i++)
{
res = Eigen::kroneckerProduct(MatrixXd::Identity(2, 2), res).eval();
}
return res;
}
template<typename T>
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>
singleQubitOp(int N, int pos, const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>& v) // NOLINT
{
using namespace Eigen;
// const uint32_t dim = (1u << N);
using MatrixT = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>;
MatrixT res(1, 1);
res(0, 0) = 1.0;
for(int i = 0; i < pos; i++)
{
res = Eigen::kroneckerProduct(MatrixT::Identity(2, 2), res).eval();
}
res = Eigen::kroneckerProduct(v, res).eval();
for(int i = pos + 1; i < N; i++)
{
res = Eigen::kroneckerProduct(MatrixT::Identity(2, 2), res).eval();
}
return res;
}
template<uint32_t N> class CompareXXZ
{
private:
double delta_;
Basis1DZ2<uint32_t> basis_;
Eigen::MatrixXd hamFull_;
double gsEnergy_;
Eigen::VectorXd gsVec_;
public:
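    // Ground-state symmetry sector assumed below: momentum k = 0 when N/2 is
    // even and k = N/2 (i.e. pi) when N/2 is odd, with Z2 (spin-flip) parity
    // (-1)^(N/2); Test() cross-checks this sector against full diagonalization.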
constexpr static int k = (N / 2) * ((N / 2) % 2);
constexpr static int parity = 1 - 2 * static_cast<int>((N / 2) % 2);
explicit CompareXXZ(double delta) : delta_{delta}, basis_{N, k, parity, true}
{
using namespace Eigen;
static_assert(N % 2 == 0, "N must be even");
edp::LocalHamiltonian<double> lh(N, 2);
for(uint32_t i = 0; i < N; i++)
{
lh.addTwoSiteTerm({i, (i + 1) % N}, getSXXYY() + delta_ * getSZZ());
}
hamFull_ = MatrixXd(edp::constructSparseMat<double>(1U << N, lh));
SelfAdjointEigenSolver<MatrixXd> es;
es.compute(hamFull_);
gsEnergy_ = es.eigenvalues()[0];
gsVec_ = es.eigenvectors().col(0);
}
void Test()
{
using namespace Eigen;
using Spectra::CompInfo;
using Spectra::SortRule;
constexpr size_t max_iter = 1000;
constexpr double tol = 1e-10;
TIXXZ<uint32_t> ham(basis_, 1.0, delta_);
const int dim = basis_.getDim();
NodeMV mv(dim, 0, dim, ham);
Spectra::SymEigsSolver<NodeMV> eigs(mv, 2, 6);
eigs.init();
eigs.compute(SortRule::SmallestAlge, max_iter, tol, SortRule::SmallestAlge);
if(eigs.info() != CompInfo::Successful)
{
REQUIRE(false);
}
const double gsEnergy1 = eigs.eigenvalues()[0];
REQUIRE(gsEnergy_ == Approx(gsEnergy1).margin(1e-4));
const VectorXd subspaceGs = eigs.eigenvectors().col(0);
const VectorXd gsVec1 = [&]() -> VectorXd {
auto v = toOriginalVectorLM(basis_, subspaceGs.data());
return Map<VectorXd>(v.data(), 1U << N);
}();
const double gsEnergy2
= double(gsVec1.transpose() * hamFull_ * gsVec1) / double(gsVec1.transpose() * gsVec1);
REQUIRE(gsEnergy1 == Approx(gsEnergy2).margin(1e-6));
REQUIRE(std::abs(gsVec_.transpose() * gsVec1) == Approx(1.0).margin(1e-6));
}
};
TEST_CASE("Compare GS of XXZ using LocalHamiltonian and TIBasis", "[XXZGS]")
{
SECTION("TIBasis Z2 XXZ N=8")
{
CompareXXZ<8> test(1.0);
test.Test();
}
SECTION("TIBasis Z2 XXZ N=10")
{
CompareXXZ<10> test(1.0);
test.Test();
}
SECTION("TIBasis Z2 XXZ N=12")
{
CompareXXZ<12> test(1.0);
test.Test();
}
}
|
{"hexsha": "f948e2befd8447b49f78bb856a537935b8df8a51", "size": 4791, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/test_xxz_gs.cpp", "max_stars_repo_name": "cecri/ExactDiagonalization", "max_stars_repo_head_hexsha": "a168ed2f60149b1c3e5bd9ae46a5d169aea76773", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_xxz_gs.cpp", "max_issues_repo_name": "cecri/ExactDiagonalization", "max_issues_repo_head_hexsha": "a168ed2f60149b1c3e5bd9ae46a5d169aea76773", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_xxz_gs.cpp", "max_forks_repo_name": "cecri/ExactDiagonalization", "max_forks_repo_head_hexsha": "a168ed2f60149b1c3e5bd9ae46a5d169aea76773", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8546511628, "max_line_length": 99, "alphanum_fraction": 0.6015445627, "num_tokens": 1457}
|
'''
Description: This file is used to plot the ROC curves for the paper
Version: 1.0
Author: ZhangHongYu
Date: 2021-02-27 11:20:37
LastEditors: ZhangHongYu
LastEditTime: 2021-05-04 21:24:17
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# plt.rcParams['font.sans-serif'] = ['SimHei']  # step 1: switch the sans-serif font so Chinese labels render
# plt.rcParams['axes.unicode_minus'] = False  # step 2: fix display of the minus sign on axis labels
if __name__ == '__main__':
data = pd.read_csv('data/roc_data.csv')
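    # columns come as interleaved (x, y) pairs: even-indexed columns are FPR, odd-indexed are TPR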
list_y = [ data.iloc[:, i*2+1].tolist() for i in range(6) ]
list_y.append(data.iloc[:, 11].tolist())
list_x = [ data.iloc[:, i*2].tolist() for i in range(6) ]
list_x.append(data.iloc[:, 10].tolist())
plt.figure()
colors = ['lightblue', 'red', 'yellowgreen', 'black', 'green', 'orange', 'pink']
labels = ['stacking', 'xgb', 'rf', 'et', 'dt', 'knn', 'lr']
AUCs = [0.8332, 0.8250, 0.8220, 0.8121, 0.8118, 0.8106, 0.8057]
plt.xlim(0, 1)
plt.ylim(0, 1)
# for i in range(7):
# plt.plot(list_x[i], list_y[i], color = colors[i], label='AUC '+labels[i]+' = '+ str(AUCs[i]))
# plt.legend()
plt.plot(list_x[0], list_y[0], color = colors[0], label='AUC '+'Bagging+DCRN'+' = '+ str(AUCs[0]))
#plt.plot(list_x[5], list_y[5], color = colors[5], label='AUC '+'DCRN'+' = '+ str(AUCs[5]))
plt.legend(loc='lower right')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.plot(np.linspace(0, 1, 1000), np.linspace(0, 1, 1000), linestyle='--')
plt.savefig('其他行业.png')
|
{"hexsha": "68aada5fcef9da49a42fc01de5f6b3ee5595f2d6", "size": 1520, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_roc2.py", "max_stars_repo_name": "lonelyprince7/TipDMCup", "max_stars_repo_head_hexsha": "69e8e752cf4622c698872ad80a86f384c5151b9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-02-20T23:21:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T09:47:38.000Z", "max_issues_repo_path": "plot_roc2.py", "max_issues_repo_name": "lonelyprince7/TipDMCup", "max_issues_repo_head_hexsha": "69e8e752cf4622c698872ad80a86f384c5151b9c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-06T13:16:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-31T01:49:24.000Z", "max_forks_repo_path": "plot_roc2.py", "max_forks_repo_name": "lonelyprince7/TipDMCup", "max_forks_repo_head_hexsha": "69e8e752cf4622c698872ad80a86f384c5151b9c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-24T12:09:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-19T09:47:38.000Z", "avg_line_length": 40.0, "max_line_length": 103, "alphanum_fraction": 0.6144736842, "include": true, "reason": "import numpy", "num_tokens": 574}
|
import jax
import jax.numpy as jnp
import numpy as np # get rid of this eventually
import argparse
from jax import jit
from jax.experimental.ode import odeint
from functools import partial # reduces arguments to function by making some subset implicit
from jax.experimental import stax
from jax.experimental import optimizers
import os, sys, time
sys.path.append('..')
# ## Set up LNN:
sys.path.append('../experiment_dblpend/')
from lnn import raw_lagrangian_eom
from data import get_dataset
from models import mlp as make_mlp
from utils import wrap_coords
sys.path.append('../hyperopt')
from HyperparameterSearch import learned_dynamics
from HyperparameterSearch import extended_mlp
class ObjectView(object):
def __init__(self, d): self.__dict__ = d
from data import get_trajectory
from data import get_trajectory_analytic
from physics import analytical_fn
vfnc = jax.jit(jax.vmap(analytical_fn))
vget = partial(jax.jit, backend='cpu')(jax.vmap(partial(get_trajectory_analytic, mxstep=100), (0, None), 0))
import pickle as pkl
# ## Here are our model parameters
while True:
hidden_dim = int(10**(np.random.rand()*1.5 + 1))
layers = np.random.randint(2, 5)
args = ObjectView({'dataset_size': 200,
'fps': 10,
'samples': 100,
'num_epochs': 80000,
'seed': 0,
'loss': 'l1',
'act': 'softplus',
'hidden_dim': hidden_dim,
'output_dim': 1,
'layers': layers,
'n_updates': 1,
'lr': 0.001,
'lr2': 2e-05,
'dt': 0.1,
'model': 'gln',
'batch_size': 68,
'l2reg': 5.7e-07,
})
# args = loaded['args']
rng = jax.random.PRNGKey(args.seed)
from jax.experimental.ode import odeint
from HyperparameterSearch import new_get_dataset
from matplotlib import pyplot as plt
vfnc = jax.jit(jax.vmap(analytical_fn, 0, 0))
vget = partial(jax.jit, backend='cpu')(jax.vmap(partial(get_trajectory_analytic, mxstep=100), (0, None), 0))
batch = 60
@jax.jit
def get_derivative_dataset(rng):
# randomly sample inputs
y0 = jnp.concatenate([
jax.random.uniform(rng, (batch, 2))*2.0*np.pi,
(jax.random.uniform(rng+1, (batch, 2))-0.5)*10*2
], axis=1)
return y0, vfnc(y0)
best_params = None
best_loss = np.inf
init_random_params, nn_forward_fn = extended_mlp(args)
import HyperparameterSearch
HyperparameterSearch.nn_forward_fn = nn_forward_fn
_, init_params = init_random_params(rng+1, (-1, 4))
rng += 1
model = (nn_forward_fn, init_params)
opt_init, opt_update, get_params = optimizers.adam(args.lr)
opt_state = opt_init(init_params)
from jax.tree_util import tree_flatten
from HyperparameterSearch import make_loss, train
from copy import deepcopy as copy
# train(args, model, data, rng);
from jax.tree_util import tree_flatten
# Current std:
from jax.ops import index_update
HyperparameterSearch.nn_forward_fn = nn_forward_fn
# ## Let's score the qdotdot output over normally distributed input for 256 batch size:
from jax import grad, vmap
normal = True
n = 256
@jax.jit
def custom_init(stds, rng2):
new_params = []
i = 0
for l1 in init_params:
if (len(l1)) == 0: new_params.append(()); continue
new_l1 = []
for l2 in l1:
if len(l2.shape) == 1:
new_l1.append(jnp.zeros_like(l2))
else:
if normal:
new_l1.append(jax.random.normal(rng2, l2.shape)*stds[i])
# n1 = l2.shape[0]
# n2 = l2.shape[1]
# power = stds[0]
# base_scale = stds[1]
# s = base_scale/(n1+n2)**power
# new_l1.append(jax.random.normal(rng2, l2.shape)*s)
else:
new_l1.append(jax.random.uniform(rng2, l2.shape, minval=-0.5, maxval=0.5)*stds[i])
rng2+=1
i += 1
new_params.append(new_l1)
return new_params
@jax.jit
def j_score_init(stds, rng2):
new_params = custom_init(stds, rng2)
rand_input = jax.random.normal(rng2, [n, 4])
rng2 += 1
outputs = jax.vmap(
partial(
raw_lagrangian_eom,
learned_dynamics(new_params)))(rand_input)[:, 2:]
#KL-divergence to mu=0, std=1:
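    # closed form per dim: KL(N(mu, std^2) || N(0, 1)) = (mu^2 + std^2 - 1)/2 - log(std)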
mu = jnp.average(outputs, axis=0)
std = jnp.std(outputs, axis=0)
KL = jnp.sum((mu**2 + std**2 - 1)/2.0 - jnp.log(std))
def total_output(p):
return vmap(partial(raw_lagrangian_eom, learned_dynamics(p)))(rand_input).sum()
d_params = grad(total_output)(new_params)
i = 0
for l1 in d_params:
if (len(l1)) == 0: continue
new_l1 = []
for l2 in l1:
if len(l2.shape) == 1: continue
mu = jnp.average(l2)
std = jnp.std(l2)
KL += (mu**2 + std**2 - 1)/2.0 - jnp.log(std)
#HACK
desired_gaussian = jnp.sqrt(6)/jnp.sqrt(l2.shape[0] + l2.shape[1])
scaled_std = stds[i]/desired_gaussian
#Avoid extremely large values
KL += 0.1*(scaled_std**2/2.0 - jnp.log(scaled_std))
i += 1
return jnp.log10(KL)
cur_std = jnp.array(
[ 0.01]*(args.layers+1)
)
rng2 = jax.random.PRNGKey(0)
j_score_init(cur_std, rng2)
# @jax.jit
vv = jax.jit(vmap(j_score_init, (None, 0), 0))
rng2 = jax.random.PRNGKey(0)
def score_init(stds):
global rng2
stds = jnp.array(stds)
stds = jnp.exp(stds)
q75, q50, q25 = np.percentile(vv(stds, jax.random.split(rng2, num=10)), [75, 50, 25])
rng2 += 30
return q50, q75-q25
score_init(cur_std)
# from bayes_opt import BayesianOptimization
# # Bounded region of parameter space
pbounds = {'s%d'%(i,): (-15, 15) for i in range(len(cur_std))}
def bb(**kwargs):
out, std = score_init([kwargs[q] for q in ['s%d'%(i,) for i in range(len(cur_std))]])
# if out is None or not out > -30:
# return -30.0
return -out, std
# Let's fit the best distribution:
# # Let's redo that with Bayes:
# # Bayesian:
# # Old stuff:
import hyperopt
from hyperopt import hp, fmin, tpe, Trials
def run_trial(args):
loss, std = bb(**args)
if not np.isfinite(loss) or not np.isfinite(std):
return {
'status': 'fail', # or 'fail' if nan loss
'loss': np.inf
}
return {
'status': 'ok', # or 'fail' if nan loss
'loss': -loss,
'loss_variance': std,
}
#TODO: Declare your hyperparameter priors here:
space = {
**{'s%d'%(i,): hp.normal('s%d'%(i,), -2, 5) for i in range(len(cur_std)-1)
},
**{'s%d'%(len(cur_std)-1,): hp.normal('s%d'%(len(cur_std)-1,), 3, 8)}
}
trials = Trials()
best = fmin(run_trial,
space=space,
algo=tpe.suggest,
max_evals=2500,
trials=trials,
verbose=1
)
def k(t):
if 'loss' not in t['result']:
return np.inf
return t['result']['loss']
sorted_trials = sorted(trials.trials, key=k)
len(trials.trials)
q = np.array(
[[s['misc']['vals']['s%d'%(i,)][0] for i in range(len(cur_std))] for s in sorted_trials[:100]]
)
print(q[0], flush=True)
# ## 4 layers, 1000 hidden: {(4, 1000), (1000, 1000), (1000, 1000), (1000, 1)}
#
# ## median top 10/2000: array([-1.47842217, -4.37217279, -3.37083752, 11.13480387])
#
# (unconverged)
#
# ## 4 layers, 100 hidden: {(4, 100), (100, 100), (100, 100), (100, 1)}
#
# ## median top 30/5000: array([-1.70680816, -2.40340615, -2.17201716, 10.55268474])
#
# (unconverged)
#
# ## 3 layers, 100 hidden:
#
# ## median top 100/7000: array([-1.69875614, -2.74589338, 3.75818009])
#
# (converged)
#
# ## 3 layers, 30 hidden:
# # Use Eureqa to get the scalings!
simple_data = np.array(
[
[t['misc']['vals']['s%d'%(i,)][0] for i in range(len(cur_std))] + [t['result']['loss']]
for t in trials.trials if 'loss' in t['result'] and np.isfinite(t['result']['loss'])])
# np.save('sdata.npy', simple_data)
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
gp = GaussianProcessRegressor(alpha=3, n_restarts_optimizer=20, normalize_y=True)
simple_data[:, -1].min()
gp.fit(simple_data[:, :-1], simple_data[:, -1])
print(args.layers+1, args.hidden_dim, q[gp.predict(q).argmin()], flush=True)
|
{"hexsha": "4d8c859ff80b91cd7e9b89a7a9e087ddf15b03f1", "size": 8888, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/BestInitialization.py", "max_stars_repo_name": "breandan/lagrangian_nns", "max_stars_repo_head_hexsha": "5beedd01affc2aaecc78ea158834f8edae00cb98", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/BestInitialization.py", "max_issues_repo_name": "breandan/lagrangian_nns", "max_issues_repo_head_hexsha": "5beedd01affc2aaecc78ea158834f8edae00cb98", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/BestInitialization.py", "max_forks_repo_name": "breandan/lagrangian_nns", "max_forks_repo_head_hexsha": "5beedd01affc2aaecc78ea158834f8edae00cb98", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8924731183, "max_line_length": 112, "alphanum_fraction": 0.5715571557, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 2595}
|
function [idxStart, idxEnd] = find_ts_idx(ts, tStart, tEnd)
% find indices for starting and ending time points
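% Example: [i0, i1] = find_ts_idx(ts, 0.5, 2.0) gives the index of the first
% sample with ts.Time >= 0.5 and of the last sample with ts.Time <= 2.0;
% both outputs are -1 when the requested window is invalid.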
if(tStart > ts.Time(end))
warning(['Start time is greater than last point in time for timeseries: ' ts.Name]);
idxStart = -1;
idxEnd = -1;
return;
else
idxStart = find(ts.Time >= tStart, 1);
end
if(tEnd < ts.Time(1))
warning(['End time is smaller than first point in time for timeseries: ' ts.Name]);
idxStart = -1;
idxEnd = -1;
else
idxEnd = find(ts.Time <= tEnd, 1, 'last');
end
if(tStart >= tEnd)
warning(['Start time is larger than end time']);
idxStart = -1;
idxEnd = -1;
end
end
|
{"author": "TUMFTM", "repo": "mod_vehicle_dynamics_control", "sha": "48b12705b72740b0c1574b0da2eab66fe0c75127", "save_path": "github-repos/MATLAB/TUMFTM-mod_vehicle_dynamics_control", "path": "github-repos/MATLAB/TUMFTM-mod_vehicle_dynamics_control/mod_vehicle_dynamics_control-48b12705b72740b0c1574b0da2eab66fe0c75127/scripts/ControlVisualizer/find_ts_idx.m"}
|
from numpy import array, sin, exp, sqrt, pi
from benchmarks.benchmark import Benchmark
class Crossit(Benchmark):
"""dim: 2"""
def __init__(self, lower=-10, upper=10, dimension=2):
super(Crossit, self).__init__(lower, upper, dimension)
def get_optimum(self):
return array([[1.3491, -1.3491], [1.3491, 1.3491], [-1.3491, 1.3491], [-1.3491, -1.3491]]), -2.0626118504479614
@staticmethod
def eval(sol):
term1 = sin(sol[0]) * sin(sol[1])
term2 = exp(abs(100 - sqrt(sol[0] ** 2 + sol[1] ** 2) / pi))
return -0.0001 * (abs(term1 * term2) + 1) ** 0.1
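# Minimal usage sketch (illustrative; uses only this module):
if __name__ == '__main__':
    bench = Crossit()
    optima, f_opt = bench.get_optimum()
    # evaluating at any listed optimum should reproduce f_opt (about -2.0626)
    print(Crossit.eval(optima[0]), f_opt)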
|
{"hexsha": "4cbbe335aeaea19451104cc71a11a15c1de01ede", "size": 616, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/crossit.py", "max_stars_repo_name": "buctlab/NIO", "max_stars_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-23T09:12:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T08:43:32.000Z", "max_issues_repo_path": "benchmarks/crossit.py", "max_issues_repo_name": "buctlab/NIO", "max_issues_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmarks/crossit.py", "max_forks_repo_name": "buctlab/NIO", "max_forks_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-12-02T08:03:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T18:04:00.000Z", "avg_line_length": 32.4210526316, "max_line_length": 119, "alphanum_fraction": 0.586038961, "include": true, "reason": "from numpy", "num_tokens": 217}
|
# Copyright 2019 Yuhao Zhang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import pathmagic # noqa
from panorama.config_gen import PanoramaConfig
from panorama.net.net import PanoramaNet
from panorama.net.net import VerificationBase
from panorama.utils import bbox_iou
from panorama.utils import BoundBox
import argparse
import os
import random
import numpy as np
from panorama.misctools.utils import dir_and_class
from panorama.misctools.utils import PIL_to_cv2
from panorama.misctools.IO import save_obj
from PIL import Image
class Recognition(VerificationBase):
def __init__(self,
panoramaNet,
img_folders,
ann_folders,
in_labels,
obj_thr,
nms_thr,
GT=False
):
super(Recognition, self).__init__(
panoramaNet, img_folders, ann_folders, in_labels)
self.obj_thr = obj_thr
self.nms_thr = nms_thr
self.GT = GT
def preprocess_all(self):
cached_data = {}
for file_obj in self.file_list:
filename = file_obj['filename']
image_data = self.panoramaNet.load_from_disk(filename)
image_h, image_w, _ = image_data.shape
image_data = self.panoramaNet.preprocessed(image_data)
cached_data[filename] = image_data, image_h, image_w
return cached_data
def detect_all(self, save_name, save_dir, sample_r=None):
self.dir_class_pairs, self.class_dir_pairs = dir_and_class(
self.file_list)
if sample_r:
self.class_dir_pairs = {k:random.sample(v, sample_r) for k, v in self.class_dir_pairs.items()}
fdir_xml = {}
for xml in self.file_list:
fdir_xml[xml['filename']] = xml
self.label_filename_embs = {}
lcount = 0
for label, dirs in self.class_dir_pairs.items():
lcount += 1
detected_list = []
count = 0
for fdir in dirs:
count += 1
print(
"label count:{}/{}, file count:{}/{}".format(
lcount,
len(self.class_dir_pairs),
count,
len(dirs)
)
)
xml = fdir_xml[fdir]
objs = xml['object']
img = Image.open(fdir).convert('RGB')
img_cv2 = PIL_to_cv2(img)
dur, emb_grid, netout, image_h, image_w = self.panoramaNet.get_raw(
img_cv2, 'all')
depth_bests = [None] * len(emb_grid)
# loop of depths
for i in range(len(emb_grid)):
best = [0, None, None, None]
raw = emb_grid[i], netout[i], image_h, image_w
out_boxes, \
out_scores, \
out_classes, \
embs = self.panoramaNet.decode_raw(
raw, self.obj_thr, self.nms_thr, True)
for out_bb, out_score, emb in zip(out_boxes,
out_scores, embs):
top, left, bottom, right = out_bb
out_BB = [left, top, right, bottom]
out_BB = BoundBox(*out_BB)
for obj in objs:
name = obj['name']
if name == label:
bb = BoundBox(
obj['xmin'],
obj['ymin'],
obj['xmax'],
obj['ymax']
)
iou = bbox_iou(out_BB, bb)
if iou > best[0]:
best = [iou, out_score, emb, out_bb]
depth_bests[i] = best
if all(depth_bests):
iou_score = np.mean([x[0] for x in depth_bests])
detected_list.append([fdir, iou_score, depth_bests])
self.label_filename_embs[label] = detected_list
save_obj(self.label_filename_embs, save_name, dir=save_dir)
def run_helper(self, obj_thr, nms_thr, no_GT, neigh, k, filename_time, l_only=None, icaches=None, rec=False, rec_depth=None, cache_skip=1, cached_data=None):
total_dur = [0] * (len(self.panoramaNet.depth_ls) + 1)
solved_ins = {}
count = 0
GTCNNcount = 0
total_faces = 0
wrong = 0
gt_time = 0
total_embs_count = 0
total_cache_hit_count = 0
for file_obj in self.file_list:
filename = file_obj['filename']
labels = set([x['name'] for x in file_obj['object']])
count += 1
if icaches:
durs, res_set, GT_invoked, solved_in, embs_count, cache_hit = \
self.panoramaNet.recognize(
filename, obj_thr, nms_thr, no_GT, neigh, k, l_only, icaches, cache_skip=cache_skip, cached_data=cached_data)
total_embs_count += embs_count
total_cache_hit_count += cache_hit
cache_hit_rate = np.true_divide(
total_cache_hit_count, total_embs_count)
print("Total embs:{}, Cache hit:{}, Hit rate:{}".format(
total_embs_count, total_cache_hit_count, cache_hit_rate))
elif rec:
image_data = self.panoramaNet.load_from_disk(filename)
dur, curr_res, res_set = self.panoramaNet.predict(
image_data,
obj_thr,
nms_thr,
cascade_depth=rec_depth,
obj_class=False,
return_raw=False,
k=k)
durs = [0] * (len(self.panoramaNet.depth_ls) + 1)
durs[rec_depth] = dur
GT_invoked = False
solved_in = rec_depth
else:
durs, res_set, GT_invoked, solved_in = \
self.panoramaNet.recognize(
filename, obj_thr, nms_thr, no_GT, neigh, k, l_only, icaches, cached_data=cached_data)
total_dur = [sum(x) for x in zip(total_dur, durs)]
if GT_invoked and not no_GT:
solved_in = 'GT'
            print(labels)
print(res_set)
total_faces += len(labels)
if solved_in == 'GT':
GTCNNcount += 1
if not self.GT:
wrong += filename_time[filename][1]
gt_time += filename_time[filename][0]
else:
wrong += 0
gt_time += filename_time[filename]
else:
if res_set:
wrong += len(labels - res_set)
else:
wrong += len(labels)
if solved_in in solved_ins:
solved_ins[solved_in] += 1
else:
solved_ins[solved_in] = 1
total_time_spent = sum(total_dur) + gt_time
fps = count / total_time_spent
acc = 1 - np.true_divide(wrong, total_faces)
print(filename)
print("Acc.:{}, FPS:{}".format(acc, fps))
if icaches:
return fps, acc, total_dur, solved_ins, count, total_embs_count, total_cache_hit_count, cache_hit_rate
else:
return fps, acc, total_dur, solved_ins, count
def get_all_test_dirs(root, which, splits=['test']):
imgs = []
anns = []
for split in splits:
all_imgs_dirs = []
all_anns_dirs = []
if which == 'out' or which == 'both':
all_imgs_dirs.append(os.path.join(
root, split, 'out', 'JPEGImages'))
all_anns_dirs.append(os.path.join(
root, split, 'out', 'Annotations'))
if which == 'in' or which == 'both':
all_imgs_dirs.append(os.path.join(root, split, 'in', 'JPEGImages'))
all_anns_dirs.append(os.path.join(
root, split, 'in', 'Annotations'))
imgs += all_imgs_dirs
anns += all_anns_dirs
return imgs, anns
def main():
all_imgs_dirs, all_anns_dirs = get_all_test_dirs(
args.root, 'both', ['train', 'val'])
config_gen = PanoramaConfig(args.config_savedir,
'',
'',
'',
'',
'',
args.model_save_path,
is_force=False
)
config = config_gen.get_config()
random.seed(config['random_seed'])
panoramaNet = PanoramaNet(config)
panoramaNet.load_weights(args.model_save_path)
rec = Recognition(panoramaNet, all_imgs_dirs, all_anns_dirs,
[], args.obj_thr, args.nms_thr)
rec.detect_all(args.save_name, args.save_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--config_savedir", nargs='?',
default='./faces_config.json',
)
parser.add_argument(
"--root", type=str
)
parser.add_argument(
"--model_save_path", type=str
)
parser.add_argument(
"--save_path", type=str
)
parser.add_argument(
"--save_name", type=str
)
parser.add_argument(
"--model_qualification_path", type=str
)
parser.add_argument(
"--nms_thr", type=float, default=0.5
)
parser.add_argument(
"--obj_thr", type=float, default=0.1
)
parser.add_argument(
"--sample_r", type=int, default=None
)
args = parser.parse_args()
main()
|
{"hexsha": "66255870154de45d3a8c0f013e63e253b24b2211", "size": 10606, "ext": "py", "lang": "Python", "max_stars_repo_path": "panorama/examples/recognition.py", "max_stars_repo_name": "makemebitter/Panorama-UCSD", "max_stars_repo_head_hexsha": "bdb89d00472e449318dae322eab42b0376d6e1f3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-22T18:05:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-30T00:58:32.000Z", "max_issues_repo_path": "panorama/examples/recognition.py", "max_issues_repo_name": "makemebitter/Panorama-UCSD", "max_issues_repo_head_hexsha": "bdb89d00472e449318dae322eab42b0376d6e1f3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-06T11:48:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-06T11:48:19.000Z", "max_forks_repo_path": "panorama/examples/recognition.py", "max_forks_repo_name": "makemebitter/Panorama-UCSD", "max_forks_repo_head_hexsha": "bdb89d00472e449318dae322eab42b0376d6e1f3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-27T09:43:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-13T13:20:43.000Z", "avg_line_length": 38.1510791367, "max_line_length": 161, "alphanum_fraction": 0.5230058457, "include": true, "reason": "import numpy", "num_tokens": 2272}
|
import numpy as np
from scipy import ndimage
import queue
def region_grow(image, seed_point):
"""
Performs a region growing on the image from seed_point
:param image: An 3D grayscale input image
:param seed_point: The seed point for the algorithm
:return: A 3D binary segmentation mask with the same dimensions as image
"""
segmentation_mask = np.zeros(image.shape, np.bool)
z, y, x = seed_point
threshold = image[z, y, x]
    print('seed intensity at ({0}, {1}, {2}) is {3}'.format(x, y, z, threshold))
## TODO: choose a lower and upper threshold
threshold_lower = threshold - 200
threshold_upper = threshold + 200
_segmentation_mask = (np.greater(image, threshold_lower)
& np.less(image, threshold_upper)).astype(np.bool)
structure = np.ones((2, 2, 2))
## TODO: post-process the image with a morphological filter
_segmentation_mask = ndimage.binary_opening(_segmentation_mask, structure=structure).astype(np.bool)
_segmentation_mask = ndimage.binary_closing(_segmentation_mask, structure=structure).astype(np.bool)
to_check = queue.Queue()
check_point = np.asarray([z, y, x], dtype=np.uint32)
to_check.put(check_point)
while not to_check.empty():
check_point = to_check.get()
if _segmentation_mask[check_point[0], check_point[1], check_point[2]]:
_segmentation_mask[check_point[0], check_point[1], check_point[2]] = False
segmentation_mask[check_point[0], check_point[1], check_point[2]] = 1
# These for loops will visit all the neighbors of a voxel and see if
# they belong to the region
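            # note: range(-1, 2, 2) yields only -1 and 1, so just the 8 diagonal
            # neighbours are visited and the zero-offset guard below never fires;
            # the uint32 offsets rely on modular wraparound (-1 is stored as
            # 2**32 - 1, and the addition wraps back to index - 1)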
for ix in range(-1, 2, 2):
for iy in range(-1, 2, 2):
for iz in range(-1, 2, 2):
## TODO: implement the code which checks whether the current
## voxel (new_check_point) belongs to the region or not
if not (iz == 0 and ix == 0 and iy == 0):
new_check_point = check_point + np.asarray([iz, iy, ix], dtype=np.uint32)
if (image[new_check_point[0], new_check_point[1], new_check_point[2]]<threshold_upper and
image[new_check_point[0], new_check_point[1], new_check_point[2]]>threshold_lower):
segmentation_mask[new_check_point[0], new_check_point[1], new_check_point[2]]=1
## TODO: implement a stop criteria such that the algorithm
## doesn't check voxels which are too far away
if (
new_check_point[0] + 1 < image.shape[0] and
new_check_point[1] + 1 < image.shape[1] and
new_check_point[2] + 1 < image.shape[2] and
new_check_point[0] - 1 > 0 and
new_check_point[1] - 1 > 0 and
new_check_point[2] - 1 > 0
):
to_check.put(new_check_point)
# Your code goes here
structure = np.ones((2, 2, 2))
segmentation_mask = ndimage.binary_closing(segmentation_mask, structure=structure).astype(np.bool)
print('finished')
return segmentation_mask
|
{"hexsha": "ea690af8864994268ed4e78e74dbdf5beb53d8fa", "size": 3437, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignments/planning/segmentation.py", "max_stars_repo_name": "ProbstAlex/BME_CAS", "max_stars_repo_head_hexsha": "d7eca91b8f51170da4f3e5b8067e5b2f18b95d79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignments/planning/segmentation.py", "max_issues_repo_name": "ProbstAlex/BME_CAS", "max_issues_repo_head_hexsha": "d7eca91b8f51170da4f3e5b8067e5b2f18b95d79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignments/planning/segmentation.py", "max_forks_repo_name": "ProbstAlex/BME_CAS", "max_forks_repo_head_hexsha": "d7eca91b8f51170da4f3e5b8067e5b2f18b95d79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.8115942029, "max_line_length": 118, "alphanum_fraction": 0.574629037, "include": true, "reason": "import numpy,from scipy", "num_tokens": 794}
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
from recognition.lpr_util import sparse_tuple_from, DICT, decode_sparse_tensor
dict2 = {value:key for key, value in DICT.items()}
provinces = ["皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
"琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "O"]
alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z', 'O']
ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']
class DataGenerator:
"""docstring for DataGenerator"""
def __init__(self, img_dir, batch_size=1, img_size=[0, 0], num_channels=3):
self._img_dir = img_dir
self._batch_size = batch_size
self._img_w, self._img_h = img_size
self._num_channels = num_channels
self._num_examples = 0
self._next_index = 0
self._filenames = []
self._labels = []
self.filenames = []
self.labels = []
self.init()
def init(self):
self.filenames = self.get_data_list()
self._num_examples = len(self.filenames)
for filename in self.filenames:
fn, _ = os.path.splitext(filename) #0_0_22_27_27_33_16
if len(fn) < 7:
self.labels.append(0)
elif '\u4e00' <= fn[0]<= '\u9fff':
self.labels.append(fn)
else:
lp_number_encoder = fn.split('-')[4].split('_')
#lp_number_name = 'S01_AY33909S_0.jpg'
lp_number = self.decode_lpnumber(lp_number_encoder)
lp_len = len(lp_number)
self.labels.append(DICT[lp_number[:3]] + lp_number[4:lp_len])
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._filenames = [self.filenames[i] for i in perm]
self._labels = np.array(self.labels)[perm]
def next_batch(self, mode='recognition'): #mode = ['detection' or 'recognition']
start = self._next_index
end = start + self._batch_size
if end > self._num_examples:
raise Exception('There are no enough data left for a batch!')
self._next_index = end
labels = []
for j, i in enumerate(range(start, end)):
fname = self._filenames[i]
            # cv2.imread() returns a numpy.ndarray in (H, W, C) layout with BGR channel order
#img = cv2.imread(os.path.join(self._img_dir, fname))
file_path = os.path.join(self._img_dir, fname)
#print(file_path)
img = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
ratio = float(max(img.shape[:2])) / min(img.shape[:2])
side = int(ratio * 288.)
bound_dim = min(side + (side % (2 ** 4)), 608)
I = self.im2single(img)
min_dim_img = min(I.shape[:2])
factor = float(bound_dim)/min_dim_img
net_step = 16
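            # round w and h up to the next multiple of net_step so the input size matches the network stride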
w,h = (np.array(I.shape[1::-1],dtype=float)*factor).astype(int).tolist()
w += (w%net_step!=0)*(net_step - w%net_step)
h += (h%net_step!=0)*(net_step - h%net_step)
#print(w, h)
Iresized = cv2.resize(I,(w,h))
T = Iresized.copy()
T = T.reshape((1, T.shape[0],T.shape[1],T.shape[2]))
if mode == 'detection' and len(fname) > 10:
pt4 = file_path.split('-')[-4].split('_')
pt4 = np.array(pt4)[[2, 3, 0, 1]]
w, h = img.shape[1], img.shape[0]
ps1 = np.array([self.divi(pt.split('&'), [w, h]) for pt in pt4])
labels.append(ps1)
if mode == 'recognition':
labels = self._labels[start:end, ...]
labels = [list(i) for i in labels]
return I, Iresized, T, labels, fname
    # scan the image directory and return the list of image file names
def get_data_list(self):
        '''collect the image files under the directory'''
img_files = []
exts = ['jpg', 'png', 'jpeg', 'JPG']
for parent, dirnames, filenames in os.walk(self._img_dir):
for filename in filenames:
for ext in exts:
if filename.endswith(ext):
img_files.append(filename)
break
return img_files
def decode_lpnumber(self, names):
name = []
pro = dict2[provinces[int(names[0])]]
alp = alphabets[int(names[1])]
for i in names[2: ]:
name.append(ads[int(i)])
adss = ''.join(name)
lp_number = pro + '_' + alp + adss
return lp_number
def im2single(self, I):
assert(I.dtype == 'uint8')
return I.astype('float32')/255.
def has_chinese(self, str):
for ch in str:
if '\u4e00' <= ch<= '\u9fff':
return True
return False
def divi(self, list1, list2):
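        # normalize an (x, y) point by the image (width, height) into [0, 1] relative coordinates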
return [round(float(list1[0]) / list2[0], 6), round(float(list1[1]) / list2[1], 6)]
|
{"hexsha": "0499250407f4479e90c5a56daf47bd0b420d21b1", "size": 5165, "ext": "py", "lang": "Python", "max_stars_repo_path": "recognition/data_generator.py", "max_stars_repo_name": "shuxin-qin/eulpr", "max_stars_repo_head_hexsha": "9be720a3f8dc9ef322e9d5358cc13315185eacbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "recognition/data_generator.py", "max_issues_repo_name": "shuxin-qin/eulpr", "max_issues_repo_head_hexsha": "9be720a3f8dc9ef322e9d5358cc13315185eacbb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recognition/data_generator.py", "max_forks_repo_name": "shuxin-qin/eulpr", "max_forks_repo_head_hexsha": "9be720a3f8dc9ef322e9d5358cc13315185eacbb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9779411765, "max_line_length": 117, "alphanum_fraction": 0.5072604066, "include": true, "reason": "import numpy", "num_tokens": 1571}
|
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import numpy as np
import math as math
import random as rand
import os
import csv
rcParams.update({'figure.autolayout': True})
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86']
times = []
V1 = []
V2 = []
Vth = 1
Vr = 0
with open('gap_potential.dat', newline='') as file:
datareader = csv.reader(file, delimiter=' ')
for row in datareader:
times.append(float(row[0]))
V2.append(float(row[1]))
V1, V2 = np.array(V1), np.array(V2)
plt.figure(figsize=(11,4))
plt.plot(times, V2, alpha=0.75, color=c[0], linestyle='-', label='Voltage $V$')
plt.xlim(5,15)
plt.ylim(0,0.2)
plt.xlabel('time t ($10^{-2}$ seconds)')
plt.ylabel('voltage $V_k, k \in \{1,2\}$')
plt.title('Gap Potential with $\\beta=0.4$ and $\gamma=0.1$')
plt.legend(loc='upper right')
plt.savefig(f'gap_potential.png', dpi=600)
plt.show()
|
{"hexsha": "efb1c3b69eec3bfdcc798e1b0179dad8c536c76b", "size": 909, "ext": "py", "lang": "Python", "max_stars_repo_path": "gap_potential/xpp_to_py.py", "max_stars_repo_name": "helene-todd/XPPAUT_code", "max_stars_repo_head_hexsha": "e4caf112c03889a68eed0f4e5fa9d9d436918914", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gap_potential/xpp_to_py.py", "max_issues_repo_name": "helene-todd/XPPAUT_code", "max_issues_repo_head_hexsha": "e4caf112c03889a68eed0f4e5fa9d9d436918914", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gap_potential/xpp_to_py.py", "max_forks_repo_name": "helene-todd/XPPAUT_code", "max_forks_repo_head_hexsha": "e4caf112c03889a68eed0f4e5fa9d9d436918914", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1707317073, "max_line_length": 79, "alphanum_fraction": 0.6490649065, "include": true, "reason": "import numpy", "num_tokens": 292}
|
# Import the relevant packages
import os
import tweepy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import pickle
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
# define your parameters
text_query = "Coronavirus"
coordinates = '43.651070,-79.347015,50mi'
language = 'en'
result_type = 'recent'
since_date = '2020-09-06'
until_date = '2020-09-13'
max_tweets = 10000
# define your keys (read from environment variables rather than hard-coding
# secrets; the variable names below are placeholders)
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_token = os.environ['TWITTER_ACCESS_TOKEN']
access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
# Creation of query method using parameters
tweets = tweepy.Cursor(api.search,
q = text_query,
geocode = coordinates,
lang=language,
result_type = result_type,
since = since_date,
until = until_date,
count = 100).items(max_tweets)
# List comprehension pulling chosen tweet information from tweets iterable object
# Add or remove tweet information you want in the below list comprehension
tweets_list = [[tweet.created_at,tweet.text] for tweet in tweets]
# Creation of dataframe from tweets_list, with named columns
tweets_df = pd.DataFrame(tweets_list,columns=['date','text'])
# Feature Engineering
# Hours
tweets_df['hour'] = [dt.hour for dt in tweets_df['date'].astype(object)]
# Days
tweets_df['day'] = [dt.day for dt in tweets_df['date'].astype(object)]
# Month
tweets_df['month'] = [dt.month for dt in tweets_df['date'].astype(object)]
# Weekday
tweets_df['dayofweek'] = [dt.dayofweek for dt in tweets_df['date'].astype(object)]
# Delete date column
tweets_df = tweets_df.drop(['date'],axis=1)
# Retrieve the hashtags and add the column to the dataset
hashtags = []
for tweet in tweets_df['text']:
hashtags.append([i for i in tweet.split() if i.startswith("#") ])
tweets_df['hashtags'] = hashtags
# Find number of hashtags in each tweet
hashtag_counts = []
for hashtag in hashtags:
hashtag_counts.append(len(hashtag))
tweets_df['hashtag_counts'] = hashtag_counts
# Retrieve the user names and add the column to the dataset
users = []
for tweet in tweets_df['text']:
users.append([i for i in tweet.split() if i.startswith("@") ])
tweets_df['users'] = users
# Find number of tagged users in each tweet
user_counts = []
for user in users:
user_counts.append(len(user))
tweets_df['user_counts'] = user_counts
# Drop users column
tweets_df = tweets_df.drop(['users'],axis=1)
# Retrieve the URLs from the tweets
from urlextract import URLExtract
extractor = URLExtract()
urls = []
for i in range(len(tweets_df)):
urls.append(extractor.find_urls(tweets_df['text'][i]))
tweets_df['urls'] = urls
# Find number of urls in each tweet
url_counts = []
for url in tweets_df['urls']:
url_counts.append(len(url))
tweets_df['url_counts'] = url_counts
# Drop urls column
tweets_df = tweets_df.drop(['urls'],axis=1)
# Remove excessive information from text Column
import re
# Create a function called clean that removes hyperlinks, hashtags and mentions
def clean(x):
    x = re.sub(r"^RT[\s]+", "", x)
    x = re.sub(r"https?:\/\/.*[\r\n]*", "", x)
    #x = re.sub('[^ ]+\.[^ ]+','',x)
    x = re.sub(r"#","", x)
    x = re.sub(r"@[A-Za-z0-9]+","", x)
    return x
# Apply the clean function to text column
tweets_df['text'] = tweets_df['text'].apply(clean)
# Vectorization
# Load features from training dataset
transformer = TfidfTransformer()
loaded_features = pickle.load(open("pickle/feature.pkl", "rb"))
# Vectorize the text column
X_text = tweets_df['text'].astype(str)
tfidfconverter = TfidfVectorizer(max_features=10000,
min_df=5, max_df=0.7,
stop_words=stopwords.words('english'),
token_pattern=r'\b[^\d\W]+\b',
strip_accents = "ascii",
vocabulary = loaded_features)
# Convert the features in test set to train set
X_text = transformer.fit_transform(tfidfconverter.fit_transform(X_text))
X_text = pd.DataFrame(columns=tfidfconverter.get_feature_names(),data=X_text.toarray())
# Retrieve the numerical columns (tweets_df has no 'topic' column here)
X_num = tweets_df.drop(['text','hashtags'],axis=1)
# Concatenate the test dataset
X_sample = pd.concat([X_num,X_text],axis=1).astype('int64')
# load the model from disk
filename = 'sgd_model.sav'
sgd_incremental_model = pickle.load(open(filename, 'rb'))
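# A minimal usage sketch, assuming the pickled object follows the
# scikit-learn estimator API (the predict() call and the 'sentiment'
# column name are illustrative assumptions, not part of the original
# pipeline):
y_pred = sgd_incremental_model.predict(X_sample)
tweets_df['sentiment'] = y_pred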
|
{"hexsha": "331d054523b7dabb30f6c4ceb1c24f690b747426", "size": 4884, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/06_twitter_api.py", "max_stars_repo_name": "jennafu/howistwitterfeeling", "max_stars_repo_head_hexsha": "a5e1defb78f2ab714829d6ba936d77a651a40b91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/06_twitter_api.py", "max_issues_repo_name": "jennafu/howistwitterfeeling", "max_issues_repo_head_hexsha": "a5e1defb78f2ab714829d6ba936d77a651a40b91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/06_twitter_api.py", "max_forks_repo_name": "jennafu/howistwitterfeeling", "max_forks_repo_head_hexsha": "a5e1defb78f2ab714829d6ba936d77a651a40b91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8857142857, "max_line_length": 87, "alphanum_fraction": 0.6961506962, "include": true, "reason": "import numpy", "num_tokens": 1246}
|
# take an unaligned seed -> make an MSA model
function build_model(fileseed::String, filefull::String, ctype::Symbol, L::Int64;
filename_ins::String="LambdaOpen_LambdaExt.dat",
filename_par::String="Parameters_PlmDCA.dat",
filename_gap::String="Gap_Ext_Int.dat",
Mtest::Int64=0,
verbose::Bool=true)
if ctype != :amino && ctype != :nbase
    error("Wrong second argument: choose between :amino and :nbase")
end
println("### Reading seed ###")
if ctype == :nbase
print_pos = true
else
print_pos = false
end
seed = DCAlign.readfull(fileseed, ctype=ctype, pos = true)
println("### Inferring insertions penalties ###")
l_o, l_e = infer_ins_pen(seed, L)
println("### Inferring a Potts model using PlmDCA ###")
aligntmp = filter_insertions(seed)
println("Temporary FASTA alignment in ", aligntmp)
PlmData = PlmDCA.plmdca(aligntmp, theta=0.20)
println("### Finding gap penalties ###")
println("WARNING: Reasonable values are obtained when using many (> 500) sequences")
full = DCAlign.readfull(filefull, ctype=ctype, pos = print_pos)
if Mtest == 0
Mtest = length(seed)
println("Using all seed sequences to get the gap penalties")
else
println("Using ", Mtest, " out of ", length(seed), " to get the gap penalties")
end
fulltmp = extract_full_seq(full, seed, Mtest, ctype=ctype)
println("Temporary full length sequences in ", fulltmp)
if ctype == :amino
q = 21
elseif ctype == :nbase
q = 5
end
print_results(filename_ins, l_o, l_e, filename_par, PlmData, ctype, L, filename_gap, 0.0, 0.0) # tmp
mu = 0.00:0.50:4.00
muint = 0.00:0.50:4.00
d = zeros(length(mu),length(muint))
aseed = DCAlign.readfull(aligntmp, ctype=ctype, pos = true)
for a in 1:length(mu)
for b in 1:length(muint)
println("#### Aligning ", Mtest, " sequences using (μext, μint) = (", mu[a], ", ", muint[b],")")
filename_out = tempname()
filename_flag = tempname()
DCAlign.align_all(q, L, filename_par, fulltmp, filename_ins, mu[a], muint[b]; typel=:plm, filename_flag=filename_flag, filename_align=aligntmp, filename_ins=fileseed, filename_out=filename_out, verbose = verbose, maxiter = 300)
tmpseed = DCAlign.readfull(filename_out, ctype=ctype, pos = true)
d[a,b] = compute_average_dist(aseed, tmpseed)
rm(filename_out)
rm(filename_flag)
end
end
#println(d)
aux = argmin(d)
muext_best = mu[aux[1]]
muint_best = muint[aux[2]]
print_results(filename_ins, l_o, l_e, filename_par, PlmData, ctype, L, filename_gap, muext_best, muint_best)
rm(fulltmp)
rm(aligntmp)
println("Done!")
end
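# A minimal usage sketch (file names and model length below are hypothetical):
#   build_model("seed_unaligned.fasta", "full_length.fasta", :amino, 70;
#               Mtest=500, verbose=false)
# This writes the inferred insertion penalties, Potts parameters and gap
# penalties to the files named by the keyword arguments.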
|
{"hexsha": "22181367554ed88526cb651009e3a95faeb83698", "size": 2557, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/build_model.jl", "max_stars_repo_name": "infernet-h2020/DCAbuild", "max_stars_repo_head_hexsha": "09c86361b14522f3da851231c42dabef8b4d5dbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/build_model.jl", "max_issues_repo_name": "infernet-h2020/DCAbuild", "max_issues_repo_head_hexsha": "09c86361b14522f3da851231c42dabef8b4d5dbb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/build_model.jl", "max_forks_repo_name": "infernet-h2020/DCAbuild", "max_forks_repo_head_hexsha": "09c86361b14522f3da851231c42dabef8b4d5dbb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2077922078, "max_line_length": 230, "alphanum_fraction": 0.6980836918, "num_tokens": 773}
|
#!/usr/bin/env python
# rmsd.py
import MDAnalysis as mda
from MDAnalysis.analysis.rms import RMSD
import numpy
import argparse
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--ref', dest='refpath', required=True)
parser.add_argument('--top', dest='toppath', required=True)
parser.add_argument('--mob', dest='mobpath', required=True)
parser.add_argument('--for', dest='FORM', required=True)
args = parser.parse_args()
return args.refpath, args.toppath, args.mobpath, args.FORM
def calc_pcoord(refpath, toppath, mobpath, FORM):
""" Calculate pcoord (RMSD) using MDAnalysis and save results to file specified
in get_pcoord.sh/runseg.sh. Here the filename is rmsd.dat, but if you were
calculating something else such as distance you could change the filename to
distance.dat instead. Just make sure to change the filename both in this
script and in get_pcoord.sh/runseg.sh.
Parameters:
refpath (str): path to initial state coordinate file.
toppath (str): path to topology file.
mobpath (str): path to trajectory file.
FORM (str): indicates whether we're evaluating a basis/initial state or not.
If we are evaluating an initial/basis state (ie. if the script is
called from get_pcoord.sh) then FORM = 'RESTRT', and we check to
make sure our pcoord is a numpy array with shape (1,). Otherwise,
the pcoord is a numpy array with shape = (pcoord_len, pcoord_ndim)
as specified in west.cfg.
"""
# Create Universe objects for initial structure and segment
# structure. (args: topology file, trajectory file)
# If segment file is Amber netCDF trajectory, it must have extension
# ".ncdf" to be recognized automatically by MDAnalysis. The filetype can
# also be specified using the optional "format" argument.
init_u = mda.Universe(toppath, refpath, format="RESTRT")
seg_u = mda.Universe(toppath, mobpath, format=str(FORM))
# Create c-alpha AtomGroups.
init_cAlpha = init_u.select_atoms("name CA")
seg_cAlpha = seg_u.select_atoms("name CA")
# Calculate RMSD (relative to initial structure) at each time step.
R = RMSD(seg_cAlpha, init_cAlpha, select = 'name CA', center=True, superposition=True)
R.run()
# Write RMSD to output file. The output format is identical for basis/
# initial-state evaluations (FORM == "RESTRT") and regular segments.
numpy.savetxt("rmsd.dat", R.rmsd[:,2])
def main():
# Get arguments from the caller and pass to calc_pcoord().
refpath, toppath, mobpath, FORM = parse_arguments()
calc_pcoord(refpath, toppath, mobpath, FORM)
if __name__ == "__main__":
main()
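# Example invocation (hypothetical file names; the --for value must match
# the segment trajectory format, e.g. NCDF for Amber netCDF):
#   python rmsd.py --ref bstate.rst --top system.prmtop --mob seg.ncdf --for NCDF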
|
{"hexsha": "f5badc89db64c8ac4ff0ed021377d3f0b600797e", "size": 2712, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/examples/analysis_mdanalysis/rmsd.py", "max_stars_repo_name": "mczwier/westpa_py3", "max_stars_repo_head_hexsha": "ad0d778c43b7009ee57251bf1fa1e908c4f1a2e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-09-12T01:14:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-14T23:35:06.000Z", "max_issues_repo_path": "lib/examples/analysis_mdanalysis/rmsd.py", "max_issues_repo_name": "dnlebard/westpa_py3", "max_issues_repo_head_hexsha": "6dfb95f4057c1e0d426f7b67ff82371657877f2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-07-16T23:45:41.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-26T17:30:53.000Z", "max_forks_repo_path": "lib/examples/analysis_mdanalysis/rmsd.py", "max_forks_repo_name": "mczwier/westpa_py3", "max_forks_repo_head_hexsha": "ad0d778c43b7009ee57251bf1fa1e908c4f1a2e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-11-20T22:57:08.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-16T16:54:47.000Z", "avg_line_length": 41.7230769231, "max_line_length": 90, "alphanum_fraction": 0.6910029499, "include": true, "reason": "import numpy", "num_tokens": 667}
|
"""
Module for various types of particle emission in WarpX.
"""
import collections
import logging
import warnings
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numba
import numpy as np
from pywarpx import callbacks, picmi
import skimage.measure
from mewarpx.mespecies import Species
from mewarpx.mwxrun import mwxrun
from mewarpx.utils_store import appendablearray, parallel_util
import mewarpx.utils_store.mwxconstants as constants
import mewarpx.utils_store.util as mwxutil
# Get module-level logger
logger = logging.getLogger(__name__)
class Injector(object):
"""Base class for injection.
All injectors must include an emitter object, and should also include a
'name' field for diagnostics.
"""
emitter = None
# This is overridden if a diagnostic is installed to record injected
# current.
injector_diag = None
# fields is used by the diags.FluxInjectorDiag to know what to write to
# the CSV file. It can be overridden by child classes, but is not currently
# adjustable by the user.
# IF CHANGING THIS, CHANGE IN self.record_injectedparticles() AS WELL.
fields = ['t', 'step', 'species_id', 'V_e', 'n', 'q', 'E_total']
# @staticmethod
# def setup_warp():
# """Stuff that needs to be set before injectors are used."""
# # Update warp derived quantities if needed.
# warp.derivqty()
# # Record E_total
# if 'E_total' not in warp.Species._addedpids:
# warp.Species.addpid('E_total')
@staticmethod
def compute_npart(npart_total, unique_particles):
"""Compute number of particles to insert at a given timestep.
This function translates between total particle number and this
processor's particle numbers. If particles are designated "unique",
none are discarded by WarpX so we have logic here to give the processor
the right number of particles, with additional logic to load-balance
the remainder. If unique_particles is False, WarpX essentially does the
particle discarding, so each processor should inject the whole number
of particles to start.
Arguments:
npart_total (int): Integer number of total particles to insert this
timestep.
unique_particles (bool): If True, WarpX keeps all particles sent to
it. If False, it only keeps a processor's fraction of total
particles.
Returns:
npart (int): Integer number of total particles for this processor
to insert this timestep.
"""
if not unique_particles:
return npart_total
npart = npart_total // mwxrun.n_procs
# Early-numbered processors add one additional particle if needed.
# Particles get re-distributed between processors after injection, so
# this shouldn't load-imbalance anything.
if mwxrun.me < (npart_total % mwxrun.n_procs):
npart += 1
return npart
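# Worked example (hypothetical counts): with npart_total=10 and
# mwxrun.n_procs=4, 10 // 4 == 2 with remainder 2, so processors 0 and 1
# insert 3 particles each while processors 2 and 3 insert 2 each,
# summing to 10.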
def getvoltage_e(self):
"""Return the electrical voltage of the injector. Defaults to returning
0, unless an emitter is associated with this injector (it should be) in
which case return the emitter's electrical voltage.
Child classes can override this if needed.
"""
if self.emitter is not None:
return self.emitter.getvoltage_e()
return 0.
def init_injectedparticles(self, fieldlist):
"""Set up the injected particles array. Call before
append_injectedparticles.
Arguments:
fieldlist (list): List of string titles for the fields. Order is
important; it must match the order for future particle appends
that are made.
"""
self._injectedparticles_fields = fieldlist
self._injectedparticles_data = appendablearray.AppendableArray(
typecode='d', unitshape=[len(fieldlist)])
def record_injectedparticles(self, species, w, E_total=None,
n=None):
"""Handles transforming raw particle information to the information
used to record particles as a function of time. Also handles parallel
sum and appending to the data array the current amount of injection.
Note:
Assumes the fixed form of fields given in Injector(). Doesn't
check since this is called many times.
Since a parallelsum is performed, call this with only the species
argument if no particles are being added by this processor.
Arguments:
species (:class:`mewarpx.mespecies.Species`): Species of particle
w (np.ndarray or float): Array of length npart with particle weights
E_total (np.ndarray or float): Array of length npart with E_total
values.
n (int): Number of macroparticles, _only_ needed if overriding the
length of E_total. This is useful mostly in the case that
E_total is already summed over particles, in which case a
single number can be passed for it rather than an array.
"""
if n is not None and np.size(w) != 1:
raise RuntimeError("Cannot pass array for w and specify n")
if n is None and np.size(w) == 1:
raise RuntimeError("Cannot pass single value for w and not specify n")
data = np.zeros(7)
# time for current step
data[0] = mwxrun.get_it() * mwxrun.get_dt()
# current step
data[1] = mwxrun.get_it()
# species ID
data[2] = species.species_number
# voltage of emitter
data[3] = self.getvoltage_e()
# number of macroparticles
data[4] = n if np.size(w) == 1 else np.size(w)
# total charge emitted
data[5] = species.sq * np.sum(w)
if E_total is not None:
data[6] = np.sum(E_total)
self.append_injectedparticles(data)
def append_injectedparticles(self, data):
"""Append one or more lines of injected particles data.
Arguments:
data (np.ndarray): Array of shape (m) or (n, m) where m is the
number of fields and n is the number of rows of data to append.
"""
self._injectedparticles_data.append(data)
def get_injectedparticles(self, clear=False):
"""Retrieve a copy of injectedparticles data.
Arguments:
clear (bool): If True, clear the particle data rows entered (field
names are still initialized as before). Default False.
Returns:
injectedparticles_dict (collections.OrderedDict): Keys are the
originally passed field strings for lost particles. Values are
an (n)-shape numpy array for each field.
"""
lpdata = self._injectedparticles_data.data()
# Sum all except t/step/species_id/V_e from all processors
lpdata[:,4:] = parallel_util.parallelsum(np.array(lpdata[:,4:]))
lpdict = collections.OrderedDict(
[(fieldname, np.array(lpdata[:, ii], copy=True))
for ii, fieldname in enumerate(self._injectedparticles_fields)])
if clear:
self._injectedparticles_data.cleardata()
return lpdict
class FixedNumberInjector(Injector):
"""Inject n particles every t timesteps."""
def __init__(self, emitter, species, npart,
injectfreq=None, injectoffset=1,
weight=0., rseed=None,
name=None, unique_particles=True):
"""Sets up user-specified injection with fixed timestep and weights.
Arguments:
emitter (:class:`mewarpx.emission.Emitter`): Emitter object that
will specify positions and velocities of particles to inject.
species (picmi.Species): Premade species to inject particles of.
npart (int): Number of particles to inject total
injectfreq (int): Number of steps to wait for next injection.
Default infinity.
injectoffset (int): First timestep to inject. Default 1 (the
first possible timestep in WarpX).
weight (float): Macroparticle weight to be introduced.
rseed (int): If specified, all injection should be repeatable using
this rseed. At present each set of injected particles will have
the same initial position and velocities as the previous set.
name (str): Injector name for diagnostics. Constructed from
speciesname if not given.
unique_particles (bool): Whether WarpX will keep all particles
given it from every processor (True) or keep only a fraction of
particles based on processor count (False).
"""
# Save class parameters
self.emitter = emitter
self.species = species
self.npart_total = npart
self.injectfreq = injectfreq
if self.injectfreq is None:
self.injectfreq = np.inf
self.injectoffset = injectoffset
self.weight = weight
self.rseed = rseed
self.name = name
if self.name is None:
self.name = "fixed_injector_" + self.species.name
self.unique_particles = unique_particles
logger.info(
    f"Fixed injection of {self.npart_total} particles, "
    f"weight {self.weight}, every {self.injectfreq} "
    f"timesteps."
)
callbacks.installparticleinjection(self.inject_particles)
# add E_total PID to this species
self.species.add_pid("E_total")
def inject_particles(self):
"""Perform the actual injection!"""
effective_it = mwxrun.get_it() - self.injectoffset
if effective_it >= 0 and effective_it % self.injectfreq == 0:
# Adjust npart for processor number if needed
npart = self.compute_npart(
npart_total=self.npart_total,
unique_particles=self.unique_particles
)
# TODO randomdt and velhalfstep are False simply because they're
# not supported at present
particles_dict = self.emitter.get_newparticles(
npart=npart, w=self.weight,
q=self.species.sq, m=self.species.sm,
rseed=self.rseed,
randomdt=False, velhalfstep=False
)
logger.info(f"Inject {len(particles_dict['x'])} particles")
# Note some parts of WarpX call the variables ux and some parts vx,
# and they're referred to as momenta. But I don't see anywhere
# they're actually used as momenta including the particle mass -
# the actual update is in Source/Particles/Pusher/UpdatePosition.H
mwxrun.sim_ext.add_particles(
self.species.name,
x=particles_dict['x'],
y=particles_dict['y'],
z=particles_dict['z'],
ux=particles_dict['vx'],
uy=particles_dict['vy'],
uz=particles_dict['vz'],
w=particles_dict['w'],
E_total=particles_dict['E_total'],
unique_particles=self.unique_particles
)
if self.injector_diag is not None:
self.record_injectedparticles(
species=self.species,
w=particles_dict['w'],
E_total=particles_dict['E_total'],
)
class ThermionicInjector(Injector):
"""Performs standard every-timestep injection from a thermionic cathode."""
def __init__(self, emitter, species, npart_per_cellstep, T=None,
WF=None, A=constants.A0*1e4, use_Schottky=True,
allow_poisson=False, wfac=1.0,
name=None, profile_decorator=None,
unique_particles=True):
"""Sets up user-specified injection for warpX.
Arguments:
emitter (:class:`mewarpx.emission.Emitter`): Emitter object that
will specify positions and velocities of particles to inject.
species (mewarpx.mespecies.Species): A premade species. Note only
electrons will actually give physically meaningful weight
calculations.
npart_per_cellstep (int): Number of macroparticles to inject per
cell on the cathode surface per timestep
T (float): Cathode temperature (K). Uses emitter T if not specified.
WF (float): Cathode work function (eV). Uses WF of the conductor
associated with the emitter if not specified.
A (float): Coefficient of emission in Amp/m^2/K^2. Default is
the theoretical max, approximately 1.2e6.
use_Schottky (bool): Flag specifying whether or not to augment the
emission current via field-dependent particle weights.
Defaults to True.
allow_poisson (bool): If True and < npart_per_cellstep electrons
would be injected per cell, inject whole electrons with a
Poisson distribution. If False, inject fractions of electrons.
Default False.
wfac (float): Constant factor applied to variable particle
weights, which changes the actual injection weight from the
physically calculated quantity. Currently used only for
testing, or for e.g. artificially lowering weight of trace
particles.
name (str or None): Injector name for diagnostics. Constructed from
speciesname if not given.
profile_decorator (decorator): A decorator used to profile the
injection methods and related functions.
unique_particles (bool): Whether WarpX will keep all particles
given it from every processor (True) or keep only a fraction of
particles based on processor count (False). Default True.
"""
# sanity check species
if species.particle_type != 'electron':
raise AttributeError(
"Thermionic emission is only applicable with electrons as the "
f"injection species, but species type {species.particle_type} "
"was given."
)
# Save class parameters
self.emitter = emitter
self.species = species
self.T = T
self.WF = WF
self.A = A
self.use_Schottky = use_Schottky
self.wfac = wfac
# Get values from the emitter and its conductor if not specified
if self.T is None:
self.T = self.emitter.T
if self.WF is None:
self.WF = self.emitter.conductor.WF
if profile_decorator is not None:
self.inject_particles = profile_decorator(self.inject_particles)
self.record_injectedparticles = (
profile_decorator(self.record_injectedparticles)
)
self.name = name
if self.name is None:
self.name = "thermionic_injector_" + self.species.name
self.unique_particles = unique_particles
area = self.emitter.area
dt = mwxrun.get_dt()
if (area is None) or (area <= 0.0) or (dt <= 0.0):
raise ValueError(f"area {area} or dt {dt}"
f" is invalid for injection.")
# Determine weight and injection numbers
electrons_per_step = (mwxutil.J_RD(self.T, self.WF, self.A)
* area * dt / picmi.constants.q_e)
logger.info(
f"Setting up thermionic paticle injection. Area {area:.3g} m^2, "
f"dt {dt:.3e} s, J {mwxutil.J_RD(self.T, self.WF, self.A):.3g} "
"A/m^2."
)
logger.info(
"Emission current corresponds to injection of "
f"{electrons_per_step:.2e} electrons per timestep"
)
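# Rough scale check (hypothetical operating point): with T = 1100 K,
# WF = 2.0 eV and the default A, J_RD is of order 1e3 A/m^2, so a
# 1 cm^2 cathode with dt = 1e-12 s would emit roughly 6e5 electrons
# per timestep.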
max_injections = int(round(npart_per_cellstep *
self.emitter.cell_count))
# If it was requested to inject more particles than we have electrons,
# we instead inject electrons with a poisson distribution if allowed.
if electrons_per_step < max_injections and allow_poisson:
self.ptcl_per_step = electrons_per_step
self.weight = self.wfac
self.poisson = True
logger.info(
"Using stochastic injection of electrons with "
"Poisson sampling"
)
else:
self.ptcl_per_step = max_injections
self.weight = self.wfac * electrons_per_step / self.ptcl_per_step
self.poisson = False
logger.info(
f"Using deterministic injection of {self.ptcl_per_step} "
f"particles per step, each with weight {self.weight}"
)
# create new species that will be used to properly distribute new
# particles and retrieve the electric field at their injection sites in
# order to calculate Schottky enhancement
if self.use_Schottky:
self.injection_species = Species(
particle_type='electron', name=self.species.name+'_injection'
)
else:
self.injection_species = self.species
callbacks.installparticleinjection(self.inject_particles)
# add E_total PID to this species
self.species.add_pid("E_total")
self.injection_species.add_pid("E_total")
if self.use_Schottky:
# add PIDs to hold the normal vector
# TODO work out a better way to handle these PIDs since this is not
# a great use of memory
self.species.add_pid("norm_x")
self.species.add_pid("norm_y")
self.species.add_pid("norm_z")
self.injection_species.add_pid("norm_x")
self.injection_species.add_pid("norm_y")
self.injection_species.add_pid("norm_z")
def inject_particles(self):
"""Perform the actual injection!"""
if self.poisson:
num_injections = np.random.poisson(self.ptcl_per_step)
else:
num_injections = self.ptcl_per_step
# Adjust npart for processor number if needed
npart = self.compute_npart(
npart_total=num_injections,
unique_particles=self.unique_particles
)
# TODO randomdt and velhalfstep are False simply because they're
# not supported at present
particles_dict = self.emitter.get_newparticles(
npart=npart, w=self.weight, q=self.species.sq, m=self.species.sm,
randomdt=False, velhalfstep=False
)
extra_pids = {}
extra_pids['E_total'] = particles_dict['E_total']
extra_pids['w'] = particles_dict['w']
if self.use_Schottky:
# Determine the local surface normal for each particle
normal_vectors = self.emitter.get_normals(
particles_dict['x'], particles_dict['y'], particles_dict['z']
)
extra_pids['norm_x'] = normal_vectors[:, 0]
extra_pids['norm_y'] = normal_vectors[:, 1]
extra_pids['norm_z'] = normal_vectors[:, 2]
# Note some parts of WarpX call the variables ux and some parts vx,
# and they're referred to as momenta. But I don't see anywhere
# they're actually used as momenta including the particle mass -
# the actual update is in Source/Particles/Pusher/UpdatePosition.H
mwxrun.sim_ext.add_particles(
self.injection_species.name,
x=particles_dict['x'],
y=particles_dict['y'],
z=particles_dict['z'],
ux=particles_dict['vx'],
uy=particles_dict['vy'],
uz=particles_dict['vz'],
unique_particles=self.unique_particles,
**extra_pids
)
if self.use_Schottky:
# Up-weight the particles by the local Schottky factor, calculated
# as exp[sqrt(e / 4*pi*eps0) / (kT) * sqrt(max(-E, 0))]
pre_fac = (
np.sqrt(constants.e / (4.0 * np.pi * constants.epsilon_0))
/ (constants.kb_eV * self.emitter.T)
)
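# Rough scale (hypothetical values): at T = 1100 K this pre_fac is of
# order 4e-4 per sqrt(V/m), so a surface field of 1e6 V/m up-weights
# particles by about exp(0.4) ~ 1.5.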
mwxrun.calc_Schottky_weight(
self.injection_species.name, pre_fac
)
# get the total injected weight and energy
total_weight = 0.
total_energy = 0.
npart = 0
weight_arrays = mwxrun.sim_ext.get_particle_arrays(
self.injection_species.name, 'w', 0
)
ux_arrays = mwxrun.sim_ext.get_particle_arrays(
self.injection_species.name, 'ux', 0
)
uy_arrays = mwxrun.sim_ext.get_particle_arrays(
self.injection_species.name, 'uy', 0
)
uz_arrays = mwxrun.sim_ext.get_particle_arrays(
self.injection_species.name, 'uz', 0
)
for ii, w in enumerate(weight_arrays):
npart += len(w)
total_weight += np.sum(w)
total_energy += np.sum(self.emitter._get_E_total(
ux_arrays[ii], uy_arrays[ii], uz_arrays[ii],
constants.e, constants.m_e, w
))
# Move particles from temporary container to "real" container
mwxrun.move_particles_between_species(
self.injection_species.name, self.species.name
)
else:
total_weight = np.sum(particles_dict['w'])
total_energy = np.sum(particles_dict['E_total'])
if self.injector_diag is not None:
self.record_injectedparticles(
species=self.species,
w=total_weight,
E_total=total_energy,
n=npart
)
class PlasmaInjector(Injector):
"""Inject particles at simulation start, or at regular timesteps, to
seed a plasma. Can use any emitter object. The defining feature is that the
2nd species positions and weights are copied from the first species, so the
spatial distribution is always identical to start. Velocities are
independent, however.
"""
def __init__(self, emitter, species1, species2, npart, T_2=None,
plasma_density=None, ionization_frac=None,
P_neutral=None, T_neutral=None,
injectfreq=None, injectoffset=1,
rseed=None, name=None, unique_particles=True
):
"""Initialize injection of a plasma with two species and given emitter.
Arguments:
emitter (:class:`mewarpx.emission.BaseEmitter`): BaseEmitter object
that will specify positions and velocities of particles to
inject.
species1 (:class:`mewarpx.mespecies.Species`): First species, eg
electron
species2 (:class:`mewarpx.mespecies.Species`): Second species, eg ion
npart (int): Number of macroparticles to inject total among all
processors and species.
T_2 (float): If specified, species2 will be injected at this
temperature.
plasma_density (float): Ion number density to inject. If using
volumetric emitter, in m^(-3), if using surface emitter, in
m^(-2)
ionization_frac (float): Instead of plasma_density, use a specific
ionization fraction of the neutral gas. Volumetric emitter
only.
P_neutral (float): If using ionization_frac only, the neutral gas
density (*Torr*).
T_neutral (float): If using ionization_frac only, the neutral gas
temperature (K).
injectfreq (int): Number of steps to wait for next injection.
Default infinity.
injectoffset (int): First timestep to inject. Default 1 (the first
possible timestep in WarpX).
rseed (int): If specified, all injection should be repeatable using
this rseed. At present each set of injected particles will have
the same initial position and velocities as the previous set.
name (str or None): Injector name for diagnostics. Constructed from
species names if not given.
unique_particles (bool): Whether WarpX will keep all particles
given it from every processor (True) or keep only a fraction of
particles based on processor count (False). Default True.
"""
# Save class parameters
self.emitter = emitter
self.npart_per_species = npart // 2
self.species1 = species1
self.species2 = species2
self.T_2 = T_2
if injectfreq is None:
injectfreq = np.inf
self.injectfreq = injectfreq
self.injectoffset = injectoffset
self.rseed = rseed
self._calc_plasma_density(
plasma_density=plasma_density,
ionization_frac=ionization_frac,
P_neutral=P_neutral,
T_neutral=T_neutral,
)
self.name = name
if self.name is None:
self.name = (
f"plasma_injector_{self.species1.name}_{self.species2.name}"
)
self.unique_particles = unique_particles
logger.info(
f"Plasma injection {self.name}: "
f"{self.npart_per_species} particles each of {self.species1.name} "
f"and {self.species2.name}, every {self.injectfreq} timesteps,"
)
# Surface emission
if isinstance(self.emitter, Emitter):
self.weight = (
self.emitter.area * self.plasma_density / self.npart_per_species
)
warnings.warn(
"Using a surface emitter with the PlasmaInjector has not been "
"tested for accuracy."
)
logger.info(
f" full weight {self.weight:.4g}, surface density "
f"{self.plasma_density:.4g} m^-2, area "
f"{self.emitter.area:.4g} m^2."
)
# Volume emission
else:
self.weight = (
self.emitter.volume * self.plasma_density
/ self.npart_per_species
)
logger.info(
f" full weight {self.weight:.4g}, volume density "
f"{self.plasma_density:.4g} m^-3, volume "
f"{self.emitter.volume:.4g} m^3."
)
debye_length = mwxutil.plasma_Debye_length(
self.emitter.T, self.plasma_density)
logger.info(
f" Corresponding plasma Debye length is {debye_length:.3e} m."
)
callbacks.installparticleinjection(self.inject_particles)
# add E_total PID to the species involved
self.species1.add_pid("E_total")
self.species2.add_pid("E_total")
def _calc_plasma_density(self, plasma_density, ionization_frac, P_neutral,
T_neutral):
"""Helper function to separate out part of initialization."""
self.plasma_density = plasma_density
if ionization_frac is not None:
if self.plasma_density is not None:
raise ValueError(
"Specify ionization_frac or plasma_density, not both.")
if (
(P_neutral is None) or (P_neutral <= 0) or
(T_neutral is None) or (T_neutral <= 0)
):
raise ValueError("Must specify positive neutral pressure and "
"temperature to use ionization_frac.")
if isinstance(self.emitter, Emitter):
raise RuntimeError("Cannot use ionization_frac with a surface"
" (area-based) Emitter.")
n_neutral = mwxutil.ideal_gas_density(P_neutral, T_neutral)
self.plasma_density = n_neutral * ionization_frac
if (self.plasma_density is None) or (self.plasma_density <= 0):
raise ValueError("Invalid plasma_density {}".format(
self.plasma_density))
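# Worked example (hypothetical values): P_neutral = 1 Torr at
# T_neutral = 300 K gives n_neutral ~ 3.2e22 m^-3, so
# ionization_frac = 1e-4 yields plasma_density ~ 3.2e18 m^-3.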
def inject_particles(self):
"""Inject particles, same position & weight for each."""
effective_it = mwxrun.get_it() - self.injectoffset
if effective_it >= 0 and effective_it % self.injectfreq == 0:
# Adjust npart for processor number if needed
npart = self.compute_npart(
npart_total=self.npart_per_species,
unique_particles=self.unique_particles
)
# TODO randomdt and velhalfstep are False simply because they're
# not supported at present
particles1_dict = self.emitter.get_newparticles(
npart=npart, w=self.weight, q=self.species1.sq,
m=self.species1.sm, rseed=self.rseed,
randomdt=False, velhalfstep=False
)
# if requested get particles for species2 at the specified
# temperature
if self.T_2 is not None:
T_temp, self.emitter.T = self.emitter.T, self.T_2
# TODO randomdt and velhalfstep are False simply because they're
# not supported at present
particles2_dict = self.emitter.get_newparticles(
npart=npart, w=self.weight, q=self.species2.sq,
m=self.species2.sm, rseed=self.rseed,
randomdt=False, velhalfstep=False
)
if self.T_2 is not None:
self.emitter.T = T_temp
for key in ['x', 'y', 'z', 'w']:
particles2_dict[key] = particles1_dict[key]
logger.info(
f"Inject {len(particles1_dict['x'])} particles each of "
f"{self.species1.name} and {self.species2.name}."
)
mwxrun.sim_ext.add_particles(
self.species1.name,
x=particles1_dict['x'],
y=particles1_dict['y'],
z=particles1_dict['z'],
ux=particles1_dict['vx'],
uy=particles1_dict['vy'],
uz=particles1_dict['vz'],
w=particles1_dict['w'],
E_total=particles1_dict['E_total'],
unique_particles=self.unique_particles
)
mwxrun.sim_ext.add_particles(
self.species2.name,
x=particles2_dict['x'],
y=particles2_dict['y'],
z=particles2_dict['z'],
ux=particles2_dict['vx'],
uy=particles2_dict['vy'],
uz=particles2_dict['vz'],
w=particles2_dict['w'],
E_total=particles2_dict['E_total'],
unique_particles=self.unique_particles
)
if self.injector_diag is not None:
self.record_injectedparticles(
species=self.species1,
w=particles1_dict['w'],
E_total=particles1_dict['E_total'],
)
self.record_injectedparticles(
species=self.species2,
w=particles2_dict['w'],
E_total=particles2_dict['E_total'],
)
class BaseEmitter(object):
"""Parent class of both Emitter (which handles injection from a surface or
other area) and VolumeEmitter (which handles injection throughout a
volume).
All BaseEmitter objects are expected to contain:
- ``get_newparticles()`` returns coordinates, velocities, and KE in a
dict - implemented here
- ``_get_xv_coords()`` implements the subclass-specific particle
injection logic
- ``getvoltage()`` calculates the potential energy for particle
energies.
- ``getvoltage_e()`` calculates the potential energy for particle
energies including the work function.
- ``geoms`` is a property containing a list of simulation geometries
supported by the Emitter, as strings
"""
# Stores a list of functions that are used to adjust variable particle
# weights.
_wfnlist = None
# Needs to be overridden to specify acceptable geometries
geoms = []
def __init__(self):
"""Check geometry and any other universal initialization.
"""
self.solver_geom = self.check_geom()
# # Use to get E and phi as needed.
# self.particle_helper = ParticleValHelper()
def check_geom(self):
"""Return the current solver geometry, or throw an error if it is
unsupported by the Emitter.
"""
geom = mwxrun.geom_str
if geom not in self.geoms:
raise ValueError(
f"{geom} geometry not supported by this Emitter")
return geom
def getvoltage(self):
"""This should return the potential energy at the injection site for
fully accurate energetics.
"""
raise NotImplementedError
def getvoltage_e(self):
"""This should return the potential energy, including work function,
at the injection site for fully accurate energetics.
"""
raise NotImplementedError
@staticmethod
def _gen_particle_dict(x, y, z, vx, vy, vz, w, **kwargs):
"""Change standard arrays into format expected by an injector.
The transfer to an injector uses a dict so that optional
arguments can be passed, or additional arguments added.
Arguments:
x (np.ndarray): n-shape position array
y (np.ndarray): n-shape position array
z (np.ndarray): n-shape position array
vx (np.ndarray): n-shape velocity array
vy (np.ndarray): n-shape velocity array
vz (np.ndarray): n-shape velocity array
w (float or np.ndarray): Particle weight, either constant or
per-particle.
kwargs (np.ndarray): These are simply copied into the dictionary
"""
particle_dict = {
'x': x, 'y': y, 'z': z,
'vx': vx, 'vy': vy, 'vz': vz,
'w': np.ones_like(x) * w
}
particle_dict.update(kwargs)
return particle_dict
def _get_E_total(self, vx, vy, vz, q, m, w):
"""Calculate initial particle energies.
Note:
The conductor voltage V of the conductor the particle is ejected
from must also be set for this object.
Arguments:
vx (np.ndarray): n-length array of velocity x-components
vy (np.ndarray): n-length array of velocity y-components
vz (np.ndarray): n-length array of velocity z-components
q (float): Charge of the particles, usually species.sq.
m (float): Mass of the particles, usually species.sm.
w (np.ndarray): Variable particle weight, n-shape
"""
V = self.getvoltage()
E_total = w*(0.5*m*(vx**2 + vy**2 + vz**2) + q*V)
return E_total
def get_newparticles(self, npart, w, q, m, rseed=None,
randomdt=True, velhalfstep=True):
"""Return dict with coordinates, velocities, and KE
Note:
This function SHOULD (but doesn't in WarpX yet) handle the random
timestep advancement and the negative half-step velocity push. They
can be turned off if desired. No leapfrogging is done in the
initial random advancement, which could be a (hopefully very minor)
source of error.
Arguments:
npart (int): Total number of particles to inject
w (float): Weight of the particles
q (float): Charge of the particles, usually species.sq.
m (float): Mass of the particles, usually species.sm.
rseed (int): Random seed, if specified, can be used to provide
reproducible results. Typically used for test / not production
runs.
randomdt (bool): If True, move each particle ahead a random delta t
in [0, dt), advancing both position and velocity together.
Default True.
velhalfstep (bool): If True, push the velocities a negative
half-step using the E-field. Aligns position and velocities
correctly for the leapfrog algorithm.
Returns:
particle_dict (dict): Contains lists, each with length equal to the
number of particles:
- ``x``, ``y``, and ``z`` contain initial positions
- ``vx``, ``vy``, and ``vz`` contain initial velocities
- ``E_total`` contains initial energy of each particle, kinetic
& potential.
- ``w`` contains particle weights.
"""
if rseed is not None:
nprstate = np.random.get_state()
np.random.seed(rseed)
rseedxv = np.random.randint(1000000000)
rseedt = np.random.randint(1000000000)
else:
rseedxv = None
rseedt = None
x, y, z, vx, vy, vz = self._get_xv_coords(
npart=npart, m=m, rseed=rseedxv
)
particle_dict = self._gen_particle_dict(
x=x, y=y, z=z, vx=vx, vy=vy, vz=vz, w=w
)
if self._wfnlist is not None:
for wfn in self._wfnlist:
particle_dict['w'] = wfn(particle_dict)
particle_dict['E_total'] = self._get_E_total(
vx=particle_dict['vx'],
vy=particle_dict['vy'],
vz=particle_dict['vz'],
q=q, m=m, w=particle_dict['w']
)
# After E_total has been computed, we advance particles as needed.
# Note: self.particle_helper is currently commented out in __init__, so
# the randomdt and velhalfstep branches below will raise AttributeError
# until that helper is restored; all callers in this module currently
# pass False for both.
if randomdt:
self.particle_helper.advance_random_deltat(
particle_dict['x'], particle_dict['y'], particle_dict['z'],
particle_dict['vx'], particle_dict['vy'], particle_dict['vz'],
q=q, m=m, rseed=rseedt
)
if velhalfstep:
self.particle_helper.push_v_minus_halfstep(
particle_dict['x'], particle_dict['y'], particle_dict['z'],
particle_dict['vx'], particle_dict['vy'], particle_dict['vz'],
q=q, m=m
)
if rseed is not None:
np.random.set_state(nprstate)
return particle_dict
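# A minimal usage sketch (the emitter instance and argument values are
# illustrative):
#   pd = emitter.get_newparticles(npart=100, w=1e6, q=species.sq,
#                                 m=species.sm, randomdt=False,
#                                 velhalfstep=False)
#   # pd['x'] ... pd['vz'], pd['w'] and pd['E_total'] are 100-element arrays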
def _update_params(self):
"""Update local parameters if needed based on WarpX settings.
By default does nothing, but subclasses can implement it to update
parameters before new particle coordinates are generated.
"""
pass
def _get_xv_coords(self, npart, m, rseed):
"""Per-subclass implementation of generating new particle data.
See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details on
arguments.
Returns:
x, y, z, vx, vy, vz (np.array): Each must be a 1D numpy array.
"""
raise NotImplementedError(
"BaseEmitter subclasses must implement _get_xv_coords")
def add_wfn(self, wfn):
"""Add a variable weight function to the emitter.
Arguments:
wfn (function): This must take in a particle dictionary with
positions, velocities, and existing weights, and return a new
array of particle weights.
"""
if self._wfnlist is None:
self._wfnlist = []
self._wfnlist.append(wfn)
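# Example weight function (hypothetical): halve the weight of every
# injected particle.
#   def half_weight(particle_dict):
#       return 0.5 * particle_dict['w']
#   emitter.add_wfn(half_weight)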
class Emitter(BaseEmitter):
"""Parent class for emission from a surface.
All Emitter objects are expected to contain:
- ``area`` is a property containing the area in m^2
- ``cell_count`` is a property containing the number of mesh cells
spanned by the Emitter
- ``geoms`` is a property containing a list of simulation geometries
supported by the Emitter
- ``_get_xv_coords()`` implements the subclass-specific particle
injection logic
- ``get_normals()`` returns the normals for a set of particle
coordinates.
"""
area = None
cell_count = None
geoms = []
def __init__(self, T, conductor=None, emission_type='thermionic'):
"""Default initialization for all Emitter objects.
Arguments:
T (float): Emitter temperature in Kelvin. Determines particle
velocity distribution. If None, the temperature of the
conductor will be used if one is specified.
conductor (assemblies.Assembly): Conductor the emitter is attached
to, used for recording initial voltages and energies. If None,
V_e is set to 0. Since there's no current use case for this, a
warning is printed.
emission_type (str): Distribution function type used to sample
velocities of the emitted particles. Must be defined in
:func:`mewarpx.utils_store.util.get_velocities`. Defaults to
'thermionic'.
"""
super(Emitter, self).__init__()
self.T = T
if self.T is None and conductor is not None:
self.T = conductor.T
if self.T is None:
raise ValueError(
"No value for T given to the Emitter. An Emitter T must be "
"specified directly, or on a conductor passed to the Emitter."
)
self.conductor = conductor
if self.conductor is not None:
if self.conductor.WF <= 0.:
raise ValueError("Conductor WF must be set for emitters.")
else:
warnings.warn("No conductor set for emitter. Power will not be "
"correct.")
self.emission_type = emission_type
def getvoltage(self):
if self.conductor is None:
return 0.
return self.conductor.getvoltage()
def getvoltage_e(self):
"""Electrical voltage includes WF, eg the Fermi level voltage."""
if self.conductor is None:
return 0.
return self.conductor.getvoltage() + self.conductor.WF
def get_normals(self, x, y, z):
"""Calculate local surface normal at specified coordinates.
Arguments:
x (np.ndarray): x-coordinates of emitted particles (in meters).
y (np.ndarray): y-coordinates of emitted particles (in meters).
z (np.ndarray): z-coordinates of emitted particles (in meters).
Returns:
normals (np.ndarray): nx3 array containing the outward surface
normal vector at each particle location.
"""
raise NotImplementedError("Normal calculations must be implemented by "
"Emitter sub-classes.")
class ZPlaneEmitter(Emitter):
"""This is the standard injection for a planar cathode."""
geoms = ['Z', 'XZ', 'XYZ']
def __init__(self, conductor, T=None, xmin=None, xmax=None,
ymin=None, ymax=None, transverse_fac=1.0, **kwargs):
"""Initialize an emitter for a planar cathode.
Arguments:
conductor (:class:`mewarpx.assemblies.Assembly`): Conductor object,
used to obtain work function and z coordinate/direction.
T (float): Temperature in Kelvin for the emitter; determines
velocities. If not specified the temperature of the conductor
will be used.
xmin (float): Minimum position of the rectangular emitter along x.
Default mwxrun.xmin.
xmax (float): Maximum position of the rectangular emitter along x.
Default mwxrun.xmax.
ymin (float): Minimum position of the rectangular emitter along y.
Default mwxrun.ymin.
ymax (float): Maximum position of the rectangular emitter along y.
Default mwxrun.ymax.
transverse_fac (float): Scale the transverse energy distribution by
this factor. Default 1. See
:func:`mewarpx.utils_store.util.get_velocities` for details.
kwargs (dict): Any other keyword arguments supported by the parent
Emitter constructor (such as "emission_type").
"""
# Default initialization
super(ZPlaneEmitter, self).__init__(T=T, conductor=conductor, **kwargs)
self.z = conductor.z
self.zsign = conductor.zsign
self.transverse_fac = transverse_fac
# Determine bounds
# Will be 4 element array [xmin, xmax, ymin, ymax]
self.bounds = []
for coord, default in [(xmin, mwxrun.xmin),
(xmax, mwxrun.xmax),
(ymin, mwxrun.ymin),
(ymax, mwxrun.ymax)]:
self.bounds.append(coord if coord is not None else default)
# Compute area
x_range = self.bounds[1] - self.bounds[0]
y_range = self.bounds[3] - self.bounds[2]
if self.solver_geom == 'Z':
logger.info("x/y span is 1m for purposes of charge injection")
x_range = 1.
y_range = 1.
if self.solver_geom == 'XZ':
logger.info("y span is 1m for purposes of charge injection")
y_range = 1.
self.area = x_range * y_range
# Compute cell count
if self.solver_geom == 'Z':
self.cell_count = 1
elif self.solver_geom == 'XZ':
self.cell_count = self.area / mwxrun.dx
else:
self.cell_count = self.area / (mwxrun.dx * mwxrun.dy)
def _get_xv_coords(self, npart, m, rseed):
"""Get particle coordinates given particle number.
See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
"""
if rseed is not None:
nprstate = np.random.get_state()
np.random.seed(rseed)
rseedv = np.random.randint(1000000000)
rseedx = np.random.randint(1000000000)
else:
rseedv = None
rseedx = None
vx, vy, vz = mwxutil.get_velocities(
npart, self.T, m=m, transverse_fac=self.transverse_fac,
emission_type=self.emission_type, rseed=rseedv)
x, y, z = mwxutil.get_positions(
npart, xmin=self.bounds[0], xmax=self.bounds[1],
ymin=self.bounds[2], ymax=self.bounds[3], z=self.z,
rseed=rseedx)
# Flip z velocities for anode emission. This appears to be faster than
# an if statement for 10000 or fewer particles.
vz = -self.zsign * vz
if rseed is not None:
np.random.set_state(nprstate)
return x, y, z, vx, vy, vz
def get_normals(self, x, y, z):
"""Calculate local surface normal at specified coordinates.
Arguments:
x (np.ndarray): x-coordinates of emitted particles (in meters).
y (np.ndarray): y-coordinates of emitted particles (in meters).
z (np.ndarray): z-coordinates of emitted particles (in meters).
Returns:
normals (np.ndarray): nx3 array containing the outward surface
normal vector at each particle location.
"""
normals = np.zeros((len(x), 3))
normals[:, 2] = -self.zsign
return normals
class XPlaneEmitter(Emitter):
"""Injection for a planar cathode emitting from the simulation side."""
geoms = ['XZ', 'XYZ']
def __init__(self, conductor, T=None, x=None, ymin=None, ymax=None,
zmin=None, zmax=None, transverse_fac=1.0, xdir=1, **kwargs):
"""Initialize an emitter for a planar cathode.
Arguments:
conductor (:class:`mewarpx.assemblies.Assembly`): Conductor object,
used to obtain work function and z coordinate/direction.
T (float): Temperature in Kelvin for the emitter; determines
velocities. If not specified the temperature of the conductor
will be used.
x (float): Position of the emitter along the x axis. Default
conductor.x if it exists otherwise conductor.xmin/max depending
on xdir. If none of those attributes exist an error will be
raised if x is not specified.
ymin (float): Minimum position of the rectangular emitter along y.
Default conductor.ymin if it exists otherwise mwxrun.ymin.
ymax (float): Maximum position of the rectangular emitter along y.
Default conductor.ymax if it exists otherwise mwxrun.ymax.
zmin (float): Minimum position of the rectangular emitter along z.
Default conductor.zmin if it exists otherwise mwxrun.zmin.
zmax (float): Maximum position of the rectangular emitter along z.
Default conductor.zmax if it exists otherwise mwxrun.zmax.
transverse_fac (float): Scale the transverse energy distribution by
this factor. Default 1. See
:func:`mewarpx.utils_store.util.get_velocities` for details.
xdir (int): 1 to emit in +x, -1 to emit in -x.
kwargs (dict): Any other keyword arguments supported by the parent
Emitter constructor (such as "emission_type").
"""
# Default initialization
super(XPlaneEmitter, self).__init__(T=T, conductor=conductor, **kwargs)
self.transverse_fac = transverse_fac
self.xdir = int(round(xdir))
if self.xdir not in [-1, 1]:
raise ValueError("xdir must be +1 or -1 for x-plane emitters.")
self.x = x
if self.x is None:
try:
self.x = conductor.x
except AttributeError:
try:
if self.xdir == 1:
attr = 'xmax'
else:
attr = 'xmin'
self.x = getattr(conductor, attr)
except AttributeError:
raise AttributeError(
'x must be specified for x-plane emitter if the '
f'attached conductor does not specify x or {attr}'
)
# Determine bounds
# Will be 4 element array [ymin, ymax, zmin, zmax]
self.bounds = []
for boundstr in ['ymin', 'ymax', 'zmin', 'zmax']:
bound = locals()[boundstr]
if bound is None:
bound = (
getattr(mwxrun, boundstr) if not hasattr(conductor, boundstr) else
getattr(conductor, boundstr)
)
self.bounds.append(bound)
# Compute area
y_range = self.bounds[1] - self.bounds[0]
z_range = self.bounds[3] - self.bounds[2]
if self.solver_geom == 'XZ':
print("y span is 1m for purposes of charge injection")
y_range = 1.
self.area = y_range * z_range
# Compute cell count
if self.solver_geom == 'XZ':
self.cell_count = self.area / mwxrun.dz
else:
self.cell_count = self.area / (mwxrun.dz * mwxrun.dy)
def _get_xv_coords(self, npart, m, rseed):
"""Get particle coordinates given particle number.
See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
"""
if rseed is not None:
nprstate = np.random.get_state()
np.random.seed(rseed)
rseedv = np.random.randint(1000000000)
rseedx = np.random.randint(1000000000)
else:
rseedv = None
rseedx = None
# in sampling the positions and the velocities the x and z coordinates
# are swapped so that the same functions as for the ZPlaneEmitter can
# be used
vz, vy, vx = mwxutil.get_velocities(
npart, self.T, m=m, transverse_fac=self.transverse_fac,
emission_type=self.emission_type, rseed=rseedv)
z, y, x = mwxutil.get_positions(
npart, xmin=self.bounds[2], xmax=self.bounds[3],
ymin=self.bounds[0], ymax=self.bounds[1], z=self.x,
rseed=rseedx)
# Flip x velocities if needed. This appears to be faster than
# an if statement for 10000 or fewer particles.
vx = self.xdir * vx
if rseed is not None:
np.random.set_state(nprstate)
return x, y, z, vx, vy, vz
def get_normals(self, x, y, z):
"""Calculate local surface normal at specified coordinates.
Arguments:
x (np.ndarray): x-coordinates of emitted particles (in meters).
y (np.ndarray): y-coordinates of emitted particles (in meters).
z (np.ndarray): z-coordinates of emitted particles (in meters).
Returns:
normals (np.ndarray): nx3 array containing the outward surface
normal vector at each particle location.
"""
normals = np.zeros((len(x), 3))
normals[:, 0] = self.xdir
return normals
class ZDiscEmitter(Emitter):
"""This injects over an x-y disc rather than a rectangle."""
geoms = ['RZ']
def __init__(self, conductor, T=None, inner_emission_radius=None,
outer_emission_radius=None, transverse_fac=1.0, **kwargs):
"""Initialize an emitter for a disc (circular) cathode.
Arguments:
conductor (:class:`mewarpx.assemblies.Assembly`): Conductor object,
used to obtain work function and z coordinate/direction.
T (float): Temperature in Kelvin for the emitter; determines
velocities. If not specified the temperature of the conductor
will be used.
inner_emission_radius (float): Inner radius of the disc (in meters)
for particles to be emitted from. Default mwxrun.rmin.
outer_emission_radius (float): Outer radius of the disc (in meters)
for particles to be emitted from. Default mwxrun.rmax.
transverse_fac (float): Scale the transverse energy distribution by
this factor. Default 1. See
:func:`mewarpx.utils_store.util.get_velocities` for details.
kwargs (dict): Any other keyword arguments supported by the parent
Emitter constructor (such as "emission_type").
Notes:
The center of the disc is always x = y = 0 at present.
"""
# Default initialization
super(ZDiscEmitter, self).__init__(T=T, conductor=conductor, **kwargs)
self.z = conductor.z
self.zsign = conductor.zsign
self.transverse_fac = transverse_fac
# Save input parameters
if inner_emission_radius is None:
inner_emission_radius = mwxrun.rmin
self.inner_emission_radius = inner_emission_radius
if outer_emission_radius is None:
outer_emission_radius = mwxrun.rmax
self.outer_emission_radius = outer_emission_radius
self.area = (np.pi
* (self.outer_emission_radius**2 - self.inner_emission_radius**2))
# Compute cell count
self.cell_count = (
(self.outer_emission_radius - self.inner_emission_radius)
/ mwxrun.dr
)
def _get_xv_coords(self, npart, m, rseed):
"""Get particle coordinates given particle number.
See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
"""
if rseed is not None:
nprstate = np.random.get_state()
np.random.seed(rseed)
rseedv = np.random.randint(1000000000)
rseedx = np.random.randint(1000000000)
else:
rseedv = None
rseedx = None
vx, vy, vz = mwxutil.get_velocities(
npart, self.T, m=m, transverse_fac=self.transverse_fac,
emission_type=self.emission_type, rseed=rseedv)
x, y, z = mwxutil.get_positions_RZ(
npart, rmin=self.inner_emission_radius,
rmax=self.outer_emission_radius, z=self.z,
rseed=rseedx)
# Flip z velocities for anode emission. This appears to be faster than
# an if statement for 10000 or fewer particles.
vz = -self.zsign * vz
if rseed is not None:
np.random.set_state(nprstate)
return x, y, z, vx, vy, vz
def get_normals(self, x, y, z):
"""Calculate local surface normal at specified coordinates.
Arguments:
x (np.ndarray): x-coordinates of emitted particles (in meters).
y (np.ndarray): y-coordinates of emitted particles (in meters).
z (np.ndarray): z-coordinates of emitted particles (in meters).
Returns:
normals (np.ndarray): nx3 array containing the outward surface
normal vector at each particle location.
"""
normals = np.zeros((len(x), 3))
normals[:, 2] = -self.zsign
return normals
class ZCylinderEmitter(Emitter):
"""This injects over the side faces of a cylinder oriented along z."""
geoms = ['RZ', 'XYZ']
def __init__(self, conductor, T=None, zmin=None, zmax=None, rdir=1,
transverse_fac=1.0, **kwargs):
"""Initialize a 3D cylindrical emitter oriented along the z-axis.
Arguments:
conductor (:class:`mewarpx.assemblies.Assembly`): Conductor object,
used to obtain work function, coordinates and possibly
temperature.
T (float): Temperature in Kelvin for the emitter; determines
velocities. If not specified the temperature of the conductor
will be used.
zmin (float): Lower z-coordinate for emitting surface. Default
conductor.zmin if it exists otherwise mwxrun.zmin.
zmax (float): Upper z-coordinate for emitting surface. Default
conductor.zmax if it exists otherwise mwxrun.zmax.
rdir (float): 1 for emitting outward of the cylinder (will use
r_outer attribute of the conductor for the emitting surface),
-1 for emitting inward towards r = 0 (will use r_inner attribute
of the conductor for the emitting surface). Default 1.
transverse_fac (float): Scale the transverse energy distribution by
this factor. Default 1. See
:func:`mewarpx.utils_store.util.get_velocities` for details.
kwargs (dict): Any other keyword arguments supported by the parent
Emitter constructor (such as "emission_type").
Notes:
The center of the cylinder is always x = y = 0 at present.
"""
# Default initialization
super(ZCylinderEmitter, self).__init__(T=T, conductor=conductor,
**kwargs)
self.zmin = zmin
if self.zmin is None:
if hasattr(conductor, 'zmin'):
self.zmin = conductor.zmin
else:
self.zmin = mwxrun.zmin
self.zmax = zmax
if self.zmax is None:
if hasattr(conductor, 'zmax'):
self.zmax = conductor.zmax
else:
self.zmax = mwxrun.zmax
self.rdir = int(round(rdir))
if self.rdir not in [-1, 1]:
raise ValueError("rdir must be +1 or -1 for z-cylinder emitters.")
if self.rdir == 1:
self.r = conductor.r_outer + 1e-10
else:
self.r = conductor.r_inner - 1e-10
self.transverse_fac = transverse_fac
# sanity check
if self.r <= 0:
raise AttributeError("Cannot emit from a cylinder with 0 radius.")
self.area = 2.0 * np.pi * self.r * (self.zmax - self.zmin)
# Compute cell count
if mwxrun.geom_str == 'RZ':
self.cell_count = (self.zmax - self.zmin) / mwxrun.dz
elif mwxrun.geom_str == 'XYZ':
self.cell_count = (
self.area
/ min(mwxrun.dx * mwxrun.dy, mwxrun.dx * mwxrun.dz,
mwxrun.dy * mwxrun.dz)
)
def _get_xv_coords(self, npart, m, rseed):
"""Get particle coordinates given particle number.
See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
"""
if rseed is not None:
nprstate = np.random.get_state()
np.random.seed(rseed)
# rseedv is passed to get velocities. The basic rseed here is used
# for positions, below.
rseedv = np.random.randint(1000000000)
else:
rseedv = None
theta = np.random.uniform(0.0, 2.0*np.pi, npart)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
x = self.r * cos_theta
y = self.r * sin_theta
z = np.random.uniform(self.zmin, self.zmax, npart)
vz, v_trans, v_long = mwxutil.get_velocities(
npart, self.T, m=m, transverse_fac=self.transverse_fac,
emission_type=self.emission_type, rseed=rseedv)
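        # get_velocities is assumed to return two transverse components
        # followed by the longitudinal (emission-direction) component; for
        # this cylinder the longitudinal direction is radial, so the z
        # component here is one of the transverse parts.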
# Flip the longitudinal velocity if needed. This appears to be faster
# than an if statement for 10000 or fewer particles.
v_long = self.rdir * v_long
vx = cos_theta * v_long - sin_theta * v_trans
vy = sin_theta * v_long + cos_theta * v_trans
if rseed is not None:
np.random.set_state(nprstate)
return x, y, z, vx, vy, vz
def get_normals(self, x, y, z):
"""Calculate local surface normal at specified coordinates.
Arguments:
x (np.ndarray): x-coordinates of emitted particles (in meters).
y (np.ndarray): y-coordinates of emitted particles (in meters).
z (np.ndarray): z-coordinates of emitted particles (in meters).
Returns:
normals (np.ndarray): nx3 array containing the outward surface
normal vector at each particle location.
"""
normals = np.zeros((len(x), 3))
# The normal is r-hat
r = np.sqrt(x**2 + y**2)
normals[:, 0] = x / r
normals[:, 1] = y / r
return normals
class ArbitraryEmitter2D(Emitter):
""" ArbitraryEmitter2D class takes in a conductor, calculates an approximate
surface that encloses the conductor and then sets up the appropriate
emitting surfaces, given a number of particles to emit.
"""
geoms = ['XZ']
def __init__(self, conductor, T=None, res_fac=5., transverse_fac=1.0,
**kwargs):
"""Construct the emitter based on conductor object and temperature.
Arguments:
conductor (mewarpx.assemblies object): Conductor to emit from.
T (float): Temperature in Kelvin. If not specified the temperature
of the conductor will be used.
res_fac (float): Level of resolution beyond the grid resolution to
use for calculating shape contours.
transverse_fac (float): Scale the transverse energy distribution by
this factor. Default 1. See
:func:`mewarpx.utils_store.util.get_velocities` for details.
kwargs (dict): Any other keyword arguments supported by the parent
Emitter constructor (such as "emission_type").
"""
# Default initialization
super(ArbitraryEmitter2D, self).__init__(
T=T, conductor=conductor, **kwargs
)
# Save input parameters
self.res_fac = res_fac
self.transverse_fac = transverse_fac
# Generate grid enclosed in bounding box
self.dx = mwxrun.dx/res_fac
self.dy = 1.
self.dz = mwxrun.dz/res_fac
self.dA = np.sqrt(self.dx*self.dz)
# A small delta is added to the maxima here; this ensures the last point
# is included. Without it, floating point errors determine whether or
# not the last point is included.
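        # (Illustration: with the stop exactly at xmax, np.arange(xmin, xmax,
        # dx) may or may not produce a final grid point at xmax because of
        # accumulated floating-point error; stopping at xmax + dx/1000 makes
        # the last point's inclusion deterministic.)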
self.xvec = np.arange(
mwxrun.xmin, mwxrun.xmax + self.dx/1000., self.dx)
self.yvec = [0.]
self.zvec = np.arange(
mwxrun.zmin, mwxrun.zmax + self.dz/1000., self.dz)
[X, Y, Z] = np.squeeze(np.meshgrid(self.xvec, self.yvec, self.zvec,
indexing='xy'))
oshape = X.shape
X = X.flatten()
Y = Y.flatten()
Z = Z.flatten()
inside = np.reshape(
self.conductor.isinside(X, Y, Z, aura=self.dA/5.),
oshape)
        # A level of 0.17 was chosen to keep the original ratio of 0.5:3 from
        # warp, compared to 0.17:1 now in warpx. Increasing the level causes
        # particles to be injected inside the cylinder.
self.contours = np.squeeze(skimage.measure.find_contours(
inside, 0.17))
self.contours[:, 0] = np.interp(self.contours[:, 0],
np.arange(self.xvec.size),
self.xvec)
self.contours[:, 1] = np.interp(self.contours[:, 1],
np.arange(self.zvec.size),
self.zvec)
self.centers = np.array(
[(self.contours[1:, 0] + self.contours[:-1, 0])/2.,
(self.contours[1:, 1] + self.contours[:-1, 1])/2.]).T
self.dvec = np.array(
[self.contours[1:, 0] - self.contours[:-1, 0],
self.contours[1:, 1] - self.contours[:-1, 1]]).T
# Calculate the distance of each segment & sum to calculate the area
self.distances = np.sqrt(self.dvec[:, 0]**2 + self.dvec[:, 1]**2)
self.area = sum(self.distances)
self.cell_count = self.area / min(mwxrun.dx, mwxrun.dz)
self.CDF = np.cumsum(self.distances)/self.area
# Calculate Normal Vector by taking cross product with y-hat
ndvec = self.dvec/np.tile(self.distances, (2, 1)).T
marching_normal = np.zeros(self.dvec.shape)
marching_normal[:, 0] = -ndvec[:, 1]
marching_normal[:, 1] = ndvec[:, 0]
# Check to make sure normal plus center is outside of conductor
partdist = self.dA * float(self.res_fac) / 2.
pos = self.centers + marching_normal * partdist
px = pos[:, 0]
py = np.zeros_like(px)
pz = pos[:, 1]
nhat = self.conductor.calculatenormal(px, py, pz)
self.normal = nhat[[0, 2], :].T
def _get_xv_coords(self, npart, m, rseed):
"""Get particle coordinates given particle number.
        See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
"""
if rseed is not None:
nprstate = np.random.get_state()
np.random.seed(rseed)
# rseedv is passed to get velocities. The basic rseed here is used
# for positions, below.
rseedv = np.random.randint(1000000000)
else:
rseedv = None
# Draw Random Numbers to determine which face to emit from
self.contour_idx = np.searchsorted(self.CDF, np.random.rand(npart))
vels = np.column_stack(mwxutil.get_velocities(
num_samples=npart, T=self.T, m=m,
rseed=rseedv,
transverse_fac=self.transverse_fac,
emission_type=self.emission_type
))
# Rotate velocities based on angle of normal
newvels = self.convert_vel_zhat_nhat(
vels, self.normal[self.contour_idx])
vx = np.asarray(newvels[:, 0], order="C")
vy = np.asarray(newvels[:, 1], order="C")
vz = np.asarray(newvels[:, 2], order="C")
# Now get positions
pos1 = self.contours[self.contour_idx, :]
positions = (pos1 +
(np.tile(np.random.rand(npart), (2, 1)).T
* self.dvec[self.contour_idx, :]))
x = np.asarray(positions[:, 0], order="C")
y = np.asarray(0., order="C")
z = np.asarray(positions[:, 1], order="C")
if rseed is not None:
np.random.set_state(nprstate)
return x, y, z, vx, vy, vz
@staticmethod
    # Synthetic tests showed a change from 18 ms to 660 µs from using np.dot +
    # numba compilation. Without these changes, this function was taking 2-4%
    # of some run times, so the improvement is warranted.
@numba.jit(nopython=True)
def convert_vel_zhat_nhat(vels, nhat):
"""Create a rotation matrix for Zhat to Nhat"""
Zhat = np.array([0., 1.])
newvels = np.zeros(vels.shape)
for ii in range(vels.shape[0]):
Cvec = Zhat - nhat[ii, :]
Cvec2 = np.dot(Cvec, Cvec)
theta = np.arccos(1. - Cvec2/2.)
# Check to see if normal is pointing toward -xhat
# Resolves angle ambiguity in law of cosines
if nhat[ii, 0] < 0.:
theta = -theta
# Rotate in XZ plane, keeping Y the same
R = np.array([[np.cos(theta), 0., np.sin(theta)],
[0., 1., 0.],
[-np.sin(theta), 0., np.cos(theta)]])
newvels[ii, :] = np.dot(R, vels[ii, :])
return newvels
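    # Sanity-check sketch (hypothetical, not part of the original module):
    # rotating a purely longitudinal velocity zhat onto nhat = -xhat should
    # map vz into -vx, i.e.
    #     convert_vel_zhat_nhat(np.array([[0., 0., 1.]]),
    #                           np.array([[-1., 0.]]))
    # is expected to return approximately [[-1., 0., 0.]].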
def get_normals(self, x, y, z):
"""Calculate local surface normal at specified coordinates.
Arguments:
x (np.ndarray): x-coordinates of emitted particles (in meters).
y (np.ndarray): y-coordinates of emitted particles (in meters).
z (np.ndarray): z-coordinates of emitted particles (in meters).
Returns:
normals (np.ndarray): nx3 array containing the outward surface
normal vector at each particle location.
"""
# Since we've already pre-computed all the normals and already picked
# the right ones during the call to _get_xv_coords(), we can ignore the
# coordinate arguments here entirely and use the recently saved
# "contour_idx" values for indexing the pre-tabulated normals. To
# prevent this from being abused, we'll first check that the length of
# the coordinate lists matches that of the contour_idx list.
if len(x) != len(self.contour_idx):
raise ValueError('Length of particle coordinate list does not match'
+ ' the most recent number of emitted particles!')
normals = np.zeros((len(x), 3))
normals[:, 0] = self.normal[self.contour_idx, 0]
normals[:, 2] = self.normal[self.contour_idx, 1]
return normals
def plot_contours(self):
"""Plots the contours generated for the assembly object and the
assembly object. The object is plotted in yellow, and the contours
are plotted in blue. The plot is saved in contours.png"""
# calculate which tiles are inside of assembly object
self.xvec = np.arange(
mwxrun.xmin, mwxrun.xmax + self.dx/1000., self.dx)
self.yvec = [0.]
self.zvec = np.arange(
mwxrun.zmin, mwxrun.zmax + self.dz/1000., self.dz)
[X, Y, Z] = np.squeeze(np.meshgrid(self.xvec, self.yvec, self.zvec,
indexing='xy'))
oshape = X.shape
X = X.flatten()
Y = Y.flatten()
Z = Z.flatten()
inside = np.reshape(
self.conductor.isinside(X, Y, Z, aura=self.dA/5.),
oshape)
contours = np.array(skimage.measure.find_contours(inside, 0.17))
# plot assembly object first
assembly_cmap = colors.LinearSegmentedColormap.from_list('my_cmap',['white','#66c2a5'],256)
fig, ax = plt.subplots()
ax.imshow(inside, cmap=assembly_cmap, origin="lower")
# plot contours
for contour in contours:
ax.plot(contour[:, 1], contour[:, 0], linewidth=2, color="#fc8d62")
# set title and labels
ax.set_title(f"{self.conductor.name} contour plot")
x_range = [self.res_fac * mwxrun.zmin / mwxrun.dz, self.res_fac * mwxrun.zmax / mwxrun.dz]
y_range = [self.res_fac * mwxrun.xmin / mwxrun.dx, self.res_fac * mwxrun.xmax / mwxrun.dx]
x_step = mwxrun.dz / (self.res_fac)
y_step = mwxrun.dx / (self.res_fac)
minor_xticks = np.linspace(x_range[0], x_range[1], mwxrun.nz)
minor_yticks = np.linspace(y_range[0], y_range[1], mwxrun.nx)
major_xticks = np.linspace(x_range[0], x_range[1], 5)
major_yticks = np.linspace(y_range[0], y_range[1], 5)
ax.set_xlabel("Z (m)")
ax.set_ylabel("X (m)")
ax.set_xticks(major_xticks)
ax.set_xticks(minor_xticks, minor=True)
ax.set_xticklabels(np.round(major_xticks * x_step, 8), rotation=45)
ax.set_yticks(major_yticks)
ax.set_yticks(minor_yticks, minor=True)
ax.set_yticklabels(np.round(major_yticks * y_step, 8))
ax.grid(visible=True, which="minor")
ax.set_aspect(mwxrun.dx/mwxrun.dz, adjustable='box')
fig.tight_layout()
fig.savefig(f"{self.conductor.name}_contour_plot.png")
class VolumeEmitter(BaseEmitter):
"""Parent class for volumetric particle injection coordinates.
- ``volume`` gives the spatial volume in m^3
- ``_get_x_coords()`` implements the subclass-specific particle
injection logic
"""
volume = 0
geoms = ['Z', 'XZ', 'RZ', 'XYZ']
def __init__(self, T, xmin=None, xmax=None, ymin=None, ymax=None,
zmin=None, zmax=None, rmin=None, rmax=None):
"""Initialize emitter boundaries. A rectangular or cylindrical emitter
volume is supported. If x & y boundaries are specified the r boundaries
will be ignored and vice versa. If both x & y and r boundaries are
specified an AttributeError will be raised. If no boundaries are given
the simulation geometry and boundaries will be used.
Arguments:
T (float): Emitter temperature in Kelvin. Determines particle
velocity distribution.
x/y/z/rmin (float): Lower boundary of the volume.
x/y/z/rmax (float): Upper boundary of the volume.
"""
super(VolumeEmitter, self).__init__()
self.T = T
# determine default prism type from simulation geometry
self.rectangular = mwxrun.geom_str != 'RZ'
# check if a different volume was specified
r_bounds_given = (rmin is not None or rmax is not None)
xy_bounds_given = (
xmin is not None or xmax is not None or ymin is not None or
ymax is not None
)
if r_bounds_given and xy_bounds_given:
raise AttributeError(
"Both rectangular and cylindrical boundaries specified for a "
"VolumeEmitter"
)
if r_bounds_given:
self.rectangular = False
if xy_bounds_given:
self.rectangular = True
self.bounds = np.zeros((3, 2))
if self.rectangular:
for ii, (lim, defaultlim) in enumerate(
zip([xmin, xmax, ymin, ymax, zmin, zmax],
[mwxrun.xmin, mwxrun.xmax, mwxrun.ymin,
mwxrun.ymax, mwxrun.zmin, mwxrun.zmax])
):
if lim is None:
lim = defaultlim
self.bounds[ii // 2, ii % 2] = lim
self.volume = np.prod(self.bounds[:, 1] - self.bounds[:, 0])
# handle cylindrical case
else:
for ii, (lim, defaultlim) in enumerate(
zip([rmin, rmax, 0, 2.0*np.pi, zmin, zmax],
[mwxrun.rmin, mwxrun.rmax, 0, 2.0*np.pi,
mwxrun.zmin, mwxrun.zmax])
):
if lim is None:
lim = defaultlim
self.bounds[ii // 2, ii % 2] = lim
self.volume = (
np.pi * (self.bounds[0, 1]**2 - self.bounds[0, 0]**2)
* (self.bounds[2, 1] - self.bounds[2, 0])
)
# Note the negation here will catch nans, checking <= 0 won't.
if not (self.volume > 0):
raise RuntimeError("Invalid warpX geometry limits.")
def getvoltage(self):
"""Ideally this is probably the local potential, but default to 0."""
return 0.
def getvoltage_e(self):
"""Ideally this is probably the local potential, but default to 0."""
return self.getvoltage()
def _get_xv_coords(self, npart, m, rseed):
"""Get velocities and call specialized function for position."""
if rseed is not None:
nprstate = np.random.get_state()
np.random.seed(rseed)
# rseedv is passed to get velocities. The basic rseed here is used
# for positions, below.
rseedv = np.random.randint(1000000000)
else:
rseedv = None
x_coords = self._get_x_coords(npart)
v_coords = mwxutil.get_velocities(
npart, self.T, m=m, emission_type='random', rseed=rseedv)
if rseed is not None:
np.random.set_state(nprstate)
return (
x_coords[:, 0], x_coords[:, 1], x_coords[:, 2],
v_coords[0], v_coords[1], v_coords[2]
)
class UniformDistributionVolumeEmitter(VolumeEmitter):
"""Inject particles uniformly throughout a given volume at a specified
temperature.
"""
def _get_x_coords(self, npart):
"""Get coordinates uniformly distributed in space.
rseed, if used, is handled by the parent function.
"""
if self.rectangular:
xyz_pos = [
np.random.uniform(self.bounds[ii, 0], self.bounds[ii, 1],
npart)
for ii in range(3)
]
# handle cylindrical case
else:
r = np.sqrt(np.random.uniform(
self.bounds[0, 1]**2, self.bounds[0, 0]**2, npart
))
theta = np.random.uniform(self.bounds[1, 0], self.bounds[1, 1],
npart)
xyz_pos = [
r * np.cos(theta), r * np.sin(theta),
np.random.uniform(self.bounds[2, 0], self.bounds[2, 1],
npart)
]
return np.array(xyz_pos).T
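    # Usage sketch (hypothetical values; assumes mwxrun has been initialized):
    #     emitter = UniformDistributionVolumeEmitter(T=1200.0, rmin=0.0,
    #                                                rmax=1e-3, zmin=0.0,
    #                                                zmax=2e-3)
    #     xyz = emitter._get_x_coords(1000)  # (1000, 3) array of positions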
class ZSinDistributionVolumeEmitter(VolumeEmitter):
"""Vary density in z as a half-period sin wave."""
def _get_x_coords(self, npart):
"""Get coordinates with sin distribution.
rseed, if used, is handled by the parent function.
"""
if self.rectangular:
xpos = np.random.uniform(self.bounds[0, 0], self.bounds[0, 1],
npart)
ypos = np.random.uniform(self.bounds[1, 0], self.bounds[1, 1],
npart)
# handle cylindrical case
else:
r = np.sqrt(np.random.uniform(
self.bounds[0, 1]**2, self.bounds[0, 0]**2, npart
))
theta = np.random.uniform(self.bounds[1, 0], self.bounds[1, 1],
npart)
xpos = r * np.cos(theta)
ypos = r * np.sin(theta)
z_random_draw = np.random.random(npart)
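        # Inverse-transform sampling: for a density proportional to
        # sin(pi * (z - zmin) / (zmax - zmin)), the CDF is (1 - cos(pi*u)) / 2
        # with u in [0, 1], so u = arccos(1 - 2U) / pi for uniform draws U.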
zpos = (
np.arccos(1 - 2.0*z_random_draw) / np.pi
* (self.bounds[2, 1] - self.bounds[2, 0])
+ self.bounds[2, 0]
)
return np.array([xpos, ypos, zpos]).T
|
{"hexsha": "aa4569544b4a87d989e3b5b0890cd8b47a1c3de7", "size": 80240, "ext": "py", "lang": "Python", "max_stars_repo_path": "mewarpx/mewarpx/emission.py", "max_stars_repo_name": "ModernElectron/WarpX", "max_stars_repo_head_hexsha": "563813bc125a01a1a54267a3d4bb3ba77bcc68a3", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-23T23:38:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-23T23:38:50.000Z", "max_issues_repo_path": "mewarpx/mewarpx/emission.py", "max_issues_repo_name": "ModernElectron/WarpX", "max_issues_repo_head_hexsha": "563813bc125a01a1a54267a3d4bb3ba77bcc68a3", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 106, "max_issues_repo_issues_event_min_datetime": "2021-06-08T23:57:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T00:36:46.000Z", "max_forks_repo_path": "mewarpx/mewarpx/emission.py", "max_forks_repo_name": "ModernElectron/WarpX", "max_forks_repo_head_hexsha": "563813bc125a01a1a54267a3d4bb3ba77bcc68a3", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-21T18:50:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-21T18:50:43.000Z", "avg_line_length": 40.0599101348, "max_line_length": 99, "alphanum_fraction": 0.5855558325, "include": true, "reason": "import numpy,import numba", "num_tokens": 17918}
|
//
// Copyright (c) 2009--2010
// Thomas Klimpel and Rutger ter Borg
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_NUMERIC_BINDINGS_GLAS_COMPRESSED_HPP
#define BOOST_NUMERIC_BINDINGS_GLAS_COMPRESSED_HPP
#include <boost/numeric/bindings/begin.hpp>
#include <boost/numeric/bindings/end.hpp>
#include <boost/numeric/bindings/detail/adaptor.hpp>
#include <boost/numeric/bindings/detail/copy_const.hpp>
#include <boost/numeric/bindings/glas/detail/convert_to.hpp>
#include <boost/numeric/bindings/glas/dense_vector.hpp>
#include <boost/numeric/bindings/std/vector.hpp>
#include <glas/sparse/compressed.hpp>
namespace boost {
namespace numeric {
namespace bindings {
namespace detail {
template< typename T, typename O, typename IndexType, typename NNZType, int IB, typename Id, typename Enable >
struct adaptor< glas::sparse_matrix< T, glas::compressed_sparse_structure<O, IndexType, NNZType, IB> >, Id, Enable > {
typedef typename copy_const< Id, T >::type value_type;
typedef typename copy_const< Id, IndexType >::type index_type;
typedef typename convert_to< tag::data_order, O >::type data_order;
typedef mpl::map<
mpl::pair< tag::value_type, value_type >,
mpl::pair< tag::index_type, index_type >,
mpl::pair< tag::entity, tag::matrix >,
mpl::pair< tag::size_type<1>, std::ptrdiff_t >,
mpl::pair< tag::size_type<2>, std::ptrdiff_t >,
mpl::pair< tag::matrix_type, tag::general >,
mpl::pair< tag::data_structure, tag::compressed_sparse >,
mpl::pair< tag::data_order, data_order >,
mpl::pair< tag::index_base, mpl::int_<IB> >
> property_map;
static std::ptrdiff_t size1( const Id& id ) {
return id.num_rows();
}
static std::ptrdiff_t size2( const Id& id ) {
return id.num_columns();
}
static value_type* begin_value( Id& id ) {
return bindings::begin_value( id.value_array() );
}
static value_type* end_value( Id& id ) {
return bindings::begin_value( id.value_array() ) + id.nnz();
}
static index_type* begin_compressed_index_major( Id& id ) {
return bindings::begin_value( id.sparse_structure().compressed_index_array() );
}
static index_type* end_compressed_index_major( Id& id ) {
return bindings::end_value( id.sparse_structure().compressed_index_array() );
}
static index_type* begin_index_minor( Id& id ) {
return bindings::begin_value( id.sparse_structure().index_array() );
}
static index_type* end_index_minor( Id& id ) {
return bindings::begin_value( id.sparse_structure().index_array() ) + id.nnz();
}
};
} // namespace detail
} // namespace bindings
} // namespace numeric
} // namespace boost
#endif
|
{"hexsha": "f3bd2460f06faccda62fdac236b3dcaf3feba012", "size": 2875, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "externals/numeric_bindings/boost/numeric/bindings/glas/compressed.hpp", "max_stars_repo_name": "ljktest/siconos", "max_stars_repo_head_hexsha": "85b60e62beca46e6bf06bfbd65670089e86607c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 137.0, "max_stars_repo_stars_event_min_datetime": "2015-06-16T15:55:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T06:01:59.000Z", "max_issues_repo_path": "externals/numeric_bindings/boost/numeric/bindings/glas/compressed.hpp", "max_issues_repo_name": "ljktest/siconos", "max_issues_repo_head_hexsha": "85b60e62beca46e6bf06bfbd65670089e86607c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 381.0, "max_issues_repo_issues_event_min_datetime": "2015-09-22T15:31:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T09:05:23.000Z", "max_forks_repo_path": "externals/numeric_bindings/boost/numeric/bindings/glas/compressed.hpp", "max_forks_repo_name": "ljktest/siconos", "max_forks_repo_head_hexsha": "85b60e62beca46e6bf06bfbd65670089e86607c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 30.0, "max_forks_repo_forks_event_min_datetime": "2015-08-06T22:57:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T20:30:20.000Z", "avg_line_length": 33.8235294118, "max_line_length": 118, "alphanum_fraction": 0.6914782609, "num_tokens": 718}
|
[STATEMENT]
lemma rprodl_simps [simp]: "rprodl ((a, b), c) = (a, (b, c))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rprodl ((a, b), c) = (a, b, c)
[PROOF STEP]
by(simp add: rprodl_def)
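(* rprodl reassociates a left-nested pair: ((a, b), c) becomes (a, (b, c)).
   Note that Isabelle/HOL tuples associate to the right, so the goal's
   (a, b, c) is notation for (a, (b, c)). *)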
|
{"llama_tokens": 98, "file": "CryptHOL_Misc_CryptHOL", "length": 1}
|
# import useful libraries
import os
import numpy as np
import numpy.linalg as la
import myml.factorizations as myfac
import myml.images as myimg
import mysp.sound as mysnd
# implement main function to be executed
if __name__ == '__main__':
# specify directory to data
ddir = '../data'
# load the data
if 0:
D = np.array([])
for n in range(1,5):
dir = '{0}/dataset_{1}'.format(ddir,n)
print('Currently in directory({0})'.format(n))
for file in os.listdir(dir):
D0 = np.load('{0}/{1}'.format(dir,file))
(nt, nx, ny, nz) = D0.shape
Dt = D0.reshape((nt, nx * ny * nz)).T
if D.shape[0] == 0:
D = np.copy(Dt)
else:
D = np.hstack((D,Dt))
else:
D = np.load('../data/raw_features.npy').T
# get mean of data and subtract it
(d,nd) = D.shape
Dmean = np.mean(D,axis=1).reshape(d,1)
Dn = D - Dmean
# Get features using randomized range finder algorithm
(Q,B) = myfac.projrep(Dn,k_or_tol=20)
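    # projrep is assumed to return a rank-20 factorization Dn ≈ Q @ B, with Q
    # (d x 20) an approximate orthonormal basis for the range of Dn and B
    # (20 x nd) the corresponding coefficients; the line below reports the
    # reconstruction error per matrix element.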
print('2-Norm error per matrix element: ',la.norm(Dn - Q@B)/(d*nd))
# save the resulting data we can use to form the model
np.save('{0}/pca_features2.npy'.format(ddir), arr=Q)
np.save('{0}/pca_weights2.npy'.format(ddir), arr=B)
np.save('{0}/pca_mean2.npy'.format(ddir), arr=Dmean)
|
{"hexsha": "e42a218efaea6677d256ba5fb74c5b0981d83966", "size": 1470, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/pca_data.py", "max_stars_repo_name": "choward1491/cs598ps_project", "max_stars_repo_head_hexsha": "f6b15a418790c38637d80ff1bd62b6a2ab12cd3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-01-21T17:05:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-16T23:00:30.000Z", "max_issues_repo_path": "scripts/pca_data.py", "max_issues_repo_name": "choward1491/cs598ps_project", "max_issues_repo_head_hexsha": "f6b15a418790c38637d80ff1bd62b6a2ab12cd3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/pca_data.py", "max_forks_repo_name": "choward1491/cs598ps_project", "max_forks_repo_head_hexsha": "f6b15a418790c38637d80ff1bd62b6a2ab12cd3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4090909091, "max_line_length": 71, "alphanum_fraction": 0.5414965986, "include": true, "reason": "import numpy", "num_tokens": 415}
|
# As described in Algorithm 7.3.4 in [CGTBOOK]
struct CGT <: TRSPSolver
end
|
{"hexsha": "21115a91223a441c5d4ee7a1e4a9e807da0c420d", "size": 76, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/globalization/trs_solvers/solvers/CGT.jl", "max_stars_repo_name": "aaowens/NLSolvers.jl", "max_stars_repo_head_hexsha": "8be4390b85bf9b3631659b9d2966760bc722ed9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/globalization/trs_solvers/solvers/CGT.jl", "max_issues_repo_name": "aaowens/NLSolvers.jl", "max_issues_repo_head_hexsha": "8be4390b85bf9b3631659b9d2966760bc722ed9c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/globalization/trs_solvers/solvers/CGT.jl", "max_forks_repo_name": "aaowens/NLSolvers.jl", "max_forks_repo_head_hexsha": "8be4390b85bf9b3631659b9d2966760bc722ed9c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.0, "max_line_length": 46, "alphanum_fraction": 0.7236842105, "num_tokens": 29}
|
import numpy as np
import scipy.ndimage.measurements as scipy_measurements
import miapy.data.transformation as miapy_tfm
class ClipNegativeTransform(miapy_tfm.Transform):
def __init__(self, entries=('images',)) -> None:
super().__init__()
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
m = np.min(img)
if m < 0:
print('Clipping... min: {}'.format(m))
img = np.clip(img, a_min=0, a_max=None)
sample[entry] = img
return sample
class CenterCentroidTransform(miapy_tfm.Transform):
def __init__(self, entries=('images',)) -> None:
super().__init__()
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
centroid_transform = []
# move centroid to center
com = scipy_measurements.center_of_mass(img > 0)
for axis in range(0, 3):
diff = com[axis] - int(img.shape[axis] / 2)
centroid_transform.append(-diff)
if abs(diff) > 1:
img = np.roll(img, int(-diff), axis=axis)
sample[entry] = img
# store the centroid transformation (will be written to metadata later)
sample['centroid_transform'] = np.array(centroid_transform)
return sample
class RandomRotateShiftTransform(miapy_tfm.Transform):
def __init__(self, do_rotate=True, shift_amount=0, entries=('images',)) -> None:
super().__init__()
self.entries = entries
self.do_rotate = do_rotate
self.shift_amount = shift_amount
print('Using RandomRotateShiftTransform({}, {})'.format(do_rotate, shift_amount))
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
# shift +/- shift_amount pixels
if self.shift_amount != 0:
# number of pixels to shift
n = np.random.randint(-self.shift_amount, self.shift_amount + 1)
# axis
k = np.random.randint(0, 3)
img = np.roll(img, n, axis=k)
# 3x rotate by 90 degree around a random axis
if self.do_rotate:
planes = [(0, 1), (0, 2), (1, 2)]
for i in range(0, 3):
k = np.random.randint(0, 3)
plane_idx = np.random.randint(0, 3)
img = np.rot90(img, k, planes[plane_idx])
sample[entry] = img
return sample
def get_bounding_box(img):
a = np.argwhere(img)
min0, min1, min2 = a.min(0)
max0, max1, max2 = a.max(0)
return [min0, max0, min1, max1, min2, max2]
# Apply reverse center centroid transform
def revert_centroid_transform(img, centroid_transform):
for axis in range(0, 3):
diff = -centroid_transform[axis]
if abs(diff) > 1:
img = np.roll(img, int(diff), axis=axis)
return img
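# Note: for the same stored 'centroid_transform', this undoes
# CenterCentroidTransform above, since np.roll by -n reverses a roll by n
# along the same axis.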
|
{"hexsha": "5a8c5dccd774b45cfea010980c9e6fb6227679df", "size": 3307, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/data/preprocess.py", "max_stars_repo_name": "SCAN-NRAD/BrainRegressorCNN", "max_stars_repo_head_hexsha": "7917c6a6c4e3728db17ec762c63f8253392e6c04", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-11T18:49:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T18:49:34.000Z", "max_issues_repo_path": "Python/data/preprocess.py", "max_issues_repo_name": "SCAN-NRAD/BrainRegressorCNN", "max_issues_repo_head_hexsha": "7917c6a6c4e3728db17ec762c63f8253392e6c04", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/data/preprocess.py", "max_forks_repo_name": "SCAN-NRAD/BrainRegressorCNN", "max_forks_repo_head_hexsha": "7917c6a6c4e3728db17ec762c63f8253392e6c04", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0636363636, "max_line_length": 89, "alphanum_fraction": 0.5585122467, "include": true, "reason": "import numpy,import scipy", "num_tokens": 777}
|
import numpy as np
def create_iterable_dataset(torch_transforms_module, pipeline_results):
"""
Create a PyTorch iterable dataset that loads samples from pipeline results.
:param torch_transforms_module: The imported torch.transforms module.
:param pipeline_results: Pipeline results iterator.
:return: Dataset that has valid PyTorch images saved as tensors and density maps.
"""
class PipelineDataset:
def __init__(self):
self.images_and_density_maps = pipeline_results
self.image_transform = torch_transforms_module.Compose([
torch_transforms_module.ToTensor()
])
def __iter__(self):
for image, density_map in self.images_and_density_maps:
yield self.image_transform(image.copy().astype("float32")), density_map.copy().astype("float32")
return PipelineDataset()
def create_data_loader(torch_transforms_module, dataset, batch_size):
"""
Create a loader similar to PyTorch DataLoader but only with a single thread and no shuffling. Allows batching
results in a way that batches are created only from samples with the same shape. If not enough samples of the same
shape were present in a row, incomplete batches are returned.
:param torch_transforms_module: The imported torch.transforms module.
:param dataset: Dataset that yields tuples of image and density map.
:param batch_size: Preferred batch size.
:return: Iterator of batches of data from the dataset.
"""
class PipelineDataLoader:
def __init__(self):
self.dataset = dataset
self.batch_size = batch_size
self._loaded_data = sorted([(image, density_map) for (image, density_map) in self.dataset], key=lambda t: t[0].shape)
self._iterator = iter(self._loaded_data)
self._batch = []
self._current_batch_size = 0
def _unload_batch_into_tensors(self):
tensor_images = torch_transforms_module.ToTensor()(np.array(tuple(zip(*self._batch))[0]))
tensor_density_maps = torch_transforms_module.ToTensor()(np.array(tuple(zip(*self._batch))[1]))
self._batch = []
self._current_batch_size = 0
return tensor_images, tensor_density_maps
def _add_to_batch(self, image, density_map):
self._batch.append((image, density_map))
self._current_batch_size += 1
def __len__(self):
return len(self._loaded_data)
def __next__(self):
for _ in range(self._current_batch_size, self.batch_size):
try:
image, density_map = next(self._iterator)
except StopIteration:
self._iterator = iter(self._loaded_data)
raise
if self._current_batch_size != 0 and self._batch[0][0].shape != image.shape:
tensor_images, tensor_density_maps = self._unload_batch_into_tensors()
self._add_to_batch(image, density_map)
return tensor_images, tensor_density_maps
else:
self._add_to_batch(image, density_map)
tensor_images, tensor_density_maps = self._unload_batch_into_tensors()
return tensor_images, tensor_density_maps
def __iter__(self):
return self
return PipelineDataLoader()
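# Usage sketch (hypothetical names; 'pipeline' is assumed to yield
# (image, density_map) pairs of numpy arrays):
#     from torchvision import transforms
#     dataset = create_iterable_dataset(transforms, pipeline)
#     loader = create_data_loader(transforms, dataset, batch_size=8)
#     for images, density_maps in loader:
#         pass  # tensors, each batch built from equally-shaped samples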
|
{"hexsha": "29685bf2169f7561c8a110cfe28cc09dc54c1e99", "size": 3439, "ext": "py", "lang": "Python", "max_stars_repo_path": "CCAugmentation/examples/pytorch.py", "max_stars_repo_name": "pijuszczyk/CCAugmentation", "max_stars_repo_head_hexsha": "035ca0eaf000f5151fe8c68fc65ac8138bbc0e64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-25T00:51:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-25T00:55:52.000Z", "max_issues_repo_path": "CCAugmentation/examples/pytorch.py", "max_issues_repo_name": "pijuszczyk/CCAugmentation", "max_issues_repo_head_hexsha": "035ca0eaf000f5151fe8c68fc65ac8138bbc0e64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CCAugmentation/examples/pytorch.py", "max_forks_repo_name": "pijuszczyk/CCAugmentation", "max_forks_repo_head_hexsha": "035ca0eaf000f5151fe8c68fc65ac8138bbc0e64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9875, "max_line_length": 129, "alphanum_fraction": 0.6577493457, "include": true, "reason": "import numpy", "num_tokens": 687}
|
### A Pluto.jl notebook ###
# v0.12.20
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
macro bind(def, element)
quote
local el = $(esc(element))
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : missing
el
end
end
# ╔═╡ 63f5861e-6244-11eb-268b-a16bc3f8265c
using LinearAlgebra
# ╔═╡ dfc779ee-6246-11eb-240b-4dc7a7d95641
using Plots, RecipesBase
# ╔═╡ 23bcbb02-62ef-11eb-27f9-13ed327ac098
# edit the code below to set your name and UGent username
student = (name = "Joel Janssen", email = "Joel.Janssen@UGent.be");
# press the ▶ button in the bottom right of this cell to run your edits
# or use Shift+Enter
# you might need to wait until all other cells in this notebook have completed running.
# scroll down the page to see what's up
# ╔═╡ 1657b9b2-62ef-11eb-062e-4758f9ea1075
begin
using DSJulia;
using PlutoUI;
tracker = ProgressTracker(student.name, student.email);
md"""
Submission by: **_$(student.name)_**
"""
end
# ╔═╡ b1d21552-6242-11eb-2665-c9232be7026e
md"""
# Flatland

## Introduction and goal
In this notebook, we will implement a variety of two-dimensional geometric shapes.
The different shapes might have drastically different representations. For example, we can describe a rectangle
by the coordinates of its center, its length and its width. A triangle, on the other hand,
is more naturally represented by its three points. Similarly, computing the area of a rectangle or a triangle
involves two different formulas. The nice thing about Julia is that you can hide this complexity from the users.
You have to create your structures as subtypes of the abstract `Shape` type and write custom methods that will work
for each type!
Below, we suggest a variety of shapes, each with its unique representation. For this assignment, you have to complete **one**
type and make sure all the provided functions `corners`, `area`, `move!`, `rotate!`,... work. Using `RecipesBase`, you can easily
plot all your shapes (provided you implemented all the helper functions).
Implementing such shapes can have various exciting applications, such as making a drawing tool or a ray tracer. Our
end goal is to implement a simulator of a toy statistical physics system. Here, we simulate a system with inert particles, leading to self-organization.
The simple rejection sampling algorithm we will use is computationally very demanding, an ideal case study for Julia!
"""
# ╔═╡ 7189b1ee-62ef-11eb-121a-8d7bb3df52c3
md"""
## Assignments
"""
# ╔═╡ f8b080fe-6309-11eb-17aa-fb098fc00b11
md"""
| shape | difficulty |
|---|---|
| `Rectangle` | ⭐️ |
| `Square` | ⭐️ |
| `Circle` | ⭐️ ⭐️ |
|`RegularPolygon` | ⭐️ ⭐️ ⭐️ |
|`Triangle` | ⭐️ ⭐️ ⭐️ ⭐️ |
|`Quadrilateral` | ⭐️ ⭐️ ⭐️ ⭐️ ⭐️ |
"""
# ╔═╡ d65b61ba-6242-11eb-030d-b18a7518731b
md"## Types
We define all kinds of shapes. For the constructors, we follow the convention: `Shape((x,y); kwargs)` where `kwargs` are the keyword arguments determining
the shape.
"
# ╔═╡ e3f846c8-6242-11eb-0d12-ed9f7e534db8
abstract type Shape end
# ╔═╡ e7e43620-6242-11eb-1e2e-65874fe8e293
md"""
`AbstractRectangle` is for simple rectangles and squares, for which the sides are always aligned with the axes.
They have a `l`ength and `w`idth attribute, in addition to an `x` and `y` for their center.
"""
# ╔═╡ f4b05730-6242-11eb-0e24-51d4c60dc451
abstract type AbstractRectangle <: Shape end
# ╔═╡ fe413efe-6242-11eb-3c38-13b9d996bc90
begin
mutable struct Rectangle <: AbstractRectangle
x::Float64
y::Float64
l::Float64
w::Float64
function Rectangle((x, y); l=1.0, w=1.0)
return missing # replace this with the correct statement
end
end
function Rectangle((xmin, xmax), (ymin, ymax))
@assert xmin < xmax && ymin < ymax "Corners have to be ordered: `xmin < xmax && ymin < ymax `"
x = (xmin + xmax) / 2
y = (ymin + ymax) / 2
l = xmax - xmin
w = ymax - ymin
return Rectangle((x, y), l=l, w=w)
end
end
# ╔═╡ 4d4285e8-6334-11eb-0d76-136cc5f645cd
# ╔═╡ 12ddaece-6243-11eb-1e9d-2be312d2e22d
md"Squares are a special case of rectangle."
# ╔═╡ 16666cac-6243-11eb-0e0f-dd0d0ec53926
mutable struct Square <: AbstractRectangle
x::Float64
y::Float64
l::Float64
function Square((x, y); l=1.0)
return missing # replace this with the correct statement
end
end
# ╔═╡ 501f9828-6334-11eb-0f2a-ebaa1d5b0f46
# ╔═╡ 23ea0a46-6243-11eb-145a-b38e34969cfd
md"This small function to get `l` and `w` will allow you to treat `Square` and `Rectangle` the same!"
# ╔═╡ 1b129bf4-6243-11eb-1fa2-d7bd5563a1b4
begin
lw(shape::Rectangle) = shape.l, shape.w
lw(shape::Square) = shape.l, shape.l
end
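# Once the inner constructors above are completed, e.g.
# lw(Rectangle((0, 0), l=2, w=3)) == (2.0, 3.0) and
# lw(Square((0, 0), l=2)) == (2.0, 2.0), so generic code can simply
# destructure l, w = lw(shape) for any AbstractRectangle.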
# ╔═╡ 94ec5382-6335-11eb-100c-15d70f27e703
# ╔═╡ 3d67d61a-6243-11eb-1f83-49032ad146da
mutable struct Circle <: Shape
x::Float64
y::Float64
R::Float64
function Circle((x, y); R=1.0)
return missing # replace this with the correct statement
end
end
# ╔═╡ 4ce7abea-6335-11eb-1657-a3ee8986d55e
# ╔═╡ 33757f2c-6243-11eb-11c2-ab5bbd90aa6b
mutable struct RegularPolygon{N} <: Shape
x::Float64
y::Float64
R::Float64
θ::Float64 # angle
function RegularPolygon((x, y), n::Int; R=1.0, θ=0.0)
@assert n ≥ 3 "polygons need a minimum of three corners"
return missing # replace this with the correct statement
end
end
# ╔═╡ bbbe4c9a-6335-11eb-1dc7-55ddf17887f4
# ╔═╡ 4234b198-6243-11eb-2cfa-6102bfd9b896
md"Triangles will be described by their three points. Its center will be computed when needed."
# ╔═╡ 473d9b5c-6243-11eb-363d-23108e81eb93
abstract type AbstractTriangle <: Shape end
# ╔═╡ ce3393a8-6335-11eb-06e9-af93a3794902
md"This one is for free:"
# ╔═╡ 50e45ac6-6243-11eb-27f9-d5e7d0e1dc01
mutable struct Triangle <: AbstractTriangle
x1::Float64
x2::Float64
x3::Float64
y1::Float64
y2::Float64
y3::Float64
Triangle((x1, y1), (x2, y2), (x3, y3)) = new(x1, x2, x3, y1, y2, y3)
end
# ╔═╡ dad14258-6309-11eb-0a9a-37c0386c8cb4
md"Define some examples."
# ╔═╡ 55de4f76-6243-11eb-1445-a54d01242f64
rect = Rectangle((1, 2), l=1, w=2)
# ╔═╡ 5b6b9854-6243-11eb-2d5b-f3e41ecf2914
square = Square((0, 1))
# ╔═╡ 5f120f1a-6243-11eb-1448-cb12a75680b0
triangle = Triangle((-0.1, 0.5), (1, 2), (1, -0.5))
# ╔═╡ 64fcb6a0-6243-11eb-1b35-437e8e0bfac8
pent = RegularPolygon((0, 0), 5)
# ╔═╡ 668f568a-6243-11eb-3f01-adf1b603e0e4
hex = RegularPolygon((1.2, 3), 6)
# ╔═╡ 7b785b7a-6243-11eb-31c2-9d9deea78842
circle = Circle((10, 10))
# ╔═╡ 5a61e0da-6338-11eb-2a58-ad06aae62940
md"""**Select one of the shapes you have developed.**
My shape type: $(@bind myshapeType Select(["Square", "Rectangle", "Circle", "RegularPolygon{N}", "Triangle"]))
"""
# ╔═╡ b6a4c98a-6300-11eb-0542-ab324d8e4d7e
begin
if myshapeType == "Square"
myshape = square
elseif myshapeType == "Rectangle"
myshape = rect
elseif myshapeType == "Circle"
myshape = circle
elseif myshapeType == "RegularPolygon{N}"
myshape = pent # needs to be more general WIP
elseif myshapeType == "Triangle"
myshape = triangle # needs to be more general WIP
else
myshape = missing
end
	tester = ismissing(myshape) ? md"❌ **$myshapeType is not properly defined!** First complete the inner constructors for $myshapeType or change to a type that you have defined." : md""
end;
# ╔═╡ ca5302b2-6337-11eb-2e98-efb764a792a4
tester
# ╔═╡ 7c80d608-6243-11eb-38ba-f97f7476b245
md"""
## Corners and center
"""
# ╔═╡ 58eb84be-63c3-11eb-09f5-6d16973c7aa7
md""
# ╔═╡ 57dee25a-63c3-11eb-0c7a-bfb1ac79bc7b
md"""
Some very basic functions to get or generate the corners and centers of your shapes. The corners are returned as a list of tuples, e.g. `[(x1, y1), (x2, y2), ...]`.
"""
# ╔═╡ 62e7e05e-62fe-11eb-1611-61274c5498cc
begin
q_cc1 = Question(
description = md"""
**The number of corners**
```julia
ncorners(shape::myShape)
```
that returns the number of corners.
"""
)
q_cc2 = Question(
description = md"""
**The corners**
```julia
corners(shape::myShape)
```
that returns the coordinates of the corners.
"""
)
q_cc3 = Question(
description = md"""
**The center**
```julia
center(shape::myShape)
```
that returns the center.
"""
)
q_cc4 = Question(
description = md"""
**The outline**
```julia
xycoords(shape::Shape)
```
	xycoords returns two vectors of the outline: xcoords and ycoords. For `Circle`, you can specify the number of points to take (50 by default). For many shapes this is just another representation of the corners.
"""
)
qb_cc = QuestionBlock(;
title=md"**Assignment: corners and center**",
description = md"""
Complete the following functions for your shape ($myshapeType)
""",
questions = [q_cc1, q_cc2, q_cc3, q_cc4]
)
#validate(qb_cc, tracker)
end
# ╔═╡ a005992e-6243-11eb-3e29-61c19c6e5c7c
begin
ncorners(::Circle) = 0 # this one is for free!
ncorners(shape::Shape) = missing # leave this default
#...add your own ncorners
end
# ╔═╡ ac423fa8-6243-11eb-1385-a395d208c42d
begin
function corners(shape::Shape)
return missing
end
end
# ╔═╡ ddf0ac38-6243-11eb-3a1d-cd39d70b2ee0
#=begin
center(shape::Shape) = (missing, missing)
end=#
# ╔═╡ ecc9a53e-6243-11eb-2784-ed46ccbcadd2
begin
#xycoords(shape::Shape) = missing, missing
xycoords(s::Shape) = [first(p) for p in corners(s)], [last(p) for p in corners(s)]
function xycoords(shape::Circle; n=50)
# compute `n` points of the circle
return missing, missing
end
end
# ╔═╡ c16c36f6-6339-11eb-20d4-27ef9f74b747
# ╔═╡ 5de0c912-6244-11eb-13fd-bfd8328191a6
md"""
## x,y-bounding
"""
# ╔═╡ fe48d2f0-63c2-11eb-12f7-d5a8d6d0ce27
md""
# ╔═╡ aa186788-63c3-11eb-1bd1-d138d586e8b6
begin
q_bound = Question(;
description=md"""
	The functions below are supposed to yield the outer limits of the x- and y-axes of your shape. Can you complete the methods with a one-liner?
""")
qb_bound = QuestionBlock(;
title=md"**Assignment: bounding box ⭐️⭐️**",
questions = [q_bound],
hints=[hint(md"The function `extrema` could be useful here...")]
)
end
# ╔═╡ a89bdba6-6244-11eb-0b83-c1c64e4de17d
begin
xlim(shape::Shape) = missing
end
# ╔═╡ b1372784-6244-11eb-0279-27fd755cda6a
begin
ylim(shape::Shape) = missing
end
# ╔═╡ bd706964-6244-11eb-1d9d-2b60e53cdce1
md"This should return the bounding box, as the smallest rectangle that can completely contain your shape."
# ╔═╡ b91e1e62-6244-11eb-1045-0770fa92e040
boundingbox(shape::Shape) = missing
# ╔═╡ d60f8ca4-6244-11eb-2055-4551e4c10906
md"""
## Area
"""
# ╔═╡ ebf4a45a-6244-11eb-0965-197f536f8e87
begin
area(shape::Shape) = missing
end
# ╔═╡ 230dd290-6303-11eb-0f55-311ef2b9541e
# ╔═╡ 36cc0492-6246-11eb-38dd-4f42fb7066dc
md"""
## Moving, rotating and scaling
"""
# ╔═╡ ed4bfad2-63c6-11eb-0292-73c34e4d34a6
md""
# ╔═╡ 285930b8-63c7-11eb-372b-edc4a4ed0d0a
begin
q_mrs = Question(;
description=md"""
Next, let us define some translation, rotation and scaling operations on the shapes.
	Important: the functions work *in-place*, meaning that they modify your object (that is why we use `mutable` structures).
	For `Circle` and `AbstractRectangle` types, `rotate!` leaves them unchanged.
	**Note**: rotations are in radians, so between $0$ and $2\pi$.
""")
qb_mrs = QuestionBlock(;
title=md"**Assignment: I like to move it ⭐️⭐️⭐️**",
questions = [q_mrs])
end
# ╔═╡ 83c6d25c-6246-11eb-1a24-57e20f5e7262
begin
	function move!(shape::Shape, (dx, dy))
# move the shape
return shape
end
end
# ╔═╡ a1b2a4f8-6246-11eb-00ea-8f6042c72f4e
begin
function rotate!(shape::Shape, dθ)
# rotate the shape, counterclockwise
return shape
end
end
# ╔═╡ b907e8fc-6246-11eb-0beb-bb44930d033c
begin
function scale!(shape::Shape, a)
@assert a > 0 "scaling has to be a positive number"
# scale with a factor a
return shape
end
end
# ╔═╡ 2d7a63cc-64db-11eb-0a4c-bb7771af8b14
# ╔═╡ d08ab6d0-6246-11eb-08a8-152f9802cdfc
md"""
## Plotting
OK, let's take a look at our shapes! We use `RecipesBase` to allow plotting.
This falls back on `xycoords` (can you see how it works?), so make sure this method is operational.
"""
# ╔═╡ e30d10d2-6246-11eb-1d59-332b5916712e
@recipe function f(s::Shape)
xguide --> "x"
yguide --> "y"
label --> ""
aspect_ratio := :equal
seriestype := :shape
x, y = xycoords(s)
return x, y
end
# ╔═╡ e7e90744-6246-11eb-157c-cf67e8619d6e
"""Plots a list of shapes."""
function plotshapes(shapes; kwargs...)
p = plot(;kwargs...)
plot!.(shapes)
return p
end
# ╔═╡ 1aec9fc2-6247-11eb-2942-edc370918f9e
md"Let's look!"
# ╔═╡ 8bdc61b0-6330-11eb-3e9a-15412fecf8af
myshape
# ╔═╡ 16d0ea9c-6247-11eb-12c6-1709f6d0ac99
plot(myshape)
# ╔═╡ 6bae2128-6303-11eb-34f2-1dfa96e46ae6
md"This is how it should look like,"
# ╔═╡ 287a7506-6247-11eb-2bad-0778802c00d5
plot(myshape, color="pink")
# ╔═╡ 01d899e6-6305-11eb-017b-27bb2c104ef5
# ╔═╡ 221e09a2-6247-11eb-12a8-a13c0a2f96e7
md"""
## In and intersection
Here, we want to perform some geometric checks.
```julia
(x, y) in shape
```
should return a Boolean whether the point is in the shape.
"""
# ╔═╡ 6851ebb2-6339-11eb-2ab7-39e07c4e3154
begin
q_in = Question(;
description=md"""
	Complete the function `in(q, shape::Shape)` that checks whether a point falls inside a shape. The function `same_side((a, b), p, q)` is provided to check whether two points are on the same side of a line. This should prove very useful to complete this task.""")
qb_in = QuestionBlock(;
title=md"**Is point in shape? ⭐️⭐️⭐️**",
questions = [q_in],
hints=[
hint(md"It has something to do with the center..."),
hint(md"... but also with the edges"),
hint(md"Given that a point is outside a shape, it is always **outside** all edges.")
]
)
end
# ╔═╡ e565d548-6247-11eb-2824-7521d4fa6b2b
begin
function Base.in((x, y), s::Shape)
# compute something
return missing
end
end
# ╔═╡ 150f1dae-6248-11eb-276f-9bbf7eba58fd
"""
same_side((a, b), p, q)
Given a line described by two points, `a` and `b`, check whether two points
`p` and `q` are on the same side.
"""
function same_side((a, b), p, q)
# normal vector on the line
n = (a[2] - b[2], b[1] - a[1])
# check if they are on both sides by projection
return sign(n ⋅ (p .- a)) == sign(n ⋅ (q .-a ))
end
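# Worked example: for the line through a = (0, 0) and b = (1, 1), the normal
# is n = (-1, 1); the projections of p = (0, 1) and q = (1, 0) are +1 and -1,
# so same_side(((0, 0), (1, 1)), (0, 1), (1, 0)) == false.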
# ╔═╡ b8ed26f2-633b-11eb-380e-9379b0f4697f
md"Too prove that this function works:"
# ╔═╡ f4873fce-6249-11eb-0140-871354ca5430
let
	# verification
	points_all = [randn(2) for i in 1:1_000]
	points = filter(p -> p ∈ myshape, points_all)
	scatter(first.(points_all), last.(points_all), opacity=0.1, color=:lightgrey, label="all")
	scatter!(first.(points), last.(points), label="in")
plot!(myshape, alpha=0.2)
end
# ╔═╡ 22f63a5e-633a-11eb-27c7-27fcabc7bc6f
# ╔═╡ f3ea648e-633b-11eb-3444-317a4eb5b8ea
begin
q_circle = Question(;
description=md"""
**Two circles intersecting ⭐️**
```julia
Base.intersect(shape1::Circle, shape2::Circle)
```
""")
q_recrec = Question(;
description=md"""
**Rectangle intersecting another rectangle ⭐️**
```julia
Base.intersect(shape1::AbstractRectangle, shape2::AbstractRectangle)
```
""")
q_triatria = Question(;
description=md"""
**Two triangles intersecting (efficient) ⭐️⭐️⭐️⭐️⭐️**
```julia
Base.intersect(shape1::Triangle, shape2::Triangle)
```
""")
q_general = Question(;
description=md"""
**Any two shapes intersecting (naive) ⭐️⭐️⭐️**
```julia
Base.intersect(shape1::T, shape2::T) where {T<:Shape}
```
""")
qb_inter = QuestionBlock(;
title=md"**Intersection between shapes?**",
description=md"""
Similarly, we want to check whether two shapes overlap (partially):
```julia
intersect(shape1, shape2)
```
	Complete the function `intersect(shape1, shape2)` that checks whether there is a partial overlap (intersection) between two shapes.
	The efficiency and the process of checking intersection differ considerably for each shape and each combination of two shapes. Complete **at least one** of the following combinations.""",
questions = [q_circle, q_recrec, q_triatria, q_general],
hints=[
hint(md"Checking overlapping bounding boxes is very efficient"),
hint(md"For most shapes: overlapping bounding boxes is a required, but not a sufficient condition for overlap"),
hint(md"[Very good hint](https://i.imgur.com/TpIStMK.png)")
]
)
end
# ╔═╡ 5368c46e-633e-11eb-0d98-b1ccb37cc7f8
begin
function Base.intersect(shape1::AbstractRectangle, shape2::AbstractRectangle)
return missing
end
function Base.intersect(shape1::Circle, shape2::Circle)
return missing
end
function Base.intersect(shape1::Triangle, shape2::Triangle)
return missing
end
function Base.intersect(shape1::T, shape2::T) where {T<:Shape}
return missing
end
end
# ╔═╡ e6efb632-6338-11eb-2e22-eb0b1ff577c4
md"We have defined some functions you might find useful:"
# ╔═╡ 0381fbba-6248-11eb-3e80-b37137438531
crossprod((x1, y1), (x2, y2)) = x1 * y2 - x2 * y1
# ╔═╡ 91273cd2-6248-11eb-245c-abb6269f916b
md"Note, Julia will parse composite arguments:"
# ╔═╡ 653af7c6-6248-11eb-2a7b-fbf7550ef92b
"""
linecross((p1, p2), (q1, q2))
Check whether line segments `(p1, p2)` and `(q1, q2)` intersect.
"""
function linecross((p1, p2), (q1, q2))
v = p2 .- p1
w = q2 .- q1
vw = crossprod(v, w)
t = crossprod(q1 .- p1, w) / vw
s = crossprod(q1 .- p1, v) / vw
return 0.0 ≤ t ≤ 1.0 && 0.0 ≤ s ≤ 1.0
end
# ╔═╡ 6aa3519a-6248-11eb-193d-a3537f7d3bd0
linecross(((0, 0), (1, 1)), ((0, 1), (1, 0)))
# ╔═╡ 51bca412-6340-11eb-3f38-8bfc8377715b
# ╔═╡ e21b0f1c-633b-11eb-3609-9b9dae71c915
md"""
By completing these functions, the following mathematical syntax should also work:
```julia
(x, y) ∈ shape # \in<TAB>
shape1 ∩ shape2 # \cap<TAB>
```
"""
# ╔═╡ f97bf1c0-6247-11eb-1acc-e30068a277d0
md"""
## Random placement
Finally, `randplace!` takes a shape, rotates it randomly and moves it randomly within the limits `(xmin, xmax)` and `(ymin, ymax)`. Note that the **whole** shape should be within these bounds, not only the center!
"""
# ╔═╡ 97c8cd32-6340-11eb-1d6d-b7d364c0c987
begin
q_rand = Question(;
description=md"""
	Complete the function `randplace!(shape::Shape, (xmin, xmax), (ymin, ymax); rotate=true)` that places a shape at a random location within a given bounding box. Can you define it in such a way that it also gives a random rotation to the shape?""")
qb_rand = QuestionBlock(;
title=md"**Assignment: random placement⭐️⭐️⭐️**",
questions = [q_rand],
hints=[
]
)
end
# ╔═╡ 8d73b66c-624e-11eb-0a52-2309ef897b1c
function randplace!(shape::Shape, (xmin, xmax), (ymin, ymax); rotate=true)
# random rotation
# random translation within bound
return shape
end
# ╔═╡ 3e0a2e20-6341-11eb-3c23-a38b04c89b37
md"Testing if the function works:"
# ╔═╡ 0ee778d2-6341-11eb-10b0-7146fbbc71ff
begin
# verification
my_shapes = [randplace!(deepcopy(myshape), (-10,10),(-10,10)) for i in 1:100]
plotshapes(my_shapes, alpha=0.2)
end
# ╔═╡ 2338ef6a-630b-11eb-1837-431b567ad619
md"""
## Simulating a system of shapes
Suppose we want to use our shape(s) to study a system of non-interacting particles.
Here, we assume that the shapes are rigid and cannot overlap.
There are no forces that attract or repel particles.
Such studies might be of interest in nanoscience, molecular dynamics or self-organization of complex systems.
One approach to study systems of particles is to model every particle's dynamics, keep track of all collisions, etc.
We will do something more ingenious: we will use ideas from statistical physics.
Namely, every valid state (i.e., no shapes overlap and all shapes are within the box) is equally likely.
So instead of simulating the system, we will take samples from it!
These samples are equivalent to random 'snapshots' of a more complex simulation.
Pretty cool, right?
To generate the samples, we will use [rejection sampling](https://en.wikipedia.org/wiki/Rejection_sampling).
Here, we will randomly place shapes within the box until we are lucky and found one that does not overlap.
More concretely, we take the following steps:
1. generate all the shapes you want to place;
2. randomly place the shapes into the box (using `randplace!`);
3. from the moment a single shape overlaps with another shape, you have to start entirely anew from step 2.
The last point is crucial! If you place a shape that overlaps an earlier shape,
it is insufficient to redistribute that shape. **You have to start over completely.** Only then will you generate correct samples.
The inputs of our function implementing the above algorithm are:
- `shapes`: a list of your shapes (same type, but not necessarily with the same dimensions);
- `xlims`, `ylims`: tuples outlining the box;
The function works in place and returns the number of trials needed to generate a valid sample.
This quantity is relevant by itself (it is related to the partition function of the Boltzmann distribution), but we will only use it for diagnostic purposes.
As you might imagine, this algorithm is still very computationally expensive.
Try with about 20 shapes, and work your way up to more extensive examples.
Try a mixture of small and large shapes. You should see some self-organization going on!
"""
# ╔═╡ 965b578e-63a5-11eb-2cf4-690ec58e939d
xlims = (0, 100)
# ╔═╡ 961ea26c-63a5-11eb-1227-4bcaf4778d82
ylims = (0, 80)
# ╔═╡ a30ded16-63a5-11eb-35f2-2b1ff724eb54
function rejection_sampling!(shapes::Vector{<:Shape}, xlims, ylims)
# place all the shapes one-by-one, such that they don't overlap
# the moment you find a single conflict, you have to start over again
return shapes, trials
end
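# A possible outline (sketch only, not the course's reference solution):
#   trials = 0
#   while true
#       trials += 1
#       foreach(s -> randplace!(s, xlims, ylims), shapes)
#       overlap = any(intersect(shapes[i], shapes[j])
#                     for i in 1:length(shapes) for j in i+1:length(shapes))
#       overlap || return shapes, trials
#   end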
# ╔═╡ 3a961b6e-62f1-11eb-250b-13a3f6f17eaa
begin
checkbox(test::Bool)= test ? "✅" : "◯"
checkbox2(test::Bool)= test ? "✅" : ""
test1 = @safe !ismissing(myshape)
test2 = @safe !ismissing(myshape |> corners) && !ismissing(myshape|> ncorners)
test3 = @safe !ismissing(myshape |> center)
test4 = @safe !ismissing(myshape |> xycoords)
test5 = @safe !ismissing(myshape |> xlim) && !ismissing(myshape |> ylim) &&
!ismissing(myshape |> boundingbox)
test6 = @safe !ismissing(myshape |> area)
test7 = @safe !ismissing(move!(myshape |> deepcopy,(1.0,1.0))) &&
!ismissing(rotate!(myshape |> deepcopy,(1.0))) &&
!ismissing(scale!(myshape |> deepcopy,(1.0)))
test8 = @safe !ismissing(in((0.5, 0), myshape))
test9 = @safe !ismissing(intersect(myshape, myshape))
test10 = @safe !ismissing(randplace!(myshape, (0.0, 1.0), (0.0, 1.0)))
test11 = @safe rejection_sampling!([deepcopy(myshape) for i in 1:2], (0, 100), (0, 100)) |> !ismissing
end;
# ╔═╡ 7545c788-62f0-11eb-3f6e-01deeaf990e0
md"""
$(checkbox(test1)) add the correct *inner* constructor to your type (see below);
$(checkbox(test2)) complete `corners` and `ncorners`, which return the corners and the number of corners, respectively;
$(checkbox(test3)) complete `center` to return the center of mass of the shape;
$(checkbox(test4)) complete `xycoords`, which give two vectors with the x- and y-coordinates of the shape, used for plotting;
$(checkbox(test5)) complete `xlim` and `ylim` to give the range on the x- and y-axes of your shape, in addition to `boundingbox` to generate a bounding box of your shape;
$(checkbox(test6)) complete `area`, this computes the area of your shape;
$(checkbox(test7)) complete `move!`, `rotate!` and `scale!` to transform your shape **in place** (note: `AbstractRectangle`s cannot be rotated, they are always aligned to the axes);
$(checkbox(test8)) complete the function `in`, to check whether a point is in your shape;
$(checkbox(test9)) complete `intersect`, to check whether two shapes overlap;
$(checkbox(test10)) complete `randplace!`, which randomly moves and rotates a shape within a box;
$(checkbox(test11)) complete the rejection sampling algorithm and experiment with your shape(s).
**Note:** You will need to create specific methods for different types. It's your job to split the template for the functions into several methods and use dispatch.
"""
# ╔═╡ 30c89806-6331-11eb-0610-d3545e7aeba4
begin
test_rect = @safe Rectangle((1.0, 1.0)) !== missing
q_rect_con = Question(;
description=md"""
So we have defined a composite Rectangle type with a few fields, but the inner constructor is missing. This inner constructor should instantiate a Rectangle with center (`x`,`y`) and a default length and width of 1.0.
Multiple dispatch allows us to define multiple constructors for different scenarios. So we have defined an additional constructor where the extremum coordinates (`xmin`, `xmax`), (`ymin`, `ymax`) are provided, assuming that the rectangle is always aligned with the axes.
""")
q_rect_con = QuestionBlock(;
title=md"**Rectangle ⭐️** $(checkbox2(test_rect)) ",
questions = [q_rect_con],
hints=[
hint(md"Remember, `new()`?")
]
)
end
# ╔═╡ abc99468-6333-11eb-1a9d-e50f8e56e468
begin
test_sq = @safe Square((1.0, 1.0)) !== missing
q_sq_con = Question(;
description=md"""
Can you complete the inner constructor for the square type?
""")
qb_sq_con = QuestionBlock(;
title=md"**Square ⭐️** $(checkbox2(test_sq)) ",
questions = [q_sq_con]
)
end
# ╔═╡ 5dcdba4e-6335-11eb-19d2-2d10ae81fa39
begin
test_circle = @safe Circle((1.0, 1.0)) !== missing
q_circle_con = Question(;
description=md"""
`Circle`s are pretty straightforward, having a center and a radius.
""")
qb_circle_con = QuestionBlock(;
title=md"**Circles ⭐️⭐️** $(checkbox2(test_circle)) ",
questions = [q_circle_con]
)
end
# ╔═╡ 6d06ddfc-6334-11eb-2995-81333ac5e1cd
begin
test_poly = @safe RegularPolygon((1.0, 1.0), 5) !== missing
q_poly_con = Question(;
description=md"""
Regular polygons have a center (`x`, `y`), a radius `R` (the distance from the center to one of the corners) and an angle `θ` describing how the polygon is tilted.
The order of the polygon is part of its parametric type, so we give the compiler some hints on how it will behave.
Can you complete the inner constructor for the polygon type? This is a little more challenging since it is a **parametric composite type** where `N` is the number of corners.
""")
qb_poly_con = QuestionBlock(;
title=md"**Regular polygons ⭐️⭐️⭐️** $(checkbox2(test_poly)) ",
questions = [q_poly_con],
hints=[hint(md"`new{n}(...)`")]
)
end
# ╔═╡ 94cbef4e-6348-11eb-030f-d7a9debdd305
begin
q_area = Question(;
description=md"""
Next, let us compute the area of our shapes.
""")
qb_area = QuestionBlock(;
title=md"**Assignment: Area ⭐️** $(checkbox2(test_circle)) ",
questions = [q_area],
hints= [
hint(md"The area of a triangle can be computed as $${\frac {1}{2}}{\big |}(x_{A}-x_{C})(y_{B}-y_{A})-(x_{A}-x_{B})(y_{C}-y_{A}){\big |}$$."),
hint(md"A regular polygon consists of a couple of isosceles triangles.")
]
)
end
# ╔═╡ e5a7eee2-63a5-11eb-0267-499409488b19
md"Function to place `n` copies of a given `shape`:"
# ╔═╡ de6124d2-63a5-11eb-1145-5b11f5f1c0f4
function rejection_sampling(shape, n, xlims, ylims)
shapes = [deepcopy(shape) for i in 1:n]
    shapes, trials = rejection_sampling!(shapes, xlims, ylims)
return shapes, trials
end
# ╔═╡ Cell order:
# ╠═23bcbb02-62ef-11eb-27f9-13ed327ac098
# ╟─1657b9b2-62ef-11eb-062e-4758f9ea1075
# ╠═63f5861e-6244-11eb-268b-a16bc3f8265c
# ╟─b1d21552-6242-11eb-2665-c9232be7026e
# ╟─7189b1ee-62ef-11eb-121a-8d7bb3df52c3
# ╟─7545c788-62f0-11eb-3f6e-01deeaf990e0
# ╟─f8b080fe-6309-11eb-17aa-fb098fc00b11
# ╟─3a961b6e-62f1-11eb-250b-13a3f6f17eaa
# ╟─d65b61ba-6242-11eb-030d-b18a7518731b
# ╠═e3f846c8-6242-11eb-0d12-ed9f7e534db8
# ╟─e7e43620-6242-11eb-1e2e-65874fe8e293
# ╠═f4b05730-6242-11eb-0e24-51d4c60dc451
# ╠═fe413efe-6242-11eb-3c38-13b9d996bc90
# ╟─30c89806-6331-11eb-0610-d3545e7aeba4
# ╟─4d4285e8-6334-11eb-0d76-136cc5f645cd
# ╟─12ddaece-6243-11eb-1e9d-2be312d2e22d
# ╠═16666cac-6243-11eb-0e0f-dd0d0ec53926
# ╟─abc99468-6333-11eb-1a9d-e50f8e56e468
# ╟─501f9828-6334-11eb-0f2a-ebaa1d5b0f46
# ╟─23ea0a46-6243-11eb-145a-b38e34969cfd
# ╠═1b129bf4-6243-11eb-1fa2-d7bd5563a1b4
# ╟─94ec5382-6335-11eb-100c-15d70f27e703
# ╟─5dcdba4e-6335-11eb-19d2-2d10ae81fa39
# ╠═3d67d61a-6243-11eb-1f83-49032ad146da
# ╟─4ce7abea-6335-11eb-1657-a3ee8986d55e
# ╟─6d06ddfc-6334-11eb-2995-81333ac5e1cd
# ╠═33757f2c-6243-11eb-11c2-ab5bbd90aa6b
# ╟─bbbe4c9a-6335-11eb-1dc7-55ddf17887f4
# ╟─4234b198-6243-11eb-2cfa-6102bfd9b896
# ╠═473d9b5c-6243-11eb-363d-23108e81eb93
# ╟─ce3393a8-6335-11eb-06e9-af93a3794902
# ╠═50e45ac6-6243-11eb-27f9-d5e7d0e1dc01
# ╟─dad14258-6309-11eb-0a9a-37c0386c8cb4
# ╠═55de4f76-6243-11eb-1445-a54d01242f64
# ╠═5b6b9854-6243-11eb-2d5b-f3e41ecf2914
# ╠═5f120f1a-6243-11eb-1448-cb12a75680b0
# ╠═64fcb6a0-6243-11eb-1b35-437e8e0bfac8
# ╠═668f568a-6243-11eb-3f01-adf1b603e0e4
# ╠═7b785b7a-6243-11eb-31c2-9d9deea78842
# ╟─b6a4c98a-6300-11eb-0542-ab324d8e4d7e
# ╟─5a61e0da-6338-11eb-2a58-ad06aae62940
# ╟─ca5302b2-6337-11eb-2e98-efb764a792a4
# ╟─7c80d608-6243-11eb-38ba-f97f7476b245
# ╟─58eb84be-63c3-11eb-09f5-6d16973c7aa7
# ╟─57dee25a-63c3-11eb-0c7a-bfb1ac79bc7b
# ╟─62e7e05e-62fe-11eb-1611-61274c5498cc
# ╠═a005992e-6243-11eb-3e29-61c19c6e5c7c
# ╠═ac423fa8-6243-11eb-1385-a395d208c42d
# ╠═ddf0ac38-6243-11eb-3a1d-cd39d70b2ee0
# ╠═ecc9a53e-6243-11eb-2784-ed46ccbcadd2
# ╟─c16c36f6-6339-11eb-20d4-27ef9f74b747
# ╟─5de0c912-6244-11eb-13fd-bfd8328191a6
# ╟─fe48d2f0-63c2-11eb-12f7-d5a8d6d0ce27
# ╟─aa186788-63c3-11eb-1bd1-d138d586e8b6
# ╠═a89bdba6-6244-11eb-0b83-c1c64e4de17d
# ╠═b1372784-6244-11eb-0279-27fd755cda6a
# ╟─bd706964-6244-11eb-1d9d-2b60e53cdce1
# ╠═b91e1e62-6244-11eb-1045-0770fa92e040
# ╟─d60f8ca4-6244-11eb-2055-4551e4c10906
# ╟─94cbef4e-6348-11eb-030f-d7a9debdd305
# ╠═ebf4a45a-6244-11eb-0965-197f536f8e87
# ╟─230dd290-6303-11eb-0f55-311ef2b9541e
# ╟─36cc0492-6246-11eb-38dd-4f42fb7066dc
# ╟─ed4bfad2-63c6-11eb-0292-73c34e4d34a6
# ╟─285930b8-63c7-11eb-372b-edc4a4ed0d0a
# ╠═83c6d25c-6246-11eb-1a24-57e20f5e7262
# ╠═a1b2a4f8-6246-11eb-00ea-8f6042c72f4e
# ╠═b907e8fc-6246-11eb-0beb-bb44930d033c
# ╟─2d7a63cc-64db-11eb-0a4c-bb7771af8b14
# ╟─d08ab6d0-6246-11eb-08a8-152f9802cdfc
# ╠═dfc779ee-6246-11eb-240b-4dc7a7d95641
# ╠═e30d10d2-6246-11eb-1d59-332b5916712e
# ╠═e7e90744-6246-11eb-157c-cf67e8619d6e
# ╟─1aec9fc2-6247-11eb-2942-edc370918f9e
# ╠═8bdc61b0-6330-11eb-3e9a-15412fecf8af
# ╠═16d0ea9c-6247-11eb-12c6-1709f6d0ac99
# ╟─6bae2128-6303-11eb-34f2-1dfa96e46ae6
# ╠═287a7506-6247-11eb-2bad-0778802c00d5
# ╟─01d899e6-6305-11eb-017b-27bb2c104ef5
# ╟─221e09a2-6247-11eb-12a8-a13c0a2f96e7
# ╟─6851ebb2-6339-11eb-2ab7-39e07c4e3154
# ╠═e565d548-6247-11eb-2824-7521d4fa6b2b
# ╠═150f1dae-6248-11eb-276f-9bbf7eba58fd
# ╟─b8ed26f2-633b-11eb-380e-9379b0f4697f
# ╠═f4873fce-6249-11eb-0140-871354ca5430
# ╟─22f63a5e-633a-11eb-27c7-27fcabc7bc6f
# ╟─f3ea648e-633b-11eb-3444-317a4eb5b8ea
# ╠═5368c46e-633e-11eb-0d98-b1ccb37cc7f8
# ╟─e6efb632-6338-11eb-2e22-eb0b1ff577c4
# ╠═0381fbba-6248-11eb-3e80-b37137438531
# ╟─91273cd2-6248-11eb-245c-abb6269f916b
# ╠═653af7c6-6248-11eb-2a7b-fbf7550ef92b
# ╠═6aa3519a-6248-11eb-193d-a3537f7d3bd0
# ╟─51bca412-6340-11eb-3f38-8bfc8377715b
# ╟─e21b0f1c-633b-11eb-3609-9b9dae71c915
# ╟─f97bf1c0-6247-11eb-1acc-e30068a277d0
# ╟─97c8cd32-6340-11eb-1d6d-b7d364c0c987
# ╠═8d73b66c-624e-11eb-0a52-2309ef897b1c
# ╟─3e0a2e20-6341-11eb-3c23-a38b04c89b37
# ╠═0ee778d2-6341-11eb-10b0-7146fbbc71ff
# ╟─2338ef6a-630b-11eb-1837-431b567ad619
# ╠═965b578e-63a5-11eb-2cf4-690ec58e939d
# ╠═961ea26c-63a5-11eb-1227-4bcaf4778d82
# ╠═a30ded16-63a5-11eb-35f2-2b1ff724eb54
# ╟─e5a7eee2-63a5-11eb-0267-499409488b19
# ╠═de6124d2-63a5-11eb-1145-5b11f5f1c0f4
|
{"hexsha": "0f30c4844615d1f0da2231e205b48daa57c6051c", "size": 31852, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "notebooks/day2/04-flatland.jl", "max_stars_repo_name": "jpgmolina/DS-Julia2925", "max_stars_repo_head_hexsha": "4d96351afb72f4107fa12561a6a460dcd3c617e3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/day2/04-flatland.jl", "max_issues_repo_name": "jpgmolina/DS-Julia2925", "max_issues_repo_head_hexsha": "4d96351afb72f4107fa12561a6a460dcd3c617e3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/day2/04-flatland.jl", "max_forks_repo_name": "jpgmolina/DS-Julia2925", "max_forks_repo_head_hexsha": "4d96351afb72f4107fa12561a6a460dcd3c617e3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8799249531, "max_line_length": 274, "alphanum_fraction": 0.7095315836, "num_tokens": 12552}
|
# -*- coding: utf-8 -*-
import time
import numpy
from krypy.linsys import LinearSystem, Cg
from krypy.deflation import DeflatedCg
from krypy.utils import Arnoldi, ritz
from krypy.recycling import RecyclingCg
from krypy.recycling.factories import RitzFactorySimple
from numpy import random  # numpy's RNG provides rand(); scipy.random no longer exists
def find_deflation_subspace(A,b,k,ortho='dmgs',ritz_type='ritz'):
Ar = Arnoldi(A,b,ortho=ortho)
for i in range(1,k+1):
Ar.advance()
[V,H] = Ar.get()
[theta,U,resnorm,Z] = ritz(H,V,type=ritz_type)
return Z
def reuse_deflation_subspace(sol,ritz_type='ritz'):
[theta,U,resnorm,Z] = ritz(sol.H,sol.V,type=ritz_type)
return Z
cgt = []
dft = []
rct = []
for trial in range(1, 100):  # repeat the whole experiment to average the timings
matrixSize = 100
R = random.rand(matrixSize,matrixSize)
A = numpy.dot(R,R.transpose())
b=numpy.ones((matrixSize, 1))
k = 10
numSystems = 10
rank = 1 #rank of each system to add
Asys = [A]
for i in range(1,numSystems):
u = random.rand(matrixSize, rank)
Asys.append(Asys[i-1] + numpy.dot(u,u.T))
systems = []
for i in range(0,len(Asys)):
systems.append(LinearSystem(A=Asys[i],b=b,self_adjoint=True,positive_definite=True))
ts = time.time()
for i in range(0,len(Asys)):
cg_sol = Cg(systems[i],maxiter=1000)
te = time.time()
cgt.append((te-ts)*1000)
ts = time.time()
for i in range(0,len(Asys)):
U=find_deflation_subspace(Asys[i],b,k)
deflated_sol = DeflatedCg(systems[i],U=U,maxiter=1000)
te = time.time()
dft.append((te-ts)*1000)
vector_factory = RitzFactorySimple(n_vectors=k, which='sm')
ts = time.time()
recycler = RecyclingCg(vector_factory=vector_factory)
for i in range(0,len(Asys)):
recycled_sol = recycler.solve(systems[i],maxiter=1000)
te = time.time()
rct.append((te-ts)*1000)
print('Mean time taken for CG (ms):', sum(cgt)/len(cgt))
print('Mean time taken for Deflated CG (ms):', sum(dft)/len(dft))
print('Mean time taken for Recycled CG (ms):', sum(rct)/len(rct))
|
{"hexsha": "3b2534c0418b9126bf14031fac35d279d4d24036", "size": 2220, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiment1_meantime.py", "max_stars_repo_name": "mcsosa121/KSRFILS", "max_stars_repo_head_hexsha": "75995933771d8338de33cc9bbb5e9416e4242c6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiment1_meantime.py", "max_issues_repo_name": "mcsosa121/KSRFILS", "max_issues_repo_head_hexsha": "75995933771d8338de33cc9bbb5e9416e4242c6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment1_meantime.py", "max_forks_repo_name": "mcsosa121/KSRFILS", "max_forks_repo_head_hexsha": "75995933771d8338de33cc9bbb5e9416e4242c6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6470588235, "max_line_length": 93, "alphanum_fraction": 0.6441441441, "include": true, "reason": "import numpy,from scipy", "num_tokens": 675}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2019/8/29 11:20 PM
# @Author : baienyang
# @Email : baienyang@baidu.com
# @File : linear_regression.py
# @Software: PyCharm
"""
Copyright 2019 Baidu, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# Hyperparameters
INPUT_SIZE = 1
OUTPUT_SIZE = 1
EPOCHS = 60
LEARNING_RATE = 0.001
# Toy dataset
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
[9.779], [6.182], [7.59], [2.167], [7.042],
[10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
[3.366], [2.596], [2.53], [1.221], [2.827],
[3.465], [1.65], [2.904], [1.3]], dtype=np.float32)
linear_model = nn.Linear(INPUT_SIZE, OUTPUT_SIZE)
# Optimizer and loss function
optimizer = torch.optim.SGD(linear_model.parameters(), lr=LEARNING_RATE)
criterion = nn.MSELoss()
for epoch in range(EPOCHS):
inputs = torch.from_numpy(x_train)
labels = torch.from_numpy(y_train)
# forward pass
outputs = linear_model(inputs)
    loss = criterion(outputs, labels)  # (predictions, targets) argument order
# backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch + 1) % 5 == 0:
print('Epoch [{}/{}], Loss: {:.4f}'
.format(epoch + 1, EPOCHS, loss.item()))
# plot the model
predicted = linear_model(torch.from_numpy(x_train)).detach().numpy()
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()
# save model
torch.save(linear_model.state_dict(), "./model/linear_model.ckpt")
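# Hedged sketch (added; not in the original script): reloading the saved weights
# for inference requires an identically shaped module.
# model = nn.Linear(INPUT_SIZE, OUTPUT_SIZE)
# model.load_state_dict(torch.load("./model/linear_model.ckpt"))
# model.eval()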
|
{"hexsha": "9b7fb4de3a9ceb3eb8a5eb21d2aeddd6a50acae6", "size": 2219, "ext": "py", "lang": "Python", "max_stars_repo_path": "linear_regression.py", "max_stars_repo_name": "baiyyang/pytorch-simple-dnn-model", "max_stars_repo_head_hexsha": "49dc88b56d9a349c008e376d0cec5bf016723881", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "linear_regression.py", "max_issues_repo_name": "baiyyang/pytorch-simple-dnn-model", "max_issues_repo_head_hexsha": "49dc88b56d9a349c008e376d0cec5bf016723881", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linear_regression.py", "max_forks_repo_name": "baiyyang/pytorch-simple-dnn-model", "max_forks_repo_head_hexsha": "49dc88b56d9a349c008e376d0cec5bf016723881", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1594202899, "max_line_length": 115, "alphanum_fraction": 0.6529968454, "include": true, "reason": "import numpy", "num_tokens": 659}
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops.operations import _inner_ops as inner
class Net(nn.Cell):
def __init__(self, op, axis):
super(Net, self).__init__()
if op == "Cummin":
self.op = inner.Cummin(axis)
elif op == "Cummax":
self.op = ops.Cummax(axis)
else:
raise ValueError("op value error.")
def construct(self, x):
return self.op(x)
def cum_minmax_compare(op, x, expected, axis, data_type):
net = Net(op, axis)
x = np.array(x).astype(data_type)
expected = (np.array(expected[0]).astype(data_type), np.array(expected[1]).astype(data_type))
# Pynative
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
output = net(Tensor(x))
assert np.allclose(output[0].asnumpy(), expected[0], equal_nan=True)
assert np.allclose(output[1].asnumpy(), expected[1])
# Graph
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
output = net(Tensor(x))
assert np.allclose(output[0].asnumpy(), expected[0], equal_nan=True)
assert np.allclose(output[1].asnumpy(), expected[1])
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.uint8, np.int8, np.int32, np.float16, np.float32])
def test_cummin_multi_dims(data_type):
"""
    Feature: Op Cummin
    Description: test Cummin operator with multiple dimensions.
    Expectation: the result matches the expectation.
"""
op = "Cummin"
axis = 1
x = [[[14, 19, 18, 11, 6], [1, 4, 18, 6, 1], [15, 13, 12, 9, 19]],
[[16, 16, 17, 10, 15], [9, 7, 10, 9, 4], [6, 14, 16, 3, 2]],
[[1, 13, 15, 1, 6], [20, 6, 8, 19, 19], [3, 14, 20, 18, 19]],
[[20, 1, 14, 9, 3], [13, 11, 2, 17, 14], [0, 15, 13, 7, 10]]]
cummin_output = (
[[[14, 19, 18, 11, 6], [1, 4, 18, 6, 1], [1, 4, 12, 6, 1]],
[[16, 16, 17, 10, 15], [9, 7, 10, 9, 4], [6, 7, 10, 3, 2]],
[[1, 13, 15, 1, 6], [1, 6, 8, 1, 6], [1, 6, 8, 1, 6]], [[20, 1, 14, 9, 3], [13, 1, 2, 9, 3], [0, 1, 2, 7, 3]]],
[[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 2, 1, 1]], [[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 1, 1, 2, 2]],
[[0, 0, 0, 0, 0], [0, 1, 1, 0, 0], [0, 1, 1, 0, 0]], [[0, 0, 0, 0, 0], [1, 0, 1, 0, 0], [2, 0, 1, 2, 0]]])
cum_minmax_compare(op, x, cummin_output, axis, data_type)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.uint8, np.uint32, np.int8, np.int32, np.int64, np.float16, np.float32])
def test_cummax_multi_dims(data_type):
"""
    Feature: Op Cummax
    Description: test Cummax operator with multiple dimensions.
    Expectation: the result matches the expectation.
"""
op = "Cummax"
axis = 1
x = [[[11, 11, 1, 7, 11], [1, 8, 18, 0, 9], [12, 1, 16, 11, 8]],
[[18, 8, 10, 17, 14], [4, 20, 8, 20, 11], [14, 1, 8, 5, 16]],
[[6, 13, 19, 14, 8], [17, 19, 11, 0, 7], [18, 4, 13, 14, 16]],
[[10, 7, 7, 7, 19], [15, 0, 15, 5, 14], [9, 7, 10, 4, 14]]]
cummax_output = ([[[11, 11, 1, 7, 11], [11, 11, 18, 7, 11], [12, 11, 18, 11, 11]],
[[18, 8, 10, 17, 14], [18, 20, 10, 20, 14], [18, 20, 10, 20, 16]],
[[6, 13, 19, 14, 8], [17, 19, 19, 14, 8], [18, 19, 19, 14, 16]],
[[10, 7, 7, 7, 19], [15, 7, 15, 7, 19], [15, 7, 15, 7, 19]]],
[[[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [2, 0, 1, 2, 0]],
[[0, 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 2]],
[[0, 0, 0, 0, 0], [1, 1, 0, 0, 0], [2, 1, 0, 2, 2]],
[[0, 0, 0, 0, 0], [1, 0, 1, 0, 0], [1, 2, 1, 0, 0]]])
cum_minmax_compare(op, x, cummax_output, axis, data_type)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.float16, np.float32])
def test_cumminmax_nan(data_type):
"""
    Feature: Op Cummin/Cummax
    Description: test Cummin/Cummax operators with NaN input.
    Expectation: the result matches the expectation.
"""
inf = float('inf')
nan = float('nan')
axis = 0
x = [4, inf, 1.5, -inf, 0, nan, 1]
cummin_output = ([4, 4, 1.5, -inf, -inf, nan, nan], [0, 0, 2, 3, 3, 5, 5])
cummax_output = ([4, inf, inf, inf, inf, nan, nan], [0, 1, 1, 1, 1, 5, 5])
cum_minmax_compare("Cummin", x, cummin_output, axis, data_type)
cum_minmax_compare("Cummax", x, cummax_output, axis, data_type)
|
{"hexsha": "cd0fb53b6b289e7930404e291ee71c8df88df881", "size": 5328, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/st/ops/gpu/test_cum_minmax_op.py", "max_stars_repo_name": "httpsgithu/mindspore", "max_stars_repo_head_hexsha": "c29d6bb764e233b427319cb89ba79e420f1e2c64", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-23T09:13:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T09:13:43.000Z", "max_issues_repo_path": "tests/st/ops/gpu/test_cum_minmax_op.py", "max_issues_repo_name": "949144093/mindspore", "max_issues_repo_head_hexsha": "c29d6bb764e233b427319cb89ba79e420f1e2c64", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/st/ops/gpu/test_cum_minmax_op.py", "max_forks_repo_name": "949144093/mindspore", "max_forks_repo_head_hexsha": "c29d6bb764e233b427319cb89ba79e420f1e2c64", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9846153846, "max_line_length": 120, "alphanum_fraction": 0.5583708709, "include": true, "reason": "import numpy", "num_tokens": 2079}
|
# -*- coding: utf-8 -*-
"""Utility functions for running examples
"""
# Author: Yue Zhao <zhaoy@cmu.edu>
# License: BSD 2 clause
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle, islice
def visualize_clusters(model_name, X, predicted_labels, show_figure=True,
save_figure=False): # pragma: no cover
"""Utility function for visualizing the results in examples.
Internal use only.
Parameters
----------
model_name : str
The name of the clustering method.
X : numpy array of shape (n_samples, n_features)
The input samples.
    predicted_labels : numpy array of shape (n_samples,)
        The predicted labels of the input samples.
show_figure : bool, optional (default=True)
If set to True, show the figure.
save_figure : bool, optional (default=False)
        If set to True, save the figure to local storage.
"""
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(predicted_labels) + 1))))
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[predicted_labels])
plt.title(model_name)
plt.xticks(())
plt.yticks(())
if save_figure:
plt.savefig('{clf_name}.png'.format(clf_name=model_name), dpi=300)
if show_figure:
plt.show()
|
{"hexsha": "b749204ee44a18c4fb8e2c915d8cc444feec0173", "size": 1509, "ext": "py", "lang": "Python", "max_stars_repo_path": "combo/utils/example.py", "max_stars_repo_name": "vishalbelsare/combo", "max_stars_repo_head_hexsha": "229d578de498b47ae03cf2580472aceebf8c2766", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 611, "max_stars_repo_stars_event_min_datetime": "2019-07-14T14:54:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T08:34:48.000Z", "max_issues_repo_path": "combo/utils/example.py", "max_issues_repo_name": "chrinide/combo-1", "max_issues_repo_head_hexsha": "0c0539c9b116dd35763c89d97edb6b568e98abbf", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2019-08-01T05:41:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T22:55:45.000Z", "max_forks_repo_path": "combo/utils/example.py", "max_forks_repo_name": "chrinide/combo-1", "max_forks_repo_head_hexsha": "0c0539c9b116dd35763c89d97edb6b568e98abbf", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 107, "max_forks_repo_forks_event_min_datetime": "2019-07-16T16:01:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T18:52:10.000Z", "avg_line_length": 29.5882352941, "max_line_length": 75, "alphanum_fraction": 0.5957587806, "include": true, "reason": "import numpy", "num_tokens": 357}
|
import numpy as np
import torch
def mixup_data(x, y, alpha=0.2):
"""Returns mixed up inputs pairs of targets and lambda"""
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size(0)
index = torch.randperm(batch_size)
index = index.to(x.device)
lam = max(lam, 1 - lam)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a = y
y_b = y[index, :]
return mixed_x, y_a, y_b, lam
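# Hedged usage sketch (added; not part of the original file): the standard mixup
# training step weights the loss against both targets with the same lambda that
# was used to mix the inputs; `model` and `criterion` are assumed to exist.
# mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=0.2)
# preds = model(mixed_x)
# loss = lam * criterion(preds, y_a) + (1 - lam) * criterion(preds, y_b)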
|
{"hexsha": "9bc51cc2e538173d91ed556dec0fca29e6efd404", "size": 462, "ext": "py", "lang": "Python", "max_stars_repo_path": "project_lightning/utils/Mixup.py", "max_stars_repo_name": "pprp/mixed_precision_imagenet_benchmark", "max_stars_repo_head_hexsha": "08dc2abbb6067b569ed394a849eb830deaf91429", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-02T17:52:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T06:19:51.000Z", "max_issues_repo_path": "project_lightning/utils/Mixup.py", "max_issues_repo_name": "pprp/mixed_precision_imagenet_benchmark", "max_issues_repo_head_hexsha": "08dc2abbb6067b569ed394a849eb830deaf91429", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project_lightning/utils/Mixup.py", "max_forks_repo_name": "pprp/mixed_precision_imagenet_benchmark", "max_forks_repo_head_hexsha": "08dc2abbb6067b569ed394a849eb830deaf91429", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.25, "max_line_length": 61, "alphanum_fraction": 0.5714285714, "include": true, "reason": "import numpy", "num_tokens": 145}
|
#TODO: Write more tests
#To run tests, load the Space module first (/src/Spaces/Space.jl)
using Test
abstract type AbstractSpace end
include("box.jl")
include("dict-space.jl")
include("multi-binary.jl")
include("multi-discrete.jl")
include("tuple-space.jl")
include("discrete.jl")
test_case1 = (
Discrete(3),
TupleSpace((Discrete(5), Discrete(10))),
TupleSpace((Discrete(5), Box([0, 0], [1, 5], Float32))),
TupleSpace((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
DictSpace(Dict("position" => Discrete(5),
"velocity" => Box([0, 0], [1, 5], Float32)))
)
test_case2 = (
(Discrete(3), Discrete(4)),
(MultiDiscrete([2, 2, 100]), MultiDiscrete([2, 2, 8])),
(MultiBinary(8), MultiBinary(7)),
(Box([-10, 0], [10, 10], Float32), Box([-10, 0], [10, 9], Float32)),
(TupleSpace([Discrete(5), Discrete(10)]), TupleSpace([Discrete(1), Discrete(10)])),
(DictSpace(Dict("position" => Discrete(5))), DictSpace(Dict("position" => Discrete(4)))),
(DictSpace(Dict("position" => Discrete(5))), DictSpace(Dict("speed" => Discrete(5)))),
)
@testset "samples are in the same space" begin
@testset "$space" for space in test_case1
sample_1 = sample(space)
sample_2 = sample(space)
@test contains(space, sample_1)
@test contains(space, sample_2)
end
end
#=
@testset "test equality" begin
@testset "$space" for space in test_case1
space1 = space
space2 = copy(space)
@test space1 == space2
end
end
=#
@testset "test inequality" begin
@testset "$spaces" for spaces in test_case2
space1, space2 = spaces
@test space1 != space2
end
end
|
{"hexsha": "041210b760fe51ce5e26c1bf2e28c23cd15da529", "size": 1704, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Spaces/tests/runtests.jl", "max_stars_repo_name": "dhairyagandhi96/Gym.jl", "max_stars_repo_head_hexsha": "bcef0355fd9ba32a4ae0dc212694446b942990e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Spaces/tests/runtests.jl", "max_issues_repo_name": "dhairyagandhi96/Gym.jl", "max_issues_repo_head_hexsha": "bcef0355fd9ba32a4ae0dc212694446b942990e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Spaces/tests/runtests.jl", "max_forks_repo_name": "dhairyagandhi96/Gym.jl", "max_forks_repo_head_hexsha": "bcef0355fd9ba32a4ae0dc212694446b942990e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3793103448, "max_line_length": 93, "alphanum_fraction": 0.6220657277, "num_tokens": 538}
|
[Pythonista.io](https://pythonista.io)
# Introduction to ```sympy```.
The [sympy](https://www.sympy.org/en/index.html) project comprises a library of tools for performing symbolic mathematics.
In this sense, some of its components can be used to perform operations that return symbolic representations instead of numeric values.
```python
!pip install sympy
```
```python
import sympy
```
## The *sympy.symbols()* function.
This function creates objects of the *sympy.core.symbol.Symbol* class, which can be used as algebraic symbols.
```
sympy.symbols('<symbol>')
```
```python
x = sympy.symbols('x')
```
```python
type(x)
```
```python
x + 1
```
```python
2/3 + x
```
```python
x ** 2
```
```python
x ** (1/2)
```
## The *sympy.Rational()* function.
This function creates an exact rational number from an integer numerator and denominator.
```python
sympy.Rational(2, 3)
```
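`sympy.Rational` values stay exact when combined with each other or with symbols, unlike floating-point division (example added for clarity):
```python
sympy.Rational(1, 2) + sympy.Rational(1, 3)  # exactly 5/6, not 0.8333...
```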
```python
x, y, z = sympy.symbols("x, y, z")
```
```python
f = sympy.Function("f")
```
```python
f(x)
```
```python
f = sympy.Function('f')(x)
```
```python
f
```
```python
expr = x**4 + x**3 + x**2 + x + 1
```
```python
expr
```
```python
expr.diff()
```
```python
expr.integrate()
```
```python
expresion = x + sympy.sin(x)
```
```python
expresion
```
```python
expresion.integrate(x, x)
```
```python
expresion.diff(x, x, x)
```
```python
expr.diff(x)
```
<p style="text-align: center">This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.</p>
<p style="text-align: center">© José Luis Chiquete Valdivieso. 2019.</p>
|
{"hexsha": "b269f0d194bfce9315d3009937ce9024cb20fa14", "size": 5798, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "15_introduccion_a_sympy.ipynb", "max_stars_repo_name": "PythonistaMX/py301", "max_stars_repo_head_hexsha": "8831a0a0864d69b3ac6dc1a547c1e5066a124cde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-05-14T18:23:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T13:34:16.000Z", "max_issues_repo_path": "15_introduccion_a_sympy.ipynb", "max_issues_repo_name": "PythonistaMX/py301", "max_issues_repo_head_hexsha": "8831a0a0864d69b3ac6dc1a547c1e5066a124cde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "15_introduccion_a_sympy.ipynb", "max_forks_repo_name": "PythonistaMX/py301", "max_forks_repo_head_hexsha": "8831a0a0864d69b3ac6dc1a547c1e5066a124cde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-12-25T23:09:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-13T04:49:52.000Z", "avg_line_length": 19.9243986254, "max_line_length": 406, "alphanum_fraction": 0.4815453605, "converted": true, "num_tokens": 546}
|
! This is a single line comment.
|
{"hexsha": "a2e4b507e551553e958eaa814788b42e413779f3", "size": 33, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "fortran/comments.f90", "max_stars_repo_name": "smenjas/programming-languages-compared", "max_stars_repo_head_hexsha": "d9e6a9034c969c8bcf6de219f1d4d6f87d1aa39e", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fortran/comments.f90", "max_issues_repo_name": "smenjas/programming-languages-compared", "max_issues_repo_head_hexsha": "d9e6a9034c969c8bcf6de219f1d4d6f87d1aa39e", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fortran/comments.f90", "max_forks_repo_name": "smenjas/programming-languages-compared", "max_forks_repo_head_hexsha": "d9e6a9034c969c8bcf6de219f1d4d6f87d1aa39e", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.5, "max_line_length": 32, "alphanum_fraction": 0.7272727273, "num_tokens": 8}
|
#TODO: convert two function calls into Union{COOTen,ThirdOrderSymTensor}
#=------------------------------------------------------------------------------
Routines for searching over alpha/beta parameters
------------------------------------------------------------------------------=#
"""------------------------------------------------------------------------------
This function aligns graphs using their tensor representations. These routines
call the param_search functions for the associated method used.
Inputs
------
* A, B - (ThirdOrderSymTensor):
Two third order tensors representing the presence of triangles within the
network. A must be larger than B, else the routines will be called with the
parameters swapped.
* method - (String):
    The choice of method used to align the networks. Options include 'LambdaTAME',
'LowRankTAME', and 'TAME'.
Outputs
-------
* 'best_TAME_PP_tris' - (Int):
The largest number of triangles matched over all iterations.
* 'max_triangle_match' - (Int):
The maximum number of triangles matchable. This is the minimum between the
number of triangles in graphs A and B.
* 'best_TAME_PP_x' - (Array{Float,2}):
The best iterate found over all the alphas and betas specified by the user.
    When 'LambdaTAME' or 'LowRankTAME' is called, this is replaced by the U
and V components of the best iterate.
-----------------------------------------------------------------------------"""
function align_tensors(A::ThirdOrderSymTensor, B::ThirdOrderSymTensor;
method::String="LambdaTAME",no_matching=false,kwargs...)
#put larger tensor on the left
if B.n > A.n
results = align_tensors(B,A;method = method, no_matching=no_matching,kwargs...)
#flip the matchings if A and B were swapped
if method == "LambdaTAME" || method == "LowRankTAME"
best_TAME_PP_tris, max_triangle_match, U_best, V_best, best_matching = results
return best_TAME_PP_tris, max_triangle_match, U_best, V_best, Dict((j,i) for (i,j) in best_matching)
elseif method == "TAME"
best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, best_matching = results
return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, Dict((j,i) for (i,j) in best_matching)
end
end
if method == "LambdaTAME"
return ΛTAME_param_search(A,B;kwargs...)
elseif method == "LowRankTAME"
return LowRankTAME_param_search(A,B;no_matching = no_matching,kwargs...)
elseif method == "TAME"
return TAME_param_search(A,B;no_matching = no_matching,kwargs...)
else
throw(ArgumentError("method must be one of 'LambdaTAME', 'LowRankTAME',or 'TAME'."))
end
end
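# Hedged usage sketch (added; not from the original source). It assumes `A` and
# `B` are ThirdOrderSymTensor objects already built from each network's triangles:
#
# matched, max_match, U, V, mapping =
#     align_tensors(A, B; method="LambdaTAME", alphas=[0.5, 1.0], betas=[0.0, 1.0, 100.0])
# println("matched $matched of $max_match triangles")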
"""------------------------------------------------------------------------------
This function aligns graphs using their tensor representations and returns the
profiled version of the algorithms. These routines call the 'param_search'
functions for the associated method used.
Inputs
------
* A, B - (ThirdOrderSymTensor):
Two third order tensors representing the presence of triangles within the
network. A must be larger than B, else the routines will be called with the
parameters swapped.
* method - (String):
    The choice of method used to align the networks. Options include 'LambdaTAME',
'LowRankTAME', and 'TAME'.
* 'no_matching' - (Bool):
Will not run the matching routines when True. This is useful when studying
the ranks of the iterates. Any counts which may rely on the matchings are
replaced by -1.
Outputs
-------
* 'best_TAME_PP_tris' - (Int):
The largest number of triangles matched over all iterations.
* 'max_triangle_match' - (Int):
The maximum number of triangles matchable. This is the minimum between the
number of triangles in graphs A and B.
* 'best_TAME_PP_x' - (Array{Float,2}):
The best iterate found over all the alphas and betas specified by the user.
    When 'LambdaTAME' or 'LowRankTAME' is called, this is replaced by the U
and V components of the best iterate.
* 'best_matching' - (Dict{Int,Int}):
The best matching found, maps from A to B. When the method flips A and B,
the dictionary is also flipped when returned.
* profile - (Dict):
A dictionary storing the profiling results from each of the methods. Please
see the '_profiled' versions of the code to see what is returned by each
function.
------------------------------------------------------------------------------"""
function align_tensors_profiled(A::ThirdOrderSymTensor, B::ThirdOrderSymTensor;
method::String="LambdaTAME",no_matching=false,kwargs...)
#put larger tensor on the left
if B.n > A.n
results = align_tensors_profiled(B,A;method = method, no_matching=no_matching,kwargs...)
#flip the matchings if A and B were swapped
if method == "LambdaTAME" || method == "LowRankTAME"
best_TAME_PP_tris, max_triangle_match, U_best, V_best, best_matching,profile = results
return best_TAME_PP_tris, max_triangle_match, U_best, V_best, Dict((j,i) for (i,j) in best_matching), profile
elseif method == "TAME"
best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, best_matching,profile = results
return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, Dict((j,i) for (i,j) in best_matching), profile
end
end
if method == "LambdaTAME"
return ΛTAME_param_search_profiled(A,B;kwargs...)
elseif method == "LowRankTAME"
return LowRankTAME_param_search_profiled(A,B;no_matching = no_matching,kwargs...)
elseif method == "TAME"
return TAME_param_search_profiled(A,B;no_matching = no_matching,kwargs...)
else
throw(ArgumentError("method must be one of 'LambdaTAME', 'LowRankTAME', or 'TAME'."))
end
end
function ΛTAME_param_search(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor;
iter::Int = 15,tol::Float64=1e-6,
alphas::Array{F,1}=[.5,1.0],
betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001]) where {F <: AbstractFloat}
max_triangle_match = min(size(A.indices,1),size(B.indices,1))
total_triangles = size(A.indices,1) + size(B.indices,1)
    U = Array{Float64,2}(undef,A.n,iter)
    V = Array{Float64,2}(undef,B.n,iter)
    best_U = U
    best_V = V
best_TAME_PP_tris = -1
best_i = -1
best_j = -1
best_matching = Dict{Int,Int}()
for α in alphas
for beta in betas
U,V = ΛTAME(A,B,beta,iter,tol,α)
search_tris, i, j, matching = search_Krylov_space(A,B,U,V)
println("α:$(α) -- β:$(beta) finished -- tri_match:$search_tris -- max_tris $(max_triangle_match) -- best tri_match:$best_TAME_PP_tris")
            if search_tris > best_TAME_PP_tris
                best_TAME_PP_tris = search_tris
                best_i = i
                best_j = j
                best_matching = matching
                best_U = U  # keep the iterates from the best (α, β) combination
                best_V = V
end
end
end
println("best i:$best_i -- best j:$best_j")
    return best_TAME_PP_tris, max_triangle_match, best_U[:,best_i], best_V[:,best_j], best_matching
end
function ΛTAME_param_search_profiled(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor;
iter::Int = 15,tol::Float64=1e-6, alphas::Array{F,1}=[.5,1.0],
betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001]) where {F <: AbstractFloat}
max_triangle_match = min(size(A.indices,1),size(B.indices,1))
best_TAME_PP_tris = -1
best_i = -1
best_j = -1
best_matching = Dict{Int,Int}()
m = A.n
n = B.n
results = Dict(
"TAME_timings" => Array{Float64,1}(undef,length(alphas)*length(betas)),
"Krylov Timings"=> Array{Float64,1}(undef,length(alphas)*length(betas))
)
exp_index = 1
    U = Array{Float64,2}(undef,m,iter)
    V = Array{Float64,2}(undef,n,iter)
    best_U = U
    best_V = V
for α in alphas
for beta in betas
((U,V),runtime) = @timed ΛTAME(A,B,beta,iter,tol,α)
results["TAME_timings"][exp_index] = runtime
#search the Krylov Subspace
((search_tris, i, j, matching),runtime) = @timed search_Krylov_space(A,B,U,V)
results["Krylov Timings"][exp_index] = runtime
exp_index += 1
            if search_tris > best_TAME_PP_tris
                best_matching = matching
                best_TAME_PP_tris = search_tris
                best_i = i
                best_j = j
                best_U = U  # keep the iterates from the best (α, β) combination
                best_V = V
end
println("α:$(α) -- β:$(beta) finished -- tri_match:$search_tris -- max_tris $(max_triangle_match) -- best tri_match: $best_TAME_PP_tris")
end
end
println("best i:$best_i -- best j:$best_j")
    return best_TAME_PP_tris, max_triangle_match, best_U[:,best_i], best_V[:,best_j], best_matching, results
end
#add in SparseSymmetricTensors.jl function definitions
function TAME_param_search(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor;
iter::Int = 15,tol::Float64=1e-6, alphas::Array{F,1}=[.5,1.0],
betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001],
kwargs...) where {F <: AbstractFloat}
max_triangle_match = min(size(A.indices,1),size(B.indices,1))
total_triangles = size(A.indices,1) + size(B.indices,1)
best_TAME_PP_tris::Int = -1
best_matching = Dict{Int,Int}()
m = A.n
n = B.n
best_TAME_PP_x = Array{Float64,2}(undef,m,n)
for α in alphas
for β in betas
x, triangle_count, matching = TAME(A,B,β,iter,tol,α;W = ones(A.n,B.n),kwargs...)
if triangle_count > best_TAME_PP_tris
best_matching = matching
best_TAME_PP_tris = triangle_count
best_TAME_PP_x = copy(x)
end
println("α:$(α) -- β:$β finished -- tri_match:$(triangle_count) -- max_tris $(max_triangle_match) -- best tri_match:$(best_TAME_PP_tris)")
end
end
return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, best_matching
end
function TAME_param_search_profiled(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor;
iter::Int = 15,tol::Float64=1e-6, alphas::Array{F,1}=[.5,1.0],
betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001],
profile::Bool=false,profile_aggregation="all",
kwargs...) where {F <: AbstractFloat}
max_triangle_match = min(size(A.indices,1),size(B.indices,1))
total_triangles = size(A.indices,1) + size(B.indices,1)
best_TAME_PP_tris = -1
best_matching = Dict{Int,Int}()
m = A.n
n = B.n
best_TAME_PP_x = Array{Float64,2}(undef,m,n)
experiment_profiles = Array{Tuple{String,Dict{String,Union{Array{Float64,1},Array{Array{Float64,1},1}}}},1}(undef,0)
for α in alphas
for β in betas
x, triangle_count, matching, experiment_profile = TAME_profiled(A,B,β,iter,tol,α;W = ones(m,n),kwargs...)
push!(experiment_profiles,("α:$(α)_β:$(β)",experiment_profile))
if triangle_count > best_TAME_PP_tris
best_matching = matching
best_TAME_PP_tris = triangle_count
best_TAME_PP_x = copy(x)
end
println("α:$(α) -- β:$β finished -- tri_match:$(best_TAME_PP_tris) -- max_tris $(max_triangle_match)")
end
end
return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, best_matching, experiment_profiles
end
function LowRankTAME_param_search(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor;
iter::Int = 15,tol::Float64=1e-6,
U_0::Array{Float64,2} = ones(A.n,1),
V_0::Array{Float64,2} = ones(B.n,1),
alphas::Array{F,1}=[.5,1.0],
betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001],
kwargs...) where {F <: AbstractFloat}
max_triangle_match = min(size(A.indices,1),size(B.indices,1))
total_triangles = size(A.indices,1) + size(B.indices,1)
best_TAME_PP_tris = -1
best_matching = Dict{Int,Int}()
m = A.n
n = B.n
best_TAME_PP_U = ones(m,1)
best_TAME_PP_V = ones(n,1)
for α in alphas
for β in betas
U, V, triangle_count,matching =
LowRankTAME(A,B,U_0,V_0,β,iter,tol,α;kwargs...)
if triangle_count > best_TAME_PP_tris
best_TAME_PP_tris = triangle_count
best_matching = matching
best_TAME_PP_U = copy(U)
best_TAME_PP_V = copy(V)
end
println("α:$(α) -- β:$(β) -- tri_match:$(triangle_count) -- max_tris:$(max_triangle_match) -- best tri match:$best_TAME_PP_tris")
end
end
return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_U, best_TAME_PP_V, best_matching
end
function LowRankTAME_param_search_profiled(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor;
iter::Int = 15,tol::Float64=1e-6,
alphas::Array{F,1}=[.5,1.0],
betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001],
kwargs...) where {F <: AbstractFloat}
max_triangle_match = min(size(A.indices,1),size(B.indices,1))
total_triangles = size(A.indices,1) + size(B.indices,1)
best_TAME_PP_tris = -1
best_matching = Dict{Int,Int}()
best_TAME_PP_U = ones(A.n,1)
best_TAME_PP_V = ones(B.n,1)
experiment_profiles = []
for α in alphas
for β in betas
U, V, triangle_count,matching, experiment_profile =
LowRankTAME_profiled(A,B,ones(A.n,1),ones(B.n,1), β,iter,tol,α;kwargs...)
push!(experiment_profiles,("α:$(α)_β:$(β)",experiment_profile))
if triangle_count > best_TAME_PP_tris
best_TAME_PP_tris = triangle_count
best_matching = matching
best_TAME_PP_U = copy(U)
best_TAME_PP_V = copy(V)
end
println("α:$(α) -- β:$(β) -- tri_match:$(best_TAME_PP_tris) -- max_tris $(max_triangle_match)")
end
end
return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_U, best_TAME_PP_V,best_matching, experiment_profiles
end
#=------------------------------------------------------------------------------
Spectral Relaxation Routines
------------------------------------------------------------------------------=#
"""------------------------------------------------------------------------------
The LambdaTAME method. The method starts with a uniform prior and runs the
(shifted) power method for each of the tensors passed in. These iterates are
stored, and the best rank-one matching is picked out of all pairs of the
computed vectors. This portion of the algorithm computes the contractions; the
full procedure can be found in the associated '_param_search' routines.
Inputs
------
* A,B - (ThirdOrderSymTensor):
The tensors to align against one another.
* β - (Float):
The shift to use on the iterations.
* α -(float):
The mixing parameter for combining in the starting iterates.
* 'max_iter' - (Int):
The maximum number of iterations to run.
* tol - (Float):
    The tolerance to which the algorithm is solved. Convergence is measured by
    the absolute difference between successive eigenvalue estimates.
* 'update_user' - (Int):
Specifies how frequently output messages should be printed to the user.
Default is -1 which means no output messages are printed, else if the value
is k, then the kth iterate will print out an update.
Output
------
* U, V - (Array{Float,2})
Returns the computed contractions started with a uniform prior.
------------------------------------------------------------------------------"""
function ΛTAME(A::ThirdOrderSymTensor, B::ThirdOrderSymTensor, β::Float64,
max_iter::Int,tol::Float64,α::Float64;update_user=-1)
U = zeros(A.n,max_iter+1)
V = zeros(B.n,max_iter+1) #store initial in first column
U[:,1] = ones(A.n)
U[:,1] /=norm(U[:,1])
V[:,1] = ones(B.n)
    V[:,1] /= norm(V[:,1])
sqrt_β = β^(.5)
lambda = Inf
i = 1
while true
U[:,i+1] = tensor_vector_contraction(A,U[:,i])
V[:,i+1] = tensor_vector_contraction(B,V[:,i])
lambda_A = (U[:,i+1]'*U[:,i])
lambda_B = (V[:,i+1]'*V[:,i])
new_lambda = lambda_A*lambda_B
if β != 0.0
            U[:,i+1] .+= sqrt_β*U[:,i]  # the shift term uses the previous iterate
            V[:,i+1] .+= sqrt_β*V[:,i]
end
if α != 1.0
U[:,i+1] = α*U[:,i+1] + (1 -α)*U[:,1]
V[:,i+1] = α*V[:,i+1] + (1 -α)*V[:,1]
end
U[:,i+1] ./= norm(U[:,i+1])
V[:,i+1] ./= norm(V[:,i+1])
if update_user != -1 && i % update_user == 0
println("iteration $(i) λ_A: $(lambda_A) -- λ_B: $(lambda_B) -- newλ: $(new_lambda)")
end
if abs(new_lambda - lambda) < tol || i >= max_iter
return U[:,1:i], V[:,1:i]
else
lambda = new_lambda
i += 1
end
end
end
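# Illustration (added; not part of the original file): the columns of U and V are
# the normalized power-method iterates for A and B; ΛTAME_param_search then scores
# every pair (U[:,i], V[:,j]) via search_Krylov_space to pick the best rank-one
# matching. A hedged call, assuming A and B are ThirdOrderSymTensors:
# U, V = ΛTAME(A, B, 1.0, 15, 1e-6, 0.5)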
#runs TAME, but reduces down to lowest rank form first
function LowRankTAME(A::ThirdOrderSymTensor, B::ThirdOrderSymTensor,W::Array{F,2},
β::F, max_iter::Int,tol::F,α::F;
max_rank::Int = minimum((A.n,B.n)),kwargs...) where {F <:AbstractFloat}
dimension = minimum((A.n,B.n))
    U_full, S, V_full = svd(W)  # svd(...) destructures to (U, S, V)
    singular_indexes = [i for i in 1:minimum((max_rank,length(S))) if S[i] > S[1]*eps(Float64)*dimension]
    U = U_full[:,singular_indexes]
    V = V_full[:,singular_indexes]*diagm(S[singular_indexes])
return LowRankTAME(A,B,U,V,β,max_iter,tol,α;kwargs...)
end
"""------------------------------------------------------------------------------
The low rank implementation of TAME. Computes the terms using the mixed-product
property of the Kronecker product, generalized to the tensor case.
Inputs
------
* A,B - (ThirdOrderSymTensor):
The tensors to align against one another.
* 'U_0', 'V_0' - (Array{Float,2}):
The low rank components of the starting iteration X = UV'. Iterates are
normalized before the iterations begin.
* β - (Float):
The shift to use on the iterations.
* α -(float):
The mixing parameter for combining in the starting iterates.
* 'max_iter' - (Int):
The maximum number of iterations to run.
* tol - (Float):
    The tolerance to which the algorithm is solved. Convergence is measured by
    the absolute difference between successive eigenvalue estimates.
* 'max_rank' - (Int):
Specify the maximum rank of each of the iterates. Default makes it so that
only singular values small enough to be considered zero are truncated.
* 'update_user' - (Int):
Specifies how frequently output messages should be printed to the user.
Default is -1 which means no output messages are printed, else if the value
is k, then the kth iterate will print out an update.
* 'no_matching' - (Bool):
Specifies whether or not to run the matching and scoring portions of the
algorithm. Useful if only the iterates are desired.
* 'low_rank_matching' - (Bool):
Specifies whether or not to run the low rank matching procedure from [1].
This is useful when speed is needed, but may lead to regressions in the
matching performance.
Output
------
* 'best_U', 'best_V' - (Array{Float,2})
Returns the components to the iteration which matched the most triangles.
* 'best_triangle_count' - (Int)
The maximum number of triangles matched.
* 'best_matching' - (Dict{Int,Int})
The matching computed between the two graphs, maps the vertices of A to the
vertices of B.
Citation
--------
[1] - H. Nassar, N. Veldt, S. Mohammadi, A. Grama, and D. F. Gleich,
“Low rank spectral network alignment,” in Proceedings of the 2018
World Wide Web Conference, 2018, pp. 619–628
------------------------------------------------------------------------------"""
function LowRankTAME(A::ThirdOrderSymTensor, B::ThirdOrderSymTensor,
U_0::Array{F,2},V_0::Array{F,2}, β::F, max_iter::Int,tol::F,α::F;
max_rank::Int = minimum((A.n,B.n)),update_user::Int=-1,
no_matching=false,low_rank_matching=false) where {F <:AbstractFloat}
@assert size(U_0,2) == size(V_0,2)
dimension = minimum((A.n,B.n))
best_triangle_count::Int = -1
best_matching = Dict{Int,Int}()
best_x = zeros(size(U_0,1),size(V_0,1))
best_index = -1
triangles = -1
gaped_triangles = -1
normalization_factor = sqrt(tr((V_0'*V_0)*(U_0'*U_0)))
U_0 ./= sqrt(normalization_factor)
V_0 ./= sqrt(normalization_factor)
U_k = copy(U_0)
V_k = copy(V_0)
best_U::Array{F,2} = copy(U_k)
    best_V::Array{F,2} = copy(V_k)
lambda = Inf
for i in 1:max_iter
A_comps, B_comps = get_kron_contract_comps(A,B,U_k,V_k)
lam = tr((B_comps'*V_k)*(U_k'*A_comps))
if α != 1.0 && β != 0.0
U_temp = hcat(sqrt(α) * A_comps, sqrt(α * β) * U_k, sqrt(1-α) * U_0)
V_temp = hcat(sqrt(α) * B_comps, sqrt(α * β) * V_k, sqrt(1-α) * V_0)
elseif α != 1.0
U_temp = hcat(sqrt(α)*A_comps, sqrt(1-α)*U_0)
V_temp = hcat(sqrt(α)*B_comps, sqrt(1-α)*V_0)
elseif β != 0.0
U_temp = hcat(A_comps, sqrt(β) * U_k)
V_temp = hcat(B_comps, sqrt(β) * V_k)
else
U_temp = A_comps
V_temp = B_comps
end
A_Q,A_R = qr(U_temp)
B_Q,B_R = qr(V_temp)
core = A_R*B_R'
C_U,C_S,C_Vt = svd(core)
singular_indexes= [i for i in 1:1:minimum((max_rank,length(C_S))) if C_S[i] > C_S[1]*eps(Float64)*dimension]
U_k_1 = A_Q*C_U[:,singular_indexes]
V_k_1 = B_Q*(C_Vt[:,singular_indexes]*diagm(C_S[singular_indexes]))
normalization_factor = sqrt(tr((V_k_1'*V_k_1)*(U_k_1'*U_k_1)))
U_k_1 ./= sqrt(normalization_factor)
V_k_1 ./= sqrt(normalization_factor)
#TODO: need to redo or remove this
Y, Z = get_kron_contract_comps(A,B,U_k_1,V_k_1)
lam = tr((Y'*U_k_1)*(V_k_1'*Z))
if !no_matching
#evaluate the matchings
if low_rank_matching
triangles, gaped_triangles,matching = TAME_score(A,B,U_k_1,V_k_1)
else
triangles, gaped_triangles,matching = TAME_score(A,B,U_k_1*V_k_1')
end
if triangles > best_triangle_count
best_matching = matching
best_triangle_count = triangles
best_U = copy(U_k_1)
best_V = copy(V_k_1)
end
end
if update_user != -1 && i % update_user == 0
println("λ_$i: $(lam) -- rank:$(length(singular_indexes)) -- tris:$(triangles) -- gaped_t:$(gaped_triangles)")
end
if abs(lam - lambda) < tol
return best_U, best_V, best_triangle_count, best_matching
end
#get the low rank factorization for the next one
U_k = copy(U_k_1)
V_k = copy(V_k_1)
lambda = lam
end
return best_U, best_V, best_triangle_count, best_matching
end
"""------------------------------------------------------------------------------
The low rank implementation of TAME. Computes the terms using the mixed-product
property of the Kronecker product, generalized to the tensor case.
Inputs
------
* A,B - (ThirdOrderSymTensor):
The tensors to align against one another.
* 'U_0', 'V_0' - (Array{Float,2}):
The low rank components of the starting iteration X = UV'. Iterates are
normalized before the iterations begin.
* β - (Float):
The shift to use on the iterations.
* α -(float):
The mixing parameter for combining in the starting iterates.
* 'max_iter' - (Int):
The maximum number of iterations to run.
* tol - (Float):
    The tolerance to which the algorithm is solved. Convergence is measured by
    the absolute difference between successive eigenvalue estimates.
* 'max_rank' - (Int):
Specify the maximum rank of each of the iterates. Default makes it so that
only singular values small enough to be considered zero are truncated.
* 'update_user' - (Int):
Specifies how frequently output messages should be printed to the user.
Default is -1 which means no output messages are printed, else if the value
is k, then the kth iterate will print out an update.
* 'no_matching' - (Bool):
Specifies whether or not to run the matching and scoring portions of the
algorithm. Useful if only the iterates are desired.
* 'low_rank_matching' - (Bool):
Specifies whether or not to run the low rank matching procedure from [1].
This is useful when speed is needed, but may lead to regressions in the
matching performance.
Output
------
* 'best_U', 'best_V' - (Array{Float,2})
Returns the components to the iteration which matched the most triangles.
* 'best_triangle_count' - (Int)
The maximum number of triangles matched.
* 'best_matching' - (Dict{Int,Int})
The matching computed between the two graphs, maps the vertices of A to the
vertices of B.
* 'experiment_profile' - (Dict{String,Union{Array{F,1},Array{Array{F,1},1}}}):
The experiment profile computed, keys for the experiment data collected are
as follows.
+'ranks' - The ranks of each iterate X_k.
+'contraction_timings' - Time taken to compute each contraction.
+'svd_timings' - Time taken to compute the svd.
+'qr_timings' - Time taken to compute the QR factorations.
+'matched_tris' - The number of triangles matched by each iterate.
+'sing_vals' - The singular values of each iterate X_k.
+'matching_timings' - Time taken to solve the matchings.
+'scoring_timings' - Time taken to score each matching.
Citation
--------
[1] - H. Nassar, N. Veldt, S. Mohammadi, A. Grama, and D. F. Gleich,
“Low rank spectral network alignment,” in Proceedings of the 2018
World Wide Web Conference, 2018, pp. 619–628
------------------------------------------------------------------------------"""
function LowRankTAME_profiled(A::ThirdOrderSymTensor, B::ThirdOrderSymTensor,
U_0::Array{F,2},V_0::Array{F,2}, β::F, max_iter::Int,tol::F,α::F;
max_rank::Int = minimum((A.n,B.n)),update_user::Int=-1,
no_matching::Bool=false,low_rank_matching::Bool=false) where {F <:AbstractFloat}
@assert size(U_0,2) == size(V_0,2)
dimension = minimum((A.n,B.n))
best_triangle_count::Int = -1
best_matching = Dict{Int,Int}()
best_x = zeros(size(U_0,1),size(V_0,1))
best_index = -1
triangles = -1
gaped_triangles = -1
normalization_factor = sqrt(tr((V_0'*V_0)*(U_0'*U_0)))
U_0 ./= sqrt(normalization_factor)
V_0 ./= sqrt(normalization_factor)
experiment_profile = Dict{String,Union{Array{F,1},Array{Array{F,1},1}}}(
"ranks"=>Array{Float64,1}(undef,0),
"contraction_timings"=>Array{Float64,1}(undef,0),
"svd_timings"=>Array{Float64,1}(undef,0),
"qr_timings"=>Array{Float64,1}(undef,0),
"matched_tris"=>Array{Float64,1}(undef,0),
"sing_vals"=>Array{Array{Float64,1},1}(undef,0),
"matching_timings"=>Array{Float64,1}(undef,0),
"scoring_timings"=>Array{Float64,1}(undef,0)
)
U_k = copy(U_0)
V_k = copy(V_0)
best_U::Array{F,2} = copy(U_k)
best_V::Array{F,2} = copy(V_k)
lambda = Inf
for i in 1:max_iter
(A_comps, B_comps),t = @timed get_kron_contract_comps(A,B,U_k,V_k)
push!(experiment_profile["contraction_timings"],t)
lambda_k_1 = tr((B_comps'*V_k)*(U_k'*A_comps))
if α != 1.0 && β != 0.0
U_temp = hcat(sqrt(α) * A_comps, sqrt(α * β) * U_k, sqrt(1-α) * U_0)
V_temp = hcat(sqrt(α) * B_comps, sqrt(α * β) * V_k, sqrt(1-α) * V_0)
elseif α != 1.0
U_temp = hcat(sqrt(α)*A_comps, sqrt(1-α)*U_0)
V_temp = hcat(sqrt(α)*B_comps, sqrt(1-α)*V_0)
elseif β != 0.0
U_temp = hcat(A_comps, sqrt(β) * U_k)
V_temp = hcat(B_comps, sqrt(β) * V_k)
else
U_temp = A_comps
V_temp = B_comps
end
(A_Q,A_R),t_A = @timed qr(U_temp)
(B_Q,B_R),t_B = @timed qr(V_temp)
push!(experiment_profile["qr_timings"],t_A + t_B)
core = A_R*B_R'
(C_U,C_S::Array{Float64,1},C_Vt),t = @timed svd(core)
push!(experiment_profile["svd_timings"],t)
singular_indexes= [i for i in 1:1:minimum((max_rank,length(C_S))) if C_S[i] > C_S[1]*eps(Float64)*dimension]
push!(experiment_profile["sing_vals"],C_S)
push!(experiment_profile["ranks"],float(length(singular_indexes)))
U_k_1 = A_Q*C_U[:,singular_indexes]
V_k_1 = B_Q*(C_Vt[:,singular_indexes]*diagm(C_S[singular_indexes]))
normalization_factor = sqrt(tr((V_k_1'*V_k_1)*(U_k_1'*U_k_1)))
U_k_1 ./= sqrt(normalization_factor)
V_k_1 ./= sqrt(normalization_factor)
if !no_matching
#evaluate the matchings
if low_rank_matching
triangles, gaped_tris, matching, matching_time, scoring_time = TAME_score(A,B,U_k_1,V_k_1;return_timings=true)
else
triangles, gaped_tris, matching, matching_time, scoring_time = TAME_score(A,B,U_k_1*V_k_1';return_timings=true)
end
push!(experiment_profile["matched_tris"],float(triangles))
push!(experiment_profile["matching_timings"],float(matching_time))
push!(experiment_profile["scoring_timings"], float(scoring_time))
if triangles > best_triangle_count
best_matching = matching
best_triangle_count = triangles
best_U = copy(U_k_1)
best_V = copy(V_k_1)
end
end
if update_user != -1 && i % update_user == 0
println("λ_$i: $(lambda_k_1) -- rank:$(length(singular_indexes)) -- tris:$(triangles) -- gaped_t:$(gaped_tris)")
end
if abs(lambda_k_1 - lambda) < tol || i >= max_iter
#=
triangles,_= TAME_score(A,B,sparse(best_U*best_V');return_timings=false)
if triangles > best_triangle_count
best_triangle_count = triangles
end
=#
return best_U, best_V, best_triangle_count,best_matching, experiment_profile
end
#get the low rank factorization for the next one
U_k = copy(U_k_1)
V_k = copy(V_k_1)
lambda = lambda_k_1
end
return best_U, best_V, best_triangle_count,best_matching, experiment_profile
end
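# A minimal, hypothetical usage sketch for LowRankTAME_profiled (an addition, not
# part of the original API). A and B are assumed to be ThirdOrderSymTensor objects
# built elsewhere; the shift/mixing/iteration parameters below are illustrative only.
function example_LowRankTAME_profiled(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor)
    U_0 = ones(A.n,1) # rank-1 starting iterate X = U_0*V_0'
    V_0 = ones(B.n,1)
    U, V, tris, matching, profile =
        LowRankTAME_profiled(A,B,U_0,V_0,1.0,30,1e-6,0.5;max_rank=10,update_user=5)
    n_iters = length(profile["ranks"])
    println("matched triangles: $tris over $n_iters profiled iterations")
    return U, V, tris, matching, profile
end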
function setup_tame_data(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor)
return _index_triangles_nodesym(A.n,A.indices), _index_triangles_nodesym(B.n,B.indices)
end
function _index_triangles_nodesym(n,Tris::Array{Int,2})
Ti = [ Vector{Tuple{Int,Int}}(undef, 0) for i in 1:n ]
for (ti,tj,tk) in eachrow(Tris)
push!(Ti[ti], (tj,tk))
push!(Ti[tj], (ti,tk))
push!(Ti[tk], (ti,tj))
end
sort!.(Ti)
return Ti
end
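# Worked example (added note) for _index_triangles_nodesym: for a single triangle
# (1,2,3) on n = 3 nodes, _index_triangles_nodesym(3, [1 2 3]) returns Ti with
# Ti[1] == [(2,3)], Ti[2] == [(1,3)], Ti[3] == [(1,2)]: each node indexes the
# (sorted) vertex pairs that complete its triangles.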
"""------------------------------------------------------------------------------
An implementation of TAME. For our experiments we use the original C++ code,
but we include these routines for any additional experimentation desired.
Inputs
------
* A,B - (ThirdOrderSymTensor):
The tensors to align against one another.
* W - (Array{Float,2}):
The starting iterate; it is normalized before the iterations begin.
* β - (Float):
The shift to use on the iterations.
* α - (Float):
The mixing parameter for combining in the starting iterates.
* 'max_iter' - (Int):
The maximum number of iterations to run.
* tol - (Float):
The tolerance to solve the algorithm to. Convergence is measured by the
absolute value of the difference between successive computed eigenvalues.
* 'update_user' - (Int):
Specifies how frequently output messages should be printed to the user.
Default is -1, which means no output messages are printed; otherwise, if the
value is k, every kth iterate prints an update.
* 'no_matching' - (Bool):
Specifies whether or not to run the matching and scoring portions of the
algorithm. Useful if only the iterates are desired.
Output
------
* 'best_x'- (Array{Float,2})
Returns the components to the iteration which matched the most triangles.
Reshapes the iterate x into a matrix.
* 'best_triangle_count' - (Int)
The maximum number of triangles matched.
* 'best_matching' - (Dict{Int,Int})
The matching computed between the two graphs, maps the vertices of A to the
vertices of B.
------------------------------------------------------------------------------"""
function TAME(A::ThirdOrderSymTensor, B::ThirdOrderSymTensor,β::F, max_iter::Int,
tol::F,α::F;update_user::Int=-1,W::Array{F,2}=ones(A.n,B.n),
no_matching=false,) where {F <:AbstractFloat}
dimension = minimum((A.n,B.n))
A_Ti, B_Ti = setup_tame_data(A,B)
best_x = Array{Float64,2}(undef,A.n,B.n)
best_triangle_count = -1
best_index = -1
best_matching = Dict{Int,Int}()
x0 = reshape(W,A.n*B.n)
x0 ./=norm(x0)
x_k = copy(x0)
i = 1
lambda = Inf
while true
x_k_1 = impTTVnodesym(A.n, B.n, x_k, A_Ti, B_Ti)
#x_k_1 = implicit_contraction(A,B,x_k)
#println("norm diff is:",norm(x_k_1_test - x_k_1)/norm(x_k_1_test))
new_lambda = dot(x_k_1,x_k)
if β != 0.0
x_k_1 .+= β * x_k
end
if α != 1.0
x_k_1 = α * x_k_1 + (1 - α) * x0
end
x_k_1 ./= norm(x_k_1)
if !no_matching
X = reshape(x_k_1,A.n,B.n)
triangles, gaped_triangles, matching = TAME_score(A,B,X)
if update_user != -1 && i % update_user == 0
println("finished iterate $(i):tris:$triangles -- gaped_t:$gaped_triangles")
end
if triangles > best_triangle_count
best_matching = matching
best_x = copy(x_k_1)
best_triangle_count = triangles
best_iterate = i
end
end
if update_user != -1 && i % update_user == 0
println("λ: $(new_lambda)")
end
if abs(new_lambda - lambda) < tol || i >= max_iter
return reshape(best_x,A.n,B.n), best_triangle_count, best_matching
else
x_k = copy(x_k_1)
lambda = new_lambda
i += 1
end
end
end
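# A minimal, hypothetical usage sketch for TAME (an addition, not part of the
# original API). A and B are assumed to be ThirdOrderSymTensor objects built
# elsewhere; the shift/mixing/iteration parameters are illustrative only.
function example_TAME(A::ThirdOrderSymTensor,B::ThirdOrderSymTensor)
    X, tris, matching = TAME(A,B,1.0,30,1e-6,0.5;update_user=5)
    println("matched triangles: $tris")
    return X, tris, matching
end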
"""------------------------------------------------------------------------------
The profiled implementation of TAME; identical in iteration to the TAME routine
above, but additionally records timings and iterate statistics in an experiment
profile.
Inputs
------
* A,B - (ThirdOrderSymTensor):
The tensors to align against one another.
* W - (Array{Float,2}):
The starting iterate; it is normalized before the iterations begin.
* β - (Float):
The shift to use on the iterations.
* α - (Float):
The mixing parameter for combining in the starting iterates.
* 'max_iter' - (Int):
The maximum number of iterations to run.
* tol - (Float):
The tolerance to solve the algorithm to. Convergence is measured by the
absolute value of the difference between successive computed eigenvalues.
* 'update_user' - (Int):
Specifies how frequently output messages should be printed to the user.
Default is -1, which means no output messages are printed; otherwise, if the
value is k, every kth iterate prints an update.
* 'no_matching' - (Bool):
Specifies whether or not to run the matching and scoring portions of the
algorithm. Useful if only the iterates or profiling is desired.
Output
------
* 'best_x'- (Array{Float,2})
Returns the components to the iteration which matched the most triangles.
Reshapes the iterate x into a matrix.
* 'best_triangle_count' - (Int)
The maximum number of triangles matched.
* 'best_matching' - (Dict{Int,Int})
The matching computed between the two graphs, maps the vertices of A to the
vertices of B.
* 'experiment_profile' - (Dict{String,Union{Array{F,1},Array{Array{F,1}}}}):
The experiment profile computed, keys for the experiment data collected are
as follows.
+'contraction_timings' - Time taken to compute each contraction.
+'matched triangles' - The number of triangles matched by each iterate.
+'gaped triangles' - The number of unmatched triangles, equal to the
maximum minus the matched triangles.
+'sing_vals' - The singular values of each iterate X_k.
+'ranks' - The ranks of each iterate X_k.
+'matching_timings' - Time taken to solve the matchings.
+'scoring_timings' - Time taken to score each matching.
------------------------------------------------------------------------------"""
function TAME_profiled(A::ThirdOrderSymTensor, B::ThirdOrderSymTensor, β::F, max_iter::Int,
tol::F,α::F;update_user::Int=-1,W::Array{F,2} = ones(A.n,B.n),
no_matching::Bool=false) where {F <:AbstractFloat}
dimension = minimum((A.n,B.n))
experiment_profile = Dict{String,Union{Array{F,1},Array{Array{F,1}}}}(
"contraction_timings"=>Array{F,1}(undef,0),
"matching_timings"=>Array{F,1}(undef,0),
"scoring_timings"=>Array{F,1}(undef,0),
"matched triangles"=>Array{F,1}(undef,0),
"gaped triangles"=>Array{F,1}(undef,0),
"sing_vals"=>Array{Array{F,1},1}(undef,0),
"ranks"=>Array{F,1}(undef,0)
)
A_Ti, B_Ti = setup_tame_data(A,B)
best_x = Array{Float64,2}(undef,A.n,B.n)
best_triangle_count::Int = -1
best_index = -1
best_matching = Dict{Int,Int}()
x0 = reshape(W,A.n*B.n)
x0 ./=norm(x0)
x_k = copy(x0)
i = 1
lambda = Inf
while true
x_k_1,t = @timed impTTVnodesym(A.n, B.n, x_k, A_Ti, B_Ti)
push!(experiment_profile["contraction_timings"],t)
new_lambda = dot(x_k_1,x_k)
if β != 0.0
x_k_1 .+= β * x_k
end
if α != 1.0
x_k_1 = α * x_k_1 + (1 - α) * x0
end
x_k_1 ./= norm(x_k_1)
S = svdvals(reshape(x_k_1,(A.n,B.n)))
rank = 0.0
for j in 1:length(S) # j rather than i, to avoid shadowing the outer iteration counter
if S[j] > S[1]*eps(Float64)*dimension
rank = rank + 1
end
end
push!(experiment_profile["sing_vals"],S)
push!(experiment_profile["ranks"],rank)
if !no_matching
triangles, gaped_triangles, matching, matching_time, scoring_time = TAME_score(A,B,reshape(x_k_1,A.n,B.n);return_timings=true)
push!(experiment_profile["matching_timings"],matching_time)
push!(experiment_profile["scoring_timings"],scoring_time)
push!(experiment_profile["matched triangles"],float(triangles))
push!(experiment_profile["gaped triangles"],float(gaped_triangles))
if update_user != -1 && i % update_user == 0
println("finished iterate $(i):tris:$(triangles) -- gaped_t:$(gaped_triangles)")
end
if triangles > best_triangle_count
best_matching = matching
best_x = copy(x_k_1)
best_triangle_count = triangles
best_iterate = i
end
end
if update_user != -1 && i % update_user == 0
println("λ_$i: $(new_lambda)")
end
if abs(new_lambda - lambda) < tol || i >= max_iter
return reshape(best_x,A.n,B.n), best_triangle_count, best_matching, experiment_profile
else
x_k = copy(x_k_1)
lambda = new_lambda
i += 1
end
end
end
|
{"hexsha": "37511974823c38d9fd19bdecf9a18412534e0b68", "size": 37396, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TAME_Implementations.jl", "max_stars_repo_name": "charlescolley/LambdaTAME.jl", "max_stars_repo_head_hexsha": "7c1384d4d3e7f507d7da9b79ee7929e79a4000e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/TAME_Implementations.jl", "max_issues_repo_name": "charlescolley/LambdaTAME.jl", "max_issues_repo_head_hexsha": "7c1384d4d3e7f507d7da9b79ee7929e79a4000e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-16T14:30:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-16T15:39:36.000Z", "max_forks_repo_path": "src/TAME_Implementations.jl", "max_forks_repo_name": "charlescolley/LambdaTAME.jl", "max_forks_repo_head_hexsha": "7c1384d4d3e7f507d7da9b79ee7929e79a4000e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2140896615, "max_line_length": 141, "alphanum_fraction": 0.6445876564, "num_tokens": 11039}
|
# processing.py -- various audio processing functions
# Copyright (C) 2008 MUSIC TECHNOLOGY GROUP (MTG)
# UNIVERSITAT POMPEU FABRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Bram de Jong <bram.dejong at domain.com where domain in gmail>
# 2012, Joar Wandborg <first name at last name dot se>
from __future__ import print_function
try:
from PIL import Image
except ImportError:
import Image
import math
import numpy
try:
import scikits.audiolab as audiolab
except ImportError:
print("WARNING: audiolab is not installed so wav2png will not work")
class AudioProcessingException(Exception):
pass
class SpectrogramImage(object):
def __init__(self, image_size, fft_size):
self.image_width, self.image_height = image_size
self.fft_size = fft_size
colors = [
(0, 0, 0, 0),
(58 / 4, 68 / 4, 65 / 4, 255),
(80 / 2, 100 / 2, 153 / 2, 255),
(90, 180, 100, 255),
(224, 224, 44, 255),
(255, 60, 30, 255),
(255, 255, 255, 255)
]
self.palette = interpolate_colors(colors)
# Generate lookup table for y-coordinate from fft-bin
self.y_to_bin = []
fft_min = 100.0
fft_max = 22050.0 # Hz (the Nyquist frequency of 44.1 kHz audio)
y_min = math.log10(fft_min)
y_max = math.log10(fft_max)
for y in range(self.image_height):
freq = math.pow(
10.0,
y_min + y / (self.image_height - 1.0)
* (y_max - y_min))
fft_bin = freq / fft_max * (self.fft_size / 2 + 1)
if fft_bin < self.fft_size / 2:
alpha = fft_bin - int(fft_bin)
self.y_to_bin.append((int(fft_bin), alpha * 255))
# this is a bit strange, but using image.load()[x,y] = ... is
# a lot slower than using image.putdata and then rotating the image
# so we store all the pixels in an array and then create the image when saving
self.pixels = []
def draw_spectrum(self, x, spectrum):
# for all frequencies, draw the pixels
for index, alpha in self.y_to_bin:
self.pixels.append(
self.palette[int((255.0 - alpha) * spectrum[index]
+ alpha * spectrum[index + 1])])
# if the FFT is too small to fill up the image, fill with black to the top
for y in range(len(self.y_to_bin), self.image_height):
self.pixels.append(self.palette[0])
def save(self, filename, quality=90):
self.image = Image.new(
'RGBA',
(self.image_height, self.image_width))
self.image.putdata(self.pixels)
self.image.transpose(Image.ROTATE_90).save(
filename,
quality=quality)
class AudioProcessor(object):
"""
The audio processor processes chunks of audio and calculates the spectral
centroid and the peak samples in that chunk of audio.
"""
def __init__(self, input_filename, fft_size, window_function=numpy.hanning):
max_level = get_max_level(input_filename)
self.audio_file = audiolab.Sndfile(input_filename, 'r')
self.fft_size = fft_size
self.window = window_function(self.fft_size)
self.spectrum_range = None
self.lower = 100
self.higher = 22050
self.lower_log = math.log10(self.lower)
self.higher_log = math.log10(self.higher)
self.clip = lambda val, low, high: min(high, max(low, val))
# figure out the maximum value an FFT can produce by taking the FFT of a DC signal
fft = numpy.fft.rfft(numpy.ones(fft_size) * self.window)
max_fft = (numpy.abs(fft)).max()
# set the scale to normalized audio and normalized FFT
self.scale = 1.0 / max_level / max_fft if max_level > 0 else 1
def read(self, start, size, resize_if_less=False):
""" read size samples starting at start, if resize_if_less is True and less than size
samples are read, resize the array to size and fill with zeros """
# number of zeros to add to start and end of the buffer
add_to_start = 0
add_to_end = 0
if start < 0:
# the first FFT window starts centered around zero
if size + start <= 0:
return numpy.zeros(size) if resize_if_less else numpy.array([])
else:
self.audio_file.seek(0)
add_to_start = - start # remember: start is negative!
to_read = size + start
if to_read > self.audio_file.nframes:
add_to_end = to_read - self.audio_file.nframes
to_read = self.audio_file.nframes
else:
self.audio_file.seek(start)
to_read = size
if start + to_read >= self.audio_file.nframes:
to_read = self.audio_file.nframes - start
add_to_end = size - to_read
try:
samples = self.audio_file.read_frames(to_read)
except RuntimeError:
# this can happen for wave files with broken headers...
return numpy.zeros(size) if resize_if_less else numpy.zeros(2)
# convert to mono by selecting left channel only
if self.audio_file.channels > 1:
samples = samples[:,0]
if resize_if_less and (add_to_start > 0 or add_to_end > 0):
if add_to_start > 0:
samples = numpy.concatenate((numpy.zeros(add_to_start), samples)) # 1-D mono data, so concatenate along axis 0
if add_to_end > 0:
samples = numpy.resize(samples, size)
samples[size - add_to_end:] = 0
return samples
def spectral_centroid(self, seek_point, spec_range=110.0):
""" starting at seek_point read fft_size samples, and calculate the spectral centroid """
samples = self.read(seek_point - self.fft_size/2, self.fft_size, True)
samples *= self.window
fft = numpy.fft.rfft(samples)
spectrum = self.scale * numpy.abs(fft) # normalized abs(FFT) between 0 and 1
length = numpy.float64(spectrum.shape[0])
# scale the db spectrum from [- spec_range db ... 0 db] > [0..1]
db_spectrum = ((20*(numpy.log10(spectrum + 1e-60))).clip(-spec_range, 0.0) + spec_range)/spec_range
energy = spectrum.sum()
spectral_centroid = 0
if energy > 1e-60:
# calculate the spectral centroid
if self.spectrum_range is None:
self.spectrum_range = numpy.arange(length)
spectral_centroid = (spectrum * self.spectrum_range).sum() / (energy * (length - 1)) * self.audio_file.samplerate * 0.5
# clip > log10 > scale between 0 and 1
spectral_centroid = (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - self.lower_log) / (self.higher_log - self.lower_log)
return (spectral_centroid, db_spectrum)
def peaks(self, start_seek, end_seek):
""" read all samples between start_seek and end_seek, then find the minimum and maximum peak
in that range. Returns that pair in the order they were found. So if min was found first,
it returns (min, max) else the other way around. """
# larger blocksizes are faster but take more mem...
# Aha, Watson, a clue, a tradeoff!
block_size = 4096
max_index = -1
max_value = -1
min_index = -1
min_value = 1
if start_seek < 0:
start_seek = 0
if end_seek > self.audio_file.nframes:
end_seek = self.audio_file.nframes
if end_seek <= start_seek:
samples = self.read(start_seek, 1)
return (samples[0], samples[0])
if block_size > end_seek - start_seek:
block_size = end_seek - start_seek
for i in range(start_seek, end_seek, block_size):
samples = self.read(i, block_size)
local_max_index = numpy.argmax(samples)
local_max_value = samples[local_max_index]
if local_max_value > max_value:
max_value = local_max_value
max_index = local_max_index
local_min_index = numpy.argmin(samples)
local_min_value = samples[local_min_index]
if local_min_value < min_value:
min_value = local_min_value
min_index = local_min_index
return (min_value, max_value) if min_index < max_index else (max_value, min_value)
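# A minimal, self-contained sketch (an addition, not part of the original module)
# of the spectral-centroid computation used by AudioProcessor.spectral_centroid:
# centroid = sum(f_k * |X_k|) / sum(|X_k|) over the magnitude spectrum of one
# windowed frame.
def _example_spectral_centroid(samples, samplerate):
    window = numpy.hanning(len(samples))
    spectrum = numpy.abs(numpy.fft.rfft(samples * window))
    # bin k of an rfft of n samples sits at frequency k * samplerate / n
    freqs = numpy.arange(len(spectrum)) * samplerate / 2.0 / (len(spectrum) - 1)
    return (spectrum * freqs).sum() / spectrum.sum()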
def create_spectrogram_image(source_filename, output_filename,
image_size, fft_size, progress_callback=None):
processor = AudioProcessor(source_filename, fft_size, numpy.hamming)
samples_per_pixel = processor.audio_file.nframes / float(image_size[0])
spectrogram = SpectrogramImage(image_size, fft_size)
for x in range(image_size[0]):
if progress_callback and x % (image_size[0] / 10) == 0:
progress_callback((x * 100) / image_size[0])
seek_point = int(x * samples_per_pixel)
next_seek_point = int((x + 1) * samples_per_pixel)
(spectral_centroid, db_spectrum) = processor.spectral_centroid(seek_point)
spectrogram.draw_spectrum(x, db_spectrum)
if progress_callback:
progress_callback(100)
spectrogram.save(output_filename)
def interpolate_colors(colors, flat=False, num_colors=256):
palette = []
for i in range(num_colors):
# map palette index i (0..num_colors-1) to a fractional position in the colors list
index = i * (len(colors) - 1) / (num_colors - 1.0)
# alpha is the fractional part of index: how far we sit between the lower
# neighboring color and the next one, used below to linearly interpolate each channel
alpha = index - int(index)
channels = list('rgb')
values = dict()
for k, v in zip(range(len(channels)), channels):
if alpha > 0:
values[v] = (
(1.0 - alpha)
*
colors[int(index)][k]
+
alpha * colors[int(index) + 1][k]
)
else:
values[v] = (
(1.0 - alpha)
*
colors[int(index)][k]
)
if flat:
palette.extend(
tuple(int(values[i]) for i in channels))
else:
palette.append(
tuple(int(values[i]) for i in channels))
return palette
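# Small usage sketch (an addition, not part of the original module): a 256-entry
# palette interpolated from black to white; the midpoint should be near mid-grey.
def _example_interpolate_colors():
    palette = interpolate_colors([(0, 0, 0), (255, 255, 255)])
    return palette[128] # approximately (128, 128, 128)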
def get_max_level(filename):
max_value = 0
buffer_size = 4096
audio_file = audiolab.Sndfile(filename, 'r')
n_samples_left = audio_file.nframes
while n_samples_left:
to_read = min(buffer_size, n_samples_left)
try:
samples = audio_file.read_frames(to_read)
except RuntimeError:
# this can happen with a broken header
break
# convert to mono by selecting left channel only
if audio_file.channels > 1:
samples = samples[:,0]
max_value = max(max_value, numpy.abs(samples).max())
n_samples_left -= to_read
audio_file.close()
return max_value
if __name__ == '__main__':
import sys
sys.argv[4] = int(sys.argv[4])
sys.argv[3] = tuple([int(i) for i in sys.argv[3].split('x')])
create_spectrogram_image(*sys.argv[1:])
|
{"hexsha": "433bb300b1dbda98a9454c8cfdf422bb25ffe59b", "size": 12115, "ext": "py", "lang": "Python", "max_stars_repo_path": "mediagoblin/media_types/audio/spectrogram.py", "max_stars_repo_name": "saksham1115/mediagoblin", "max_stars_repo_head_hexsha": "41302ad2b622b340caeb13339338ab3a5d0f7e6b", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-05-27T03:57:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T02:17:39.000Z", "max_issues_repo_path": "mediagoblin/media_types/audio/spectrogram.py", "max_issues_repo_name": "saksham1115/mediagoblin", "max_issues_repo_head_hexsha": "41302ad2b622b340caeb13339338ab3a5d0f7e6b", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mediagoblin/media_types/audio/spectrogram.py", "max_forks_repo_name": "saksham1115/mediagoblin", "max_forks_repo_head_hexsha": "41302ad2b622b340caeb13339338ab3a5d0f7e6b", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-05-13T14:42:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-28T10:36:46.000Z", "avg_line_length": 33.3746556474, "max_line_length": 153, "alphanum_fraction": 0.5938918696, "include": true, "reason": "import numpy", "num_tokens": 2848}
|
import cv2
import tensorflow as tf
import numpy as np
import scipy.ndimage as sci
from gtts import gTTS
import time
from textblob import TextBlob
# This module is imported so that we can
# play the converted audio
import os
#to normalize the images to same no. of pixels
def resizeIt(img,size=100,median=2):
img=np.float32(img)
r,c=img.shape
#filtering then resizing image
resized_img=cv2.resize(img,(size,size))
filtered_img=sci.median_filter(resized_img,median)
return np.uint8(filtered_img)
def preprocessing(img0,IMG_SIZE=100):
img_resized=resizeIt(img0,IMG_SIZE,1) # resize to normalize data size
#cv2.imshow("intermidieate",img_resized)
img_blur = cv2.GaussianBlur(img_resized,(5,5),0)
imgTh=cv2.adaptiveThreshold(img_blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,7,3)
ret,img_th = cv2.threshold(imgTh,0,255,cv2.THRESH_TOZERO+cv2.THRESH_OTSU)
#edges = cv2.Canny(img_resized,170, 300)
return img_th
def playText(text):
myobj = gTTS(text=text, lang='en' , slow=False)
if os.path.exists("audio.mp3"):
os.remove("audio.mp3")
myobj.save("audio.mp3")
# Playing the converted file
#os.system("mpg123 welcome.mp3")
from playsound import playsound
playsound('audio.mp3')
return 0
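# Illustrative helper (an added sketch; the main loop below computes the same
# thing inline): the most frequent element of a sequence, used as the mode of
# the buffer of per-frame letter predictions.
def most_common(seq):
    return max(set(seq), key=seq.count) # e.g. most_common(['a','a','b']) -> 'a'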
ALPHABET = [] #array containing letters to categorize
alpha = 'a'
for i in range(0, 27): #27 classes: 'a'..'z' plus '{' (the char after 'z'), used below as the space gesture
ALPHABET.append(alpha)
alpha = chr(ord(alpha) + 1)
prev=""
model = tf.keras.models.load_model("model_name.model")
cap = cv2.VideoCapture(0) #to load video file
count = 0
buffer = []
prev_time = time.time()
word = ' '
sentence = ' '
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
if ret==False:
break
# Our operations on the frame come here
img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
IMG_SIZE = 200
#print(img_gray.shape)
rows, cols = img_gray.shape #shape is (rows, cols), i.e. (height, width)
top=180
left=cols-195
down=rows-50
right=195
img_rect = cv2.rectangle(img_gray,(right,top),(left,down),(255,0,0),2)
img_gray = img_gray[top:down, right:left]
#print(img_gray.shape)
img_test = preprocessing(img_gray,IMG_SIZE)
cv2.imshow('whole input frame', np.uint8(img_rect))
cv2.imshow('qwerty',img_test)
# Display the resulting frame
#cv2.imshow('testing this',np.uint8(img_test))
prediction = model.predict([img_test.reshape(-1, IMG_SIZE, IMG_SIZE, 1)])
#print(img_test)
text = ALPHABET[int(np.argmax(prediction[0]))]
_ = os.system('cls')
print('Alphabet: '+text+' Word: '+word+' Sentence: '+sentence+' Time Required: '+str(time.time()-prev_time))
prev_time = time.time()
no_frames = 50
count = count + 1
buffer.append(text)
print(buffer)
if (count > no_frames) :
text = max(set(buffer),key = buffer.count) #finding mode of buffer of letters
try:
playText(text)
except:
print("Try again!!")
count=0
buffer = []
if( text =='{'):
word = word + ' '
else:
word = word + text
playText(word)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
{"hexsha": "5d7c2debbda785a51a1b60f1b3200e4d85f05bac", "size": 3278, "ext": "py", "lang": "Python", "max_stars_repo_path": "gesture to speech.py", "max_stars_repo_name": "Aryan4786/Sign-Language-to-Speech-and-Speech-to-Gestures", "max_stars_repo_head_hexsha": "1ac707d516697751ac99acd5e2338d875f89f71d", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-18T02:31:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T11:47:03.000Z", "max_issues_repo_path": "gesture to speech.py", "max_issues_repo_name": "Aryan4786/Sign-Language-to-Speech-and-Speech-to-Gestures", "max_issues_repo_head_hexsha": "1ac707d516697751ac99acd5e2338d875f89f71d", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-06T03:54:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T03:32:42.000Z", "max_forks_repo_path": "gesture to speech.py", "max_forks_repo_name": "Aryan4786/Sign-Language-to-Speech-and-Speech-to-Gestures", "max_forks_repo_head_hexsha": "1ac707d516697751ac99acd5e2338d875f89f71d", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-18T02:30:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-18T02:30:52.000Z", "avg_line_length": 28.2586206897, "max_line_length": 112, "alphanum_fraction": 0.6485661989, "include": true, "reason": "import numpy,import scipy", "num_tokens": 917}
|
#!/usr/bin/env python
"""
Makes netcdf files of input data for NN trainig dataset
"""
import os
from dateutil.parser import parse
from netCDF4 import Dataset
import numpy as np
from math import cos, radians
#---
if __name__ == '__main__':
outdir = 'vnncLUT'
filename = 'LUT_angles_wind.nc4'
sza = np.linspace(0.15,1,18)
vza = np.linspace(0.15,1,18)
sza = np.degrees(np.arccos(sza))
vza = np.degrees(np.arccos(vza))
raa = np.degrees(np.arccos([-1, -0.5, 0, 0.5, 1]))
u10m = np.linspace(0,20,5)
v10m = np.array([0.10]) # single v-wind value; a float array, and not exactly zero
u10m[0] = 0.10 # likewise replace the exactly-zero u-wind entry
# create file
outfile = outdir + '/' + filename
ncOut = Dataset(outfile,'w',format='NETCDF4_CLASSIC')
ncOut.comment = 'angles and wind speeds for ocean NN training data'
# create dimensions
sza_dim = ncOut.createDimension('sza',len(sza))
vza_dim = ncOut.createDimension('vza',len(vza))
raa_dim = ncOut.createDimension('raa',len(raa))
uwind_dim = ncOut.createDimension('uwind',len(u10m))
vwind_dim = ncOut.createDimension('vwind',len(v10m))
# write out variables
varobj = ncOut.createVariable('sza','f4',('sza',))
varobj.long_name = 'solar zenith angle'
varobj.units = 'degrees'
varobj[:] = sza
varobj = ncOut.createVariable('vza','f4',('vza',))
varobj.long_name = 'viewing zenith angle'
varobj.units = 'degrees'
varobj[:] = vza
varobj = ncOut.createVariable('raa','f4',('raa',))
varobj.long_name = 'relative azimuth angle'
varobj.units = 'degrees'
varobj[:] = raa
varobj = ncOut.createVariable('u10m','f4',('uwind',))
varobj.long_name = 'u10m'
varobj.units = 'm/2'
varobj[:] = u10m
varobj = ncOut.createVariable('v10m','f4',('vwind',))
varobj.long_name = 'v10m'
varobj.units = 'm/2'
varobj[:] = v10m
ncOut.close()
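# Optional read-back check (an added sketch, not in the original script): reopen
# the file just written and confirm the dimension sizes.
ncIn = Dataset(outfile, 'r')
print('dimensions:', {name: len(dim) for name, dim in ncIn.dimensions.items()})
ncIn.close()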
|
{"hexsha": "734ec690076bcf6c7f546217157f5d1bf7d366cf", "size": 1857, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Components/missions/TEMPO/vnncLUT_angles_ocean.py", "max_stars_repo_name": "GEOS-ESM/AeroApps", "max_stars_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_stars_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-02T14:23:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T15:39:30.000Z", "max_issues_repo_path": "src/Components/missions/TEMPO/vnncLUT_angles_ocean.py", "max_issues_repo_name": "GEOS-ESM/AeroApps", "max_issues_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_issues_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-04-15T16:22:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T13:59:25.000Z", "max_forks_repo_path": "src/Components/missions/TEMPO/vnncLUT_angles_ocean.py", "max_forks_repo_name": "GEOS-ESM/AeroApps", "max_forks_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_forks_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7164179104, "max_line_length": 70, "alphanum_fraction": 0.6289714593, "include": true, "reason": "import numpy", "num_tokens": 640}
|
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
Call the function lobpcg - see help for lobpcg.lobpcg.
"""
from __future__ import division, print_function, absolute_import
from .lobpcg import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
|
{"hexsha": "861274a6655f7c9984cd992379f7371661676bbe", "size": 485, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/Python27/Lib/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.py", "max_stars_repo_name": "lefevre-fraser/openmeta-mms", "max_stars_repo_head_hexsha": "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/Python27/Lib/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.py", "max_issues_repo_name": "lefevre-fraser/openmeta-mms", "max_issues_repo_head_hexsha": "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/Python27/Lib/site-packages/scipy/sparse/linalg/eigen/lobpcg/__init__.py", "max_forks_repo_name": "lefevre-fraser/openmeta-mms", "max_forks_repo_head_hexsha": "08f3115e76498df1f8d70641d71f5c52cab4ce5f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-08T12:44:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-08T12:44:48.000Z", "avg_line_length": 26.9444444444, "max_line_length": 77, "alphanum_fraction": 0.7505154639, "include": true, "reason": "from numpy", "num_tokens": 118}
|
# Copyright (C) 2016 Michael D. Nunez
#
# License: BSD (3-clause)
# Record of Revisions
#
# Date Programmers Descriptions of Change
# ==== ================ ======================
# 03/16/16 Michael Nunez Original code
# 03/29/16 Michael Nunez Cleanup
# Modules
import numpy as np
import matplotlib.pyplot as plt
# Power calculation and plot
def eegpower(data, sr, freqs=[0., 50.], dB=False, plot=False, **kwargs):
"""Calculates & plots power spectrum of sample*channel*trial EEG data
Usage:
xfreqs, outpower, fourier = eegpower(data, sr, freqs=[0., 50.], dB=False, plot=False, **kwargs)
Inputs:
data - sample*channel*trial EEG data
sr - sample rate
Optional Inputs:
freqs - lower and upper boundary of frequencies to plot, default: [0., 50.] (Hz)
dB - Units in standardized dB instead of standardized power
plot - Produces plot
kwargs - Any additional "plt.plot" keyword inputs, i.e., plt.plot(X, Y, **kwargs)
Outputs:
xfreqs - Frequencies plotted (x-axis)
outpower - Power values plotted (y-axis)
fourier - Fouier coefficients
"""
# Find frequency interval
nyquist = (2. / 5.) * sr # conservative upper bound of 0.4*sr, slightly below the true Nyquist sr/2
if freqs[1] > nyquist:
print('User defined maximum frequency %0.3f is larger than the Nyquist frequency %0.3f! \n' % (freqs[1], nyquist))
print('Using Nyquist frequency as maximum \n')
freqs[1] = nyquist
# Recalculate if minimum is smaller than Nyquist sampling rate
nsr = sr / np.shape(data)[0] # Nyquist sampling rate (Hz)
if (freqs[0] < nsr) & (freqs[0] != 0):
print('User defined minimum frequency %0.3f is smaller than Nyquist sampling rate %0.3f! \n' % (freqs[0], nsr))
print('Using Nyquist sampling rate as minimum frequency \n')
freqs[0] = nsr
# Calculate Power
fourier = np.fft.fft(data, axis=0) / np.shape(data)[0]
plotfreqs = np.arange(0., freqs[1], nsr)
minindex = np.argmin(abs(freqs[0] - plotfreqs))
maxindex = np.shape(plotfreqs)[0]
# Power in standardized units (\muV^2/Hz)
power = np.mean(np.square(np.abs(fourier)), axis=2) * (2. / nsr)
if dB:
power = 10 * np.log10(power)
xfreqs = plotfreqs[minindex:maxindex]
outpower = power[minindex:maxindex, :]
fourier = fourier[minindex:maxindex, :, :]
if plot:
plt.plot(xfreqs, outpower, **kwargs)
plt.xlabel('Frequency (Hz)')
if dB:
plt.ylabel('Standardized Log Power (10*log_{10}(muV^2/Hz); dB)')
else:
plt.ylabel('Standardized Power (muV^2/Hz)')
plt.title('EEG Power Spectrum')
return xfreqs, outpower, fourier
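# A minimal synthetic usage sketch (an addition, not part of the original module):
# 2 s of a 10 Hz sine across 4 channels and 20 trials should show a peak near
# 10 Hz in the returned power spectrum.
def _example_eegpower(sr=256.):
    t = np.arange(0., 2., 1. / sr)
    data = np.sin(2 * np.pi * 10. * t)[:, None, None] * np.ones((1, 4, 20))
    data += 0.1 * np.random.randn(len(t), 4, 20) # add a little noise per trial
    return eegpower(data, sr, freqs=[0., 50.])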
|
{"hexsha": "7d2c6066c2d058865871af88cfe3d260cff785db", "size": 2757, "ext": "py", "lang": "Python", "max_stars_repo_path": "powercalcs.py", "max_stars_repo_name": "mdnunez/electroencephalopy", "max_stars_repo_head_hexsha": "32aad64be8567a44d8644720f684427616b33ea1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-22T19:52:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-22T19:52:17.000Z", "max_issues_repo_path": "powercalcs.py", "max_issues_repo_name": "mdnunez/electroencephalopy", "max_issues_repo_head_hexsha": "32aad64be8567a44d8644720f684427616b33ea1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "powercalcs.py", "max_forks_repo_name": "mdnunez/electroencephalopy", "max_forks_repo_head_hexsha": "32aad64be8567a44d8644720f684427616b33ea1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6219512195, "max_line_length": 121, "alphanum_fraction": 0.6010155967, "include": true, "reason": "import numpy", "num_tokens": 779}
|
from .gdsPrimitives import *
from datetime import *
#from mpmath import matrix
#from numpy import matrix
from vector import vector
import numpy as np
#import gdsPrimitives
import debug
class VlsiLayout:
"""Class represent a hierarchical layout"""
def __init__(self, name=None, units=(0.001,1e-9), libraryName = "DEFAULT.DB", gdsVersion=5):
#keep a list of all the structures in this layout
self.units = units
#print(units)
modDate = datetime.now()
self.structures=dict()
self.layerNumbersInUse = []
self.debug = False
if name:
self.rootStructureName=name
#create the ROOT structure
self.structures[self.rootStructureName] = GdsStructure()
self.structures[self.rootStructureName].name = name
self.structures[self.rootStructureName].createDate = (modDate.year,
modDate.month,
modDate.day,
modDate.hour,
modDate.minute,
modDate.second)
self.structures[self.rootStructureName].modDate = (modDate.year,
modDate.month,
modDate.day,
modDate.hour,
modDate.minute,
modDate.second)
self.info = dict() #information gathered from the GDSII header
self.info['units']=self.units
self.info['dates']=(modDate.year,
modDate.month,
modDate.day,
modDate.hour,
modDate.minute,
modDate.second,
modDate.year,
modDate.month,
modDate.day,
modDate.hour,
modDate.minute,
modDate.second)
self.info['libraryName']=libraryName
self.info['gdsVersion']=gdsVersion
self.xyTree = [] #This will contain a list of all structure names
#expanded to include srefs / arefs separately.
#each structure will have an X,Y,offset, and rotate associated
#with it. Populate via traverseTheHierarchy method.
#temp variables used in delegate functions
self.tempCoordinates=None
self.tempPassFail = True
# This is a dict indexed by the pin labels.
# It contains a list of list of shapes, one for each occurance of the label.
# Multiple labels may be disconnected.
self.pins = {}
def rotatedCoordinates(self,coordinatesToRotate,rotateAngle):
#helper method to rotate a list of coordinates
angle=math.radians(float(0))
if(rotateAngle):
angle = math.radians(float(rotateAngle))
coordinatesRotate = [] #this will hold the rotated values
for coordinate in coordinatesToRotate:
# This is the CCW rotation matrix
newX = coordinate[0]*math.cos(angle) - coordinate[1]*math.sin(angle)
newY = coordinate[0]*math.sin(angle) + coordinate[1]*math.cos(angle)
coordinatesRotate.extend((newX,newY))
return coordinatesRotate
def rename(self,newName):
#make sure the newName is a multiple of 2 characters
if(len(newName)%2 == 1):
#pad with a zero
newName = newName + '\x00'
#take the root structure and copy it to a new structure with the new name
self.structures[newName] = self.structures[self.rootStructureName]
self.structures[newName].name = newName
#and delete the old root
del self.structures[self.rootStructureName]
self.rootStructureName = newName
#repopulate the 2d map so drawing occurs correctly
del self.xyTree[:]
self.populateCoordinateMap()
def newLayout(self,newName):
#if (newName == "" | newName == 0):
# print("ERROR: vlsiLayout.py:newLayout newName is null")
#make sure the newName is a multiple of 2 characters
#if(len(newName)%2 == 1):
#pad with a zero
#newName = newName + '\x00'
#take the root structure and copy it to a new structure with the new name
#self.structures[newName] = self.structures[self.rootStructureName]
modDate = datetime.now()
#create the new ROOT structure (create it once; re-creating it below would drop the name)
self.structures[newName] = GdsStructure()
self.structures[newName].name = newName
self.rootStructureName = newName
self.structures[self.rootStructureName].createDate = (modDate.year,
modDate.month,
modDate.day,
modDate.hour,
modDate.minute,
modDate.second)
self.structures[self.rootStructureName].modDate = (modDate.year,
modDate.month,
modDate.day,
modDate.hour,
modDate.minute,
modDate.second)
#repopulate the 2d map so drawing occurs correctly
self.prepareForWrite()
def prepareForWrite(self):
del self.xyTree[:]
self.populateCoordinateMap()
def deduceHierarchy(self):
""" First, find the root of the tree.
Then go through and get the name of every structure.
Then, go through and find which structure is not
contained by any other structure. this is the root."""
structureNames=[]
for name in self.structures:
structureNames.append(name)
for name in self.structures:
if(len(self.structures[name].srefs)>0): #does this structure reference any others?
for sref in self.structures[name].srefs: #go through each reference
if sref.sName in structureNames: #and compare to our list
structureNames.remove(sref.sName)
debug.check(len(structureNames)==1,"Multiple possible root structures in the layout: {}".format(str(structureNames)))
self.rootStructureName = structureNames[0]
def traverseTheHierarchy(self, startingStructureName=None, delegateFunction = None,
transformPath = [], rotateAngle = 0, transFlags = [0,0,0], coordinates = (0,0)):
#since this is a recursive function, must deal with the default
#parameters explicitly
if startingStructureName == None:
startingStructureName = self.rootStructureName
#set up the rotation matrix
if(rotateAngle == None or rotateAngle == ""):
angle = 0
else:
# MRG: Added negative to make CCW rotate 8/29/18
angle = math.radians(float(rotateAngle))
mRotate = np.array([[math.cos(angle),-math.sin(angle),0.0],
[math.sin(angle),math.cos(angle),0.0],
[0.0,0.0,1.0]])
#set up the translation matrix
translateX = float(coordinates[0])
translateY = float(coordinates[1])
mTranslate = np.array([[1.0,0.0,translateX],[0.0,1.0,translateY],[0.0,0.0,1.0]])
#set up the scale matrix (handles mirror X)
scaleX = 1.0
if(transFlags[0]):
scaleY = -1.0
else:
scaleY = 1.0
mScale = np.array([[scaleX,0.0,0.0],[0.0,scaleY,0.0],[0.0,0.0,1.0]])
#we need to keep track of all transforms in the hierarchy
#when we add an element to the xy tree, we apply all transforms from the bottom up
transformPath.append((mRotate,mScale,mTranslate))
if delegateFunction != None:
delegateFunction(startingStructureName, transformPath)
#starting with a particular structure, we will recursively traverse the tree
#********might have to set the recursion level deeper for big layouts!
if(len(self.structures[startingStructureName].srefs)>0): #does this structure reference any others?
#if so, go through each and call this function again
#if not, return back to the caller (caller can be this function)
for sref in self.structures[startingStructureName].srefs:
#here, we are going to modify the sref coordinates based on the parent objects rotation
self.traverseTheHierarchy(startingStructureName = sref.sName,
delegateFunction = delegateFunction,
transformPath = transformPath,
rotateAngle = sref.rotateAngle,
transFlags = sref.transFlags,
coordinates = sref.coordinates)
#MUST HANDLE AREFs HERE AS WELL
#when we return, drop the last transform from the transformPath
del transformPath[-1]
return
def initialize(self):
self.deduceHierarchy()
#self.traverseTheHierarchy()
self.populateCoordinateMap()
for layerNumber in self.layerNumbersInUse:
self.processLabelPins(layerNumber)
def populateCoordinateMap(self):
def addToXyTree(startingStructureName = None,transformPath = None):
uVector = np.array([[1.0],[0.0],[0.0]]) #start with normal basis vectors
vVector = np.array([[0.0],[1.0],[0.0]])
origin = np.array([[0.0],[0.0],[1.0]]) #and an origin (Z component is 1.0 to indicate position instead of vector)
#make a copy of all the transforms and reverse it
reverseTransformPath = transformPath[:]
if len(reverseTransformPath) > 1:
reverseTransformPath.reverse()
#now go through each transform and apply them to our basis and origin in succession
for transform in reverseTransformPath:
origin = np.dot(transform[0], origin) #rotate
uVector = np.dot(transform[0], uVector) #rotate
vVector = np.dot(transform[0], vVector) #rotate
origin = np.dot(transform[1], origin) #scale
uVector = np.dot(transform[1], uVector) #scale
vVector = np.dot(transform[1], vVector) #scale
origin = np.dot(transform[2], origin) #translate
#we don't need to do a translation on the basis vectors
#uVector = transform[2] * uVector #translate
#vVector = transform[2] * vVector #translate
#populate the xyTree with each structureName and coordinate space
self.xyTree.append((startingStructureName,origin,uVector,vVector))
self.traverseTheHierarchy(delegateFunction = addToXyTree)
def microns(self,userUnits):
"""Utility function to convert user units to microns"""
userUnit = self.units[1]/self.units[0]
userUnitsPerMicron = userUnit / (userUnit)
layoutUnitsPerMicron = userUnitsPerMicron / self.units[0]
return userUnits / layoutUnitsPerMicron
def userUnits(self,microns):
"""Utility function to convert microns to user units"""
userUnit = self.units[1]/self.units[0]
#userUnitsPerMicron = userUnit / 1e-6
userUnitsPerMicron = userUnit / (userUnit)
layoutUnitsPerMicron = userUnitsPerMicron / self.units[0]
#print("userUnit:",userUnit,"userUnitsPerMicron",userUnitsPerMicron,"layoutUnitsPerMicron",layoutUnitsPerMicron,[microns,microns*layoutUnitsPerMicron])
return round(microns*layoutUnitsPerMicron,0)
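# Worked example (added note): with the default units=(0.001, 1e-9) the user unit
# is 1e-9/0.001 = 1e-6 m (one micron) and layoutUnitsPerMicron = 1/0.001 = 1000,
# so userUnits(0.5) returns 500.0 layout units and microns(500) returns 0.5.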
def changeRoot(self,newRoot, create=False):
"""
Method to change the root pointer to another layout.
"""
if self.debug:
debug.info(0,"DEBUG: GdsMill vlsiLayout: changeRoot: %s "%newRoot)
# Determine if newRoot exists
# layoutToAdd (default) or nameOfLayout
if (newRoot == 0) or ((newRoot not in self.structures) and not create):
print("ERROR: vlsiLayout.changeRoot: Name of new root [%s] not found and create flag is false"%newRoot)
exit(1)
else:
if (newRoot not in self.structures) and create:
self.newLayout(newRoot)
self.rootStructureName = newRoot
def addInstance(self,layoutToAdd,nameOfLayout=0,offsetInMicrons=(0,0),mirror=None,rotate=None):
"""
Method to insert one layout into another at a particular offset.
"""
offsetInLayoutUnits = (self.userUnits(offsetInMicrons[0]),self.userUnits(offsetInMicrons[1]))
if self.debug:
debug.info(0,"DEBUG: GdsMill vlsiLayout: addInstance: type {0}, nameOfLayout {1}".format(type(layoutToAdd),nameOfLayout))
debug.info(0,"DEBUG: name={0} offset={1} mirror={2} rotate={3}".format(layoutToAdd.rootStructureName,offsetInMicrons, mirror, rotate))
# Determine if we are instantiating the root design of
# layoutToAdd (default) or nameOfLayout
if nameOfLayout == 0:
StructureFound = True
StructureName = layoutToAdd.rootStructureName
else:
StructureName = nameOfLayout #layoutToAdd
StructureFound = False
for structure in layoutToAdd.structures:
if StructureName in structure:
if self.debug:
debug.info(1,"DEBUG: Structure %s Found"%StructureName)
StructureFound = True
debug.check(StructureFound,"Could not find layout to instantiate {}".format(StructureName))
# If layoutToAdd is a unique object (not this), then copy hierarchy,
# otherwise, if it is a text name of an internal structure, use it.
if layoutToAdd != self:
#first, we need to combine the structure dictionaries from both layouts
for structure in layoutToAdd.structures:
if structure not in self.structures:
self.structures[structure]=layoutToAdd.structures[structure]
#also combine the "layers in use" list
for layerNumber in layoutToAdd.layerNumbersInUse:
if layerNumber not in self.layerNumbersInUse:
self.layerNumbersInUse.append(layerNumber)
#add a reference to the new layout structure in this layout's root
layoutToAddSref = GdsSref()
layoutToAddSref.sName = StructureName
layoutToAddSref.coordinates = offsetInLayoutUnits
if mirror or rotate:
layoutToAddSref.transFlags = [0,0,0]
# transFlags = (mirror around x-axis, magnification, rotation)
# If magnification or rotation is true, it is the flags are then
# followed by an amount in the record
if mirror=="R90":
rotate = 90.0
if mirror=="R180":
rotate = 180.0
if mirror=="R270":
rotate = 270.0
if rotate:
#layoutToAddSref.transFlags[2] = 1
layoutToAddSref.rotateAngle = rotate
if mirror == "x" or mirror == "MX":
layoutToAddSref.transFlags[0] = 1
if mirror == "y" or mirror == "MY": #NOTE: "MY" option will override specified rotate angle
layoutToAddSref.transFlags[0] = 1
#layoutToAddSref.transFlags[2] = 1
layoutToAddSref.rotateAngle = 180.0
if mirror == "xy" or mirror == "XY": #NOTE: "XY" option will override specified rotate angle
#layoutToAddSref.transFlags[2] = 1
layoutToAddSref.rotateAngle = 180.0
#add the sref to the root structure
self.structures[self.rootStructureName].srefs.append(layoutToAddSref)
def addBox(self,layerNumber=0, purposeNumber=None, offsetInMicrons=(0,0), width=1.0, height=1.0,center=False):
"""
Method to add a box to a layout
"""
offsetInLayoutUnits = (self.userUnits(offsetInMicrons[0]),self.userUnits(offsetInMicrons[1]))
#print("addBox:offsetInLayoutUnits",offsetInLayoutUnits)
widthInLayoutUnits = self.userUnits(width)
heightInLayoutUnits = self.userUnits(height)
#print("offsetInLayoutUnits",widthInLayoutUnits,"heightInLayoutUnits",heightInLayoutUnits)
if not center:
coordinates=[offsetInLayoutUnits,
(offsetInLayoutUnits[0]+widthInLayoutUnits,offsetInLayoutUnits[1]),
(offsetInLayoutUnits[0]+widthInLayoutUnits,offsetInLayoutUnits[1]+heightInLayoutUnits),
(offsetInLayoutUnits[0],offsetInLayoutUnits[1]+heightInLayoutUnits),
offsetInLayoutUnits]
else:
startPoint = (offsetInLayoutUnits[0]-widthInLayoutUnits/2.0, offsetInLayoutUnits[1]-heightInLayoutUnits/2.0)
coordinates=[startPoint,
(startPoint[0]+widthInLayoutUnits,startPoint[1]),
(startPoint[0]+widthInLayoutUnits,startPoint[1]+heightInLayoutUnits),
(startPoint[0],startPoint[1]+heightInLayoutUnits),
startPoint]
boundaryToAdd = GdsBoundary()
boundaryToAdd.drawingLayer = layerNumber
boundaryToAdd.dataType = 0
boundaryToAdd.coordinates = coordinates
boundaryToAdd.purposeLayer = purposeNumber
#add the sref to the root structure
self.structures[self.rootStructureName].boundaries.append(boundaryToAdd)
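# Usage sketch (added note): e.g. layout.addBox(10, offsetInMicrons=(0.0, 0.0), width=2.0, height=1.0)
# appends a 2um x 1um rectangle on layer 10 to the current root structure.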
def addPath(self, layerNumber=0, purposeNumber = None, coordinates=[(0,0)], width=1.0):
"""
Method to add a path to a layout
"""
widthInLayoutUnits = self.userUnits(width)
layoutUnitCoordinates = []
#first convert to proper units
for coordinate in coordinates:
cX = self.userUnits(coordinate[0])
cY = self.userUnits(coordinate[1])
layoutUnitCoordinates.append((cX,cY))
pathToAdd = GdsPath()
pathToAdd.drawingLayer=layerNumber
pathToAdd.purposeLayer = purposeNumber
pathToAdd.pathWidth=widthInLayoutUnits
pathToAdd.coordinates=layoutUnitCoordinates
#add the sref to the root structure
self.structures[self.rootStructureName].paths.append(pathToAdd)
def addText(self, text, layerNumber=0, purposeNumber = None, offsetInMicrons=(0,0), magnification=0.1, rotate = None):
offsetInLayoutUnits = (self.userUnits(offsetInMicrons[0]),self.userUnits(offsetInMicrons[1]))
textToAdd = GdsText()
textToAdd.drawingLayer = layerNumber
textToAdd.purposeLayer = purposeNumber
textToAdd.dataType = 0
textToAdd.coordinates = [offsetInLayoutUnits]
textToAdd.transFlags = [0,0,0]
if(len(text)%2 == 1):
text = text + '\x00'
textToAdd.textString = text
#textToAdd.transFlags[1] = 1
textToAdd.magFactor = magnification
if rotate:
#textToAdd.transFlags[2] = 1
textToAdd.rotateAngle = rotate
#add the sref to the root structure
self.structures[self.rootStructureName].texts.append(textToAdd)
def isBounded(self,testPoint,startPoint,endPoint):
#these arguments are tuples of (x,y) coordinates
if testPoint == None:
return 0
if(testPoint[0]<=max(endPoint[0],startPoint[0]) and \
testPoint[0]>=min(endPoint[0],startPoint[0]) and \
testPoint[1]<=max(endPoint[1],startPoint[1]) and \
testPoint[1]>=min(endPoint[1],startPoint[1])):
return 1
else:
return 0
def intersectionPoint(self,startPoint1,endPoint1,startPoint2,endPoint2):
if((endPoint1[0]-startPoint1[0])!=0 and (endPoint2[0]-startPoint2[0])!=0):
pSlope = (endPoint1[1]-startPoint1[1])/(endPoint1[0]-startPoint1[0])
pIntercept = startPoint1[1]-pSlope*startPoint1[0]
qSlope = (endPoint2[1]-startPoint2[1])/(endPoint2[0]-startPoint2[0])
qIntercept = startPoint2[1]-qSlope*startPoint2[0]
if(pSlope!=qSlope):
newX=(qIntercept-pIntercept)/(pSlope-qSlope)
newY=pSlope*newX+pIntercept
else:
#parallel lines can't intersect
newX=None
newY=None
elif((endPoint1[0]-startPoint1[0])==0 and (endPoint2[0]-startPoint2[0])==0):
#two vertical lines cannot intersect
newX = None
newY = None
elif((endPoint1[0]-startPoint1[0])==0 and (endPoint2[0]-startPoint2[0])!=0):
qSlope = (endPoint2[1]-startPoint2[1])/(endPoint2[0]-startPoint2[0])
qIntercept = startPoint2[1]-qSlope*startPoint2[0]
newX=endPoint1[0]
newY=qSlope*newX+qIntercept
elif((endPoint1[0]-startPoint1[0])!=0 and (endPoint2[0]-startPoint2[0])==0):
pSlope = (endPoint1[1]-startPoint1[1])/(endPoint1[0]-startPoint1[0])
pIntercept = startPoint1[1]-pSlope*startPoint1[0]
newX=endPoint2[0]
newY=pSlope*newX+pIntercept
return (newX,newY)
def isCollinear(self,testPoint,point1,point2):
#compare slopes in cross-product form so vertical segments do not divide by zero
return (testPoint[1]-point1[1])*(point2[0]-point1[0]) == (point2[1]-point1[1])*(testPoint[0]-point1[0])
def doShapesIntersect(self,shape1Coordinates, shape2Coordinates):
"""
Utility function to determine if 2 arbitrary shapes intersect.
We define intersection by taking pairs of points in each shape (assuming they are in order)
and seeing if any of the lines formed by these pairs intersect.
"""
for shape1Index in range(0,len(shape1Coordinates)-1):
for shape2Index in range(0,len(shape2Coordinates)-1):
startPoint1 = shape1Coordinates[shape1Index]
endPoint1 = shape1Coordinates[shape1Index+1]
startPoint2 = shape2Coordinates[shape2Index]
endPoint2 = shape2Coordinates[shape2Index+1]
intersect = self.intersectionPoint(startPoint1,endPoint1,startPoint2,endPoint2)
if(self.isBounded(intersect,startPoint1,endPoint1) and self.isBounded(intersect,startPoint2,endPoint2)):
return True #these shapes overlap!
return False #these shapes are ok
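# Worked example (added note): two unit squares offset by half a unit overlap, e.g.
# sq = [(0,0),(1,0),(1,1),(0,1),(0,0)]
# sq2 = [(x+0.5, y+0.5) for (x, y) in sq]
# doShapesIntersect(sq, sq2) -> True, while shifting sq2 by (2,2) instead gives False.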
def isPointInsideOfBox(self,pointCoordinates,boxCoordinates):
"""
Check if a point is contained in the shape
"""
debug.check(len(boxCoordinates)==4,"Invalid number of coordinates for box.")
leftBound = boxCoordinates[0][0]
rightBound = boxCoordinates[0][0]
topBound = boxCoordinates[0][1]
bottomBound = boxCoordinates[0][1]
for point in boxCoordinates:
if point[0]<leftBound:
leftBound = point[0]
if point[0]>rightBound:
rightBound = point[0]
if point[1]<bottomBound:
bottomBound = point[1]
if point[1]>topBound:
topBound = point[1]
if(pointCoordinates[0]>rightBound or
pointCoordinates[0]<leftBound or
pointCoordinates[1]>topBound or
pointCoordinates[1]<bottomBound):
return False
return True
def isShapeInsideOfBox(self,shapeCoordinates, boxCoordinates):
"""
Go through every point in the shape to test if they are all inside the box.
"""
for point in shapeCoordinates:
if not self.isPointInsideOfBox(point,boxCoordinates):
return False
return True
def fillAreaDensity(self, layerToFill = 0, offsetInMicrons = (0,0), coverageWidth = 100.0, coverageHeight = 100.0, minSpacing = 0.22, blockSize = 1.0):
effectiveBlock = blockSize+minSpacing
widthInBlocks = int(coverageWidth/effectiveBlock)
heightInBlocks = int(coverageHeight/effectiveBlock)
passFailRecord = []
print("Filling layer:",layerToFill)
def isThisBlockOk(startingStructureName,coordinates,rotateAngle=None):
#go through every boundary and check
for boundary in self.structures[startingStructureName].boundaries:
#only test shapes on the same layer
if(boundary.drawingLayer == layerToFill):
#remap coordinates
shiftedBoundaryCoordinates = []
for shapeCoordinate in boundary.rotatedCoordinates(rotateAngle):
shiftedBoundaryCoordinates.append((shapeCoordinate[0]+coordinates[0],shapeCoordinate[1]+coordinates[1]))
joint = self.doShapesIntersect(self.tempCoordinates, shiftedBoundaryCoordinates)
if joint:
self.tempPassFail = False
common = self.isShapeInsideOfBox(shiftedBoundaryCoordinates,self.tempCoordinates)
if common:
self.tempPassFail = False
for path in self.structures[startingStructureName].paths:
#only test shapes on the same layer
if(path.drawingLayer == layerToFill):
#remap coordinates
shiftedBoundaryCoordinates = []
for shapeCoordinate in path.equivalentBoundaryCoordinates(rotateAngle):
shiftedBoundaryCoordinates.append((shapeCoordinate[0]+coordinates[0],shapeCoordinate[1]+coordinates[1]))
joint = self.doShapesIntersect(self.tempCoordinates, shiftedBoundaryCoordinates)
if joint:
self.tempPassFail = False
common = self.isShapeInsideOfBox(shiftedBoundaryCoordinates,self.tempCoordinates)
if common:
self.tempPassFail = False
for yIndex in range(0,heightInBlocks):
for xIndex in range(0,widthInBlocks):
percentDone = (float((yIndex*widthInBlocks)+xIndex) / (heightInBlocks*widthInBlocks))*100 # row-major index: multiply yIndex by widthInBlocks
blockX = (xIndex*effectiveBlock)+offsetInMicrons[0]
blockY = (yIndex*effectiveBlock)+offsetInMicrons[1]
self.tempCoordinates=[(self.userUnits(blockX-minSpacing),self.userUnits(blockY-minSpacing)),
(self.userUnits(blockX-minSpacing),self.userUnits(blockY+effectiveBlock)),
(self.userUnits(blockX+effectiveBlock),self.userUnits(blockY+effectiveBlock)),
(self.userUnits(blockX+effectiveBlock),self.userUnits(blockY-minSpacing)),
(self.userUnits(blockX-minSpacing),self.userUnits(blockY-minSpacing))]
self.tempPassFail = True
#go through the hierarchy and see if the block will fit
self.traverseTheHierarchy(delegateFunction = isThisBlockOk)
#if its bad, this global tempPassFail will be false
#if true, we can add the block
passFailRecord.append(self.tempPassFail)
print("Percent Complete:"+str(percentDone))
passFailIndex=0
for yIndex in range(0,heightInBlocks):
for xIndex in range(0,widthInBlocks):
blockX = (xIndex*effectiveBlock)+offsetInMicrons[0]
blockY = (yIndex*effectiveBlock)+offsetInMicrons[1]
if passFailRecord[passFailIndex]:
self.addBox(layerToFill, (blockX,blockY), width=blockSize, height=blockSize)
passFailIndex += 1
print("Done\n\n")
def getLayoutBorder(self,borderlayer):
cellSizeMicron=None
for boundary in self.structures[self.rootStructureName].boundaries:
if boundary.drawingLayer==borderlayer:
if self.debug:
debug.info(1,"Find border "+str(boundary.coordinates))
left_bottom=boundary.coordinates[0]
right_top=boundary.coordinates[2]
cellSize=[right_top[0]-left_bottom[0],right_top[1]-left_bottom[1]]
cellSizeMicron=[cellSize[0]*self.units[0],cellSize[1]*self.units[0]]
if cellSizeMicron is None:
print("Error: cell size for "+str(self.rootStructureName)+" not found (no boundary on layer "+str(borderlayer)+")")
return cellSizeMicron
def measureSize(self,startStructure):
self.rootStructureName=startStructure
self.populateCoordinateMap()
cellBoundary = [None, None, None, None]
for TreeUnit in self.xyTree:
cellBoundary=self.measureSizeInStructure(TreeUnit,cellBoundary)
cellSize=[cellBoundary[2]-cellBoundary[0],cellBoundary[3]-cellBoundary[1]]
cellSizeMicron=[cellSize[0]*self.units[0],cellSize[1]*self.units[0]]
return cellSizeMicron
def measureBoundary(self,startStructure):
self.rootStructureName=startStructure
self.populateCoordinateMap()
cellBoundary = [None, None, None, None]
for TreeUnit in self.xyTree:
cellBoundary=self.measureSizeInStructure(TreeUnit,cellBoundary)
return [[self.units[0]*cellBoundary[0],self.units[0]*cellBoundary[1]],
[self.units[0]*cellBoundary[2],self.units[0]*cellBoundary[3]]]
def measureSizeInStructure(self,structure,cellBoundary):
(structureName,structureOrigin,structureuVector,structurevVector)=structure
for boundary in self.structures[str(structureName)].boundaries:
left_bottom=boundary.coordinates[0]
right_top=boundary.coordinates[2]
thisBoundary=[left_bottom[0],left_bottom[1],right_top[0],right_top[1]]
thisBoundary=self.transformRectangle(thisBoundary,structureuVector,structurevVector)
thisBoundary=[thisBoundary[0]+structureOrigin[0],thisBoundary[1]+structureOrigin[1],
thisBoundary[2]+structureOrigin[0],thisBoundary[3]+structureOrigin[1]]
cellBoundary=self.updateBoundary(thisBoundary,cellBoundary)
return cellBoundary
def updateBoundary(self,thisBoundary,cellBoundary):
[left_bott_X,left_bott_Y,right_top_X,right_top_Y]=thisBoundary
# Explicitly check for None: a legitimate coordinate of 0 is falsy, so
# truth-testing would wrongly treat it as missing and reset the boundary
if None in cellBoundary:
cellBoundary=thisBoundary
else:
if cellBoundary[0]>left_bott_X:
cellBoundary[0]=left_bott_X
if cellBoundary[1]>left_bott_Y:
cellBoundary[1]=left_bott_Y
if cellBoundary[2]<right_top_X:
cellBoundary[2]=right_top_X
if cellBoundary[3]<right_top_Y:
cellBoundary[3]=right_top_Y
return cellBoundary
def getTexts(self, layer):
"""
Get all of the labels on a given layer only at the root level.
"""
text_list = []
for Text in self.structures[self.rootStructureName].texts:
if Text.drawingLayer == layer:
text_list.append(Text)
return text_list
def getPinShape(self, pin_name):
"""
Search for a pin label and return the largest enclosing rectangle
on the same layer as the pin label.
If there are multiple pin lists, return the max of each.
"""
pin_map = self.pins[pin_name]
max_pins = []
for pin_list in pin_map:
max_pin = None
max_area = 0
for pin in pin_list:
(layer,boundary) = pin
new_area = boundaryArea(boundary)
if max_pin is None or new_area>max_area:
max_pin = pin
max_area = new_area
max_pins.append(max_pin)
return max_pins
def getAllPinShapes(self, pin_name):
"""
Search for a pin label and return ALL the enclosing rectangles on the same layer
as the pin label.
"""
shape_list = []
pin_map = self.pins[pin_name]
for pin_list in pin_map:
for pin in pin_list:
(pin_layer, boundary) = pin
shape_list.append(pin)
return shape_list
def processLabelPins(self, layer):
"""
Find all text labels and create a map to a list of shapes that
they enclose on the given layer.
"""
# Get the labels on a layer in the root level
labels = self.getTexts(layer)
# Get all of the shapes on the layer at all levels
# and transform them to the current level
shapes = self.getAllShapes(layer)
for label in labels:
label_coordinate = label.coordinates[0]
user_coordinate = [x*self.units[0] for x in label_coordinate]
pin_shapes = []
for boundary in shapes:
if self.labelInRectangle(user_coordinate,boundary):
pin_shapes.append((layer, boundary))
label_text = label.textString
# Remove the trailing NUL padding if it exists
if label_text and label_text[-1] == "\x00":
label_text = label_text[0:-1]
if label_text not in self.pins:
self.pins[label_text] = []
self.pins[label_text].append(pin_shapes)
def getBlockages(self,layer):
"""
Return all blockages on a given layer in [coordinate 1, coordinate 2,...] format and
user units.
"""
blockages = []
shapes = self.getAllShapes(layer)
for boundary in shapes:
vectors = []
for i in range(0,len(boundary),2):
vectors.append(vector(boundary[i],boundary[i+1]))
blockages.append(vectors)
return blockages
def getAllShapes(self,layer):
"""
Return all shapes on a given layer in [llx, lly, urx, ury] format and user units for rectangles
and [coordinate 1, coordinate 2,...] format and user units for polygons.
"""
boundaries = set()
for TreeUnit in self.xyTree:
#print(TreeUnit[0])
boundaries.update(self.getShapesInStructure(layer,TreeUnit))
# Convert to user units
user_boundaries = []
for boundary in boundaries:
boundaries_list = []
for i in range(0,len(boundary)):
boundaries_list.append(boundary[i]*self.units[0])
user_boundaries.append(boundaries_list)
return user_boundaries
def getShapesInStructure(self,layer,structure):
"""
Go through all the shapes in a structure and return the list of shapes in
the form [llx, lly, urx, ury] for rectangles and [coordinate 1, coordinate 2,...] for polygons.
"""
(structureName,structureOrigin,structureuVector,structurevVector)=structure
#print(structureName,"u",structureuVector.transpose(),"v",structurevVector.transpose(),"o",structureOrigin.transpose())
boundaries = []
for boundary in self.structures[str(structureName)].boundaries:
if layer==boundary.drawingLayer:
if len(boundary.coordinates)!=5:
# if shape is a polygon (used in DFF)
boundaryPolygon = []
# Polygon is a list of coordinates going ccw
for coord in range(0,len(boundary.coordinates)):
boundaryPolygon.append(boundary.coordinates[coord][0])
boundaryPolygon.append(boundary.coordinates[coord][1])
# perform the rotation
boundaryPolygon=self.transformPolygon(boundaryPolygon,structureuVector,structurevVector)
# add the offset
polygon = []
for i in range(0,len(boundaryPolygon),2):
polygon.append(boundaryPolygon[i]+structureOrigin[0].item())
polygon.append(boundaryPolygon[i+1]+structureOrigin[1].item())
# make it a tuple
polygon = tuple(polygon)
boundaries.append(polygon)
else:
# else shape is a rectangle
left_bottom=boundary.coordinates[0]
right_top=boundary.coordinates[2]
# Rectangle is [leftx, bottomy, rightx, topy].
boundaryRect=[left_bottom[0],left_bottom[1],right_top[0],right_top[1]]
# perform the rotation
boundaryRect=self.transformRectangle(boundaryRect,structureuVector,structurevVector)
# add the offset and make it a tuple
boundaryRect=(boundaryRect[0]+structureOrigin[0].item(),boundaryRect[1]+structureOrigin[1].item(),
boundaryRect[2]+structureOrigin[0].item(),boundaryRect[3]+structureOrigin[1].item())
boundaries.append(boundaryRect)
return boundaries
def transformPolygon(self,originalPolygon,uVector,vVector):
"""
Transforms the coordinates of a polygon in space.
"""
polygon = []
newPolygon = []
for i in range(0,len(originalPolygon),2):
polygon.append(self.transformCoordinate([originalPolygon[i],originalPolygon[i+1]],uVector,vVector))
newPolygon.append(polygon[int(i/2)][0])
newPolygon.append(polygon[int(i/2)][1])
return newPolygon
def transformRectangle(self,originalRectangle,uVector,vVector):
"""
Transforms the four coordinates of a rectangle in space
and recomputes the left, bottom, right, top values.
"""
leftBottom=[originalRectangle[0],originalRectangle[1]]
leftBottom=self.transformCoordinate(leftBottom,uVector,vVector)
rightTop=[originalRectangle[2],originalRectangle[3]]
rightTop=self.transformCoordinate(rightTop,uVector,vVector)
left=min(leftBottom[0],rightTop[0])
bottom=min(leftBottom[1],rightTop[1])
right=max(leftBottom[0],rightTop[0])
top=max(leftBottom[1],rightTop[1])
newRectangle = [left,bottom,right,top]
return newRectangle
def transformCoordinate(self,coordinate,uVector,vVector):
"""
Rotate a coordinate in space.
"""
# MRG: 9/3/18 Incorrect matrix multiplication!
# This is fixed to be:
#   |u[0] v[0]|   |x|   |x'|
#   |u[1] v[1]| * |y| = |y'|
x=coordinate[0]*uVector[0][0]+coordinate[1]*vVector[0][0]
y=coordinate[0]*uVector[1][0]+coordinate[1]*vVector[1][0]
transformCoordinate=[x,y]
return transformCoordinate
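# Worked example (assumed column vectors): a 90-degree CCW rotation has
# uVector=[[0],[1]] and vVector=[[-1],[0]], so transformCoordinate([1,0],u,v)
# returns [0,1].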
def labelInRectangle(self,coordinate,rectangle):
"""
Checks if a coordinate is within a given rectangle. Rectangle is [leftx, bottomy, rightx, topy].
"""
coordinate_In_Rectangle_x_range=(coordinate[0]>=rectangle[0])&(coordinate[0]<=rectangle[2])
coordinate_In_Rectangle_y_range=(coordinate[1]>=rectangle[1])&(coordinate[1]<=rectangle[3])
if coordinate_In_Rectangle_x_range & coordinate_In_Rectangle_y_range:
return True
else:
return False
def boundaryArea(A):
"""
Returns boundary area for sorting.
"""
area_A=(A[2]-A[0])*(A[3]-A[1])
return area_A
|
{"hexsha": "f4248ebd9586d1bedaae1cd238f5bced8f981c27", "size": 41064, "ext": "py", "lang": "Python", "max_stars_repo_path": "compiler/gdsMill/gdsMill/vlsiLayout.py", "max_stars_repo_name": "mguthaus/OpenRAM", "max_stars_repo_head_hexsha": "46c86d3bb3df82e150532ede75cbf6180a697cfd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2016-11-06T20:53:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-03T18:57:39.000Z", "max_issues_repo_path": "compiler/gdsMill/gdsMill/vlsiLayout.py", "max_issues_repo_name": "mguthaus/OpenRAM", "max_issues_repo_head_hexsha": "46c86d3bb3df82e150532ede75cbf6180a697cfd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2016-11-15T19:28:25.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-20T19:23:52.000Z", "max_forks_repo_path": "compiler/gdsMill/gdsMill/vlsiLayout.py", "max_forks_repo_name": "mguthaus/OpenRAM", "max_forks_repo_head_hexsha": "46c86d3bb3df82e150532ede75cbf6180a697cfd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2016-11-09T16:02:45.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-23T17:07:59.000Z", "avg_line_length": 47.0917431193, "max_line_length": 159, "alphanum_fraction": 0.5885934152, "include": true, "reason": "import numpy,from numpy,from mpmath", "num_tokens": 8916}
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
# ----------------------------------------------------------------------
'''
@author: Mario Tambos
'''
from __future__ import division, print_function
from collections import deque
import bottleneck as bn
import inspect
from numbapro import autojit
import numexpr as ne
import numpy as np
import numpy.linalg as lnp
from scipy.stats import norm
from cdf_table import CDF_TABLE
import mgng
from ring_buffer import RingBuffer
class AMGNG:
def __init__(self, comparison_function, buffer_len, dimensions,
prest_gamma, prest_lmbda, prest_theta,
pst_gamma, pst_lmbda, pst_theta,
prest_alpha=0.5, prest_beta=0.5, prest_delta=0.5,
prest_eta=0.9995, prest_e_w=0.05, prest_e_n=0.0006,
pst_alpha=0.5, pst_beta=0.75, pst_delta=0.5,
pst_eta=0.9995, pst_e_w=0.05, pst_e_n=0.0006,
ma_window_len=None, ma_recalc_delay=1, ddof=1):
values = inspect.getargvalues(inspect.currentframe())[3]
print('Init parameters: {}'.format(values))
self.comparison_function = comparison_function
self.buffer_len = buffer_len
self.dimensions = dimensions
self.present = mgng.MGNG(dimensions=dimensions,
gamma=int(prest_gamma),
lmbda=int(prest_lmbda),
theta=int(prest_theta),
alpha=float(prest_alpha),
beta=float(prest_beta),
delta=float(prest_delta),
eta=float(prest_eta),
e_w=float(prest_e_w),
e_n=float(prest_e_n))
self.past = mgng.MGNG(dimensions=dimensions,
gamma=int(pst_gamma),
lmbda=int(pst_lmbda),
theta=int(pst_theta),
alpha=float(pst_alpha),
beta=float(pst_beta),
delta=float(pst_delta),
eta=float(pst_eta),
e_w=float(pst_e_w),
e_n=float(pst_e_n))
# self.buffer = deque(maxlen=self.buffer_len)
self.buffer = RingBuffer([[np.nan]*dimensions]*buffer_len)
if ma_window_len is None:
# self.ma_window = deque(maxlen=self.buffer_len)
self.ma_window = RingBuffer([np.nan]*buffer_len)
else:
# self.ma_window = deque(maxlen=ma_window_len)
self.ma_window = RingBuffer([np.nan]*ma_window_len)
self.ma_recalc_delay = ma_recalc_delay
self.ddof = ddof
self.anomaly_mean = None
self.anomaly_std = None
self.t = 0
def time_step(self, xt):
xt = np.reshape(xt, newshape=self.dimensions)
ret_val = 0.
self.buffer.append(xt)
self.present.time_step(xt)
if self.t >= self.buffer_len:
pst_xt = self.buffer[0]
self.past.time_step(pst_xt)
if self.t >= self.present.theta + self.past.theta:
ret_val = self.comparison_function(self.present, self.past,
self.present.alpha)
self.ma_window.append(ret_val)
if self.t % self.ma_recalc_delay == 0:
self.anomaly_mean = bn.nanmean(self.ma_window)
self.anomaly_std = bn.nanstd(self.ma_window, ddof=self.ddof)
if self.anomaly_std is None or self.t < len(self.ma_window):
anomaly_density = 0
else:
normalized_score = (ret_val - self.anomaly_mean)/self.anomaly_std
if -4 <= normalized_score <= 4:
anomaly_density = CDF_TABLE[round(normalized_score, 3)]
elif normalized_score > 4:
anomaly_density = 1.
else:
anomaly_density = 0.
self.t += 1
return ret_val, anomaly_density
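# Worked example of the density mapping above (illustrative values): with
# anomaly_mean=0.2, anomaly_std=0.1 and ret_val=0.5, normalized_score is 3.0 and
# anomaly_density = CDF_TABLE[3.0], ~0.9987 for a standard normal CDF.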
@autojit(target='cpu')
def compare_models(present_model, past_model, alpha):
tot = [0.]
ps_w = past_model.weights
ps_c = past_model.contexts
for pr_x in present_model.model.node:
pr_x_w = present_model.weights[pr_x]
pr_x_c = present_model.contexts[pr_x]
# dists = ne.evaluate('sum((1-alpha)*(pr_x_w - ps_w)**2 +'
# ' alpha*(pr_x_c - ps_c)**2, axis=1)')
# dists = np.add.reduce((1 - alpha)*(pr_x_w - ps_w)**2 +
# alpha*(pr_x_c - ps_c)**2, axis=1)
# Assumed fix: the original passed pr_x_w twice, which drops the context term
# from the distance (cf. the commented-out formula above).
dists = mgng.distances(pr_x_w, ps_w, ps_c, pr_x_c, alpha)
ps_x = np.nanargmin(dists)
tot[0] += dists[ps_x]  # accumulate into the cell; "tot += scalar" is invalid for a list
return tot[0] / len(present_model.model.nodes())
def compare_models_w(present_model, past_model):
# Fixed: this module-level function referenced an undefined "self";
# use the present_model argument instead.
tot_w = [0.]
ps_w = past_model.weights
for pr_x in present_model.model.nodes():
pr_x_w = present_model.get_node(pr_x)['w']
dists = ne.evaluate('sum((pr_x_w - ps_w)**2, axis=1)')
ps_x = np.nanargmin(dists)
tot_w[0] += dists[ps_x]
return tot_w[0] / len(present_model.model.nodes())
def compare_models_c(present_model, past_model):
# Fixed: same undefined "self" as above, and ps_x was used without ever
# being computed; the nanargmin line was missing.
tot_c = [0.]
ps_c = past_model.contexts
for pr_x in present_model.model.nodes():
pr_x_c = present_model.get_node(pr_x)['c']
dists = ne.evaluate('sum((pr_x_c - ps_c)**2, axis=1)')
ps_x = np.nanargmin(dists)
tot_c[0] += dists[ps_x]
return tot_c[0] / len(present_model.model.nodes())
def main(input_file, output_file, input_frame=None,
buffer_len=None, sampling_rate=None, index_col=None,
skip_rows=None, ma_window=None, ma_recalc_delay=1):
import pandas as pd
from datetime import datetime
if buffer_len is None:
buffer_len = 2000
if input_frame is None:
signal = pd.read_csv(input_file, index_col=index_col, parse_dates=True,
skiprows=skip_rows)
if sampling_rate is not None:
signal = signal.resample(sampling_rate)
else:
signal = input_frame
if ma_window is None:
ma_window = len(signal)
print(signal.head())
print(signal.tail())
print('Setting up model.')
amgng = AMGNG(comparison_function=compare_models,
buffer_len=buffer_len, dimensions=signal.shape[1],
prest_gamma=buffer_len//2, prest_lmbda=buffer_len*6,
prest_theta=buffer_len, pst_gamma=buffer_len//2,
pst_lmbda=buffer_len*6, pst_theta=buffer_len,
ma_window_len=ma_window, ma_recalc_delay=ma_recalc_delay)
scores = np.zeros(len(signal))
pscores = np.zeros(len(signal))
print('Processing {} rows'.format(len(signal)))
start = datetime.now()
for t, xt in enumerate(signal.values):
if len(signal) >= 100 and t % (len(signal)//100) == 0:
print('{}% done. Sample datapoint: {}'
.format(t // (len(signal)//100), xt))
scores[t], pscores[t] = amgng.time_step(xt)
time_taken = (datetime.now() - start).total_seconds()
print('It took {} seconds to process the signal'.format(time_taken))
signal['anomaly_score'] = pd.Series(scores, index=signal.index)
signal['anomaly_density'] = pd.Series(pscores, index=signal.index)
print('Writing results to {}'.format(output_file))
signal.to_csv(output_file)
if __name__ == '__main__':
import sys
args = sys.argv
if '--input_file' in args:
input_file = args[args.index('--input_file') + 1]
else:
input_file = 'samples.csv'
if '--output_file' in args:
output_file = args[args.index('--output_file') + 1]
else:
output_file = '{}_out.csv'.format(input_file)
if '--buffer_len' in args:
buffer_len = int(args[args.index('--buffer_len') + 1])
else:
buffer_len = None
if '--sampling_rate' in args:
sampling_rate = args[args.index('--sampling_rate') + 1]
else:
sampling_rate = None
if '--index_col' in args:
index_col = args[args.index('--index_col') + 1]
else:
index_col = None
if '--skip_rows' in args:
skip_rows = args[args.index('--skip_rows') + 1].split(',')
skip_rows = [int(r) for r in skip_rows]
else:
skip_rows = None
print(args)
main(input_file, output_file, buffer_len=buffer_len, sampling_rate=sampling_rate,
index_col=index_col, skip_rows=skip_rows) # keywords: positional values would shift into input_frame
|
{"hexsha": "343a5ed50697e8553d5383c1838dc94d1cdd834e", "size": 9059, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mgng/amgng.py", "max_stars_repo_name": "mtambos/online-anomaly-detection", "max_stars_repo_head_hexsha": "7a00d1bf3b39fcc0bcb17fc2211704a92c4e31c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-07-16T08:35:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-28T16:16:25.000Z", "max_issues_repo_path": "src/mgng/amgng.py", "max_issues_repo_name": "mtambos/online-anomaly-detection", "max_issues_repo_head_hexsha": "7a00d1bf3b39fcc0bcb17fc2211704a92c4e31c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-10-30T13:12:25.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-28T16:15:09.000Z", "max_forks_repo_path": "src/mgng/amgng.py", "max_forks_repo_name": "mtambos/online-anomaly-detection", "max_forks_repo_head_hexsha": "7a00d1bf3b39fcc0bcb17fc2211704a92c4e31c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2622222222, "max_line_length": 79, "alphanum_fraction": 0.5850535379, "include": true, "reason": "import numpy,from scipy,from numba,import numexpr", "num_tokens": 2128}
|
[STATEMENT]
lemma moebius_ocircline_id_moebius [simp]:
shows "moebius_ocircline id_moebius H = H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. moebius_ocircline id_moebius H = H
[PROOF STEP]
by (transfer, transfer) (force simp add: mat_adj_def mat_cnj_def)
|
{"llama_tokens": 117, "file": "Complex_Geometry_Oriented_Circlines", "length": 1}
|
SUBROUTINE Poly_Intercept (a, b, x, y, n, u, v, m, num, ierr)
!-----------------------------------------------------------------------
! INTERSECTION OF A STRAIGHT LINE
! AND POLYGONAL PATH
!-----------------------------------------------------------------------
! The polygon is defined by the set of points (xi, yi), i = 1, 2, ..., n.
! The straight line is from (a1,a2) to (b1,b2).
! On exit, the arrays U and V contain the num points at which the line
! crosses the polygon in order, provided that num <= m.
! Error indicator:
! ierr = 0 no error detected
! = 1 if a = b or n < 2 (degenerate input)
! = 2 U and V require more storage, i.e. num > m.
! = -i if the ith segment of the polygon is coincident with part of the
! line.
! Based upon routine PFIND from the NSWC Mathematics Library.
! Code converted using TO_F90 by Alan Miller
! Date: 2000-07-04 Time: 12:24:01
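! Example (hypothetical values): the horizontal line from a = (-0.5, 0.5) to
! b = (1.5, 0.5) crossing the unit square x = (/0.,1.,1.,0.,0./),
! y = (/0.,0.,1.,1.,0./) with n = 5 should give num = 2, the crossings of the
! two vertical edges at y = 0.5, with ierr = 0.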
IMPLICIT NONE
REAL, INTENT(IN) :: a(2)
REAL, INTENT(IN) :: b(2)
REAL, INTENT(IN) :: x(:)
REAL, INTENT(IN) :: y(:)
INTEGER, INTENT(IN) :: n
REAL, INTENT(OUT) :: u(:)
REAL, INTENT(OUT) :: v(:)
INTEGER, INTENT(IN) :: m
INTEGER, INTENT(OUT) :: num
INTEGER, INTENT(OUT) :: ierr
! Local variables
INTEGER :: i, ind, nm1
REAL :: d, diff, diff1, eps, h, hi, k, ki, onem, onep, p, q, s, &
t, tmax, tmin, tol, tol0
!----------------------
! ****** EPS IS A MACHINE DEPENDENT CONSTANT. EPS IS THE
! SMALLEST NUMBER SUCH THAT 1.0 + EPS .GT. 1.0 .
eps = EPSILON(1.0)
!----------------------
num = 0
IF (n < 2) GO TO 200
h = b(1) - a(1)
k = b(2) - a(2)
IF (h == 0.0 .AND. k == 0.0) GO TO 200
ierr = 0
nm1 = n - 1
tol = 4.0*eps
tol0 = 2.0*eps
onep = 1.0 + tol
onem = 0.5 + (0.5 - tol0)
ind = 0
DO i = 1, nm1
hi = x(i + 1) - x(i)
ki = y(i + 1) - y(i)
IF (hi == 0.0 .AND. ki == 0.0) CYCLE
ind = 1
! CHECK IF THE LINE FROM A TO B AND THE I-TH
! LINE IN THE PATH ARE PARALLEL
s = hi*k
t = h*ki
d = s - t
IF (ABS(d) <= tol*MAX(ABS(s), ABS(t))) GO TO 40
!-----------------------------------------------------------------------
! THE LINES ARE NOT PARALLEL
!-----------------------------------------------------------------------
p = x(i) - a(1)
q = y(i) - a(2)
s = hi*q
t = ki*p
diff = s - t
IF (ABS(diff) <= tol*MAX(ABS(s),ABS(t))) diff = 0.0
s = h*q
t = k*p
diff1 = s - t
IF (ABS(diff1) <= tol*MAX(ABS(s),ABS(t))) diff1 = 0.0
s = diff/d
t = diff1/d
IF (s < 0.0 .OR. s > onep) CYCLE
IF (t < 0.0 .OR. t > onep) CYCLE
IF (num > 0 .AND. t == 0.0) CYCLE
IF (s > 0.0) GO TO 20
! POINT A IS ON THE I-TH LINE
10 num = num + 1
IF (num > m) GO TO 210
u(num) = a(1)
v(num) = a(2)
CYCLE
! POINT B IS ON THE I-TH LINE
20 IF (s < onem) GO TO 30
21 num = num + 1
IF (num > m) GO TO 210
u(num) = b(1)
v(num) = b(2)
CYCLE
! THE INTERIOR OF THE LINE FROM A TO B
! INTERSECTS WITH THE I-TH LINE
30 num = num + 1
IF (num > m) GO TO 210
u(num) = a(1) + s*h
v(num) = a(2) + s*k
CYCLE
!-----------------------------------------------------------------------
! THE LINES ARE PARALLEL
!-----------------------------------------------------------------------
40 IF (ABS(hi) > ABS(ki)) GO TO 50
d = a(2) - y(i)
IF (ABS(d) <= tol0*MAX(ABS(a(2)),ABS(y(i)))) d = 0.0
s = d/ki
p = x(i) + s*hi
IF (ABS(a(1) - p) > tol*MAX(ABS(a(1)),ABS(p))) CYCLE
d = b(2) - y(i)
IF (ABS(d) <= tol0*MAX(ABS(b(2)),ABS(y(i)))) d = 0.0
t = d/ki
GO TO 60
50 d = a(1) - x(i)
IF (ABS(d) <= tol0*MAX(ABS(a(1)),ABS(x(i)))) d = 0.0
s = d/hi
p = y(i) + s*ki
IF (ABS(p - a(2)) > tol*MAX(ABS(p),ABS(a(2)))) CYCLE
d = b(1) - x(i)
IF (ABS(d) <= tol0*MAX(ABS(b(1)),ABS(x(i)))) d = 0.0
t = d/hi
! THE 2 LINES ARE PORTIONS OF THE SAME
! STRAIGHT INFINITE LINE
60 IF (s > 0.0 .AND. s < onem) GO TO 220
IF (t > 0.0 .AND. t < onem) GO TO 220
tmin = MIN(s,t)
tmax = MAX(s,t)
IF (tmax <= 0.0) GO TO 70
IF (tmin >= onem) GO TO 80
GO TO 220
70 IF (tmax < 0.0) CYCLE
IF (num > 0) CYCLE
IF (tmax == s) GO TO 10
GO TO 21
80 IF (tmin > 1.0) CYCLE
IF (tmin == s) GO TO 10
GO TO 21
END DO
IF (ind == 0) GO TO 200
IF (num < 2) RETURN
IF (u(num) == x(1) .AND. v(num) == y(1)) num = num - 1
RETURN
! ERROR RETURN
200 ierr = 1
RETURN
210 ierr = 2
num = num - 1
RETURN
220 ierr = -i
RETURN
END SUBROUTINE Poly_Intercept
|
{"hexsha": "7c6c83318ae9a9bc9a6704f94b5c4c6b405e4788", "size": 4795, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "source/amil/p_intcpt.f90", "max_stars_repo_name": "agforero/FTFramework", "max_stars_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-19T21:43:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T02:57:25.000Z", "max_issues_repo_path": "source/amil/p_intcpt.f90", "max_issues_repo_name": "agforero/fortran-testing-framework", "max_issues_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-07T21:17:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-09T02:18:07.000Z", "max_forks_repo_path": "source/amil/p_intcpt.f90", "max_forks_repo_name": "agforero/fortran-testing-framework", "max_forks_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T08:41:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:41:53.000Z", "avg_line_length": 25.1047120419, "max_line_length": 77, "alphanum_fraction": 0.4350364964, "num_tokens": 1778}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 07:02:29 2020
Tests the performance of Gaussian elimination for systems of a variety of sizes
@author: zettergm
"""
# imports
import numpy as np
import time
from elimtools import Gauss_elim,backsub
import matplotlib.pyplot as plt
from ittools import Jacobi
# define a range of system sizes to investigate
nvals=np.arange(50,550,50) # note arange excludes the end of the range
testtimes=np.zeros(nvals.size) # time taken for each system size
lrep=1 # number of times to repeat each step (for consistency)
# Perform the solves for each system size
for ind, n in enumerate(list(nvals)):
a0=-1*np.ones(n-1)
a1=4*np.ones(n)
a2=-1*np.ones(n-1)
A=np.diag(a0,-1)+np.diag(a1,0)+np.diag(a2,1) # tridiagonal, diagonally dominant system
b=np.random.randn(n,1)
for irep in range(0,lrep):
tstart=time.time()
[Amod,order]=Gauss_elim(A,b,False)
Amodsub=np.copy(Amod[order,:])
x=backsub(Amodsub,False)
tend=time.time()
testtimes[ind]=testtimes[ind]+(tend-tstart)/lrep
print("GE solution for system of size: ",n," took time: ", testtimes[ind])
# Plot the (average) time elapsed during each GE solve
plt.figure(1)
plt.plot(nvals,testtimes,'o')
plt.xlabel("system size (no. of unknowns)")
plt.ylabel("solution time (s)")
plt.title("Performance of self-coded solution")
# Solve same system sizes (diagonally dominant problem) with Jacobi iteration
tol=1e-5
testtimes=np.zeros(nvals.size) # time taken for each system size
for ind, n in enumerate(list(nvals)):
a0=-1*np.ones(n-1)
a1=4*np.ones(n)
a2=-1*np.ones(n-1)
A=np.diag(a0,-1)+np.diag(a1,0)+np.diag(a2,1) # tridiagonal, diagonally dominant system
b=np.random.randn(n,1)
for irep in range(0,lrep):
tstart=time.time()
x0=np.random.randn(n,1)
[x,iteration]=Jacobi(x0,A,b,tol,False)
tend=time.time()
testtimes[ind]=testtimes[ind]+(tend-tstart)/lrep
print("JI solution for system of size: ",n," took time: ", testtimes[ind])
# Plot the time elapses for each Jacobi solve
plt.plot(nvals,testtimes,'^')
plt.legend(["Gaussian elim.","Jacobi it."])
plt.show()
|
{"hexsha": "6c999dc2d2c89d6afab1076f0d8c1c97594510c0", "size": 2239, "ext": "py", "lang": "Python", "max_stars_repo_path": "linear_algebra/benchmark.py", "max_stars_repo_name": "microckey/EP501_python", "max_stars_repo_head_hexsha": "2de97cc99ccce380564510b240fcbfb136974a7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "linear_algebra/benchmark.py", "max_issues_repo_name": "microckey/EP501_python", "max_issues_repo_head_hexsha": "2de97cc99ccce380564510b240fcbfb136974a7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linear_algebra/benchmark.py", "max_forks_repo_name": "microckey/EP501_python", "max_forks_repo_head_hexsha": "2de97cc99ccce380564510b240fcbfb136974a7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3417721519, "max_line_length": 90, "alphanum_fraction": 0.6516301921, "include": true, "reason": "import numpy", "num_tokens": 647}
|
#include <boost/compute/algorithm/count_if.hpp>
|
{"hexsha": "a624572f643a47433b03f33f04c340c6077d350d", "size": 48, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_compute_algorithm_count_if.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_compute_algorithm_count_if.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_compute_algorithm_count_if.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 24.0, "max_line_length": 47, "alphanum_fraction": 0.8125, "num_tokens": 11}
|
import streamlit as st
from PIL import Image
import numpy as np
import cv2
import tensorflow
from tensorflow.keras.models import load_model
from scipy.spatial import distance
# from streamlit_webrtc import webrtc_streamer
################
##  Title   ##
################
# app = MultiApp()
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.markdown("If you must **mask**, I shall answer...")
st.markdown("---")
choice = "Image"
################
## model ##
################
# Load the model
model = load_model("binary_model.h5")
# Load the cascade
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
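# NOTE: "binary_model.h5" and "haarcascade_frontalface_default.xml" are assumed to
# sit in the working directory; a missing cascade file loads silently as an empty
# classifier, so a guard like "assert not face_cascade.empty()" fails fast.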
def predict(img):
# img = cv2.imread("./images/out.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # cv2.IMREAD_GRAYSCALE is an imread flag, not a conversion code
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=8)
if len(faces) > 0:
out_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #colored output image (imread gives BGR; st.image expects RGB)
# resize image
desired_height=1000
img_height = img.shape[0]
scale = desired_height / img_height
width = int(img.shape[1] * scale)
height = int(img.shape[0] * scale)
dim = (width, height)
out_img = cv2.resize(out_img, dim, interpolation = cv2.INTER_AREA)
for i in range(len(faces)):
(x,y,w,h) = faces[i]
x, y, w, h = int(x * scale), int(y * scale), int(w * scale), int(h * scale)
crop = out_img[y:y+h,x:x+w]
crop = cv2.resize(crop,(150,150))
crop = np.reshape(crop,[1,150,150,3])/255.0
mask_result = model.predict_classes(crop)
if mask_result == 0:
cv2.putText(out_img,"With Mask",(x, y-10), cv2.FONT_HERSHEY_DUPLEX,1,(102,204,0),2)
cv2.rectangle(out_img,(x,y),(x+w,y+h),(102,204,0),5)
elif mask_result == 1:
cv2.putText(out_img,"No Mask",(x, y-10), cv2.FONT_HERSHEY_DUPLEX,1,(255,51,51),2)
cv2.rectangle(out_img,(x,y),(x+w,y+h),(255,51,51),5)
# out_img = cv.cvtColor(out_img, cv.COLOR_BGR2RGB)
return out_img
else:
print("No Face!")
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # fall back to the unannotated image so st.image() is not handed None
################
## Home ##
################
if choice == "Home":
col1, col2, col3= st.beta_columns([1,8,1])
with col1:
st.write("")
with col2:
st.title('A Face Mask Detection System')
st.subheader('Built with OpenCV and Keras/TensorFlow leveraging Deep Learning and Computer Vision Concepts to detect face mask in still images as well as in real-time webcam streaming.')
st.write('You can choose the options from the left.')
st.write("")
with col3:
st.write("")
col1, col2, col3= st.beta_columns([3,6,2])
with col1:
st.write("")
with col2:
st.header('Upcoming Features: ')
st.markdown("- Webcam Mask Detection")
st.markdown("- Detecting Incorrect Mask")
with col3:
st.write("")
################
## Image ##
################
if choice == "Image":
st.subheader('Upload the image for detection')
uploaded_file = st.file_uploader("Choose an image...", type=["jpg","jpeg","png"]) #upload image
if uploaded_file is not None:
image = Image.open(uploaded_file) #making compatible to PIL
# image = np.array(Image.open(uploaded_file))
image.save('./images/out.jpg') # save() returns None, so don't reassign it to image
img = cv2.imread("./images/out.jpg")
st.write("")
st.write("**Image uploaded successfullly!**", use_column_width=True)
if st.button("Detect"):
out_img = predict(img)
st.image(out_img, caption="Processed Image", use_column_width=True)
else:
cover = Image.open('cover image.jpeg')
st.image(cover, caption="Mask me an Image", use_column_width=True)
################
## Webcam ##
################
if choice == "Webcam":
st.subheader('Real-time mask checking...')
# webrtc_streamer(key="example")
st.markdown("This feature will be available soon...")
# run = st.checkbox('Open Webcam')
# FRAME_WINDOW = st.image([])
# camera = cv2.VideoCapture(0)
# while run:
# # Reading image from video stream
# _, img = camera.read()
# # Call method we defined above
# # img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
# img = predict(img)
# # st.image(img, use_column_width=True)
# FRAME_WINDOW.image(img)
# if not run:
# st.write('Webcam has stopped.')
|
{"hexsha": "f67949dd867ab3aef2de447e95d9a070116cb107", "size": 4647, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "MaheshvaranS/deep_learning_project", "max_stars_repo_head_hexsha": "a772eef954235eee03502cc81a433856217e18b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "MaheshvaranS/deep_learning_project", "max_issues_repo_head_hexsha": "a772eef954235eee03502cc81a433856217e18b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "MaheshvaranS/deep_learning_project", "max_forks_repo_head_hexsha": "a772eef954235eee03502cc81a433856217e18b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4222222222, "max_line_length": 194, "alphanum_fraction": 0.5833871315, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1169}
|
\small
\section{GNU GENERAL PUBLIC LICENSE}
\label{sec:gpl}
Version 2, June 1991\\
\noindent
Copyright \copyright\ 1989, 1991 Free Software Foundation, Inc.\\
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
\noindent
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
\subsection*{Preamble}
The licenses for most software are designed to take away your freedom to
share and change it. By contrast, the GNU General Public License is intended
to guarantee your freedom to share and change free software---to make sure
the software is free for all its users. This General Public License applies
to most of the Free Software Foundation's software and to any other program
whose authors commit to using it. (Some other Free Software Foundation
software is covered by the GNU Library General Public License instead.) You
can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price. Our
General Public Licenses are designed to make sure that you have the freedom
to distribute copies of free software (and charge for this service if you
wish), that you receive source code or can get it if you want it, that you
can change the software or use pieces of it in new free programs; and that
you know you can do these things.
To protect your rights, we need to make restrictions that forbid anyone to
deny you these rights or to ask you to surrender the rights. These
restrictions translate to certain responsibilities for you if you distribute
copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether gratis or
for a fee, you must give the recipients all the rights that you have. You
must make sure that they, too, receive or can get the source code. And you
must show them these terms so they know their rights.
We protect your rights with two steps: (1) copyright the software, and (2)
offer you this license which gives you legal permission to copy, distribute
and/or modify the software.
Also, for each author's protection and ours, we want to make certain that
everyone understands that there is no warranty for this free software. If
the software is modified by someone else and passed on, we want its
recipients to know that what they have is not the original, so that any
problems introduced by others will not reflect on the original authors'
reputations.
Finally, any free program is threatened constantly by software patents. We
wish to avoid the danger that redistributors of a free program will
individually obtain patent licenses, in effect making the program
proprietary. To prevent this, we have made it clear that any patent must be
licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and modification
follow.
\subsection*{Terms and conditions for copying, distribution and modification}
\begin{enumerate}
\makeatletter \setcounter{\@listctr}{-1} \makeatother
\item [0.] This License applies to any program or other work which contains a
notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The ``Program'',
below, refers to any such program or work, and a ``work based on the
Program'' means either the Program or any derivative work under
copyright law: that is to say, a work containing the Program or a
portion of it, either verbatim or with modifications and/or translated
into another language. (Hereinafter, translation is included without
limitation in the term ``modification''.) Each licensee is addressed
as ``you''.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the Program
(independent of having been made by running the Program). Whether that
is true depends on what the Program does.
\item [1.] You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
\item [2.] You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
\begin{enumerate}
\item [(a)] You must cause the modified files to carry prominent
notices stating that you changed the files and the date of any
change.
\item [(b)] You must cause any work that you distribute or publish,
that in whole or in part contains or is derived from the Program
or any part thereof, to be licensed as a whole at no charge to
all third parties under the terms of this License.
\item [(c)] If the modified program normally reads commands
interactively when run, you must cause it, when started running
for such interactive use in the most ordinary way, to print or
display an announcement including an appropriate copyright notice
and a notice that there is no warranty (or else, saying that you
provide a warranty) and that users may redistribute the program
under these conditions, and telling the user how to view a copy
of this License. (Exception: if the Program itself is
interactive but does not normally print such an announcement,
your work based on the Program is not required to print an
announcement.)
\end{enumerate}
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of a
storage or distribution medium does not bring the other work under the
scope of this License.
\item [3.] You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
\begin{enumerate}
\item [(a)] Accompany it with the complete corresponding
machine-readable source code, which must be distributed under the
terms of Sections 1 and 2 above on a medium customarily used for
software interchange; or,
\item [(b)] Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
\item [(c)] Accompany it with the information you received as to the
offer to distribute corresponding source code. (This alternative
is allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
\end{enumerate}
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to control
compilation and installation of the executable. However, as a special
exception, the source code distributed need not include anything that
is normally distributed (in either source or binary form) with the
major components (compiler, kernel, and so on) of the operating system
on which the executable runs, unless that component itself accompanies
the executable.
If distribution of executable or object code is made by offering access
to copy from a designated place, then offering equivalent access to
copy the source code from the same place counts as distribution of the
source code, even though third parties are not compelled to copy the
source along with the object code.
\item [4.] You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt otherwise
to copy, modify, sublicense or distribute the Program is void, and will
automatically terminate your rights under this License. However,
parties who have received copies, or rights, from you under this
License will not have their licenses terminated so long as such parties
remain in full compliance.
\item [5.] You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying the
Program or works based on it.
\item [6.] Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
\item [7.] If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
\item [8.] If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License may
add an explicit geographical distribution limitation excluding those
countries, so that distribution is permitted only in or among countries
not thus excluded. In such case, this License incorporates the
limitation as if written in the body of this License.
\item [9.] The Free Software Foundation may publish revised and/or new
versions of the General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and
``any later version'', you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Program does not specify a
version number of this License, you may choose any version ever
published by the Free Software Foundation.
\item [10.] If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the
author to ask for permission. For software which is copyrighted by the
Free Software Foundation, write to the Free Software Foundation; we
sometimes make exceptions for this. Our decision will be guided by the
two goals of preserving the free status of all derivatives of our free
software and of promoting the sharing and reuse of software generally.
\begin{center}
NO WARRANTY
\end{center}
\bfseries
\item [11.] Because the Program is licensed free of charge, there is no
warranty for the Program, to the extent permitted by applicable law.
Except when otherwise stated in writing the copyright holders and/or
other parties provide the program ``as is'' without warranty of any
kind, either expressed or implied, including, but not limited to, the
implied warranties of merchantability and fitness for a particular
purpose. The entire risk as to the quality and performance of the
Program is with you. Should the Program prove defective, you assume
the cost of all necessary servicing, repair or correction.
\item [12.] In no event unless required by applicable law or agreed to in
writing will any copyright holder, or any other party who may modify
and/or redistribute the program as permitted above, be liable to you
for damages, including any general, special, incidental or
consequential damages arising out of the use or inability to use the
program (including but not limited to loss of data or data being
rendered inaccurate or losses sustained by you or third parties or a
failure of the Program to operate with any other programs), even if
such holder or other party has been advised of the possibility of such
damages.
\end{enumerate}
\begin{center}
\textbf{END OF TERMS AND CONDITIONS}
\end{center}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
|
{"hexsha": "043fa13087bd8c87ad49c10a60149a15e63f00d2", "size": 16726, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "csim/documentation/gpl.tex", "max_stars_repo_name": "Kirito56/lsm-matlab", "max_stars_repo_head_hexsha": "bf1631031a7c2cb709ca476e458f85faa2a1f84d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "csim/documentation/gpl.tex", "max_issues_repo_name": "Kirito56/lsm-matlab", "max_issues_repo_head_hexsha": "bf1631031a7c2cb709ca476e458f85faa2a1f84d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-02T09:00:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-02T09:00:37.000Z", "max_forks_repo_path": "csim/documentation/gpl.tex", "max_forks_repo_name": "Kirito56/lsm-matlab", "max_forks_repo_head_hexsha": "bf1631031a7c2cb709ca476e458f85faa2a1f84d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.5974842767, "max_line_length": 77, "alphanum_fraction": 0.7382518235, "num_tokens": 3448}
|
MODULE read_ncoda_prep
!===============================================================================
! This program reads the NCODA prep files:
! coda.MVO_prp.*
! coda.SSH_prp.*
! coda.MOV_ENS_obs.*
! coda.SSH_ENS_obs.*
!
! These contain the observations (prp) and the ensemble member innovations (ENS_obs).
!
! This routine reads both files and recovers the observation and model
! equivalent for a specified member.
!
! The obsop.f90 program uses this module to read these in directly.
!
! Observation errors are provided in the 'prp' files.
!
! Author:
! 12/22/17: Stephen G. Penny, University of Maryland, College Park
! Visiting Scientist, Naval Research Laboratory - Stennis Space Center
!
!===============================================================================
USE common, ONLY: r_sngl, r_size, slen
USE params_obs, ONLY: id_t_obs, id_s_obs, id_u_obs, id_v_obs, id_eta_obs
USE compute_profile_error, ONLY: cmpTz
IMPLICIT NONE
PUBLIC :: read_ncoda_prp, ncoda_prep_data
PRIVATE
INTEGER :: nobs, nobs0
INTEGER :: i,j,k,n
REAL(r_size) :: se0, seF
TYPE ncoda_prep_data
REAL(r_size) :: x_grd(3) ! longitude, latitude, and z depth (m)
REAL(r_size) :: value ! actual physical value of the parameter measured at this grid point
REAL(r_size) :: hxb ! Model equivalent ! (NEMO) added
REAL(r_size) :: lev ! grid z level
REAL(r_size) :: oerr ! observation standard error
REAL(r_size) :: hour ! Hour of observation
CHARACTER(9) :: plat ! Platform
CHARACTER(3) :: ptyp ! Profile type
CHARACTER(3) :: sid ! Source id
CHARACTER(1) :: qkey ! Quality key
INTEGER :: qc ! Quality control flag ! (NEMO) added
INTEGER :: typ ! observation variable type (e.g., PRES_TYPE)
INTEGER :: nlevs ! number of levels with data, counting from the top, including levels with missing data that have obs below them.
INTEGER :: id ! id number used in observation files to identify the observation
INTEGER :: rid ! id of the record, in order that it is read in
INTEGER :: lid ! id of the level for each record (upon skipping missing data for some levels)
LOGICAL :: kept ! tells letkf whether this obs is kept for assimilation
END TYPE ncoda_prep_data
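! Example usage (hypothetical file names and member number):
!   TYPE(ncoda_prep_data), ALLOCATABLE :: obs(:)
!   INTEGER :: nobs
!   CALL read_ncoda_prp('coda.SSH_prp.2017122200', 'coda.SSH_ENS_obs.2017122200', &
!                       obs, nobs, mem=1, obid_in=id_eta_obs)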
LOGICAL :: DO_READ_OBE ! Read the observation error estimate from the file
LOGICAL :: DO_COMPUTE_OBE ! Compute the observation error estimate using the NCEP approach w/ vertical gradient
INTEGER :: undef = 99999
!! Write letkf file
!do i=1,nobs
!!STEVE: the following are required for miyoshi's letkf observation input format:
!!1 = obelm
!!2 = lon
!!3 = lat
!!4 = lev
!!5 = value
!!6 = oberr
! wk(1) = obs_data(i)%typ
! wk(2) = obs_data(i)%x_grd(1)
! wk(3) = obs_data(i)%x_grd(2)
! wk(4) = obs_data(i)%x_grd(3)
! wk(5) = obs_data(i)%value
! wk(6) = obs_data(i)%oerr
! WRITE(fid) wk
!enddo
CONTAINS
SUBROUTINE read_ncoda_prp(infile,infile2,obs_data,nobs,mem,obid_in)
!===============================================================================
! Read the ncoda prep observation data and model equivalent
!===============================================================================
USE params_letkf, ONLY: nbv
CHARACTER(*), INTENT(IN) :: infile, infile2
TYPE(ncoda_prep_data), INTENT(OUT), ALLOCATABLE, DIMENSION(:) :: obs_data
INTEGER, INTENT(OUT) :: nobs
INTEGER, INTENT(IN) :: mem ! Specify ensemble member to process
INTEGER, INTENT(IN), OPTIONAL :: obid_in
! Other variables:
INTEGER :: i,j,k,n
REAL(r_sngl), ALLOCATABLE, DIMENSION(:) :: xlon, ylat, hour
CHARACTER(3), ALLOCATABLE, DIMENSION(:) :: ptyp, sid
CHARACTER(9), ALLOCATABLE, DIMENSION(:) :: plat
CHARACTER(1), ALLOCATABLE, DIMENSION(:) :: qkey
INTEGER, ALLOCATABLE, DIMENSION(:) :: qc
REAL(r_sngl), ALLOCATABLE, DIMENSION(:) :: ovals, mvals, stde
REAL(r_sngl), ALLOCATABLE, DIMENSION(:) :: depth ! profile depths (dimension: nobs)
REAL(r_sngl) :: val, hxb, err
INTEGER :: cnt, nlv
LOGICAL :: dodebug=.true.
REAL(r_sngl) :: missing_value=-999 ! Missing value flag in the data
REAL(r_sngl) :: max_value=999 ! Maximum value condition
REAL(r_sngl) :: max_depth = 9999 ! Maximum depth condition
INTEGER :: fid = 21
LOGICAL :: ex
INTEGER :: obid
REAL(r_sngl) :: obs_anm
REAL(r_sngl), ALLOCATABLE :: obs_xi (:)
REAL(r_sngl), ALLOCATABLE :: obs_yj (:)
REAL(r_sngl), ALLOCATABLE :: obs_zk (:)
INTEGER, ALLOCATABLE :: otype (:)
INTEGER, ALLOCATABLE :: otime (:)
INTEGER, ALLOCATABLE :: odate (:)
REAL(r_sngl), ALLOCATABLE :: ens_anm(:)
REAL(r_sngl), ALLOCATABLE :: dum_anm(:)
INTEGER, ALLOCATABLE :: etyp (:)
INTEGER, ALLOCATABLE :: obs_var (:)
!var is the variable 1-6 according to:
! data var_lbl / 'seatmp', 'salint', 'geoptl',
! * 'uucurr', 'vvcurr', 'lyrprs' /
! Check optional argument
if (present(obid_in)) then
obid = obid_in
else
obid = id_t_obs ! (Default, reads both id_t_obs and id_s_obs)
endif
!-------------------------------------------------------------------------------
! Open obs prep file
!-------------------------------------------------------------------------------
inquire (file=trim(infile), exist=ex)
if (ex) then
open (unit=fid,file=trim(infile),status='old',form='unformatted')
! rewind(fid)
else
print *, "infile = ", trim(infile)
STOP('ERROR: prp file is missing. EXITING...')
endif
!-------------------------------------------------------------------------------
! Read the number of obs
!-------------------------------------------------------------------------------
read (fid) cnt
nobs=cnt !STEVE: ?
print *, 'read innovation file 1: ', trim(infile)
print *, "nobs, cnt, cnt-nobs = ", nobs, cnt, cnt-nobs
!-------------------------------------------------------------------------------
! Allocate arrays for obs data
!-------------------------------------------------------------------------------
ALLOCATE (ovals(nobs))
ALLOCATE (stde(nobs))
ALLOCATE (xlon(nobs))
ALLOCATE (ylat(nobs))
ALLOCATE (depth(nobs))
ALLOCATE (hour(nobs))
ALLOCATE (etyp(nobs))
ALLOCATE (qc(nobs))
ALLOCATE (obs_xi(nobs))
ALLOCATE (obs_yj(nobs))
ALLOCATE (obs_zk(nobs))
ALLOCATE (obs_var(nobs))
ALLOCATE (otype(nobs))
ALLOCATE (otime(nobs))
ALLOCATE (odate(nobs))
!-------------------------------------------------------------------------------
! Read the obs data
!-------------------------------------------------------------------------------
WRITE(6,*) "Reading obs data, cnt = ", cnt
if (cnt .gt. 0) then
do i = 1, nobs
! print *, "start reading obs i = ", i
read (fid) odate(i), otime(i), obs_var(i), otype(i), &
& ovals(i), obs_anm, stde(i), depth(i), ylat(i), &
& xlon(i), obs_xi(i), obs_yj(i), obs_zk(i)
hour(i) = otime(i)
! Specify the observation type:
qc(i) = 1
if (obid == id_t_obs) then
select case (obs_var(i))
case(1)
etyp(i) = id_t_obs
case(2)
etyp(i) = id_s_obs
case default
! print *, "Observation variable type not recognized: ", obs_var(i)
! print *, "REMOVING..."
qc(i) = 0
end select
else
! Use this for ssh or sea ice ncoda-prep data:
etyp(i) = obid
endif
enddo
print *, "final nobs, xlon,ylat,depth = ", nobs, xlon(nobs),ylat(nobs),depth(nobs)
print *, "obs_var,ovals,stde,obs_anm = ", obs_var(nobs),ovals(nobs),stde(nobs),obs_anm
else
WRITE (6,*) "cnt==0, EXITING..."
STOP(13)
endif
!-------------------------------------------------------------------------------
! Close the obs file
!-------------------------------------------------------------------------------
close(fid)
!-------------------------------------------------------------------------------
! Open ens obs prep file
!-------------------------------------------------------------------------------
inquire (file=trim(infile2), exist=ex)
if (ex) then
open (unit=fid,file=trim(infile2),status='old',form='unformatted')
! rewind(fid)
else
print *, "infile2 = ", trim(infile2)
STOP('ERROR: ENS_obs file is missing. EXITING...')
endif
print *, 'read innovation file 2: ', trim(infile2)
!-------------------------------------------------------------------------------
! Allocate arrays for innovation data
!-------------------------------------------------------------------------------
ALLOCATE (ens_anm(nobs))
ALLOCATE (dum_anm(4)) !nbv))
!-------------------------------------------------------------------------------
! Read the ensemble innovation file
!-------------------------------------------------------------------------------
if (cnt .gt. 0) then
do i = 1, nobs
! print *, "start reading ens_anm i = ", i
read (fid) dum_anm
ens_anm(i) = dum_anm(mem)
! print *, "after reading ens_anm i = ", i
enddo
print *, "ens_anm(1) = ", ens_anm(1)
print *, "ens_anm(nobs-1) = ", ens_anm(nobs-1)
endif
!-------------------------------------------------------------------------------
! Close the ensemble innovation file
!-------------------------------------------------------------------------------
close(fid)
! CONVERT data to ncoda_prep_data format:
print *, "Finished reading NCODA prep files, formatting data..."
! Loop through all profiles and create a new observation for each unique data point
!nobs = cnt * nlv
print *, "ALLOCATING obs_data with nobs = ", nobs
ALLOCATE(obs_data(nobs))
n = 0
do i=1,cnt
if (qc(i)==0) CYCLE ! Skip any obs deemed unusable
if (dodebug) print *, "i = ", i
! Assign observation value:
val = ovals(i)
err = stde(i)
! if (etyp(i)==id_eta_obs) then
! hxb = ens_anm(i)
! else
hxb = val - ens_anm(i)
! endif
if (abs(val) < abs(max_value) .and. abs(val) < abs(missing_value)-1 .and. depth(i) < max_depth) then
n = n+1
if (dodebug) print *, "n,lon,lat,depth,hour,val,err,hxb,qc = ", n,xlon(i),ylat(i),depth(i),hour(i),val,err,hxb,qc(i)
obs_data(n)%typ = etyp(i)
obs_data(n)%x_grd(1) = xlon(i)
obs_data(n)%x_grd(2) = ylat(i)
obs_data(n)%x_grd(3) = depth(i)
obs_data(n)%hour = hour(i)
obs_data(n)%value = val
obs_data(n)%hxb = hxb
obs_data(n)%oerr = err
obs_data(n)%rid = obs_var(i) !STEVE: ?
obs_data(n)%lid = obs_zk(i) ! level id
endif
enddo
nobs = n
if (dodebug) print *, "nobs = ", nobs
! Explicitly deallocate temporary arrays
if (ALLOCATED(depth)) DEALLOCATE(depth)
if (ALLOCATED(xlon)) DEALLOCATE(xlon)
if (ALLOCATED(ylat)) DEALLOCATE(ylat)
if (ALLOCATED(hour)) DEALLOCATE(hour)
if (ALLOCATED(qc)) DEALLOCATE(qc)
if (ALLOCATED(ovals)) DEALLOCATE(ovals)
if (ALLOCATED(ens_anm)) DEALLOCATE(ens_anm)
if (ALLOCATED(stde)) DEALLOCATE(stde)
if (dodebug) print *, "Temporary arrays deallocated."
if (dodebug) print *, "Returning from read_ncoda_prp..."
END SUBROUTINE read_ncoda_prp
END MODULE read_ncoda_prep
|
{"hexsha": "4fde395f2f3f9cff1e11a5a1c12ae84b057d3b11", "size": 10814, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/obs/read_ncoda_prep.f90", "max_stars_repo_name": "GEOS-ESM/Ocean-LETKF", "max_stars_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-12-31T15:40:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T13:44:20.000Z", "max_issues_repo_path": "src/obs/read_ncoda_prep.f90", "max_issues_repo_name": "GEOS-ESM/Ocean-LETKF", "max_issues_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/obs/read_ncoda_prep.f90", "max_forks_repo_name": "GEOS-ESM/Ocean-LETKF", "max_forks_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-14T18:46:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T18:46:56.000Z", "avg_line_length": 33.8996865204, "max_line_length": 133, "alphanum_fraction": 0.5559459959, "num_tokens": 2986}
|
import numpy as np
from operator import itemgetter
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.ensemble.boosting import HeteroBoostingGuest
from federatedml.param.boosting_param import HeteroSecureBoostParam, DecisionTreeParam
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.util.anonymous_generator import generate_anonymous
from federatedml.statistic.data_overview import with_weight, get_max_sample_weight
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_importance import FeatureImportance
from federatedml.transfer_variable.transfer_class.hetero_secure_boosting_predict_transfer_variable import \
HeteroSecureBoostTransferVariable
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core import tree_plan as plan
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from federatedml.ensemble.secureboost.secureboost_util.tree_model_io import load_hetero_tree_learner, \
produce_hetero_tree_learner
from federatedml.ensemble.secureboost.secureboost_util.boosting_tree_predict import sbt_guest_predict, \
mix_sbt_guest_predict, EINI_guest_predict
from federatedml.ensemble.secureboost.secureboost_util.subsample import goss_sampling
class HeteroSecureBoostingTreeGuest(HeteroBoostingGuest):
def __init__(self):
super(HeteroSecureBoostingTreeGuest, self).__init__()
self.tree_param = DecisionTreeParam() # decision tree param
self.use_missing = False
self.zero_as_missing = False
self.cur_epoch_idx = -1
self.grad_and_hess = None
self.feature_importances_ = {}
self.model_param = HeteroSecureBoostParam()
self.complete_secure = False
self.data_alignment_map = {}
self.hetero_sbt_transfer_variable = HeteroSecureBoostTransferVariable()
self.model_name = 'HeteroSecureBoost'
self.max_sample_weight = 1
self.max_sample_weight_computed = False
self.re_compute_goss_sample_weight = False
self.cipher_compressing = False
self.enable_goss = False # GOSS
self.top_rate = None
self.other_rate = None
self.new_ver = True
self.boosting_strategy = consts.STD_TREE # default work mode is std
# fast sbt param
self.tree_num_per_party = 1
self.guest_depth = 0
self.host_depth = 0
self.init_tree_plan = False
self.tree_plan = []
# multi-classification mode
self.multi_mode = consts.SINGLE_OUTPUT
# EINI predict param
self.EINI_inference = False
self.EINI_random_mask = False
def _init_model(self, param: HeteroSecureBoostParam):
super(HeteroSecureBoostingTreeGuest, self)._init_model(param)
self.tree_param = param.tree_param
self.use_missing = param.use_missing
self.zero_as_missing = param.zero_as_missing
self.complete_secure = param.complete_secure
self.enable_goss = param.run_goss
self.top_rate = param.top_rate
self.other_rate = param.other_rate
self.cipher_compressing = param.cipher_compress
self.new_ver = param.new_ver
self.EINI_inference = param.EINI_inference
self.EINI_random_mask = param.EINI_random_mask
# fast sbt param
self.tree_num_per_party = param.tree_num_per_party
self.boosting_strategy = param.boosting_strategy
self.guest_depth = param.guest_depth
self.host_depth = param.host_depth
if self.use_missing:
self.tree_param.use_missing = self.use_missing
self.tree_param.zero_as_missing = self.zero_as_missing
self.multi_mode = param.multi_mode
def process_sample_weights(self, grad_and_hess, data_with_sample_weight=None):
# add sample weights to gradient and hessian
if data_with_sample_weight is not None:
if with_weight(data_with_sample_weight):
LOGGER.info('weighted sample detected, multiply g/h by weights')
grad_and_hess = grad_and_hess.join(data_with_sample_weight,
lambda v1, v2: (v1[0] * v2.weight, v1[1] * v2.weight))
if not self.max_sample_weight_computed:
self.max_sample_weight = get_max_sample_weight(data_with_sample_weight)
LOGGER.info('max sample weight is {}'.format(self.max_sample_weight))
self.max_sample_weight_computed = True
return grad_and_hess
def get_tree_plan(self, idx):
if not self.init_tree_plan:
tree_plan = plan.create_tree_plan(self.boosting_strategy, k=self.tree_num_per_party,
tree_num=self.boosting_round,
host_list=self.component_properties.host_party_idlist,
complete_secure=self.complete_secure)
self.tree_plan += tree_plan
self.init_tree_plan = True
LOGGER.info('tree plan is {}'.format(self.tree_plan))
return self.tree_plan[idx]
def check_host_number(self, tree_type):
host_num = len(self.component_properties.host_party_idlist)
LOGGER.info('host number is {}'.format(host_num))
if tree_type == plan.tree_type_dict['layered_tree']:
assert host_num == 1, 'only 1 host party is allowed in layered mode'
def compute_grad_and_hess(self, y_hat, y, data_with_sample_weight=None):
LOGGER.info("compute grad and hess")
loss_method = self.loss
if self.task_type == consts.CLASSIFICATION:
grad_and_hess = y.join(y_hat, lambda y, f_val:
(loss_method.compute_grad(y, loss_method.predict(f_val)),
loss_method.compute_hess(y, loss_method.predict(f_val))))
else:
grad_and_hess = y.join(y_hat, lambda y, f_val:
(loss_method.compute_grad(y, f_val),
loss_method.compute_hess(y, f_val)))
grad_and_hess = self.process_sample_weights(grad_and_hess, data_with_sample_weight)
return grad_and_hess
@staticmethod
def get_grad_and_hess(g_h, dim=0):
LOGGER.info("get grad and hess of tree {}".format(dim))
grad_and_hess_subtree = g_h.mapValues(
lambda grad_and_hess: (grad_and_hess[0][dim], grad_and_hess[1][dim]))
return grad_and_hess_subtree
def update_feature_importance(self, tree_feature_importance):
for fid in tree_feature_importance:
if fid not in self.feature_importances_:
self.feature_importances_[fid] = tree_feature_importance[fid]
else:
self.feature_importances_[fid] += tree_feature_importance[fid]
LOGGER.debug('cur feature importance {}'.format(self.feature_importances_))
def goss_sample(self):
sampled_gh = goss_sampling(self.grad_and_hess, self.top_rate, self.other_rate)
return sampled_gh
def on_epoch_prepare(self, epoch_idx):
"""
Parameters
----------
epoch_idx cur epoch idx
Returns None
-------
Prepare g, h, sample weights, sampling at the beginning of every epoch
"""
if self.cur_epoch_idx != epoch_idx:
self.grad_and_hess = self.compute_grad_and_hess(self.y_hat, self.y, self.data_inst)
self.cur_epoch_idx = epoch_idx
# goss sampling
if self.enable_goss:
if not self.re_compute_goss_sample_weight:
self.max_sample_weight = self.max_sample_weight * ((1 - self.top_rate) / self.other_rate)
self.grad_and_hess = self.goss_sample()
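            # Editorial note: the rescaling above follows the LightGBM-style GOSS
            # weight amplification (1 - top_rate) / other_rate. For example, with
            # top_rate=0.2 and other_rate=0.1 the retained small-gradient samples
            # (and hence max_sample_weight) are scaled by (1 - 0.2) / 0.1 = 8.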
def preprocess(self):
if self.multi_mode == consts.MULTI_OUTPUT:
# re-set dimension
self.booster_dim = 1
def postprocess(self):
host_feature_importance_list = self.hetero_sbt_transfer_variable.host_feature_importance.get(idx=-1)
for i in host_feature_importance_list:
self.feature_importances_.update(i)
LOGGER.debug('self feature importance is {}'.format(self.feature_importances_))
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
self.on_epoch_prepare(epoch_idx)
if self.multi_mode == consts.MULTI_OUTPUT:
g_h = self.grad_and_hess
else:
g_h = self.get_grad_and_hess(self.grad_and_hess, booster_dim)
flow_id = self.generate_flowid(epoch_idx, booster_dim)
complete_secure = True if (epoch_idx == 0 and self.complete_secure) else False
tree_type, target_host_id = None, None
fast_sbt = (self.boosting_strategy == consts.MIX_TREE or self.boosting_strategy == consts.LAYERED_TREE)
if fast_sbt:
tree_type, target_host_id = self.get_tree_plan(epoch_idx)
self.check_host_number(tree_type)
tree = produce_hetero_tree_learner(role=self.role, tree_param=self.tree_param, flow_id=flow_id,
data_bin=self.data_bin, bin_split_points=self.bin_split_points,
bin_sparse_points=self.bin_sparse_points, task_type=self.task_type,
valid_features=self.sample_valid_features(),
host_party_list=self.component_properties.host_party_idlist,
runtime_idx=self.component_properties.local_partyid,
cipher_compress=self.cipher_compressing,
g_h=g_h, encrypter=self.encrypter,
goss_subsample=self.enable_goss,
complete_secure=complete_secure, max_sample_weights=self.max_sample_weight,
fast_sbt=fast_sbt, tree_type=tree_type, target_host_id=target_host_id,
guest_depth=self.guest_depth, host_depth=self.host_depth,
mo_tree=(self.multi_mode == consts.MULTI_OUTPUT),
class_num=len(self.classes_) if len(self.classes_) > 2 else 1 # mo parameter
)
tree.fit()
self.update_feature_importance(tree.get_feature_importance())
return tree
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
flow_id = self.generate_flowid(epoch_idx, booster_idx)
runtime_idx = self.component_properties.local_partyid
host_list = self.component_properties.host_party_idlist
fast_sbt = (self.boosting_strategy == consts.MIX_TREE or self.boosting_strategy == consts.LAYERED_TREE)
tree_type, target_host_id = None, None
if fast_sbt:
tree_type, target_host_id = self.get_tree_plan(epoch_idx)
tree = load_hetero_tree_learner(role=self.role, tree_param=self.tree_param, model_meta=model_meta,
model_param=model_param,
flow_id=flow_id, runtime_idx=runtime_idx, host_party_list=host_list,
fast_sbt=fast_sbt, tree_type=tree_type, target_host_id=target_host_id)
return tree
def generate_summary(self) -> dict:
summary = {'loss_history': self.history_loss,
'best_iteration': self.callback_variables.best_iteration,
'feature_importance': self.make_readable_feature_importance(self.feature_name_fid_mapping,
self.feature_importances_),
'validation_metrics': self.callback_variables.validation_summary,
'is_converged': self.is_converged}
return summary
@staticmethod
def make_readable_feature_importance(fid_mapping, feature_importances):
"""
replace feature id by real feature name
"""
new_fi = {}
for id_ in feature_importances:
if isinstance(id_, tuple):
if consts.GUEST in id_[0]:
new_fi[fid_mapping[id_[1]]] = feature_importances[id_].importance
else:
role, party_id = id_[0].split(':')
new_fi[generate_anonymous(role=role, fid=id_[1], party_id=party_id)] = feature_importances[
id_].importance
else:
new_fi[fid_mapping[id_]] = feature_importances[id_].importance
return new_fi
@assert_io_num_rows_equal
def predict(self, data_inst, ret_format='std'):
# standard format, leaf indices, raw score
assert ret_format in ['std', 'leaf', 'raw'], 'illegal ret format'
LOGGER.info('running prediction')
cache_dataset_key = self.predict_data_cache.get_data_key(data_inst)
processed_data = self.data_and_header_alignment(data_inst)
last_round = self.predict_data_cache.predict_data_last_round(cache_dataset_key)
self.sync_predict_round(last_round)
rounds = len(self.boosting_model_list) // self.booster_dim
trees = []
LOGGER.debug('round involved in prediction {}, last round is {}, data key {}'
.format(list(range(last_round, rounds)), last_round, cache_dataset_key))
for idx in range(last_round, rounds):
for booster_idx in range(self.booster_dim):
tree = self.load_learner(self.booster_meta,
self.boosting_model_list[idx * self.booster_dim + booster_idx],
idx, booster_idx)
trees.append(tree)
predict_cache = None
tree_num = len(trees)
if last_round != 0:
predict_cache = self.predict_data_cache.predict_data_at(cache_dataset_key, min(rounds, last_round))
LOGGER.info('load predict cache of round {}'.format(min(rounds, last_round)))
if tree_num == 0 and predict_cache is not None and not (ret_format == 'leaf'):
return self.score_to_predict_result(data_inst, predict_cache)
if self.boosting_strategy == consts.MIX_TREE:
predict_rs = mix_sbt_guest_predict(
processed_data,
self.hetero_sbt_transfer_variable,
trees,
self.learning_rate,
self.init_score,
self.booster_dim,
predict_cache,
pred_leaf=(
ret_format == 'leaf'))
else:
if self.EINI_inference and not self.on_training: # EINI is for inference stage
sitename = self.role + ':' + str(self.component_properties.local_partyid)
predict_rs = EINI_guest_predict(
processed_data,
trees,
self.learning_rate,
self.init_score,
self.booster_dim,
self.encrypt_param.key_length,
self.hetero_sbt_transfer_variable,
sitename,
self.component_properties.host_party_idlist,
predict_cache,
False)
else:
predict_rs = sbt_guest_predict(
processed_data,
self.hetero_sbt_transfer_variable,
trees,
self.learning_rate,
self.init_score,
self.booster_dim,
predict_cache,
pred_leaf=(
ret_format == 'leaf'))
if ret_format == 'leaf':
return predict_rs # predict result is leaf position
self.predict_data_cache.add_data(cache_dataset_key, predict_rs, cur_boosting_round=rounds)
LOGGER.debug('adding predict rs {}'.format(predict_rs))
LOGGER.debug('last round is {}'.format(self.predict_data_cache.predict_data_last_round(cache_dataset_key)))
if ret_format == 'raw':
return predict_rs
else:
return self.score_to_predict_result(data_inst, predict_rs)
def load_feature_importance(self, feat_importance_param):
param = list(feat_importance_param)
rs_dict = {}
for fp in param:
key = (fp.sitename, fp.fid)
importance = FeatureImportance()
importance.from_protobuf(fp)
rs_dict[key] = importance
self.feature_importances_ = rs_dict
def get_model_meta(self):
model_meta = BoostingTreeModelMeta()
model_meta.tree_meta.CopyFrom(self.booster_meta)
model_meta.learning_rate = self.learning_rate
model_meta.num_trees = self.boosting_round
model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective,
param=self.objective_param.params))
model_meta.use_missing = self.use_missing
model_meta.zero_as_missing = self.zero_as_missing
model_meta.task_type = self.task_type
model_meta.n_iter_no_change = self.n_iter_no_change
model_meta.tol = self.tol
model_meta.boosting_strategy = self.boosting_strategy
model_meta.module = "HeteroSecureBoost"
meta_name = consts.HETERO_SBT_GUEST_MODEL + "Meta"
return meta_name, model_meta
def get_model_param(self):
model_param = BoostingTreeModelParam()
model_param.tree_num = len(self.boosting_model_list)
model_param.tree_dim = self.booster_dim
model_param.trees_.extend(self.boosting_model_list)
model_param.init_score.extend(self.init_score)
model_param.losses.extend(self.history_loss)
model_param.classes_.extend(map(str, self.classes_))
model_param.num_classes = self.num_classes
if self.boosting_strategy == consts.STD_TREE:
model_param.model_name = consts.HETERO_SBT
elif self.boosting_strategy == consts.LAYERED_TREE:
model_param.model_name = consts.HETERO_FAST_SBT_LAYERED
elif self.boosting_strategy == consts.MIX_TREE:
model_param.model_name = consts.HETERO_FAST_SBT_MIX
model_param.best_iteration = self.callback_variables.best_iteration
feature_importances = list(self.feature_importances_.items())
feature_importances = sorted(feature_importances, key=itemgetter(1), reverse=True)
feature_importance_param = []
for (sitename, fid), importance in feature_importances:
if consts.GUEST in sitename:
fullname = self.feature_name_fid_mapping[fid]
else:
role_name, party_id = sitename.split(':')
fullname = generate_anonymous(fid=fid, party_id=party_id, role=role_name)
feature_importance_param.append(FeatureImportanceInfo(sitename=sitename, # sitename to distinguish sites
fid=fid,
importance=importance.importance,
fullname=fullname,
importance2=importance.importance_2,
main=importance.main_type
))
model_param.feature_importances.extend(feature_importance_param)
model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)
model_param.tree_plan.extend(plan.encode_plan(self.tree_plan))
param_name = consts.HETERO_SBT_GUEST_MODEL + "Param"
return param_name, model_param
def set_model_meta(self, model_meta):
if not self.is_warm_start:
# these hyper parameters are not needed in warm start setting
self.boosting_round = model_meta.num_trees
self.tol = model_meta.tol
self.n_iter_no_change = model_meta.n_iter_no_change
self.bin_num = model_meta.quantile_meta.bin_num
self.learning_rate = model_meta.learning_rate
self.booster_meta = model_meta.tree_meta
self.objective_param.objective = model_meta.objective_meta.objective
self.objective_param.params = list(model_meta.objective_meta.param)
self.task_type = model_meta.task_type
self.boosting_strategy = model_meta.boosting_strategy
def set_model_param(self, model_param):
self.boosting_model_list = list(model_param.trees_)
self.init_score = np.array(list(model_param.init_score))
self.history_loss = list(model_param.losses)
self.classes_ = list(map(int, model_param.classes_))
self.booster_dim = model_param.tree_dim
self.num_classes = model_param.num_classes
self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)
self.load_feature_importance(model_param.feature_importances)
# initialize loss function
self.loss = self.get_loss_function()
# init model tree plan if it exists
self.tree_plan = plan.decode_plan(model_param.tree_plan)
|
{"hexsha": "cd9992538227dece445507ef6af2e69380541467", "size": 22084, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/federatedml/ensemble/secureboost/hetero_secoreboost/hetero_secureboost_guest.py", "max_stars_repo_name": "rubenlozanoaht3m/DataDogm", "max_stars_repo_head_hexsha": "cd605e8072cca31e8418830c3300657ae2fa5b16", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 715, "max_stars_repo_stars_event_min_datetime": "2019-01-24T10:52:03.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-31T12:19:22.000Z", "max_issues_repo_path": "python/federatedml/ensemble/secureboost/hetero_secoreboost/hetero_secureboost_guest.py", "max_issues_repo_name": "rubenlozanoaht3m/DataDogm", "max_issues_repo_head_hexsha": "cd605e8072cca31e8418830c3300657ae2fa5b16", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 270, "max_issues_repo_issues_event_min_datetime": "2019-02-11T02:57:36.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-29T11:22:33.000Z", "max_forks_repo_path": "python/federatedml/ensemble/secureboost/hetero_secoreboost/hetero_secureboost_guest.py", "max_forks_repo_name": "rubenlozanoaht3m/DataDogm", "max_forks_repo_head_hexsha": "cd605e8072cca31e8418830c3300657ae2fa5b16", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 200, "max_forks_repo_forks_event_min_datetime": "2019-01-26T14:21:35.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-01T01:14:36.000Z", "avg_line_length": 46.2976939203, "max_line_length": 120, "alphanum_fraction": 0.6402825575, "include": true, "reason": "import numpy", "num_tokens": 4383}
|
from cnntools import cnntools
from torchvision import models, transforms
from os.path import join as pjoin
import torch
import numpy as np
import pandas as pd
from scipy import stats, linalg
import os
from dnnbrain.dnn import models as dnn_models
import torch.nn as nn
from PIL import Image
from ATT.iofunc import iofiles
from sklearn.decomposition import PCA
import pickle
def avg_by_imglabel(imgname, actval, label=0):
"""
"""
lblidx = np.array([imgname[i][1]==label for i in range(len(imgname))])
return actval[lblidx,:].mean(axis=0)
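# Editorial illustration (hedged): with imgname = [('a.jpg', 0), ('b.jpg', 0),
# ('c.jpg', 1)] and actval of shape (3, n_units),
# avg_by_imglabel(imgname, actval, 0) returns the mean over the first two rows.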
# Extract PC2
cnn_net = models.alexnet(pretrained=False)
# cnn_net.classifier[-1] = torch.nn.Linear(4096,100)
# cnn_net.classifier = torch.nn.Sequential(*cnn_net.classifier, torch.nn.Linear(1000,2))
# cnn_net.load_state_dict(torch.load('/nfs/a1/userhome/huangtaicheng/workingdir/models/DNNmodel_param/alexnet_twocate.pth'))
# cnn_net.load_state_dict(torch.load('/nfs/a1/userhome/huangtaicheng/workingdir/models/DNNmodel_param/alexnet_object100_singleobj.pth'))
cnn_net.load_state_dict(torch.load('/nfs/a1/userhome/huangtaicheng/workingdir/models/DNNmodel_param/alexnet.pth'))
transform = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# transform = transforms.Compose([ShuffleImage(), transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
imgpath_bsobject = '/nfs/a1/userhome/huangtaicheng/workingdir/data/PhysicalSize/ObjectSize/SizeDataset_2021/Object100_origin'
imgname, object_act = cnntools.extract_activation(cnn_net, imgpath_bsobject, layer_loc=('features', '8'), imgtransforms=transform, isgpu=True)
if object_act.ndim == 4:
object_act = object_act.reshape(*object_act.shape[:2], -1).mean(axis=-1)
object_act_avg = np.zeros((100,object_act.shape[-1]))
for lbl in range(100):
object_act_avg[lbl,:] = avg_by_imglabel(imgname, object_act, lbl)
object_act_avg = object_act_avg/np.tile(linalg.norm(object_act_avg, axis=1), (object_act_avg.shape[-1],1)).T
iopkl = iofiles.make_ioinstance('/nfs/a1/userhome/huangtaicheng/workingdir/models/pca_imgnetval_conv4_alexnet.pkl')
pca_model = iopkl.load()
# pca_model = PCA()
# pca_model.fit(object_act_avg)
pca_act = np.dot(object_act_avg, np.linalg.pinv(pca_model.components_))
pc2_act = pca_act[:,1]*(-1)
# Load real-world size
# retin_size_pd = pd.read_csv('/nfs/a1/userhome/huangtaicheng/workingdir/data/PhysicalSize/RetinSizes.csv')
rw_size_pd = pd.read_csv('/nfs/a1/userhome/huangtaicheng/workingdir/data/PhysicalSize/Real_SizeRanks8.csv')
rw_size_pd = rw_size_pd.sort_values('name')
with open('/nfs/a1/userhome/huangtaicheng/workingdir/code/PhysicalSize_code/data/xuyitao_1_analysedata.pkl', 'rb') as f:
rw_size_human = pickle.load(f)
figure_data = {}
figure_data['pc2_act'] = pc2_act
figure_data['sizerank'] = np.array(rw_size_pd['real_sizerank'])
figure_data['rw_size_human'] = rw_size_human['subjsize_rank']
figure_data = pd.DataFrame(figure_data)
import matplotlib.cm as cm
import matplotlib.pyplot as plt
cmap = plt.get_cmap('rainbow')
for rank in np.arange(1,9,1):
rank_data = figure_data[figure_data['sizerank']==rank]
plt.scatter(rank_data['pc2_act'], rank_data['rw_size_human'], color=cmap(rank/9))
# plt.legend(['SizeRank '+str(rank) for rank in np.arange(1,9,1)])
# plt.show()
|
{"hexsha": "e004b68707ee52395ba0616bfe67c0aff4817e35", "size": 3418, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/PC2_humansize.py", "max_stars_repo_name": "helloTC/RealWorldSizeAxis", "max_stars_repo_head_hexsha": "769dff6c4602ecaa0c8f06244f190bb92e2038ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Code/PC2_humansize.py", "max_issues_repo_name": "helloTC/RealWorldSizeAxis", "max_issues_repo_head_hexsha": "769dff6c4602ecaa0c8f06244f190bb92e2038ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/PC2_humansize.py", "max_forks_repo_name": "helloTC/RealWorldSizeAxis", "max_forks_repo_head_hexsha": "769dff6c4602ecaa0c8f06244f190bb92e2038ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1807228916, "max_line_length": 180, "alphanum_fraction": 0.7671152721, "include": true, "reason": "import numpy,from scipy", "num_tokens": 991}
|
!
! Copyright (c) 2006-2015, The Regents of the University of California,
! through Lawrence Berkeley National Laboratory (subject to receipt of any
! required approvals from the U.S. Dept. of Energy) and the Paul Scherrer
! Institut (Switzerland). All rights reserved.
!
! License: see file COPYING in top level of source distribution.
!
include 'H5hut.f90'
program write_core_vfd
use H5hut
implicit none
#if defined(PARALLEL_IO)
include 'mpif.h'
#endif
! name of output file
character (len=*), parameter :: fname = "example_core_vfd.h5"
! H5hut verbosity level
integer*8, parameter :: h5_verbosity = H5_VERBOSE_DEFAULT
! number of particles we are going to write per core
integer*4, parameter :: num_particles = 32
integer :: comm_rank = 0
integer*8 :: file, h5_ierror
integer*8 :: prop
integer*4 :: i
integer*4, allocatable :: data(:)
! initialize MPI & H5hut
#if defined(PARALLEL_IO)
integer :: comm, mpi_ierror
comm = MPI_COMM_WORLD
call mpi_init (mpi_ierror)
call mpi_comm_rank (comm, comm_rank, mpi_ierror)
#endif
call h5_abort_on_error ()
call h5_set_verbosity_level (h5_verbosity)
! open file and create first step
prop = h5_createprop_file ()
h5_ierror = h5_setprop_file_corevfd (prop);
file = h5_openfile (fname, H5_O_WRONLY, prop)
h5_ierror = h5_setstep(file, 1_8)
! set number of particles this process is going to write
h5_ierror = h5pt_setnpoints (file, int8 (num_particles))
! create fake data
allocate (data (num_particles))
do i = 1, num_particles
data (i) = (i-1) + comm_rank * num_particles
end do
! write the data
h5_ierror = h5pt_writedata_i4 (file, "data", data);
! cleanup
deallocate (data)
h5_ierror = h5_closefile (file)
#if defined(PARALLEL_IO)
call mpi_finalize (mpi_ierror)
#endif
end program write_core_vfd
|
{"hexsha": "1e59802157467fa9411a6399df58d56bb882fba8", "size": 1850, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "examples/H5Part/write_core_vfdf.f90", "max_stars_repo_name": "greole/H5hut", "max_stars_repo_head_hexsha": "7833ed7877b7578b1ec3308ba2b465fc54d0c582", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-12T14:01:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-12T14:01:55.000Z", "max_issues_repo_path": "examples/H5Part/write_core_vfdf.f90", "max_issues_repo_name": "greole/H5hut", "max_issues_repo_head_hexsha": "7833ed7877b7578b1ec3308ba2b465fc54d0c582", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/H5Part/write_core_vfdf.f90", "max_forks_repo_name": "greole/H5hut", "max_forks_repo_head_hexsha": "7833ed7877b7578b1ec3308ba2b465fc54d0c582", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-04T08:40:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-10T21:25:42.000Z", "avg_line_length": 25.6944444444, "max_line_length": 75, "alphanum_fraction": 0.7205405405, "num_tokens": 558}
|
library(rmarkdown)
library(knitr)
args = commandArgs(trailingOnly=TRUE)
render(args[1], output_file=args[2], output_format="word_document")
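# Editorial usage sketch (hedged; render() also requires pandoc on the PATH):
#   Rscript build-docx.r report.Rmd report.docx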
|
{"hexsha": "cf14f4a03cc95e76b3e0441ff21edb99928b4428", "size": 143, "ext": "r", "lang": "R", "max_stars_repo_path": "docs/build-docx.r", "max_stars_repo_name": "pjhop/dmrff", "max_stars_repo_head_hexsha": "1f4b6785e18701eac4f76adde0e37f51bd0d1bcf", "max_stars_repo_licenses": ["Artistic-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-12-31T18:14:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-08T06:13:38.000Z", "max_issues_repo_path": "docs/build-docx.r", "max_issues_repo_name": "pjhop/dmrff", "max_issues_repo_head_hexsha": "1f4b6785e18701eac4f76adde0e37f51bd0d1bcf", "max_issues_repo_licenses": ["Artistic-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-12-06T16:17:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-25T14:30:05.000Z", "max_forks_repo_path": "docs/build-docx.r", "max_forks_repo_name": "pjhop/dmrff", "max_forks_repo_head_hexsha": "1f4b6785e18701eac4f76adde0e37f51bd0d1bcf", "max_forks_repo_licenses": ["Artistic-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-08-19T20:46:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-11T04:35:11.000Z", "avg_line_length": 17.875, "max_line_length": 67, "alphanum_fraction": 0.7832167832, "num_tokens": 36}
|
"""
DiscreteUniform(a,b)
A *Discrete uniform distribution* is a uniform distribution over a consecutive sequence of integers between `a` and `b`, inclusive.
```math
P(X = k) = 1 / (b - a + 1) \\quad \\text{for } k = a, a+1, \\ldots, b.
```
```julia
DiscreteUniform(a, b) # a uniform distribution over {a, a+1, ..., b}
params(d) # Get the parameters, i.e. (a, b)
span(d) # Get the span of the support, i.e. (b - a + 1)
probval(d) # Get the probability value, i.e. 1 / (b - a + 1)
minimum(d) # Return a
maximum(d) # Return b
```
External links
* [Discrete uniform distribution on Wikipedia](http://en.wikipedia.org/wiki/Uniform_distribution_(discrete))
"""
struct DiscreteUniform <: DiscreteUnivariateDistribution
a::Int
b::Int
pv::Float64 # individual probabilities
function DiscreteUniform(a::Real, b::Real; check_args::Bool=true)
check_args && @check_args(DiscreteUniform, a <= b)
new(a, b, 1 / (b - a + 1))
end
DiscreteUniform(b::Real; check_args::Bool=true) = DiscreteUniform(0, b; check_args=check_args)
DiscreteUniform() = new(0, 1, 0.5)
end
@distr_support DiscreteUniform d.a d.b
### Parameters
span(d::DiscreteUniform) = d.b - d.a + 1
probval(d::DiscreteUniform) = d.pv
params(d::DiscreteUniform) = (d.a, d.b)
### Show
show(io::IO, d::DiscreteUniform) = show(io, d, (:a, :b))
### Statistics
mean(d::DiscreteUniform) = middle(d.a, d.b)
median(d::DiscreteUniform) = fld(d.a + d.b, 2)
var(d::DiscreteUniform) = (span(d)^2 - 1.0) / 12.0
skewness(d::DiscreteUniform) = 0.0
function kurtosis(d::DiscreteUniform)
n2 = span(d)^2
-1.2 * (n2 + 1.0) / (n2 - 1.0)
end
entropy(d::DiscreteUniform) = log(span(d))
mode(d::DiscreteUniform) = d.a
modes(d::DiscreteUniform) = [d.a:d.b]
### Evaluation
pdf(d::DiscreteUniform, x::Real) = insupport(d, x) ? d.pv : zero(d.pv)
logpdf(d::DiscreteUniform, x::Real) = log(pdf(d, x))
function cdf(d::DiscreteUniform, x::Int)
a = d.a
result = (x - a + 1) * d.pv
return if x < a
zero(result)
elseif x >= d.b
one(result)
else
result
end
end
quantile(d::DiscreteUniform, p::Real) = iszero(p) ? d.a : d.a - 1 + ceil(Int, p * span(d))
function mgf(d::DiscreteUniform, t::Real)
a, b = d.a, d.b
u = b - a + 1
result = (exp(t*a) * expm1(t*u)) / (u*expm1(t))
return iszero(t) ? one(result) : result
end
function cf(d::DiscreteUniform, t::Real)
a, b = d.a, d.b
u = b - a + 1
result = (im*cos(t*(a+b)/2) + sin(t*(a-b-1)/2)) / (u*sin(t/2))
return iszero(t) ? one(result) : result
end
### Sampling
rand(rng::AbstractRNG, d::DiscreteUniform) = rand(rng, d.a:d.b)
# Fit model
function fit_mle(::Type{DiscreteUniform}, x::AbstractArray{<:Real})
if isempty(x)
throw(ArgumentError("data set must be non-empty."))
end
return DiscreteUniform(extrema(x)...)
end
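# Editorial usage sketch: the bounds come from the sample extrema, e.g.
#   fit_mle(DiscreteUniform, [2, 5, 3, 9]) == DiscreteUniform(2, 9)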
|
{"hexsha": "c6a0b1735f629164617fbea857656cdb7135bdb2", "size": 2890, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/univariate/discrete/discreteuniform.jl", "max_stars_repo_name": "pdeffebach/Distributions.jl", "max_stars_repo_head_hexsha": "8aea3cc82ee2f8ffe1e8cd754e7fcd99369c7a1c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/univariate/discrete/discreteuniform.jl", "max_issues_repo_name": "pdeffebach/Distributions.jl", "max_issues_repo_head_hexsha": "8aea3cc82ee2f8ffe1e8cd754e7fcd99369c7a1c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/univariate/discrete/discreteuniform.jl", "max_forks_repo_name": "pdeffebach/Distributions.jl", "max_forks_repo_head_hexsha": "8aea3cc82ee2f8ffe1e8cd754e7fcd99369c7a1c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7008547009, "max_line_length": 131, "alphanum_fraction": 0.6141868512, "num_tokens": 981}
|
import cv2
from matplotlib import pyplot as plt
import numpy as np
import imutils
import easyocr
image = cv2.imread("Images0.png")
# Convert to Grayscale Image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(cv2.cvtColor(gray, cv2.COLOR_BGR2RGB))
bfilter = cv2.bilateralFilter(gray, 11, 17, 17) #Noise reduction
edged = cv2.Canny(bfilter, 30, 200) #Edge detection
plt.imshow(cv2.cvtColor(edged, cv2.COLOR_BGR2RGB))
keypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(keypoints)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
location = None
for contour in contours:
approx = cv2.approxPolyDP(contour, 10, True)
if len(approx) == 4:
location = approx
break
if location is None:
    raise SystemExit("No rectangular plate contour found")
mask = np.zeros(gray.shape, np.uint8)
new_image = cv2.drawContours(mask, [location], 0,255, -1)
new_image = cv2.bitwise_and(image, image, mask=mask)
plt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))
(x,y) = np.where(mask==255)
(x1, y1) = (np.min(x), np.min(y))
(x2, y2) = (np.max(x), np.max(y))
cropped_image = gray[x1:x2+1, y1:y2+1]
plt.imshow(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))
reader = easyocr.Reader(['en'])
result = reader.readtext(cropped_image)
print("Number Plate: ", result)
|
{"hexsha": "8a78f426004482f22f4e2fc4e0e6ba0ce7719164", "size": 1294, "ext": "py", "lang": "Python", "max_stars_repo_path": "License Plate Recognition System/OCR.py", "max_stars_repo_name": "mujtaba-farooq/FYP-F21-28-D-PRS", "max_stars_repo_head_hexsha": "2ad78b28486a59fbf16432b0643ca98b8f7ac438", "max_stars_repo_licenses": ["FTL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "License Plate Recognition System/OCR.py", "max_issues_repo_name": "mujtaba-farooq/FYP-F21-28-D-PRS", "max_issues_repo_head_hexsha": "2ad78b28486a59fbf16432b0643ca98b8f7ac438", "max_issues_repo_licenses": ["FTL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "License Plate Recognition System/OCR.py", "max_forks_repo_name": "mujtaba-farooq/FYP-F21-28-D-PRS", "max_forks_repo_head_hexsha": "2ad78b28486a59fbf16432b0643ca98b8f7ac438", "max_forks_repo_licenses": ["FTL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0930232558, "max_line_length": 82, "alphanum_fraction": 0.7326120556, "include": true, "reason": "import numpy", "num_tokens": 393}
|
#!/usr/bin/env python
# coding: utf-8
"""
Synthesizes the results of fits into a single file per harmonic.
"""
import re
import os
import math
import numpy as np
import cycle
import sys
if len(sys.argv)>1:
cycidf = sys.argv[1]
else:
cycidf = cycle.select() # cycle identifier
cycdir = cycle.directory(cycidf) # cycle directory
groups = cycle.groups(cycidf, 'fits') # groups
def load(impstm):
"""Load data from fit from a sample."""
with open(impstm, 'r') as f:
f.readline()
dst = eval(f.readline())
for i in range(4):
f.readline()
dat = np.loadtxt(f).T
res = {
'j': dat[0],
'L': dat[1],
'd': dat[3],
'R': dat[4],
'D': dst,
}
if len(dat) > 5:
res['f'] = dat[5]
return res
def analyze(dat, j):
"""Analyze a model."""
msk = dat['j']==j
d = dat['d'][msk]
R = dat['R'][msk]
res = {
'avg-d': np.abs(np.mean(d)*1e18-dat['D'])/dat['D'],
'avg-R': np.mean(R),
'std-d': np.std(d)*1e18/dat['D'],
'std-R': np.std(R),
}
if 'f' in dat:
f = dat['f'][msk]
res['avg-f'] = np.mean(f)
res['std-f'] = np.std(f)
return res
def sample(impstm, j):
"""Analyze a sample."""
res = {}
for mod in ('GUW1', 'GUW2', 'W1', 'W2'):
pth = os.path.join(impstm, f"fits_data_{mod}.dat")
dat = load(pth)
res[mod] = analyze(dat, j)
return res
def adjust(tab):
"""Make the items of a column the same width."""
for j in range(len(tab[0])):
w = max([len(tab[i][j]) for i in range(len(tab))])
for i in range(len(tab)):
tab[i][j] = format(tab[i][j], f'>{w}')
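# Editorial illustration (hedged): adjust right-aligns every item of a column
# to the width of its longest entry, so [['a', 'bb'], ['ccc', 'd']] becomes
# [['  a', 'bb'], ['ccc', ' d']].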
for group in groups:
expdir = os.path.join(cycdir, f"synthesis_{group}")
if not os.path.isdir(expdir):
os.makedirs(expdir)
for j in (1, 2):
lines = {'avg':[], 'std':[]}
for mtd in lines:
lines[mtd].append([
f'distribution',
f'GUW1-{mtd}-f',
f'GUW1-{mtd}-d',
f'GUW2-{mtd}-d',
f'W1-{mtd}-d',
f'W2-{mtd}-d',
f'GUW1-{mtd}-R',
f'GUW2-{mtd}-R',
f'W1-{mtd}-R',
f'W2-{mtd}-R',
])
pth0 = os.path.join(cycdir, f"fits_{group}")
for smp in os.listdir(pth0):
pth1 = os.path.join(pth0, smp)
res = sample(pth1, j)
for mtd in lines:
lines[mtd].append([
smp,
f"{res['GUW1'][f'{mtd}-f']:12.7e}",
f"{res['GUW1'][f'{mtd}-d']:12.7e}",
f"{res['GUW2'][f'{mtd}-d']:12.7e}",
f"{res['W1'][f'{mtd}-d']:12.7e}",
f"{res['W2'][f'{mtd}-d']:12.7e}",
f"{res['GUW1'][f'{mtd}-R']:12.7e}",
f"{res['GUW2'][f'{mtd}-d']:12.7e}",
f"{res['W1'][f'{mtd}-R']:12.7e}",
f"{res['W2'][f'{mtd}-R']:12.7e}",
])
for mtd in lines:
adjust(lines[mtd])
with open(os.path.join(expdir, f"{mtd}_j{j}.csv"), "w") as f:
for l in lines[mtd]:
f.write("; ".join(l)+"\n")
|
{"hexsha": "641198dc52f8cd3db2059a0bb6debcd4ce3056e5", "size": 3297, "ext": "py", "lang": "Python", "max_stars_repo_path": "launchers/local/synthesis.py", "max_stars_repo_name": "DunstanBecht/lpa-workspace", "max_stars_repo_head_hexsha": "316db41fed08f856c376e7f8e2ff92f2af5ecf7d", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "launchers/local/synthesis.py", "max_issues_repo_name": "DunstanBecht/lpa-workspace", "max_issues_repo_head_hexsha": "316db41fed08f856c376e7f8e2ff92f2af5ecf7d", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "launchers/local/synthesis.py", "max_forks_repo_name": "DunstanBecht/lpa-workspace", "max_forks_repo_head_hexsha": "316db41fed08f856c376e7f8e2ff92f2af5ecf7d", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4224137931, "max_line_length": 73, "alphanum_fraction": 0.4388838338, "include": true, "reason": "import numpy", "num_tokens": 1038}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
df = pd.read_csv('height_weight.csv')
print(df.info())
print(df.describe())
#kernel density estimation
#the kernel specifies how the data are smoothed; a Gaussian kernel is used here
#violin plots also use a Gaussian kernel
sn.kdeplot(df["height"], df["weight"], cmap="viridis", bw=(2, 20))
#jaggedness indicates where the underlying data points lie
#(2, 20) -> here 20 is the Gaussian bandwidth
plt.hist2d(df["height"],df["weight"],bins=20,cmap="magma",alpha=0.3)
#convolving 2D Histogram with kde
plt.show()
sn.kdeplot(df["height"],df["weight"],cmap="magma",shade=True)
#plot of probability surface
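# Editorial alternative (hedged; assumes a seaborn release providing jointplot):
# the same joint density in a single call, with marginal distributions added.
sn.jointplot(x="height", y="weight", data=df, kind="kde")
plt.show()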
|
{"hexsha": "9d7a641a1c85d76901c33030e8486e2d020984ca", "size": 653, "ext": "py", "lang": "Python", "max_stars_repo_path": "kde_plot.py", "max_stars_repo_name": "WestHamster/Feature_engg", "max_stars_repo_head_hexsha": "18d2e935db14cb68c734fb67e99fe427841d1d1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kde_plot.py", "max_issues_repo_name": "WestHamster/Feature_engg", "max_issues_repo_head_hexsha": "18d2e935db14cb68c734fb67e99fe427841d1d1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kde_plot.py", "max_forks_repo_name": "WestHamster/Feature_engg", "max_forks_repo_head_hexsha": "18d2e935db14cb68c734fb67e99fe427841d1d1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0952380952, "max_line_length": 68, "alphanum_fraction": 0.7457886677, "include": true, "reason": "import numpy", "num_tokens": 189}
|
[STATEMENT]
lemma comm_monoidI:
fixes G (structure)
assumes m_closed:
"!!x y. [| x \<in> carrier G; y \<in> carrier G |] ==> x \<otimes> y \<in> carrier G"
and one_closed: "\<one> \<in> carrier G"
and m_assoc:
"!!x y z. [| x \<in> carrier G; y \<in> carrier G; z \<in> carrier G |] ==>
(x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"
and l_one: "!!x. x \<in> carrier G ==> \<one> \<otimes> x = x"
and m_comm:
"!!x y. [| x \<in> carrier G; y \<in> carrier G |] ==> x \<otimes> y = y \<otimes> x"
shows "comm_monoid G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Group.comm_monoid G
[PROOF STEP]
using l_one
[PROOF STATE]
proof (prove)
using this:
?x \<in> carrier G \<Longrightarrow> \<one> \<otimes> ?x = ?x
goal (1 subgoal):
1. Group.comm_monoid G
[PROOF STEP]
by (auto intro!: comm_monoid.intro comm_monoid_axioms.intro monoid.intro
intro: assms simp: m_closed one_closed m_comm)
|
{"llama_tokens": 396, "file": null, "length": 2}
|
subroutine banslv ( w, nroww, nrow, nbandl, nbandu, b )
c from * a practical guide to splines * by c. de boor
c companion routine to banfac . it returns the solution x of the
c linear system a*x = b in place of b , given the lu-factorization
c for a in the workarray w .
c
c****** i n p u t ******
c w, nroww,nrow,nbandl,nbandu.....describe the lu-factorization of a
c  banded matrix  a  of order  nrow  as constructed in  banfac .
c for details, see banfac .
c b.....right side of the system to be solved .
c
c****** o u t p u t ******
c b.....contains the solution x , of order nrow .
c
c****** m e t h o d ******
c (with a = l*u, as stored in w,) the unit lower triangular system
c l(u*x) = b is solved for y = u*x, and y stored in b . then the
c upper triangular system u*x = y is solved for x . the calcul-
c ations are so arranged that the innermost loops stay within columns.
c
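c****** e x a m p l e ******
c  editorial sketch (hedged, not from de boor): a typical call sequence,
c  assuming  banfac  has already returned the lu-factorization of  a  in
c  the workarray  w  with a success flag  iflag :
c      call banfac ( w, nroww, nrow, nbandl, nbandu, iflag )
c      call banslv ( w, nroww, nrow, nbandl, nbandu, b )
c  on return,  b  contains the solution  x  of  a*x = b .
c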
integer nbandl,nbandu,nrow,nroww, i,j,jmax,middle,nrowm1
real w(nroww,nrow),b(nrow)
middle = nbandu + 1
if (nrow .eq. 1) go to 49
nrowm1 = nrow - 1
if (nbandl .eq. 0) go to 30
c forward pass
c for i=1,2,...,nrow-1, subtract right side(i)*(i-th column
c of l ) from right side (below i-th row) .
do 21 i=1,nrowm1
jmax = min0(nbandl, nrow-i)
do 21 j=1,jmax
21 b(i+j) = b(i+j) - b(i)*w(middle+j,i)
c backward pass
c for i=nrow,nrow-1,...,1, divide right side(i) by i-th diag-
c onal entry of u, then subtract right side(i)*(i-th column
c of u) from right side (above i-th row).
30 if (nbandu .gt. 0) go to 40
c a is lower triangular .
do 31 i=1,nrow
31 b(i) = b(i)/w(1,i)
return
40 i = nrow
41 b(i) = b(i)/w(middle,i)
jmax = min0(nbandu,i-1)
do 45 j=1,jmax
45 b(i-j) = b(i-j) - b(i)*w(middle-j,i)
i = i - 1
if (i .gt. 1) go to 41
49 b(1) = b(1)/w(middle,1)
return
end
|
{"hexsha": "154ae051d46677c800df4f4889ea24fe5f7b3aa8", "size": 1962, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "iraf.v2161/math/deboor/banslv.f", "max_stars_repo_name": "ysBach/irafdocgen", "max_stars_repo_head_hexsha": "b11fcd75cc44b01ae69c9c399e650ec100167a54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-01T15:19:09.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-02T16:48:42.000Z", "max_issues_repo_path": "math/deboor/banslv.f", "max_issues_repo_name": "kirxkirx/iraf", "max_issues_repo_head_hexsha": "fcd7569b4e0ddbea29f7dbe534a25759e0c31883", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-30T13:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-02T19:40:25.000Z", "max_forks_repo_path": "math/deboor/banslv.f", "max_forks_repo_name": "kirxkirx/iraf", "max_forks_repo_head_hexsha": "fcd7569b4e0ddbea29f7dbe534a25759e0c31883", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3333333333, "max_line_length": 72, "alphanum_fraction": 0.5840978593, "num_tokens": 755}
|
[STATEMENT]
lemma SourcesS13_L2: "Sources level2 sS13 = {sS9, sS10, sS12}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Sources level2 sS13 = {sS9, sS10, sS12}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Sources level2 sS13 = {sS9, sS10, sS12}
[PROOF STEP]
have DSourcesS13:"DSources level2 sS13 = {sS12}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. DSources level2 sS13 = {sS12}
[PROOF STEP]
by (simp add: DSources_def AbstrLevel2, auto)
[PROOF STATE]
proof (state)
this:
DSources level2 sS13 = {sS12}
goal (1 subgoal):
1. Sources level2 sS13 = {sS9, sS10, sS12}
[PROOF STEP]
have "Sources level2 sS12 = {sS9, sS10}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Sources level2 sS12 = {sS9, sS10}
[PROOF STEP]
by (rule SourcesS12_L2)
[PROOF STATE]
proof (state)
this:
Sources level2 sS12 = {sS9, sS10}
goal (1 subgoal):
1. Sources level2 sS13 = {sS9, sS10, sS12}
[PROOF STEP]
with DSourcesS13
[PROOF STATE]
proof (chain)
picking this:
DSources level2 sS13 = {sS12}
Sources level2 sS12 = {sS9, sS10}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
DSources level2 sS13 = {sS12}
Sources level2 sS12 = {sS9, sS10}
goal (1 subgoal):
1. Sources level2 sS13 = {sS9, sS10, sS12}
[PROOF STEP]
by (simp add: Sources_singleDSource)
[PROOF STATE]
proof (state)
this:
Sources level2 sS13 = {sS9, sS10, sS12}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 729, "file": "ComponentDependencies_DataDependenciesCaseStudy", "length": 9}
|
import oneflow
from oneflow.framework.docstr.utils import reset_docstr
reset_docstr(
oneflow.nn.ReLU,
r"""ReLU(inplace=False)
    The ReLU activation function, applied element-wise to every element of the tensor:
    :math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
    Parameters:
        inplace: whether to do the operation in-place. Default: ``False``
    Shape:
        - Input: :math:`(N, *)`, where `*` means any number of additional dimensions
        - Output: :math:`(N, *)`, the same shape as the input
    Example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> relu = flow.nn.ReLU()
>>> ndarr = np.asarray([1, -2, 3])
>>> x = flow.Tensor(ndarr)
>>> relu(x)
tensor([1., 0., 3.], dtype=oneflow.float32)
""",
)
|
{"hexsha": "440309f0d5d9f1f790bc5bbe83f7f6a4e7f41014", "size": 701, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/source/cn/activation.py", "max_stars_repo_name": "grybd/oneflow", "max_stars_repo_head_hexsha": "82237ad096a10527591660c09b61444c42917e69", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3285, "max_stars_repo_stars_event_min_datetime": "2020-07-31T05:51:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:20:16.000Z", "max_issues_repo_path": "docs/source/cn/activation.py", "max_issues_repo_name": "grybd/oneflow", "max_issues_repo_head_hexsha": "82237ad096a10527591660c09b61444c42917e69", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2417, "max_issues_repo_issues_event_min_datetime": "2020-07-31T06:28:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:04:14.000Z", "max_forks_repo_path": "docs/source/cn/activation.py", "max_forks_repo_name": "grybd/oneflow", "max_forks_repo_head_hexsha": "82237ad096a10527591660c09b61444c42917e69", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 520, "max_forks_repo_forks_event_min_datetime": "2020-07-31T05:52:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T02:38:11.000Z", "avg_line_length": 21.2424242424, "max_line_length": 55, "alphanum_fraction": 0.5235378031, "include": true, "reason": "import numpy", "num_tokens": 263}
|
"""
Read data from the "current" BATSE catalog (dubbed 5Bp here, with "p" for
"preliminary," since an official 5B successor to the 4B catalog has not yet
been released). Provide access to catalog data and other GRB data via
a GRBCollection instance providing access to its individual GRB elements
in three ways:
* as an OrderedDict indexed by BATSE trigger number
* via attributes of the form .t#, with # = BATSE trigger number
* via attributes of the form .b#, with # = YYMMDD burst designation;
this returns a list of triggers matching the designation (there will
be >1 if BATSE detected multiple bursts on the specified date)
Only one function in this module is intended for users: load().
This module was created to access the data as released in Jul-Sep 2000.
Created 2012-05-06 by Tom Loredo
"""
from os.path import abspath, exists, join, split
from os import mkdir
import cPickle
from collections import OrderedDict
from numpy import array
from grb import GRB, GRBCollection
from locations import *
from utils import retrieve_gzip
__all__ = ['load_catalog']
# TODO: get_grb_classes is presently unused; is there a use case? May
# only be useful if the pickled files are unpickled outside this module.
def get_grb_classes(modname, classname):
"""
Return class objects from the "grb" module that have the specified
`classname`.
This function is for identifying classes encountered when unpickling
BATSE 5Bp data; it satisfies the cPickle "find_global" interface.
"""
# print 'Module:', modname, ' -- Class:', classname
if classname == 'GRB':
return GRB
elif classname == 'GRBCollection':
return GRBCollection
else:
raise RuntimeError, 'Unrecognized class in pickled data: %s, %s' % (modname, classname)
def read_summaries():
"""
Read GRB summary information from a pre-existing pickled data file.
"""
try:
sfile = file(join(root, summaries), 'rb')
except:
raise IOError('Summary data file does not exist!')
# Define an unpickler that will recognize grb classes even when unpickling
# is done elsewhere (in which case grb classes may not be on the
# top level, thwarting normal unpickling).
loader = cPickle.Unpickler(sfile)
# loader.find_global = get_grb_classes
GRBs = loader.load()
sfile.close()
print 'Loaded summary data for', len(GRBs), 'GRBs comprising the 5Bp catalog.'
return GRBs
def get_grb_bright(bfile):
"""
Read the brightness data for a single GRB from the brightness data file
`bfile`; return the trigger number and a list of data entries (strings).
"""
line = bfile.readline()
if line == '':
return None, None
data = []
# 1: trigger, ch1 fluence & err, ch2 fluence & err
words = line.strip().split()
trig = int(words[0])
data.extend(words[1:])
# 2: ch3 fluence & err, ch4 fluence & err
words = bfile.readline().strip().split()
data.extend(words)
# 3: 64ms peak flux, err, time
words = bfile.readline().strip().split()
data.extend(words)
# 4: 256ms peak flux, err, time
words = bfile.readline().strip().split()
data.extend(words)
# 5: 1024ms peak flux, err, time
words = bfile.readline().strip().split()
data.extend(words)
return trig, data
def fetch_summaries():
"""
Fetch GRB summary information from the CGRO SSC; return it in a
GRBCollection instance.
"""
# Get access to the raw data files, either cached or fetched from the SSC.
cache = join(root, raw_cache)
basic = retrieve_gzip(basic_url, cache)
bright4 = retrieve_gzip(bright_url4, cache)
bright5 = retrieve_gzip(bright_url5, cache)
durn = retrieve_gzip(durn_url, cache)
comments = retrieve_gzip(comments_url4, cache)
# Read basic data, defining the GRB objects. Add the trigger data path.
GRBs = GRBCollection()
ncomp = 0 # count complete GRBs (not overwritten by subsequent GRB)
for line in basic:
if not line: # in case of empty lines at end
break
grb = GRB(line)
if grb.trigger in GRBs:
raise ValueError, 'Duplicate entries for trigger %i !' % grb.trigger
GRBs.add(grb)
if not grb.incomplete:
ncomp += 1
basic.close()
print 'Read data for', len(GRBs), 'triggers from basic table,', ncomp,\
'complete...'
print
# Add brightness (flux, fluence) data.
nf = 0
extra = [] # collect triggers in flux table but not basic table
while True:
trigger, data = get_grb_bright(bright4)
if trigger is None:
break
if trigger in GRBs:
GRBs[trigger].set_bright(trigger, data)
nf += 1
else:
extra.append(trigger)
bright4.close()
while True:
trigger, data = get_grb_bright(bright5)
if trigger is None:
break
if trigger in GRBs:
GRBs[trigger].set_bright(trigger, data)
nf += 1
else:
extra.append(trigger)
bright5.close()
print 'Read flux data for', nf, 'basic table triggers.'
print 'Extraneous flux data for:', extra
if extra:
print '***** Data for these GRBs was ignored!!! *****'
print
# Add duration data.
ndur = 0
extra = []
for line in durn:
if not line:
break
data = line.strip().split()
trigger = int(data[0])
if trigger in GRBs:
GRBs[trigger].set_durn(trigger, data[1:])
ndur += 1
else:
extra.append(trigger)
durn.close()
print 'Read duration data for', ndur, 'basic table triggers.'
print 'Extraneous data for:', extra
if extra:
print '***** Data for these GRBs was ignored!!! *****'
print
# Add comments.
ncom = 0
extra = []
for line in comments:
if not line:
break
if line[0] == '#': # header
continue
trigger = int(line[:6].strip())
flag = line[11]
com = line[14:].strip()
if trigger in GRBs:
GRBs[trigger].comments.append((flag, com))
ncom += 1
else:
extra.append(trigger)
comments.close()
print 'Read comment data for', ncom, 'basic table triggers.'
print 'Extraneous data for:', extra
if extra:
print '***** Data for these GRBs was ignored!!! *****'
return GRBs
def load_catalog(root_dir=root):
"""
Establish access to GRB data from the BATSE '5B' catalog, stored in the
`root_dir` directory. Return a GRBCollection providing burst-by-burst
access keyed by trigger number and via trigger and YYMMDD (date)
attributes.
If no catalog has yet been established, the directory is created and
summary data for all GRBs are fetched from the CGRO SSC and stored
locally for future use.
Detailed data for specific bursts is fetched, parsed, and cached
lazily as requested.
"""
# TODO: Probably a better way to handle this than with a global....
global root
# Make sure root directory exists.
root = abspath(root_dir) # assigns full path throughout package
rc_dir = join(root, raw_cache)
if not exists(root):
mkdir(root)
if not exists(rc_dir):
mkdir(rc_dir)
try:
GRBs = read_summaries()
except IOError:
GRBs = fetch_summaries()
sfile = file(join(root,summaries), 'wb')
cPickle.dump(GRBs, sfile, 2) # protocol 2 for efficiency
sfile.close()
return GRBs
|
{"hexsha": "b2af14876b67de4d929b217500ea90da34235824", "size": 7657, "ext": "py", "lang": "Python", "max_stars_repo_path": "batse5bp/catalog.py", "max_stars_repo_name": "tloredo/batse5bp", "max_stars_repo_head_hexsha": "c039a8e5394da4764881cdee1e17c6b1c0ecc088", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-15T13:46:55.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-15T13:46:55.000Z", "max_issues_repo_path": "batse5bp/catalog.py", "max_issues_repo_name": "tloredo/batse5bp", "max_issues_repo_head_hexsha": "c039a8e5394da4764881cdee1e17c6b1c0ecc088", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "batse5bp/catalog.py", "max_forks_repo_name": "tloredo/batse5bp", "max_forks_repo_head_hexsha": "c039a8e5394da4764881cdee1e17c6b1c0ecc088", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1722689076, "max_line_length": 95, "alphanum_fraction": 0.6349745331, "include": true, "reason": "from numpy", "num_tokens": 1953}
|
[STATEMENT]
lemma x_vote_eq:
assumes run: "HORun UV_M rho HOs"
and com: "\<forall>r. HOcommPerRd UV_M (HOs r)"
and vote: "vote (rho r p) = Some v"
shows "v = x (rho r p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v = x (rho r p)
[PROOF STEP]
proof (cases r)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. r = 0 \<Longrightarrow> v = x (rho r p)
2. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
case 0
[PROOF STATE]
proof (state)
this:
r = 0
goal (2 subgoals):
1. r = 0 \<Longrightarrow> v = x (rho r p)
2. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
with run vote
[PROOF STATE]
proof (chain)
picking this:
HORun UV_M rho HOs
vote (rho r p) = Some v
r = 0
[PROOF STEP]
show ?thesis \<comment> \<open>no vote in initial state\<close>
[PROOF STATE]
proof (prove)
using this:
HORun UV_M rho HOs
vote (rho r p) = Some v
r = 0
goal (1 subgoal):
1. v = x (rho r p)
[PROOF STEP]
by (auto simp: UV_HOMachine_def HORun_eq HOinitConfig_eq
initState_def UV_initState_def)
[PROOF STATE]
proof (state)
this:
v = x (rho r p)
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
fix r'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
assume r: "r = Suc r'"
[PROOF STATE]
proof (state)
this:
r = Suc r'
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
let ?msgs = "HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
from run
[PROOF STATE]
proof (chain)
picking this:
HORun UV_M rho HOs
[PROOF STEP]
have "nextState UV_M r' p (rho r' p) ?msgs (rho (Suc r') p)"
[PROOF STATE]
proof (prove)
using this:
HORun UV_M rho HOs
goal (1 subgoal):
1. nextState UV_M r' p (rho r' p) (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) (rho (Suc r') p)
[PROOF STEP]
by (auto simp: HORun_eq HOnextConfig_eq nextState_def)
[PROOF STATE]
proof (state)
this:
nextState UV_M r' p (rho r' p) (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) (rho (Suc r') p)
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
with vote r
[PROOF STATE]
proof (chain)
picking this:
vote (rho r p) = Some v
r = Suc r'
nextState UV_M r' p (rho r' p) (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) (rho (Suc r') p)
[PROOF STEP]
have nxt0: "next0 r' p (rho r' p) ?msgs (rho r p)" and s0: "step r' = 0"
[PROOF STATE]
proof (prove)
using this:
vote (rho r p) = Some v
r = Suc r'
nextState UV_M r' p (rho r' p) (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) (rho (Suc r') p)
goal (1 subgoal):
1. next0 r' p (rho r' p) (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) (rho r p) &&& step r' = 0
[PROOF STEP]
by (auto simp: nextState_def UV_HOMachine_def UV_nextState_def next1_def)
[PROOF STATE]
proof (state)
this:
next0 r' p (rho r' p) (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) (rho r p)
step r' = 0
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
from run s0
[PROOF STATE]
proof (chain)
picking this:
HORun UV_M rho HOs
step r' = 0
[PROOF STEP]
have "vote (rho r' p) = None"
[PROOF STATE]
proof (prove)
using this:
HORun UV_M rho HOs
step r' = 0
goal (1 subgoal):
1. vote (rho r' p) = None
[PROOF STEP]
by (rule reset_vote)
[PROOF STATE]
proof (state)
this:
vote (rho r' p) = None
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
with vote nxt0
[PROOF STATE]
proof (chain)
picking this:
vote (rho r p) = Some v
next0 r' p (rho r' p) (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) (rho r p)
vote (rho r' p) = None
[PROOF STEP]
have idv: "\<forall>q \<in> msgRcvd ?msgs. ?msgs q = Some (Val v)"
and x: "x (rho r p) = smallestValRcvd ?msgs"
[PROOF STATE]
proof (prove)
using this:
vote (rho r p) = Some v
next0 r' p (rho r' p) (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) (rho r p)
vote (rho r' p) = None
goal (1 subgoal):
1. \<forall>q\<in>msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')). HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') q = Some (Val v) &&& x (rho r p) = smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r'))
[PROOF STEP]
by (auto simp: next0_def)
[PROOF STATE]
proof (state)
this:
\<forall>q\<in>msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')). HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') q = Some (Val v)
x (rho r p) = smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r'))
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>q\<in>msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')). HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') q = Some (Val v)
x (rho r p) = smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r'))
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
from com
[PROOF STATE]
proof (chain)
picking this:
\<forall>r. HOcommPerRd UV_M (HOs r)
[PROOF STEP]
obtain q where "q \<in> msgRcvd ?msgs"
[PROOF STATE]
proof (prove)
using this:
\<forall>r. HOcommPerRd UV_M (HOs r)
goal (1 subgoal):
1. (\<And>q. q \<in> msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (force dest: some_common_msg)
[PROOF STATE]
proof (state)
this:
q \<in> msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r'))
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
with idv
[PROOF STATE]
proof (chain)
picking this:
\<forall>q\<in>msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')). HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') q = Some (Val v)
q \<in> msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r'))
[PROOF STEP]
have "{x . \<exists>qq. ?msgs qq = Some (Val x)} = {v}"
[PROOF STATE]
proof (prove)
using this:
\<forall>q\<in>msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')). HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') q = Some (Val v)
q \<in> msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r'))
goal (1 subgoal):
1. {x. \<exists>qq. HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') qq = Some (Val x)} = {v}
[PROOF STEP]
by (auto simp: msgRcvd_def)
[PROOF STATE]
proof (state)
this:
{x. \<exists>qq. HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') qq = Some (Val x)} = {v}
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
hence "smallestValRcvd ?msgs = v"
[PROOF STATE]
proof (prove)
using this:
{x. \<exists>qq. HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') qq = Some (Val x)} = {v}
goal (1 subgoal):
1. smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) = v
[PROOF STEP]
by (auto simp: smallestValRcvd_def)
[PROOF STATE]
proof (state)
this:
smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) = v
goal (1 subgoal):
1. \<And>nat. r = Suc nat \<Longrightarrow> v = x (rho r p)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<forall>q\<in>msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')). HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') q = Some (Val v)
x (rho r p) = smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r'))
smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) = v
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<forall>q\<in>msgRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')). HOrcvdMsgs UV_M r' p (HOs r' p) (rho r') q = Some (Val v)
x (rho r p) = smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r'))
smallestValRcvd (HOrcvdMsgs UV_M r' p (HOs r' p) (rho r')) = v
goal (1 subgoal):
1. v = x (rho r p)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
v = x (rho r p)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3786, "file": "Heard_Of_uv_UvProof", "length": 34}
|
from .ColorSpace import ColorSpace, ColorSpaces
import cv2
import numpy as np
class Frame():
def __init__(self, img, colorspace):
self.link(img, colorspace)
self.mask = None
def get(self, colorspace):
if isinstance(colorspace, ColorSpaces) or isinstance(colorspace, ColorSpace):
colorspace = ColorSpaces[colorspace.name]
if colorspace in self.colorspace2img:
return self.colorspace2img[colorspace]
else:
self.colorspace2img[colorspace] = colorspace.value.bgr2this(
self.get(ColorSpaces.BGR))
return self.colorspace2img[colorspace]
raise "Couldn't find"
def link(self, img, colorspace):
if isinstance(img, Frame):
img = img.get(colorspace)
self.colorspace2img = {
colorspace: img
}
def copy(self, img, colorspace):
if isinstance(img, Frame):
img = img.get(colorspace)
self.colorspace2img = {
colorspace: np.copy(img)
}
@staticmethod
def copy_of(frame):
this = Frame(np.array([]), ColorSpaces.BGR)
this.copy(frame, ColorSpaces.BGR)
return this
def resolution(self):
for img in self.colorspace2img.values():
height, width = img.shape[0:2]
return width, height
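# A minimal usage sketch (assumes `bgr_img` is a BGR numpy array, e.g. from
# cv2.imread, and that the ColorSpaces enum defines an HSV member):
#
#     frame = Frame(bgr_img, ColorSpaces.BGR)
#     hsv = frame.get(ColorSpaces.HSV)  # converted from BGR lazily, then cached
#     width, height = frame.resolution()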
|
{"hexsha": "da3d2d7b58f741b43e88b5e7d57e9e9134183e1b", "size": 1361, "ext": "py", "lang": "Python", "max_stars_repo_path": "VisionSystem/DetectionModel/Frame.py", "max_stars_repo_name": "CallumJHays/g26-egb320-2019", "max_stars_repo_head_hexsha": "6dde6b5d2f72fac3928c5042a27dc50e978c3425", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "VisionSystem/DetectionModel/Frame.py", "max_issues_repo_name": "CallumJHays/g26-egb320-2019", "max_issues_repo_head_hexsha": "6dde6b5d2f72fac3928c5042a27dc50e978c3425", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "VisionSystem/DetectionModel/Frame.py", "max_forks_repo_name": "CallumJHays/g26-egb320-2019", "max_forks_repo_head_hexsha": "6dde6b5d2f72fac3928c5042a27dc50e978c3425", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6862745098, "max_line_length": 85, "alphanum_fraction": 0.6047024247, "include": true, "reason": "import numpy", "num_tokens": 306}
|
"""
https://github.com/gidariss/FewShotWithoutForgetting/blob/master/dataloader.py
"""
import numpy as np
from PIL import Image
from skimage import io
import unittest
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from preprocess.tools import read_csv, load_csv2dict
import sys
import warnings
warnings.filterwarnings('ignore')
sys.dont_write_bytecode = True
def PIL_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def Default_loader(path):
return io.imread(path)
def RGB_loader(path):
return Image.open(path).convert('RGB')
def Gray_loader(path):
return Image.open(path).convert('L')
class FewShotDataSet(Dataset):
def __init__(self, cfg, transform=None, phase='train', loader=RGB_loader):
super(FewShotDataSet, self).__init__()
self.phase = phase
self.transform = transform
self.loader = loader
self.data_name = cfg.datasets.name
self.way_num = cfg.datasets.way_num
self.shot_num = cfg.datasets.shot_num
self.query_num = cfg.datasets.query_num
self.data_root = cfg.datasets.root
self.img_size = cfg.dataloader.image_size
assert (phase in ['train', 'val', 'test'])
print('Loading FSL dataset - phase {0}'.format(phase))
if phase == 'train':
self.csv_path = cfg.datasets.train_data_dir
elif phase == 'val':
self.csv_path = cfg.datasets.val_data_dir
elif phase == 'test':
self.csv_path = cfg.datasets.test_data_dir
else:
raise ValueError('phase ought to be in [train/test/val]')
self.data_list = read_csv(self.csv_path)
self.class_img_dict, class_list = load_csv2dict(self.csv_path)
self.class_list = sorted(list(class_list))
self.label2Int = {item: idx for idx, item in enumerate(self.class_list)}
self.num_cats = len(self.class_list)
self.train_ordered_labels = [self.label2Int[item] for _, item in self.data_list]
def __getitem__(self, index):
fn, class_name = self.data_list[index]
label = self.label2Int[class_name]
img = self.loader(fn)
if self.transform is not None:
img = self.transform(img)
return img, label
def __len__(self):
return len(self.data_list)
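# A minimal usage sketch (assumes `cfg` carries the fields read above; the
# batch_size field is hypothetical):
#
#     dataset = FewShotDataSet(cfg, transform=None, phase='train')
#     img, label = dataset[0]
#     loader = torch.utils.data.DataLoader(dataset, batch_size=cfg.dataloader.batch_size)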
# class BaseDataSetTest(unittest.TestCase):
# def setUp(self) -> None:
# pass
# def tearDown(self):
# pass
#
# def forward(self):
# pass
# if __name__ == '__main__':
# # unittest.main()
# from engine.configs.parser import BaseOptions
# opts = BaseOptions().opts
# dataset = FewShotDataSet(opts)
# pass
|
{"hexsha": "6f38f6d1728a7c0605763a320cf2833b56ca9d65", "size": 2853, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/few_shot_dataset.py", "max_stars_repo_name": "WonderSeven/DSDA", "max_stars_repo_head_hexsha": "88266ea5dd53d918ba3cd74c7d6bbf431a134e95", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-04-15T09:24:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-18T04:04:55.000Z", "max_issues_repo_path": "datasets/few_shot_dataset.py", "max_issues_repo_name": "WonderSeven/DSDA", "max_issues_repo_head_hexsha": "88266ea5dd53d918ba3cd74c7d6bbf431a134e95", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/few_shot_dataset.py", "max_forks_repo_name": "WonderSeven/DSDA", "max_forks_repo_head_hexsha": "88266ea5dd53d918ba3cd74c7d6bbf431a134e95", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-04-14T05:49:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-16T05:04:12.000Z", "avg_line_length": 30.0315789474, "max_line_length": 101, "alphanum_fraction": 0.6550998948, "include": true, "reason": "import numpy", "num_tokens": 659}
|
#!/usr/bin/env python3
"""
Construct full edited FS recons for each subject/editor and rerun with appropriate recon-all flags
Author : Mike Tyszka
Place : Caltech
Dates : 2020-05-04 JMT From scratch
2020-05-25 JMT Add insertion of edited data
"""
import os
import shutil
def main():
# Scan assignments directory for editors and subjects
der_dir = '/data2/conte/derivatives'
fs_dir = os.path.join(der_dir, 'freesurfer_6')
fs_edit_dir = os.path.join(der_dir, 'freesurfer_6_edited')
edit_dir = os.path.join(fs_edit_dir, '+Training+')
print('Derivatives directory : {}'.format(der_dir))
print('Original FS subjects : {}'.format(fs_dir))
print('Edited FS subjects : {}'.format(fs_edit_dir))
print('Editor results : {}'.format(edit_dir))
# Init command list
cmd_list = []
for editor in os.listdir(edit_dir):
print('')
print('{}'.format(editor))
for subject in os.listdir(os.path.join(edit_dir, editor)):
print(' {}'.format(subject))
# Find unedited FS recon in main repository
subj_dir = os.path.join(fs_dir, subject)
if not os.path.isdir(subj_dir):
print('* {} not found amongst unedited subjects - skipping'.format(subject))
else:
# Create a new per-editor clone of the original unedited FS recon
edited_subject = '{}-{}'.format(subject, editor)
subj_edit_dir = os.path.join(fs_edit_dir, edited_subject)
# Check whether a clone already exists
if os.path.isdir(subj_edit_dir):
print(' {} already exists - skip cloning'.format(subj_edit_dir))
else:
# Clone unedited recon to edited directory
print(' Cloning {} to {}'.format(subject, fs_edit_dir))
shutil.copytree(subj_dir, subj_edit_dir)
# Init recon-all command for rerun
fs_cmd = 'recon-all -sd {} -subjid {}'.format(fs_edit_dir, edited_subject)
arpial_opt = ''
ar3_opt = ''
# Find edited data for this editor and subject
src_brain_mask = os.path.join(edit_dir, editor, subject, 'brainmask.mgz')
if os.path.isfile(src_brain_mask):
dst_brain_mask = os.path.join(subj_edit_dir, 'mri', 'brainmask.mgz')
print(' Copying brain mask')
print(' From : {}'.format(src_brain_mask))
print(' To : {}'.format(dst_brain_mask))
shutil.copyfile(src_brain_mask, dst_brain_mask)
arpial_opt = ' -autorecon-pial'
src_brain_man = os.path.join(edit_dir, editor, subject, 'brain.finalsurf.manedit.mgz')
if os.path.isfile(src_brain_man):
dst_brain_man = os.path.join(subj_edit_dir, 'mri', 'brain.finalsurf.manedit.mgz')
print(' Copying brain manual edit')
print(' From : {}'.format(src_brain_man))
print(' To : {}'.format(dst_brain_man))
shutil.copyfile(src_brain_man, dst_brain_man)
arpial_opt = ' -autorecon-pial'
src_wm_mask = os.path.join(edit_dir, editor, subject, 'wm.mgz')
if os.path.isfile(src_wm_mask):
dst_wm_mask = os.path.join(subj_edit_dir, 'mri', 'wm.mgz')
print(' Copying white matter mask')
print(' From : {}'.format(src_wm_mask))
print(' To : {}'.format(dst_wm_mask))
shutil.copyfile(src_wm_mask, dst_wm_mask)
fs_cmd += ' -autorecon2-wm'
ar3_opt = ' -autorecon3'
src_wm_cps = os.path.join(edit_dir, editor, subject, 'control.dat')
if os.path.isfile(src_wm_cps):
# Safely create tmp directory for control points
tmp_dir = os.path.join(subj_edit_dir, 'tmp')
os.makedirs(tmp_dir, exist_ok=True)
dst_wm_cps = os.path.join(tmp_dir, 'control.dat')
print(' Copying control points')
print(' From : {}'.format(src_wm_cps))
print(' To : {}'.format(dst_wm_cps))
shutil.copyfile(src_wm_cps, dst_wm_cps)
fs_cmd += ' -autorecon2-cp'
ar3_opt = ' -autorecon3'
# Complete options
fs_cmd += ar3_opt + arpial_opt
# Add freesurfer command to job list
cmd_list.append(fs_cmd)
# Write command list
cmds_fname = 'rerun_fsrecon.cmds'
print('Writing Freesurfer commands to {}'.format(cmds_fname))
with open(cmds_fname, 'w') as f:
f.write('\n'.join(cmd_list))
print('Done')
if __name__ == '__main__':
main()
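# For reference, a generated line in rerun_fsrecon.cmds might look like this
# (hypothetical subject/editor names; the flags depend on which edits exist):
#
#   recon-all -sd /data2/conte/derivatives/freesurfer_6_edited -subjid sub-001-editorA -autorecon2-wm -autorecon3 -autorecon-pial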
|
{"hexsha": "d6458589748f4641503c618edda08cd18d25e1ca", "size": 4939, "ext": "py", "lang": "Python", "max_stars_repo_path": "prep_rerun_fsrecon.py", "max_stars_repo_name": "jmtyszka/freesurfer-editing-utils", "max_stars_repo_head_hexsha": "ed19d4d2ad75315b77c94d19329a72071b57847a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "prep_rerun_fsrecon.py", "max_issues_repo_name": "jmtyszka/freesurfer-editing-utils", "max_issues_repo_head_hexsha": "ed19d4d2ad75315b77c94d19329a72071b57847a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "prep_rerun_fsrecon.py", "max_forks_repo_name": "jmtyszka/freesurfer-editing-utils", "max_forks_repo_head_hexsha": "ed19d4d2ad75315b77c94d19329a72071b57847a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4166666667, "max_line_length": 98, "alphanum_fraction": 0.5764324762, "include": true, "reason": "import numpy", "num_tokens": 1177}
|
#### Elementwise manipulations (scaling/clamping/type conversion) ####
# This file exists primarily to handle conversions for display and
# saving to disk. Both of these operations require UFixed-valued
# elements, but with display we always want to convert to 8-bit
# whereas saving can handle 16-bit.
# We also can't trust that user images are clamped properly.
# Finally, this supports adjustable contrast limits.
# Structure of MapInfo subtype definitions:
# - type definition
# - constructors for scalars
# - constructors for AbstractArrays
# - similar (syntax: similar(mapi, ToType, FromType))
# - implementation of map() for scalars
# - implementation of map() for AbstractArrays
# map(mapi::MapInfo{T}, x) should return an object of type T (for x not an array)
# map1(mapi::MapInfo{T}, x) is designed to allow T<:Color to work on
# scalars x::Fractional
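# For instance (a sketch of the `similar` convention above):
#     similar(MapNone{UFixed8}(), Float32, UFixed8) === MapNone{Float32}()
# i.e., `similar` rebuilds the same kind of MapInfo with new To/From types.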
# Dispatch-based elementwise manipulations
"""
`MapInfo{T}` is an abstract type that encompasses objects designed to
perform intensity or color transformations on pixels. For example,
before displaying an image in a window, you might need to adjust the
contrast settings; `MapInfo` objects provide a means to describe these
transformations without calculating them immediately. This delayed
execution can be useful in many contexts. For example, if you want to
display a movie, it would be quite wasteful to have to first transform
the entire movie; instead, `MapInfo` objects allow one to specify a
transformation to be performed on-the-fly as particular frames are
displayed.
You can create your own custom `MapInfo` objects. For example, given a
grayscale image, you could color "saturated" pixels red using
```jl
immutable ColorSaturated{C<:AbstractRGB} <: MapInfo{C}
end
Base.map{C}(::ColorSaturated{C}, val::Union{Number,Gray}) = ifelse(val == 1, C(1,0,0), C(val,val,val))
imgc = map(ColorSaturated{RGB{U8}}(), img)
```
For pre-defined types see `MapNone`, `BitShift`, `ClampMinMax`, `ScaleMinMax`,
`ScaleAutoMinMax`, and `ScaleSigned`.
"""
abstract MapInfo{T}
eltype{T}(mapi::MapInfo{T}) = T
## MapNone
"`MapNone(T)` is a `MapInfo` object that converts `x` to have type `T`."
immutable MapNone{T} <: MapInfo{T}; end
# Constructors
MapNone{T}(::Type{T}) = MapNone{T}()
MapNone{T}(val::T) = MapNone{T}()
MapNone{T}(A::AbstractArray{T}) = MapNone{T}()
similar{T}(mapi::MapNone, ::Type{T}, ::Type) = MapNone{T}()
# Implementation
map{T}(mapi::MapNone{T}, val::Union{Number,Colorant}) = convert(T, val)
map1(mapi::Union{MapNone{RGB24}, MapNone{ARGB32}}, b::Bool) = ifelse(b, 0xffuf8, 0x00uf8)
map1(mapi::Union{MapNone{RGB24},MapNone{ARGB32}}, val::Fractional) = convert(UFixed8, val)
map1{CT<:Colorant}(mapi::MapNone{CT}, val::Fractional) = convert(eltype(CT), val)
map{T<:Colorant}(mapi::MapNone{T}, img::AbstractImageIndexed{T}) = convert(Image{T}, img)
map{C<:Colorant}(mapi::MapNone{C}, img::AbstractImageDirect{C}) = img # ambiguity resolution
map{T}(mapi::MapNone{T}, img::AbstractArray{T}) = img
## BitShift
"""
`BitShift{T,N}` performs a "saturating rightward bit-shift" operation.
It is particularly useful in converting high bit-depth images to 8-bit
images for the purpose of display. For example,
```
map(BitShift(UFixed8, 8), 0xa2d5uf16) === 0xa2uf8
```
converts a `UFixed16` to the corresponding `UFixed8` by discarding the
least significant byte. However,
```
map(BitShift(UFixed8, 7), 0xa2d5uf16) == 0xffuf8
```
because `0xa2d5>>7 == 0x0145 > typemax(UInt8)`.
When applicable, the main advantage of using `BitShift` rather than
`MapNone` or `ScaleMinMax` is speed.
"""
immutable BitShift{T,N} <: MapInfo{T} end
BitShift{T}(::Type{T}, n::Int) = BitShift{T,n}() # note that this is not type-stable
similar{S,T,N}(mapi::BitShift{S,N}, ::Type{T}, ::Type) = BitShift{T,N}()
# Implementation
immutable BS{N} end
_map{T<:Unsigned,N}(::Type{T}, ::Type{BS{N}}, val::Unsigned) = (v = val>>>N; tm = oftype(val, typemax(T)); convert(T, ifelse(v > tm, tm, v)))
_map{T<:UFixed,N}(::Type{T}, ::Type{BS{N}}, val::UFixed) = reinterpret(T, _map(FixedPointNumbers.rawtype(T), BS{N}, reinterpret(val)))
map{T<:Real,N}(mapi::BitShift{T,N}, val::Real) = _map(T, BS{N}, val)
map{T<:Real,N}(mapi::BitShift{Gray{T},N}, val::Gray) = Gray(_map(T, BS{N}, val.val))
map1{N}(mapi::Union{BitShift{RGB24,N},BitShift{ARGB32,N}}, val::Unsigned) = _map(UInt8, BS{N}, val)
map1{N}(mapi::Union{BitShift{RGB24,N},BitShift{ARGB32,N}}, val::UFixed) = _map(UFixed8, BS{N}, val)
map1{CT<:Colorant,N}(mapi::BitShift{CT,N}, val::UFixed) = _map(eltype(CT), BS{N}, val)
## Clamp types
# The Clamp types just enforce bounds, but do not scale or offset
# Types and constructors
abstract AbstractClamp{T} <: MapInfo{T}
"""
`ClampMin(T, minvalue)` is a `MapInfo` object that clamps pixel values
to be greater than or equal to `minvalue` before converting to type `T`.
See also: `ClampMax`, `ClampMinMax`.
"""
immutable ClampMin{T,From} <: AbstractClamp{T}
min::From
end
ClampMin{T,From}(::Type{T}, min::From) = ClampMin{T,From}(min)
ClampMin{T}(min::T) = ClampMin{T,T}(min)
"""
`ClampMax(T, maxvalue)` is a `MapInfo` object that clamps pixel values
to be less than or equal to `maxvalue` before converting to type `T`.
See also: `ClampMin`, `ClampMinMax`.
"""
immutable ClampMax{T,From} <: AbstractClamp{T}
max::From
end
ClampMax{T,From}(::Type{T}, max::From) = ClampMax{T,From}(max)
ClampMax{T}(max::T) = ClampMax{T,T}(max)
immutable ClampMinMax{T,From} <: AbstractClamp{T}
min::From
max::From
end
"""
`ClampMinMax(T, minvalue, maxvalue)` is a `MapInfo` object that clamps
pixel values to be between `minvalue` and `maxvalue` before converting
to type `T`.
See also: `ClampMin`, `ClampMax`, and `Clamp`.
"""
ClampMinMax{T,From}(::Type{T}, min::From, max::From) = ClampMinMax{T,From}(min,max)
ClampMinMax{T}(min::T, max::T) = ClampMinMax{T,T}(min,max)
"""
`Clamp(C)` is a `MapInfo` object that clamps color values to be within
gamut. For example,
```
map(Clamp(RGB{U8}), RGB(1.2, -0.4, 0.6)) === RGB{U8}(1, 0, 0.6)
```
"""
immutable Clamp{T} <: AbstractClamp{T} end
Clamp{T}(::Type{T}) = Clamp{T}()
similar{T,F}(mapi::ClampMin, ::Type{T}, ::Type{F}) = ClampMin{T,F}(convert(F, mapi.min))
similar{T,F}(mapi::ClampMax, ::Type{T}, ::Type{F}) = ClampMax{T,F}(convert(F, mapi.max))
similar{T,F}(mapi::ClampMinMax, ::Type{T}, ::Type{F}) = ClampMinMax{T,F}(convert(F, mapi.min), convert(F, mapi.max))
similar{T,F}(mapi::Clamp, ::Type{T}, ::Type{F}) = Clamp{T}()
# Implementation
map{T<:Real,F<:Real}(mapi::ClampMin{T,F}, val::F) = convert(T, max(val, mapi.min))
map{T<:Real,F<:Real}(mapi::ClampMax{T,F}, val::F) = convert(T, min(val, mapi.max))
map{T<:Real,F<:Real}(mapi::ClampMinMax{T,F}, val::F) = convert(T,min(max(val, mapi.min), mapi.max))
map{T<:Fractional,F<:Real}(mapi::ClampMin{Gray{T},F}, val::F) = convert(Gray{T}, max(val, mapi.min))
map{T<:Fractional,F<:Real}(mapi::ClampMax{Gray{T},F}, val::F) = convert(Gray{T}, min(val, mapi.max))
map{T<:Fractional,F<:Real}(mapi::ClampMinMax{Gray{T},F}, val::F) = convert(Gray{T},min(max(val, mapi.min), mapi.max))
map{T<:Fractional,F<:Fractional}(mapi::ClampMin{Gray{T},F}, val::Gray{F}) = convert(Gray{T}, max(val, mapi.min))
map{T<:Fractional,F<:Fractional}(mapi::ClampMax{Gray{T},F}, val::Gray{F}) = convert(Gray{T}, min(val, mapi.max))
map{T<:Fractional,F<:Fractional}(mapi::ClampMinMax{Gray{T},F}, val::Gray{F}) = convert(Gray{T},min(max(val, mapi.min), mapi.max))
map{T<:Fractional,F<:Fractional}(mapi::ClampMin{Gray{T},Gray{F}}, val::Gray{F}) = convert(Gray{T}, max(val, mapi.min))
map{T<:Fractional,F<:Fractional}(mapi::ClampMax{Gray{T},Gray{F}}, val::Gray{F}) = convert(Gray{T}, min(val, mapi.max))
map{T<:Fractional,F<:Fractional}(mapi::ClampMinMax{Gray{T},Gray{F}}, val::Gray{F}) = convert(Gray{T},min(max(val, mapi.min), mapi.max))
map1{T<:Union{RGB24,ARGB32},F<:Fractional}(mapi::ClampMin{T,F}, val::F) = convert(UFixed8, max(val, mapi.min))
map1{T<:Union{RGB24,ARGB32},F<:Fractional}(mapi::ClampMax{T,F}, val::F) = convert(UFixed8, min(val, mapi.max))
map1{T<:Union{RGB24,ARGB32},F<:Fractional}(mapi::ClampMinMax{T,F}, val::F) = convert(UFixed8,min(max(val, mapi.min), mapi.max))
map1{CT<:Colorant,F<:Fractional}(mapi::ClampMin{CT,F}, val::F) = convert(eltype(CT), max(val, mapi.min))
map1{CT<:Colorant,F<:Fractional}(mapi::ClampMax{CT,F}, val::F) = convert(eltype(CT), min(val, mapi.max))
map1{CT<:Colorant,F<:Fractional}(mapi::ClampMinMax{CT,F}, val::F) = convert(eltype(CT), min(max(val, mapi.min), mapi.max))
map{To<:Real}(::Clamp{To}, val::Real) = clamp01(To, val)
map{To<:Real}(::Clamp{Gray{To}}, val::AbstractGray) = Gray(clamp01(To, val.val))
map{To<:Real}(::Clamp{Gray{To}}, val::Real) = Gray(clamp01(To, val))
map1{CT<:AbstractRGB}(::Clamp{CT}, val::Real) = clamp01(eltype(CT), val)
map1{P<:TransparentRGB}(::Clamp{P}, val::Real) = clamp01(eltype(P), val)
# Also available as a stand-alone function
clamp01{T}(::Type{T}, x::Real) = convert(T, min(max(x, zero(x)), one(x)))
clamp01(x::Real) = clamp01(typeof(x), x)
clamp01(x::Colorant) = clamp01(typeof(x), x)
clamp01{Cdest<:AbstractRGB }(::Type{Cdest}, x::AbstractRGB) = (To = eltype(Cdest);
Cdest(clamp01(To, red(x)), clamp01(To, green(x)), clamp01(To, blue(x))))
clamp01{Pdest<:TransparentRGB}(::Type{Pdest}, x::TransparentRGB) = (To = eltype(Pdest);
Pdest(clamp01(To, red(x)), clamp01(To, green(x)), clamp01(To, blue(x)), clamp01(To, alpha(x))))
# clamp is generic for any colorspace; this version does the right thing for any RGB type
clamp(x::Union{AbstractRGB, TransparentRGB}) = clamp01(x)
## ScaleMinMax
"""
`ScaleMinMax(T, min, max, [scalefactor])` is a `MapInfo` object that
clamps the image at the specified `min`/`max` values, subtracts the
`min` value, scales the result by multiplying by `scalefactor`, and
finally converts to type `T`. If `scalefactor` is not specified, it
defaults to scaling the interval `[min,max]` to `[0,1]`.
Alternative constructors include `ScaleMinMax(T, img)` for which
`min`, `max`, and `scalefactor` are computed from the minimum and
maximum values found in `img`.
See also: `ScaleMinMaxNaN`, `ScaleAutoMinMax`, `MapNone`, `BitShift`.
"""
immutable ScaleMinMax{To,From,S<:AbstractFloat} <: MapInfo{To}
min::From
max::From
s::S
function ScaleMinMax(min, max, s)
min >= max && error("min must be smaller than max")
new(min, max, s)
end
end
ScaleMinMax{To,From}(::Type{To}, min::From, max::From, s::AbstractFloat) = ScaleMinMax{To,From,typeof(s)}(min, max, s)
ScaleMinMax{To<:Union{Fractional,Colorant},From}(::Type{To}, mn::From, mx::From) = ScaleMinMax(To, mn, mx, 1.0f0/(convert(Float32, mx)-convert(Float32, mn)))
# ScaleMinMax constructors that take AbstractArray input
ScaleMinMax{To,From<:Real}(::Type{To}, img::AbstractArray{From}, mn::Real, mx::Real) = ScaleMinMax(To, convert(From,mn), convert(From,mx), 1.0f0/(convert(Float32, convert(From, mx))-convert(Float32,convert(From, mn))))
ScaleMinMax{To,From<:Real}(::Type{To}, img::AbstractArray{Gray{From}}, mn::Real, mx::Real) = ScaleMinMax(To, convert(From,mn), convert(From,mx), 1.0f0/(convert(Float32, convert(From,mx))-convert(Float32, convert(From,mn))))
ScaleMinMax{To,From<:Real,R<:Real}(::Type{To}, img::AbstractArray{From}, mn::Gray{R}, mx::Gray{R}) = ScaleMinMax(To, convert(From,mn.val), convert(From,mx.val), 1.0f0/(convert(Float32, convert(From,mx.val))-convert(Float32, convert(From,mn.val))))
ScaleMinMax{To,From<:Real,R<:Real}(::Type{To}, img::AbstractArray{Gray{From}}, mn::Gray{R}, mx::Gray{R}) = ScaleMinMax(To, convert(From,mn.val), convert(From,mx.val), 1.0f0/(convert(Float32, convert(From,mx.val))-convert(Float32, convert(From,mn.val))))
ScaleMinMax{To}(::Type{To}, img::AbstractArray) = ScaleMinMax(To, img, minfinite(img), maxfinite(img))
ScaleMinMax{To,CV<:AbstractRGB}(::Type{To}, img::AbstractArray{CV}) = (imgr = reinterpret(eltype(CV), img); ScaleMinMax(To, minfinite(imgr), maxfinite(imgr)))
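# Example (a sketch): stretch values spanning [0.2, 0.8] onto [0, 1] as UFixed8:
#     mapi = ScaleMinMax(UFixed8, 0.2, 0.8)
#     map(mapi, 0.5)   # 0.5uf8, since (0.5 - 0.2)/(0.8 - 0.2) == 0.5
#     map(mapi, 0.1)   # below `min`, so clamped to 0.0uf8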
similar{T,F,To,From,S}(mapi::ScaleMinMax{To,From,S}, ::Type{T}, ::Type{F}) = ScaleMinMax{T,F,S}(convert(F,mapi.min), convert(F,mapi.max), mapi.s)
# Implementation
function map{To<:Union{Real,AbstractGray},From<:Union{Real,AbstractGray}}(mapi::ScaleMinMax{To,From}, val::From)
g = gray(val)
t = ifelse(g < mapi.min, zero(From), ifelse(g > mapi.max, mapi.max-mapi.min, g-mapi.min))
convert(To, mapi.s*t)
end
function map{To<:Union{Real,AbstractGray},From<:Union{Real,AbstractGray}}(mapi::ScaleMinMax{To,From}, val::Union{Real,Colorant})
map(mapi, convert(From, val))
end
function map1{To<:Union{RGB24,ARGB32},From<:Real}(mapi::ScaleMinMax{To,From}, val::From)
t = ifelse(val < mapi.min, zero(From), ifelse(val > mapi.max, mapi.max-mapi.min, val-mapi.min))
convert(UFixed8, mapi.s*t)
end
function map1{To<:Colorant,From<:Real}(mapi::ScaleMinMax{To,From}, val::From)
t = ifelse(val < mapi.min, zero(From), ifelse(val > mapi.max, mapi.max-mapi.min, val-mapi.min))
convert(eltype(To), mapi.s*t)
end
function map1{To<:Union{RGB24,ARGB32},From<:Real}(mapi::ScaleMinMax{To,From}, val::Union{Real,Colorant})
map1(mapi, convert(From, val))
end
function map1{To<:Colorant,From<:Real}(mapi::ScaleMinMax{To,From}, val::Union{Real,Colorant})
map1(mapi, convert(From, val))
end
## ScaleSigned
"""
`ScaleSigned(T, scalefactor)` is a `MapInfo` object designed for
visualization of images where the pixel's sign has special meaning.
It multiplies the pixel value by `scalefactor`, then clamps to the
interval `[-1,1]`. If `T` is a floating-point type, it stays in this
representation. If `T` is an `AbstractRGB`, then it is encoded as a
magenta (positive)/green (negative) image, with the intensity of the
color proportional to the clamped absolute value.
"""
immutable ScaleSigned{T, S<:AbstractFloat} <: MapInfo{T}
s::S
end
ScaleSigned{T}(::Type{T}, s::AbstractFloat) = ScaleSigned{T, typeof(s)}(s)
ScaleSigned{T}(::Type{T}, img::AbstractArray) = ScaleSigned(T, 1.0f0/maxabsfinite(img))
ScaleSigned(img::AbstractArray) = ScaleSigned(Float32, img)
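# Example (a sketch): visualize a signed field with values in [-10, 10]:
#     mapi = ScaleSigned(RGB{UFixed8}, 0.1f0)
#     map(mapi, 5.0)    # magenta at half intensity: RGB{UFixed8}(0.5, 0, 0.5)
#     map(mapi, -5.0)   # green at half intensity:   RGB{UFixed8}(0, 0.5, 0)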
similar{T,To,S}(mapi::ScaleSigned{To,S}, ::Type{T}, ::Type) = ScaleSigned{T,S}(mapi.s)
map{T}(mapi::ScaleSigned{T}, val::Real) = convert(T, clamppm(mapi.s*val))
function map{C<:AbstractRGB}(mapi::ScaleSigned{C}, val::Real)
x = clamppm(mapi.s*val)
g = UFixed8(abs(x))
ifelse(x >= 0, C(g, zero(UFixed8), g), C(zero(UFixed8), g, zero(UFixed8)))
end
clamppm(x::Real) = ifelse(x >= 0, min(x, one(x)), max(x, -one(x)))
## ScaleAutoMinMax
# Works only on whole arrays, not values
"""
`ScaleAutoMinMax(T)` constructs a `MapInfo` object that causes images
to be dynamically scaled to their specific min/max values, using the
same algorithm as `ScaleMinMax`. When displaying a movie, the min/max
will be recalculated for each frame, so this can result in
inconsistent contrast scaling.
"""
immutable ScaleAutoMinMax{T} <: MapInfo{T} end
ScaleAutoMinMax{T}(::Type{T}) = ScaleAutoMinMax{T}()
ScaleAutoMinMax() = ScaleAutoMinMax{UFixed8}()
similar{T}(mapi::ScaleAutoMinMax, ::Type{T}, ::Type) = ScaleAutoMinMax{T}()
## NaN-nulling mapping
"""
`ScaleMinMaxNaN(smm)` constructs a `MapInfo` object from a
`ScaleMinMax` object `smm`, with the additional property that `NaN`
values map to zero.
See also: `ScaleMinMax`.
"""
immutable ScaleMinMaxNaN{To,From,S} <: MapInfo{To}
smm::ScaleMinMax{To,From,S}
end
"""
`Clamp01NaN(T)` or `Clamp01NaN(img)` constructs a `MapInfo` object
that clamps grayscale or color pixels to the interval `[0,1]`, sending
`NaN` pixels to zero.
"""
immutable Clamp01NaN{T} <: MapInfo{T} end
Clamp01NaN{T}(A::AbstractArray{T}) = Clamp01NaN{T}()
# Implementation
similar{T,F,To,From,S}(mapi::ScaleMinMaxNaN{To,From,S}, ::Type{T}, ::Type{F}) = ScaleMinMaxNaN{T,F,S}(similar(mapi.smm, T, F))
similar{T}(mapi::Clamp01NaN, ::Type{T}, ::Type) = Clamp01NaN{T}()
Base.map{To}(smmn::ScaleMinMaxNaN{To}, g::Number) = isnan(g) ? zero(To) : map(smmn.smm, g)
Base.map{To}(smmn::ScaleMinMaxNaN{To}, g::Gray) = isnan(g) ? zero(To) : map(smmn.smm, g)
function Base.map{T<:RGB}(::Clamp01NaN{T}, c::AbstractRGB)
r, g, b = red(c), green(c), blue(c)
if isnan(r) || isnan(g) || isnan(b)
return T(0,0,0)
end
T(clamp(r, 0, 1), clamp(g, 0, 1), clamp(b, 0, 1))
end
function Base.map{T<:Union{Fractional,Gray}}(::Clamp01NaN{T}, c::Union{Fractional,AbstractGray})
g = gray(c)
if isnan(g)
return T(0)
end
T(clamp(g, 0, 1))
end
# Conversions to RGB{T}, RGBA{T}, RGB24, ARGB32,
# for grayscale, AbstractRGB, and abstract ARGB inputs.
# This essentially "vectorizes" map over a single pixel's color channels using map1
for SI in (MapInfo, AbstractClamp)
for ST in subtypes(SI)
ST.abstract && continue
ST == ScaleSigned && continue # ScaleSigned gives an RGB from a scalar, so don't "vectorize" it
@eval begin
# Grayscale and GrayAlpha inputs
map(mapi::$ST{RGB24}, g::Gray) = map(mapi, g.val)
map(mapi::$ST{RGB24}, g::Real) = (x = map1(mapi, g); convert(RGB24, RGB{UFixed8}(x,x,x)))
function map(mapi::$ST{RGB24}, g::AbstractFloat)
if isfinite(g)
x = map1(mapi, g)
convert(RGB24, RGB{UFixed8}(x,x,x))
else
RGB24(0)
end
end
map{G<:Gray}(mapi::$ST{RGB24}, g::TransparentColor{G}) = map(mapi, gray(g))
map(mapi::$ST{ARGB32}, g::Gray) = map(mapi, g.val)
function map(mapi::$ST{ARGB32}, g::Real)
x = map1(mapi, g)
convert(ARGB32, ARGB{UFixed8}(x,x,x,0xffuf8))
end
function map{G<:Gray}(mapi::$ST{ARGB32}, g::TransparentColor{G})
x = map1(mapi, gray(g))
convert(ARGB32, ARGB{UFixed8}(x,x,x,map1(mapi, g.alpha)))
end
end
for O in (:RGB, :BGR)
@eval begin
map{T}(mapi::$ST{$O{T}}, g::Gray) = map(mapi, g.val)
function map{T}(mapi::$ST{$O{T}}, g::Real)
x = map1(mapi, g)
$O{T}(x,x,x)
end
end
end
for OA in (:RGBA, :ARGB, :BGRA)
exAlphaGray = ST == MapNone ? :nothing : quote
function map{T,G<:Gray}(mapi::$ST{$OA{T}}, g::TransparentColor{G})
x = map1(mapi, gray(g))
$OA{T}(x,x,x,map1(mapi, g.alpha))
end # avoids an ambiguity warning with MapNone definitions
end
@eval begin
map{T}(mapi::$ST{$OA{T}}, g::Gray) = map(mapi, g.val)
function map{T}(mapi::$ST{$OA{T}}, g::Real)
x = map1(mapi, g)
$OA{T}(x,x,x)
end
$exAlphaGray
end
end
@eval begin
# AbstractRGB and abstract ARGB inputs
map(mapi::$ST{RGB24}, rgb::AbstractRGB) =
convert(RGB24, RGB{UFixed8}(map1(mapi, red(rgb)), map1(mapi, green(rgb)), map1(mapi, blue(rgb))))
map{C<:AbstractRGB, TC}(mapi::$ST{RGB24}, argb::TransparentColor{C,TC}) =
convert(RGB24, RGB{UFixed8}(map1(mapi, red(argb)), map1(mapi, green(argb)),
map1(mapi, blue(argb))))
map{C<:AbstractRGB, TC}(mapi::$ST{ARGB32}, argb::TransparentColor{C,TC}) =
convert(ARGB32, ARGB{UFixed8}(map1(mapi, red(argb)), map1(mapi, green(argb)),
map1(mapi, blue(argb)), map1(mapi, alpha(argb))))
map(mapi::$ST{ARGB32}, rgb::AbstractRGB) =
convert(ARGB32, ARGB{UFixed8}(map1(mapi, red(rgb)), map1(mapi, green(rgb)), map1(mapi, blue(rgb))))
end
for O in (:RGB, :BGR)
@eval begin
map{T}(mapi::$ST{$O{T}}, rgb::AbstractRGB) =
$O{T}(map1(mapi, red(rgb)), map1(mapi, green(rgb)), map1(mapi, blue(rgb)))
map{T,C<:AbstractRGB, TC}(mapi::$ST{$O{T}}, argb::TransparentColor{C,TC}) =
$O{T}(map1(mapi, red(argb)), map1(mapi, green(argb)), map1(mapi, blue(argb)))
end
end
for OA in (:RGBA, :ARGB, :BGRA)
@eval begin
map{T, C<:AbstractRGB, TC}(mapi::$ST{$OA{T}}, argb::TransparentColor{C,TC}) =
$OA{T}(map1(mapi, red(argb)), map1(mapi, green(argb)),
map1(mapi, blue(argb)), map1(mapi, alpha(argb)))
map{T}(mapi::$ST{$OA{T}}, argb::ARGB32) = map(mapi, convert(RGBA{UFixed8}, argb))
map{T}(mapi::$ST{$OA{T}}, rgb::AbstractRGB) =
$OA{T}(map1(mapi, red(rgb)), map1(mapi, green(rgb)), map1(mapi, blue(rgb)))
map{T}(mapi::$ST{$OA{T}}, rgb::RGB24) = map(mapi, convert(RGB{UFixed8}, rgb))
end
end
end
end
# # Apply to any Colorant
# map(f::Callable, x::Color) = f(x)
# map(mapi, x::Color) = map(mapi, convert(RGB, x))
# map{C<:Color, TC}(f::Callable, x::TransparentColor{C, TC}) = f(convert(ARGB, x))
# map{C<:Color, TC}(mapi, x::TransparentColor{C, TC}) = map(mapi, convert(ARGB, x))
## Fallback definitions of map() for array types
function map{T}(mapi::MapInfo{T}, img::AbstractArray)
out = similar(img, T)
map!(mapi, out, img)
end
map{C<:Colorant,R<:Real}(mapi::MapNone{C}, img::AbstractImageDirect{R}) = mapcd(mapi, img) # ambiguity resolution
map{C<:Colorant,R<:Real}(mapi::MapInfo{C}, img::AbstractImageDirect{R}) = mapcd(mapi, img)
function mapcd{C<:Colorant,R<:Real}(mapi::MapInfo{C}, img::AbstractImageDirect{R})
# For this case we have to check whether color is defined along an array axis
cd = colordim(img)
if cd > 0
dims = setdiff(1:ndims(img), cd)
out = similar(img, C, size(img)[dims])
map!(mapi, out, img, TypeConst{cd})
else
out = similar(img, C)
map!(mapi, out, img)
end
out # note this isn't type-stable
end
function map{T<:Colorant}(mapi::MapInfo{T}, img::AbstractImageIndexed)
out = Image(Array(T, size(img)), properties(img))
map!(mapi, out, img)
end
map!{T,T1,T2,N}(mapi::MapInfo{T1}, out::AbstractArray{T,N}, img::AbstractArray{T2,N}) =
_map_a!(mapi, out, img)
function _map_a!{T,T1,T2,N}(mapi::MapInfo{T1}, out::AbstractArray{T,N}, img::AbstractArray{T2,N})
mi = take(mapi, img)
dimg = data(img)
dout = data(out)
size(dout) == size(dimg) || throw(DimensionMismatch())
for I in eachindex(dout, dimg)
@inbounds dout[I] = map(mi, dimg[I])
end
out
end
take(mapi::MapInfo, img::AbstractArray) = mapi
take{T}(mapi::ScaleAutoMinMax{T}, img::AbstractArray) = ScaleMinMax(T, img)
# Indexed images (colormaps)
map!{T,T1,N}(mapi::MapInfo{T}, out::AbstractArray{T,N}, img::AbstractImageIndexed{T1,N}) =
_mapindx!(mapi, out, img)
function _mapindx!{T,T1,N}(mapi::MapInfo{T}, out::AbstractArray{T,N}, img::AbstractImageIndexed{T1,N})
dimg = data(img)
dout = data(out)
cmap = map(mapi, img.cmap)
for I in eachindex(dout, dimg)
@inbounds dout[I] = cmap[dimg[I]]
end
out
end
# For when color is encoded along dimension CD
# NC is the number of color channels
# This is a very flexible implementation: color can be stored along any dimension, and it handles conversions to
# many different colorspace representations.
for (CT, NC) in ((Union{AbstractRGB,RGB24}, 3), (Union{RGBA,ARGB,ARGB32}, 4), (Union{AGray,GrayA,AGray32}, 2))
for N = 1:4
N1 = N+1
@eval begin
function map!{T<:$CT,T1,T2,CD}(mapi::MapInfo{T}, out::AbstractArray{T1,$N}, img::AbstractArray{T2,$N1}, ::Type{TypeConst{CD}})
mi = take(mapi, img)
dimg = data(img)
dout = data(out)
# Set up the index along the color axis
# We really only need dimension CD, but this will suffice
@nexprs $NC k->(@nexprs $N1 d->(j_k_d = k))
# Loop over all the elements in the output, performing the conversion on each color component
@nloops $N i dout d->(d<CD ? (@nexprs $NC k->(j_k_d = i_d)) : (@nexprs $NC k->(j_k_{d+1} = i_d))) begin
@inbounds @nref($N, dout, i) = @ncall $NC T k->(map1(mi, @nref($N1, dimg, j_k)))
end
out
end
end
end
end
#### MapInfo defaults
# Each "client" can define its own methods. "clients" include UFixed,
# RGB24/ARGB32, and ImageMagick
const bitshiftto8 = ((UFixed10, 2), (UFixed12, 4), (UFixed14, 6), (UFixed16, 8))
# typealias GrayType{T<:Fractional} Union{T, Gray{T}}
typealias GrayArray{T<:Fractional} Union{AbstractArray{T}, AbstractArray{Gray{T}}}
# note, though, that we need to override for AbstractImage in case the
# "colorspace" property is defined differently
# mapinfo{T<:Union{Real,Colorant}}(::Type{T}, img::AbstractArray{T}) = MapNone(img)
"""
`mapi = mapinfo(T, img)` returns a `MapInfo` object that is deemed
appropriate for converting pixels of `img` to be of type `T`. `T` can
either be a specific type (e.g., `RGB24`), or you can specify an
abstract type like `Clamp` and it will return one of the `Clamp`
family of `MapInfo` objects.
You can define your own rules for `mapinfo`. For example, the
`ImageMagick` package defines methods for how pixels values should be
converted before saving images to disk.
"""
mapinfo{T<:UFixed}(::Type{T}, img::AbstractArray{T}) = MapNone(img)
mapinfo{T<:AbstractFloat}(::Type{T}, img::AbstractArray{T}) = MapNone(img)
# Grayscale methods
mapinfo(::Type{UFixed8}, img::GrayArray{UFixed8}) = MapNone{UFixed8}()
mapinfo(::Type{Gray{UFixed8}}, img::GrayArray{UFixed8}) = MapNone{Gray{UFixed8}}()
mapinfo(::Type{GrayA{UFixed8}}, img::AbstractArray{GrayA{UFixed8}}) = MapNone{GrayA{UFixed8}}()
for (T,n) in bitshiftto8
@eval mapinfo(::Type{UFixed8}, img::GrayArray{$T}) = BitShift{UFixed8,$n}()
@eval mapinfo(::Type{Gray{UFixed8}}, img::GrayArray{$T}) = BitShift{Gray{UFixed8},$n}()
@eval mapinfo(::Type{GrayA{UFixed8}}, img::AbstractArray{GrayA{$T}}) = BitShift{GrayA{UFixed8},$n}()
end
mapinfo{T<:UFixed,F<:AbstractFloat}(::Type{T}, img::GrayArray{F}) = ClampMinMax(T, zero(F), one(F))
mapinfo{T<:UFixed,F<:AbstractFloat}(::Type{Gray{T}}, img::GrayArray{F}) = ClampMinMax(Gray{T}, zero(F), one(F))
mapinfo{T<:AbstractFloat, R<:Real}(::Type{T}, img::AbstractArray{R}) = MapNone(T)
mapinfo(::Type{RGB24}, img::Union{AbstractArray{Bool}, BitArray}) = MapNone{RGB24}()
mapinfo(::Type{ARGB32}, img::Union{AbstractArray{Bool}, BitArray}) = MapNone{ARGB32}()
mapinfo{F<:Fractional}(::Type{RGB24}, img::GrayArray{F}) = ClampMinMax(RGB24, zero(F), one(F))
mapinfo{F<:Fractional}(::Type{ARGB32}, img::AbstractArray{F}) = ClampMinMax(ARGB32, zero(F), one(F))
# Color->Color methods
mapinfo(::Type{RGB{UFixed8}}, img) = MapNone{RGB{UFixed8}}()
mapinfo(::Type{RGBA{UFixed8}}, img) = MapNone{RGBA{UFixed8}}()
for (T,n) in bitshiftto8
@eval mapinfo(::Type{RGB{UFixed8}}, img::AbstractArray{RGB{$T}}) = BitShift{RGB{UFixed8},$n}()
@eval mapinfo(::Type{RGBA{UFixed8}}, img::AbstractArray{RGBA{$T}}) = BitShift{RGBA{UFixed8},$n}()
end
mapinfo{F<:Fractional}(::Type{RGB{UFixed8}}, img::AbstractArray{RGB{F}}) = Clamp(RGB{UFixed8})
mapinfo{F<:Fractional}(::Type{RGBA{UFixed8}}, img::AbstractArray{RGBA{F}}) = Clamp(RGBA{UFixed8})
# Color->RGB24/ARGB32
mapinfo(::Type{RGB24}, img::AbstractArray{RGB24}) = MapNone{RGB24}()
mapinfo(::Type{ARGB32}, img::AbstractArray{ARGB32}) = MapNone{ARGB32}()
for C in tuple(subtypes(AbstractRGB)..., Gray)
C == RGB24 && continue
@eval mapinfo(::Type{RGB24}, img::AbstractArray{$C{UFixed8}}) = MapNone{RGB24}()
@eval mapinfo(::Type{ARGB32}, img::AbstractArray{$C{UFixed8}}) = MapNone{ARGB32}()
for (T, n) in bitshiftto8
@eval mapinfo(::Type{RGB24}, img::AbstractArray{$C{$T}}) = BitShift{RGB24, $n}()
@eval mapinfo(::Type{ARGB32}, img::AbstractArray{$C{$T}}) = BitShift{ARGB32, $n}()
end
@eval mapinfo{F<:AbstractFloat}(::Type{RGB24}, img::AbstractArray{$C{F}}) = ClampMinMax(RGB24, zero(F), one(F))
@eval mapinfo{F<:AbstractFloat}(::Type{ARGB32}, img::AbstractArray{$C{F}}) = ClampMinMax(ARGB32, zero(F), one(F))
for AC in subtypes(TransparentColor)
length(AC.parameters) == 2 || continue
@eval mapinfo(::Type{ARGB32}, img::AbstractArray{$AC{$C{UFixed8},UFixed8}}) = MapNone{ARGB32}()
@eval mapinfo(::Type{RGB24}, img::AbstractArray{$AC{$C{UFixed8},UFixed8}}) = MapNone{RGB24}()
for (T, n) in bitshiftto8
@eval mapinfo(::Type{ARGB32}, img::AbstractArray{$AC{$C{$T},$T}}) = BitShift{ARGB32, $n}()
@eval mapinfo(::Type{RGB24}, img::AbstractArray{$AC{$C{$T},$T}}) = BitShift{RGB24, $n}()
end
@eval mapinfo{F<:AbstractFloat}(::Type{ARGB32}, img::AbstractArray{$AC{$C{F},F}}) = ClampMinMax(ARGB32, zero(F), one(F))
@eval mapinfo{F<:AbstractFloat}(::Type{RGB24}, img::AbstractArray{$AC{$C{F},F}}) = ClampMinMax(RGB24, zero(F), one(F))
end
end
mapinfo{CT<:Colorant}(::Type{RGB24}, img::AbstractArray{CT}) = MapNone{RGB24}()
mapinfo{CT<:Colorant}(::Type{ARGB32}, img::AbstractArray{CT}) = MapNone{ARGB32}()
# UInt32 conversions will use ARGB32 for images that have an alpha channel,
# and RGB24 when not
mapinfo{CV<:Union{Fractional,Color,AbstractGray}}(::Type{UInt32}, img::AbstractArray{CV}) = mapinfo(RGB24, img)
mapinfo{CV<:TransparentColor}(::Type{UInt32}, img::AbstractArray{CV}) = mapinfo(ARGB32, img)
mapinfo(::Type{UInt32}, img::Union{AbstractArray{Bool},BitArray}) = mapinfo(RGB24, img)
mapinfo(::Type{UInt32}, img::AbstractArray{UInt32}) = MapNone{UInt32}()
# Clamping mapinfo client. Converts to RGB and uses UFixed, clamping
# floating-point values to [0,1].
mapinfo{T<:UFixed}(::Type{Clamp}, img::AbstractArray{T}) = MapNone{T}()
mapinfo{T<:AbstractFloat}(::Type{Clamp}, img::AbstractArray{T}) = ClampMinMax(UFixed8, zero(T), one(T))
let handled = Set()
for ACV in (Color, AbstractRGB)
for CV in subtypes(ACV)
(length(CV.parameters) == 1 && !(CV.abstract)) || continue
CVnew = CV<:AbstractGray ? Gray : RGB
@eval mapinfo{T<:UFixed}(::Type{Clamp}, img::AbstractArray{$CV{T}}) = MapNone{$CVnew{T}}()
@eval mapinfo{CV<:$CV}(::Type{Clamp}, img::AbstractArray{CV}) = Clamp{$CVnew{UFixed8}}()
CVnew = CV<:AbstractGray ? Gray : BGR
AC, CA = alphacolor(CV), coloralpha(CV)
if AC in handled
continue
end
push!(handled, AC)
ACnew, CAnew = alphacolor(CVnew), coloralpha(CVnew)
@eval begin
mapinfo{T<:UFixed}(::Type{Clamp}, img::AbstractArray{$AC{T}}) = MapNone{$ACnew{T}}()
mapinfo{P<:$AC}(::Type{Clamp}, img::AbstractArray{P}) = Clamp{$ACnew{UFixed8}}()
mapinfo{T<:UFixed}(::Type{Clamp}, img::AbstractArray{$CA{T}}) = MapNone{$CAnew{T}}()
mapinfo{P<:$CA}(::Type{Clamp}, img::AbstractArray{P}) = Clamp{$CAnew{UFixed8}}()
end
end
end
end
mapinfo(::Type{Clamp}, img::AbstractArray{RGB24}) = MapNone{RGB{UFixed8}}()
mapinfo(::Type{Clamp}, img::AbstractArray{ARGB32}) = MapNone{BGRA{UFixed8}}()
# Backwards-compatibility
uint32color(img) = map(mapinfo(UInt32, img), img)
uint32color!(buf, img::AbstractArray) = map!(mapinfo(UInt32, img), buf, img)
uint32color!(buf, img::AbstractArray, mi::MapInfo) = map!(mi, buf, img)
uint32color!{T,N}(buf::Array{UInt32,N}, img::AbstractImageDirect{T,N}) =
map!(mapinfo(UInt32, img), buf, img)
uint32color!{T,N,N1}(buf::Array{UInt32,N}, img::AbstractImageDirect{T,N1}) =
map!(mapinfo(UInt32, img), buf, img, TypeConst{colordim(img)})
uint32color!{T,N}(buf::Array{UInt32,N}, img::AbstractImageDirect{T,N}, mi::MapInfo) =
map!(mi, buf, img)
uint32color!{T,N,N1}(buf::Array{UInt32,N}, img::AbstractImageDirect{T,N1}, mi::MapInfo) =
map!(mi, buf, img, TypeConst{colordim(img)})
"""
```
imgsc = sc(img)
imgsc = sc(img, min, max)
```
Applies default or specified `ScaleMinMax` mapping to the image.
"""
sc(img::AbstractArray) = map(ScaleMinMax(UFixed8, img), img)
sc(img::AbstractArray, mn::Real, mx::Real) = map(ScaleMinMax(UFixed8, img, mn, mx), img)
for (fn,T) in ((:float32, Float32), (:float64, Float64), (:ufixed8, UFixed8),
(:ufixed10, UFixed10), (:ufixed12, UFixed12), (:ufixed14, UFixed14),
(:ufixed16, UFixed16))
@eval begin
function $fn{C<:Colorant}(A::AbstractArray{C})
newC = eval(C.name.name){$T}
convert(Array{newC}, A)
end
$fn{C<:Colorant}(img::AbstractImage{C}) = shareproperties(img, $fn(data(img)))
end
end
ufixedsc{T<:UFixed}(::Type{T}, img::AbstractImageDirect) = map(mapinfo(T, img), img)
ufixed8sc(img::AbstractImageDirect) = ufixedsc(UFixed8, img)
|
{"hexsha": "1b65412f52fb34d3e52492e151f6e6277d3b9542", "size": 32574, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/map.jl", "max_stars_repo_name": "rsrock/Images.jl", "max_stars_repo_head_hexsha": "8e4192a04c45be0f93f8b13b189249ed93f394c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/map.jl", "max_issues_repo_name": "rsrock/Images.jl", "max_issues_repo_head_hexsha": "8e4192a04c45be0f93f8b13b189249ed93f394c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/map.jl", "max_forks_repo_name": "rsrock/Images.jl", "max_forks_repo_head_hexsha": "8e4192a04c45be0f93f8b13b189249ed93f394c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5342857143, "max_line_length": 253, "alphanum_fraction": 0.6518388899, "num_tokens": 10646}
|
(*
* Copyright (C) 2014 NICTA
* All rights reserved.
*)
(* Author: David Cock - David.Cock@nicta.com.au *)
section "The Algebra of pGCL"
theory Algebra imports WellDefined begin
text_raw \<open>\label{s:Algebra}\<close>
text \<open>Programs in pGCL have a rich algebraic structure, largely mirroring that for GCL. We show
that programs form a lattice under refinement, with @{term "a \<Sqinter> b"} and @{term "a \<Squnion> b"} as the meet
and join operators, respectively. We also take advantage of the algebraic structure to establish a
framework for the modular decomposition of proofs.\<close>
subsection \<open>Program Refinement\<close>
text_raw \<open>\label{s:progref}\<close>
text \<open>Refinement in pGCL relates to refinement in GCL exactly as probabilistic entailment relates
to implication. It turns out to have a very similar algebra, the rules of which we establish
shortly.\<close>
definition
refines :: "'s prog \<Rightarrow> 's prog \<Rightarrow> bool" (infix "\<sqsubseteq>" 70)
where
"prog \<sqsubseteq> prog' \<equiv> \<forall>P. sound P \<longrightarrow> wp prog P \<tturnstile> wp prog' P"
lemma refinesI[intro]:
"\<lbrakk> \<And>P. sound P \<Longrightarrow> wp prog P \<tturnstile> wp prog' P \<rbrakk> \<Longrightarrow> prog \<sqsubseteq> prog'"
unfolding refines_def by(simp)
lemma refinesD[dest]:
"\<lbrakk> prog \<sqsubseteq> prog'; sound P \<rbrakk> \<Longrightarrow> wp prog P \<tturnstile> wp prog' P"
unfolding refines_def by(simp)
text \<open>The equivalence relation below will turn out to be that induced by refinement. It is also
the application of @{term equiv_trans} to the weakest precondition.\<close>
definition
pequiv :: "'s prog \<Rightarrow> 's prog \<Rightarrow> bool" (infix "\<simeq>" 70)
where
"prog \<simeq> prog' \<equiv> \<forall>P. sound P \<longrightarrow> wp prog P = wp prog' P"
lemma pequivI[intro]:
"\<lbrakk> \<And>P. sound P \<Longrightarrow> wp prog P = wp prog' P \<rbrakk> \<Longrightarrow> prog \<simeq> prog'"
unfolding pequiv_def by(simp)
lemma pequivD[dest,simp]:
"\<lbrakk> prog \<simeq> prog'; sound P \<rbrakk> \<Longrightarrow> wp prog P = wp prog' P"
unfolding pequiv_def by(simp)
lemma pequiv_equiv_trans:
"a \<simeq> b \<longleftrightarrow> equiv_trans (wp a) (wp b)"
by(auto)
subsection \<open>Simple Identities\<close>
text \<open>The following identities involve only the primitive operations as defined in
\autoref{s:syntax}, and refinement as defined above.\<close>
subsubsection \<open>Laws following from the basic arithmetic of the operators separately\<close>
lemma DC_comm[ac_simps]:
"a \<Sqinter> b = b \<Sqinter> a"
unfolding DC_def by(simp add:ac_simps)
lemma DC_assoc[ac_simps]:
"a \<Sqinter> (b \<Sqinter> c) = (a \<Sqinter> b) \<Sqinter> c"
unfolding DC_def by(simp add:ac_simps)
lemma DC_idem:
"a \<Sqinter> a = a"
unfolding DC_def by(simp)
lemma AC_comm[ac_simps]:
"a \<Squnion> b = b \<Squnion> a"
unfolding AC_def by(simp add:ac_simps)
lemma AC_assoc[ac_simps]:
"a \<Squnion> (b \<Squnion> c) = (a \<Squnion> b) \<Squnion> c"
unfolding AC_def by(simp add:ac_simps)
lemma AC_idem:
"a \<Squnion> a = a"
unfolding AC_def by(simp)
lemma PC_quasi_comm:
"a \<^bsub>p\<^esub>\<oplus> b = b \<^bsub>(\<lambda>s. 1 - p s)\<^esub>\<oplus> a"
unfolding PC_def by(simp add:algebra_simps)
lemma PC_idem:
"a \<^bsub>p\<^esub>\<oplus> a = a"
unfolding PC_def by(simp add:algebra_simps)
lemma Seq_assoc[ac_simps]:
"A ;; (B ;; C) = A ;; B ;; C"
by(simp add:Seq_def o_def)
lemma Abort_refines[intro]:
"well_def a \<Longrightarrow> Abort \<sqsubseteq> a"
by(rule refinesI, unfold wp_eval, auto dest!:well_def_wp_healthy)
subsubsection \<open>Laws relating demonic choice and refinement\<close>
lemma left_refines_DC:
"(a \<Sqinter> b) \<sqsubseteq> a"
by(auto intro!:refinesI simp:wp_eval)
lemma right_refines_DC:
"(a \<Sqinter> b) \<sqsubseteq> b"
by(auto intro!:refinesI simp:wp_eval)
lemma DC_refines:
fixes a::"'s prog" and b and c
assumes rab: "a \<sqsubseteq> b" and rac: "a \<sqsubseteq> c"
shows "a \<sqsubseteq> (b \<Sqinter> c)"
proof
fix P::"'s \<Rightarrow> real" assume sP: "sound P"
with assms have "wp a P \<tturnstile> wp b P" and "wp a P \<tturnstile> wp c P"
by(auto dest:refinesD)
thus "wp a P \<tturnstile> wp (b \<Sqinter> c) P"
by(auto simp:wp_eval intro:min.boundedI)
qed
lemma DC_mono:
fixes a::"'s prog"
assumes rab: "a \<sqsubseteq> b" and rcd: "c \<sqsubseteq> d"
shows "(a \<Sqinter> c) \<sqsubseteq> (b \<Sqinter> d)"
proof(rule refinesI, unfold wp_eval, rule le_funI)
fix P::"'s \<Rightarrow> real" and s::'s
assume sP: "sound P"
with assms have "wp a P s \<le> wp b P s" and "wp c P s \<le> wp d P s"
by(auto)
thus "min (wp a P s) (wp c P s) \<le> min (wp b P s) (wp d P s)"
by(auto)
qed
subsubsection \<open>Laws relating angelic choice and refinement\<close>
lemma left_refines_AC:
"a \<sqsubseteq> (a \<Squnion> b)"
by(auto intro!:refinesI simp:wp_eval)
lemma right_refines_AC:
"b \<sqsubseteq> (a \<Squnion> b)"
by(auto intro!:refinesI simp:wp_eval)
lemma AC_refines:
fixes a::"'s prog" and b and c
assumes rac: "a \<sqsubseteq> c" and rbc: "b \<sqsubseteq> c"
shows "(a \<Squnion> b) \<sqsubseteq> c"
proof
fix P::"'s \<Rightarrow> real" assume sP: "sound P"
with assms have "\<And>s. wp a P s \<le> wp c P s"
and "\<And>s. wp b P s \<le> wp c P s"
by(auto dest:refinesD)
thus "wp (a \<Squnion> b) P \<tturnstile> wp c P"
unfolding wp_eval by(auto)
qed
lemma AC_mono:
fixes a::"'s prog"
assumes rab: "a \<sqsubseteq> b" and rcd: "c \<sqsubseteq> d"
shows "(a \<Squnion> c) \<sqsubseteq> (b \<Squnion> d)"
proof(rule refinesI, unfold wp_eval, rule le_funI)
fix P::"'s \<Rightarrow> real" and s::'s
assume sP: "sound P"
with assms have "wp a P s \<le> wp b P s" and "wp c P s \<le> wp d P s"
by(auto)
thus "max (wp a P s) (wp c P s) \<le> max (wp b P s) (wp d P s)"
by(auto)
qed
subsubsection \<open>Laws depending on the arithmetic of @{term "a \<^bsub>p\<^esub>\<oplus> b"} and @{term "a \<Sqinter> b"}
together\<close>
lemma DC_refines_PC:
assumes unit: "unitary p"
shows "(a \<Sqinter> b) \<sqsubseteq> (a \<^bsub>p\<^esub>\<oplus> b)"
proof(rule refinesI, unfold wp_eval, rule le_funI)
fix s and P::"'a \<Rightarrow> real" assume sound: "sound P"
from unit have nn_p: "0 \<le> p s" by(blast)
from unit have "p s \<le> 1" by(blast)
hence nn_np: "0 \<le> 1 - p s" by(simp)
show "min (wp a P s) (wp b P s) \<le> p s * wp a P s + (1 - p s) * wp b P s"
proof(cases "wp a P s \<le> wp b P s",
simp_all add:min.absorb1 min.absorb2)
case True note le = this
have "wp a P s = (p s + (1 - p s)) * wp a P s" by(simp)
also have "... = p s * wp a P s + (1 - p s) * wp a P s"
by(simp only: distrib_right)
also {
from le and nn_np have "(1 - p s) * wp a P s \<le> (1 - p s) * wp b P s"
by(rule mult_left_mono)
hence "p s * wp a P s + (1 - p s) * wp a P s \<le>
p s * wp a P s + (1 - p s) * wp b P s"
by(rule add_left_mono)
}
finally show "wp a P s \<le> p s * wp a P s + (1 - p s) * wp b P s" .
next
case False
then have le: "wp b P s \<le> wp a P s" by(simp)
have "wp b P s = (p s + (1 - p s)) * wp b P s" by(simp)
also have "... = p s * wp b P s + (1 - p s) * wp b P s"
by(simp only:distrib_right)
also {
from le and nn_p have "p s * wp b P s \<le> p s * wp a P s"
by(rule mult_left_mono)
hence "p s * wp b P s + (1 - p s) * wp b P s \<le>
p s * wp a P s + (1 - p s) * wp b P s"
by(rule add_right_mono)
}
finally show "wp b P s \<le> p s * wp a P s + (1 - p s) * wp b P s" .
qed
qed
subsubsection \<open>Laws depending on the arithmetic of @{term "a \<^bsub>p\<^esub>\<oplus> b"} and @{term "a \<Squnion> b"}
together\<close>
lemma PC_refines_AC:
assumes unit: "unitary p"
shows "(a \<^bsub>p\<^esub>\<oplus> b) \<sqsubseteq> (a \<Squnion> b)"
proof(rule refinesI, unfold wp_eval, rule le_funI)
fix s and P::"'a \<Rightarrow> real" assume sound: "sound P"
from unit have "p s \<le> 1" by(blast)
hence nn_np: "0 \<le> 1 - p s" by(simp)
show "p s * wp a P s + (1 - p s) * wp b P s \<le>
max (wp a P s) (wp b P s)"
proof(cases "wp a P s \<le> wp b P s")
case True note leab = this
with unit nn_np
have "p s * wp a P s + (1 - p s) * wp b P s \<le>
p s * wp b P s + (1 - p s) * wp b P s"
by(auto intro:add_mono mult_left_mono)
also have "... = wp b P s"
by(auto simp:field_simps)
also from leab
have "... = max (wp a P s) (wp b P s)"
by(auto)
finally show ?thesis .
next
case False note leba = this
with unit nn_np
have "p s * wp a P s + (1 - p s) * wp b P s \<le>
p s * wp a P s + (1 - p s) * wp a P s"
by(auto intro:add_mono mult_left_mono)
also have "... = wp a P s"
by(auto simp:field_simps)
also from leba
have "... = max (wp a P s) (wp b P s)"
by(auto)
finally show ?thesis .
qed
qed
subsubsection \<open>Laws depending on the arithmetic of @{term "a \<Squnion> b"} and @{term "a \<Sqinter> b"} together
\<close>
lemma DC_refines_AC:
"(a \<Sqinter> b) \<sqsubseteq> (a \<Squnion> b)"
by(auto intro!:refinesI simp:wp_eval)
subsubsection \<open>Laws involving refinement and equivalence\<close>
lemma pr_trans[trans]:
fixes A::"'a prog"
assumes prAB: "A \<sqsubseteq> B"
and prBC: "B \<sqsubseteq> C"
shows "A \<sqsubseteq> C"
proof
fix P::"'a \<Rightarrow> real" assume sP: "sound P"
with prAB have "wp A P \<tturnstile> wp B P" by(blast)
also from sP and prBC have "... \<tturnstile> wp C P" by(blast)
finally show "wp A P \<tturnstile> ..." .
qed
lemma pequiv_refl[intro!,simp]:
"a \<simeq> a"
by(auto)
lemma pequiv_comm[ac_simps]:
"a \<simeq> b \<longleftrightarrow> b \<simeq> a"
unfolding pequiv_def
by(rule iffI, safe, simp_all)
lemma pequiv_pr[dest]:
"a \<simeq> b \<Longrightarrow> a \<sqsubseteq> b"
by(auto)
lemma pequiv_trans[intro,trans]:
"\<lbrakk> a \<simeq> b; b \<simeq> c \<rbrakk> \<Longrightarrow> a \<simeq> c"
unfolding pequiv_def by(auto intro!:order_trans)
lemma pequiv_pr_trans[intro,trans]:
"\<lbrakk> a \<simeq> b; b \<sqsubseteq> c \<rbrakk> \<Longrightarrow> a \<sqsubseteq> c"
unfolding pequiv_def refines_def by(simp)
lemma pr_pequiv_trans[intro,trans]:
"\<lbrakk> a \<sqsubseteq> b; b \<simeq> c \<rbrakk> \<Longrightarrow> a \<sqsubseteq> c"
unfolding pequiv_def refines_def by(simp)
text \<open>Refinement induces equivalence by antisymmetry:\<close>
lemma pequiv_antisym:
"\<lbrakk> a \<sqsubseteq> b; b \<sqsubseteq> a \<rbrakk> \<Longrightarrow> a \<simeq> b"
by(auto intro:antisym)
lemma pequiv_DC:
"\<lbrakk> a \<simeq> c; b \<simeq> d \<rbrakk> \<Longrightarrow> (a \<Sqinter> b) \<simeq> (c \<Sqinter> d)"
by(auto intro!:DC_mono pequiv_antisym simp:ac_simps)
lemma pequiv_AC:
"\<lbrakk> a \<simeq> c; b \<simeq> d \<rbrakk> \<Longrightarrow> (a \<Squnion> b) \<simeq> (c \<Squnion> d)"
by(auto intro!:AC_mono pequiv_antisym simp:ac_simps)
subsection \<open>Deterministic Programs are Maximal\<close>
text \<open>Any sub-additive refinement of a deterministic program is in fact an equivalence.
Deterministic programs are thus maximal (under the refinement order) among sub-additive programs.
\<close>
lemma refines_determ:
fixes a::"'s prog"
assumes da: "determ (wp a)"
and wa: "well_def a"
and wb: "well_def b"
and dr: "a \<sqsubseteq> b"
shows "a \<simeq> b"
txt \<open>Proof by contradiction.\<close>
proof(rule pequivI, rule contrapos_pp)
from wb have "feasible (wp b)" by(auto)
with wb have sab: "sub_add (wp b)"
by(auto dest: sublinear_subadd[OF well_def_wp_sublinear])
fix P::"'s \<Rightarrow> real" assume sP: "sound P"
txt \<open>Assume that @{term a} and @{term b} are not equivalent:\<close>
assume ne: "wp a P \<noteq> wp b P"
txt \<open>Find a point at which they differ. As @{term "a \<sqsubseteq> b"},
@{term "wp b P s"} must by strictly greater than @{term "wp a P s"}
here:\<close>
hence "\<exists>s. wp a P s < wp b P s"
proof(rule contrapos_np)
assume "\<not>(\<exists>s. wp a P s < wp b P s)"
hence "\<forall>s. wp b P s \<le> wp a P s" by(auto simp:not_less)
hence "wp b P \<tturnstile> wp a P" by(auto)
moreover from sP dr have "wp a P \<tturnstile> wp b P" by(auto)
ultimately show "wp a P = wp b P" by(auto)
qed
then obtain s where less: "wp a P s < wp b P s" by(blast)
txt \<open>Take a carefully constructed expectation:\<close>
let ?Pc = "\<lambda>s. bound_of P - P s"
have sPc: "sound ?Pc"
proof(rule soundI)
from sP have "\<And>s. 0 \<le> P s" by(auto)
hence "\<And>s. ?Pc s \<le> bound_of P" by(auto)
thus "bounded ?Pc" by(blast)
from sP have "\<And>s. P s \<le> bound_of P" by(auto)
hence "\<And>s. 0 \<le> ?Pc s"
by auto
thus "nneg ?Pc" by(auto)
qed
txt \<open>We then show that @{term "wp b"} violates feasibility, and
thus healthiness.\<close>
from sP have "0 \<le> bound_of P" by(auto)
with da have "bound_of P = wp a (\<lambda>s. bound_of P) s"
by(simp add:maximalD determ_maximalD)
also have "... = wp a (\<lambda>s. ?Pc s + P s) s"
by(simp)
also from da sP sPc have "... = wp a ?Pc s + wp a P s"
by(subst additiveD[OF determ_additiveD], simp_all add:sP sPc)
also from sPc dr have "... \<le> wp b ?Pc s + wp a P s"
by(auto)
also from less have "... < wp b ?Pc s + wp b P s"
by(auto)
also from sab sP sPc have "... \<le> wp b (\<lambda>s. ?Pc s + P s) s"
by(blast)
finally have "\<not>wp b (\<lambda>s. bound_of P) s \<le> bound_of P"
by(simp)
thus "\<not>bounded_by (bound_of P) (wp b (\<lambda>s. bound_of P))"
by(auto)
next
txt \<open>However,\<close>
fix P::"'s \<Rightarrow> real" assume sP: "sound P"
hence "nneg (\<lambda>s. bound_of P)" by(auto)
moreover have "bounded_by (bound_of P) (\<lambda>s. bound_of P)" by(auto)
ultimately
show "bounded_by (bound_of P) (wp b (\<lambda>s. bound_of P))"
using wb by(auto dest!:well_def_wp_healthy)
qed
subsection \<open>The Algebraic Structure of Refinement\<close>
text \<open>Well-defined programs form a half-bounded semilattice under refinement, where @{term Abort}
is bottom, and @{term "a \<Sqinter> b"} is @{term inf}. There is no unique top element, but all
fully-deterministic programs are maximal.
The type that we construct here is not especially useful, but serves as a convenient way to express
this result.\<close>
quotient_type 's program =
"'s prog" / partial : "\<lambda>a b. a \<simeq> b \<and> well_def a \<and> well_def b"
proof(rule part_equivpI)
have "Skip \<simeq> Skip" and "well_def Skip" by(auto intro:wd_intros)
thus "\<exists>x. x \<simeq> x \<and> well_def x \<and> well_def x" by(blast)
show "symp (\<lambda>a b. a \<simeq> b \<and> well_def a \<and> well_def b)"
proof(rule sympI, safe)
fix a::"'a prog" and b
assume "a \<simeq> b"
hence "equiv_trans (wp a) (wp b)"
by(simp add:pequiv_equiv_trans)
thus "b \<simeq> a" by(simp add:ac_simps pequiv_equiv_trans)
qed
show "transp (\<lambda>a b. a \<simeq> b \<and> well_def a \<and> well_def b)"
by(rule transpI, safe, rule pequiv_trans)
qed
instantiation program :: (type) semilattice_inf begin
lift_definition
less_eq_program :: "'a program \<Rightarrow> 'a program \<Rightarrow> bool" is refines
proof(safe)
fix a::"'a prog" and b c d
assume "a \<simeq> b" hence "b \<simeq> a" by(simp add:ac_simps)
also assume "a \<sqsubseteq> c"
also assume "c \<simeq> d"
finally show "b \<sqsubseteq> d" .
next
fix a::"'a prog" and b c d
assume "a \<simeq> b"
also assume "b \<sqsubseteq> d"
also assume "c \<simeq> d" hence "d \<simeq> c" by(simp add:ac_simps)
finally show "a \<sqsubseteq> c" .
qed
lift_definition
less_program :: "'a program \<Rightarrow> 'a program \<Rightarrow> bool"
is "\<lambda>a b. a \<sqsubseteq> b \<and> \<not> b \<sqsubseteq> a"
proof(safe)
fix a::"'a prog" and b c d
assume "a \<simeq> b" hence "b \<simeq> a" by(simp add:ac_simps)
also assume "a \<sqsubseteq> c"
also assume "c \<simeq> d"
finally show "b \<sqsubseteq> d" .
next
fix a::"'a prog" and b c d
assume "a \<simeq> b"
also assume "b \<sqsubseteq> d"
also assume "c \<simeq> d" hence "d \<simeq> c" by(simp add:ac_simps)
finally show "a \<sqsubseteq> c" .
next
fix a b and c::"'a prog" and d
assume "c \<simeq> d"
also assume "d \<sqsubseteq> b"
also assume "a \<simeq> b" hence "b \<simeq> a" by(simp add:ac_simps)
finally have "c \<sqsubseteq> a" .
moreover assume "\<not> c \<sqsubseteq> a"
ultimately show False by(auto)
next
fix a b and c::"'a prog" and d
assume "c \<simeq> d" hence "d \<simeq> c" by(simp add:ac_simps)
also assume "c \<sqsubseteq> a"
also assume "a \<simeq> b"
finally have "d \<sqsubseteq> b" .
moreover assume "\<not> d \<sqsubseteq> b"
ultimately show False by(auto)
qed
lift_definition
inf_program :: "'a program \<Rightarrow> 'a program \<Rightarrow> 'a program" is DC
proof(safe)
fix a b c d::"'s prog"
assume "a \<simeq> b" and "c \<simeq> d"
thus "(a \<Sqinter> c) \<simeq> (b \<Sqinter> d)" by(rule pequiv_DC)
next
fix a c::"'s prog"
assume "well_def a" "well_def c"
thus "well_def (a \<Sqinter> c)" by(rule wd_intros)
next
fix a c::"'s prog"
assume "well_def a" "well_def c"
thus "well_def (a \<Sqinter> c)" by(rule wd_intros)
qed
instance
proof
fix x y::"'a program"
show "(x < y) = (x \<le> y \<and> \<not> y \<le> x)"
by(transfer, simp)
show "x \<le> x"
by(transfer, auto)
show "inf x y \<le> x"
by(transfer, rule left_refines_DC)
show "inf x y \<le> y"
by(transfer, rule right_refines_DC)
assume "x \<le> y" and "y \<le> x" thus "x = y"
by(transfer, iprover intro:pequiv_antisym)
next
fix x y z::"'a program"
assume "x \<le> y" and "y \<le> z"
thus "x \<le> z"
by(transfer, iprover intro:pr_trans)
next
fix x y z::"'a program"
assume "x \<le> y" and "x \<le> z"
thus "x \<le> inf y z"
by(transfer, iprover intro:DC_refines)
qed
end
instantiation program :: (type) bot begin
lift_definition
bot_program :: "'a program" is Abort
by(auto intro:wd_intros)
instance ..
end
lemma eq_det: "\<And>a b::'s prog. \<lbrakk> a \<simeq> b; determ (wp a) \<rbrakk> \<Longrightarrow> determ (wp b)"
proof(intro determI additiveI maximalI)
fix a b::"'s prog" and P::"'s \<Rightarrow> real"
and Q::"'s \<Rightarrow> real" and s::"'s"
assume da: "determ (wp a)"
assume sP: "sound P" and sQ: "sound Q"
and eq: "a \<simeq> b"
hence "wp b (\<lambda>s. P s + Q s) s =
wp a (\<lambda>s. P s + Q s) s"
by(simp add:sound_intros)
also from da sP sQ
have "... = wp a P s + wp a Q s"
by(simp add:additiveD determ_additiveD)
also from eq sP sQ
have "... = wp b P s + wp b Q s"
by(simp add:pequivD)
finally show "wp b (\<lambda>s. P s + Q s) s = wp b P s + wp b Q s" .
next
fix a b::"'s prog" and c::real
assume da: "determ (wp a)"
assume "a \<simeq> b" hence "b \<simeq> a" by(simp add:ac_simps)
moreover assume nn: "0 \<le> c"
ultimately have "wp b (\<lambda>_. c) = wp a (\<lambda>_. c)"
by(simp add:pequivD const_sound)
also from da nn have "... = (\<lambda>_. c)"
by(simp add:determ_maximalD maximalD)
finally show "wp b (\<lambda>_. c) = (\<lambda>_. c)" .
qed
lift_definition
pdeterm :: "'s program \<Rightarrow> bool"
is "\<lambda>a. determ (wp a)"
proof(safe)
fix a b::"'s prog"
assume "a \<simeq> b" and "determ (wp a)"
thus "determ (wp b)" by(rule eq_det)
next
fix a b::"'s prog"
assume "a \<simeq> b" hence "b \<simeq> a" by(simp add:ac_simps)
moreover assume "determ (wp b)"
ultimately show "determ (wp a)" by(rule eq_det)
qed
lemma determ_maximal:
"\<lbrakk> pdeterm a; a \<le> x \<rbrakk> \<Longrightarrow> a = x"
by(transfer, auto intro:refines_determ)
subsection \<open>Data Refinement\<close>
text \<open>A projective data refinement construction for pGCL. By projective, we mean that the abstract
state is always a function (@{term \<phi>}) of the concrete state. Refinement may be predicated (@{term
G}) on the state.\<close>
definition
drefines :: "('b \<Rightarrow> 'a) \<Rightarrow> ('b \<Rightarrow> bool) \<Rightarrow> 'a prog \<Rightarrow> 'b prog \<Rightarrow> bool"
where
"drefines \<phi> G A B \<equiv> \<forall>P Q. (unitary P \<and> unitary Q \<and> (P \<tturnstile> wp A Q)) \<longrightarrow>
(\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> wp B (Q o \<phi>))"
lemma drefinesD[dest]:
"\<lbrakk> drefines \<phi> G A B; unitary P; unitary Q; P \<tturnstile> wp A Q \<rbrakk> \<Longrightarrow>
\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> wp B (Q o \<phi>)"
unfolding drefines_def by(blast)
text \<open>We can alternatively use G as an assumption:\<close>
lemma drefinesD2:
assumes dr: "drefines \<phi> G A B"
and uP: "unitary P"
and uQ: "unitary Q"
and wpA: "P \<tturnstile> wp A Q"
and G: "G s"
shows "(P o \<phi>) s \<le> wp B (Q o \<phi>) s"
proof -
from uP have "0 \<le> (P o \<phi>) s" unfolding o_def by(blast)
with G have "(P o \<phi>) s = (\<guillemotleft>G\<guillemotright> && (P o \<phi>)) s"
by(simp add:exp_conj_def)
also from assms have "... \<le> wp B (Q o \<phi>) s" by(blast)
finally show "(P o \<phi>) s \<le> ..." .
qed
text \<open>This additional form is sometimes useful:\<close>
lemma drefinesD3:
assumes dr: "drefines \<phi> G a b"
and G: "G s"
and uQ: "unitary Q"
and wa: "well_def a"
shows "wp a Q (\<phi> s) \<le> wp b (Q o \<phi>) s"
proof -
let "?L s'" = "wp a Q s'"
from uQ wa have sL: "sound ?L" by(blast)
from uQ wa have bL: "bounded_by 1 ?L" by(blast)
have "?L \<tturnstile> ?L" by(simp)
with sL and bL and assms
show ?thesis
by(blast intro:drefinesD2[OF dr, where P="?L", simplified])
qed
lemma drefinesI[intro]:
"\<lbrakk> \<And>P Q. \<lbrakk> unitary P; unitary Q; P \<tturnstile> wp A Q \<rbrakk> \<Longrightarrow>
\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> wp B (Q o \<phi>) \<rbrakk> \<Longrightarrow>
drefines \<phi> G A B"
unfolding drefines_def by(blast)
text \<open>Use G as an assumption, when showing refinement:\<close>
lemma drefinesI2:
fixes A::"'a prog"
and B::"'b prog"
and \<phi>::"'b \<Rightarrow> 'a"
and G::"'b \<Rightarrow> bool"
assumes wB: "well_def B"
and withAs:
"\<And>P Q s. \<lbrakk> unitary P; unitary Q;
G s; P \<tturnstile> wp A Q \<rbrakk> \<Longrightarrow> (P o \<phi>) s \<le> wp B (Q o \<phi>) s"
shows "drefines \<phi> G A B"
proof
fix P and Q
assume uP: "unitary P"
and uQ: "unitary Q"
and wpA: "P \<tturnstile> wp A Q"
hence "\<And>s. G s \<Longrightarrow> (P o \<phi>) s \<le> wp B (Q o \<phi>) s"
using withAs by(blast)
moreover
from uQ have "unitary (Q o \<phi>)"
unfolding o_def by(blast)
moreover
from uP have "unitary (P o \<phi>)"
unfolding o_def by(blast)
ultimately
show "\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> wp B (Q o \<phi>)"
using wB by(blast intro:entails_pconj_assumption)
qed
lemma dr_strengthen_guard:
fixes a::"'s prog" and b::"'t prog"
assumes fg: "\<And>s. F s \<Longrightarrow> G s"
and drab: "drefines \<phi> G a b"
shows "drefines \<phi> F a b"
proof(intro drefinesI)
fix P Q::"'s expect"
assume uP: "unitary P" and uQ: "unitary Q"
and wp: "P \<tturnstile> wp a Q"
from fg have "\<And>s. \<guillemotleft>F\<guillemotright> s \<le> \<guillemotleft>G\<guillemotright> s" by(simp add:embed_bool_def)
hence "(\<guillemotleft>F\<guillemotright> && (P o \<phi>)) \<tturnstile> (\<guillemotleft>G\<guillemotright> && (P o \<phi>))" by(auto intro:pconj_mono le_funI simp:exp_conj_def)
also from drab uP uQ wp have "... \<tturnstile> wp b (Q o \<phi>)" by(auto)
finally show "\<guillemotleft>F\<guillemotright> && (P o \<phi>) \<tturnstile> wp b (Q o \<phi>)" .
qed
text \<open>Probabilistic correspondence, @{term pcorres}, is equality on distribution transformers,
modulo a guard. It is the analogue, for data refinement, of program equivalence for program
refinement.\<close>
definition
pcorres :: "('b \<Rightarrow> 'a) \<Rightarrow> ('b \<Rightarrow> bool) \<Rightarrow> 'a prog \<Rightarrow> 'b prog \<Rightarrow> bool"
where
"pcorres \<phi> G A B \<longleftrightarrow>
(\<forall>Q. unitary Q \<longrightarrow> \<guillemotleft>G\<guillemotright> && (wp A Q o \<phi>) = \<guillemotleft>G\<guillemotright> && wp B (Q o \<phi>))"
lemma pcorresI:
"\<lbrakk> \<And>Q. unitary Q \<Longrightarrow> \<guillemotleft>G\<guillemotright> && (wp A Q o \<phi>) = \<guillemotleft>G\<guillemotright> && wp B (Q o \<phi>) \<rbrakk> \<Longrightarrow>
pcorres \<phi> G A B"
by(simp add:pcorres_def)
text \<open>Often easier to use, as it allows one to assume the precondition.\<close>
lemma pcorresI2[intro]:
fixes A::"'a prog" and B::"'b prog"
assumes withG: "\<And>Q s. \<lbrakk> unitary Q; G s \<rbrakk> \<Longrightarrow> wp A Q (\<phi> s)= wp B (Q o \<phi>) s"
and wA: "well_def A"
and wB: "well_def B"
shows "pcorres \<phi> G A B"
proof(rule pcorresI, rule ext)
fix Q::"'a \<Rightarrow> real" and s::'b
assume uQ: "unitary Q"
hence uQ\<phi>: "unitary (Q o \<phi>)" by(auto)
show "(\<guillemotleft>G\<guillemotright> && (wp A Q \<circ> \<phi>)) s = (\<guillemotleft>G\<guillemotright> && wp B (Q \<circ> \<phi>)) s"
proof(cases "G s")
case True note this
moreover
from well_def_wp_healthy[OF wA] uQ have "0 \<le> wp A Q (\<phi> s)" by(blast)
moreover
from well_def_wp_healthy[OF wB] uQ\<phi> have "0 \<le> wp B (Q o \<phi>) s" by(blast)
ultimately show ?thesis
using uQ by(simp add:exp_conj_def withG)
next
case False note this
moreover
from well_def_wp_healthy[OF wA] uQ have "wp A Q (\<phi> s) \<le> 1" by(blast)
moreover
from well_def_wp_healthy[OF wB] uQ\<phi> have "wp B (Q o \<phi>) s \<le> 1"
by(blast dest!:healthy_bounded_byD intro:sound_nneg)
ultimately show ?thesis by(simp add:exp_conj_def)
qed
qed
lemma pcorresD:
"\<lbrakk> pcorres \<phi> G A B; unitary Q \<rbrakk> \<Longrightarrow> \<guillemotleft>G\<guillemotright> && (wp A Q o \<phi>) = \<guillemotleft>G\<guillemotright> && wp B (Q o \<phi>)"
unfolding pcorres_def by(simp)
text \<open>Again, easier to use if the precondition is known to hold.\<close>
lemma pcorresD2:
assumes pc: "pcorres \<phi> G A B"
and uQ: "unitary Q"
and wA: "well_def A" and wB: "well_def B"
and G: "G s"
shows "wp A Q (\<phi> s) = wp B (Q o \<phi>) s"
proof -
from uQ well_def_wp_healthy[OF wA] have "0 \<le> wp A Q (\<phi> s)" by(auto)
with G have "wp A Q (\<phi> s) = \<guillemotleft>G\<guillemotright> s .& wp A Q (\<phi> s)" by(simp)
also {
from pc uQ have "\<guillemotleft>G\<guillemotright> && (wp A Q o \<phi>) = \<guillemotleft>G\<guillemotright> && wp B (Q o \<phi>)"
by(rule pcorresD)
hence "\<guillemotleft>G\<guillemotright> s .& wp A Q (\<phi> s) = \<guillemotleft>G\<guillemotright> s .& wp B (Q o \<phi>) s"
unfolding exp_conj_def o_def by(rule fun_cong)
}
also {
from uQ have "sound Q" by(auto)
hence "sound (Q o \<phi>)" by(auto intro:sound_intros)
with well_def_wp_healthy[OF wB] have "0 \<le> wp B (Q o \<phi>) s" by(auto)
with G have "\<guillemotleft>G\<guillemotright> s .& wp B (Q o \<phi>) s = wp B (Q o \<phi>) s" by(simp)
}
finally show ?thesis .
qed
subsection \<open>The Algebra of Data Refinement\<close>
text \<open>Program refinement implies a trivial data refinement:\<close>
lemma refines_drefines:
fixes a::"'s prog"
assumes rab: "a \<sqsubseteq> b" and wb: "well_def b"
shows "drefines (\<lambda>s. s) G a b"
proof(intro drefinesI2 wb, simp add:o_def)
fix P::"'s \<Rightarrow> real" and Q::"'s \<Rightarrow> real" and s::'s
assume sQ: "unitary Q"
assume "P \<tturnstile> wp a Q" hence "P s \<le> wp a Q s" by(auto)
also from rab sQ have "... \<le> wp b Q s" by(auto)
finally show "P s \<le> wp b Q s" .
qed
text \<open>Data refinement is transitive:\<close>
lemma dr_trans[trans]:
fixes A::"'a prog" and B::"'b prog" and C::"'c prog"
assumes drAB: "drefines \<phi> G A B"
and drBC: "drefines \<phi>' G' B C"
and Gimp: "\<And>s. G' s \<Longrightarrow> G (\<phi>' s)"
shows "drefines (\<phi> o \<phi>') G' A C"
proof(rule drefinesI)
fix P::"'a \<Rightarrow> real" and Q::"'a \<Rightarrow> real" and s::'a
assume uP: "unitary P" and uQ: "unitary Q"
and wpA: "P \<tturnstile> wp A Q"
have "\<guillemotleft>G'\<guillemotright> && \<guillemotleft>G o \<phi>'\<guillemotright> = \<guillemotleft>G'\<guillemotright>"
proof(rule ext, unfold exp_conj_def)
fix x
show "\<guillemotleft>G'\<guillemotright> x .& \<guillemotleft>G o \<phi>'\<guillemotright> x = \<guillemotleft>G'\<guillemotright> x" (is ?X)
proof(cases "G' x")
case False then show ?X by(simp)
next
case True
moreover
with Gimp have "(G o \<phi>') x" by(simp add:o_def)
ultimately
show ?X by(simp)
qed
qed
with uP
have "\<guillemotleft>G'\<guillemotright> && (P o (\<phi> o \<phi>')) = \<guillemotleft>G'\<guillemotright> && ((\<guillemotleft>G\<guillemotright> && (P o \<phi>)) o \<phi>')"
by(simp add:exp_conj_assoc o_assoc)
also {
from uP uQ wpA and drAB
have "\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> wp B (Q o \<phi>)"
by(blast intro:drefinesD)
with drBC and uP uQ
have "\<guillemotleft>G'\<guillemotright> && ((\<guillemotleft>G\<guillemotright> && (P o \<phi>)) o \<phi>') \<tturnstile> wp C ((Q o \<phi>) o \<phi>')"
by(blast intro:unitary_intros drefinesD)
}
finally
show "\<guillemotleft>G'\<guillemotright> && (P o (\<phi> o \<phi>')) \<tturnstile> wp C (Q o (\<phi> o \<phi>'))"
by(simp add:o_assoc)
qed
text \<open>Data refinement composes with program refinement:\<close>
lemma pr_dr_trans[trans]:
assumes prAB: "A \<sqsubseteq> B"
and drBC: "drefines \<phi> G B C"
shows "drefines \<phi> G A C"
proof(rule drefinesI)
fix P and Q
assume uP: "unitary P"
and uQ: "unitary Q"
and wpA: "P \<tturnstile> wp A Q"
note wpA
also from uQ and prAB have "wp A Q \<tturnstile> wp B Q" by(blast)
finally have "P \<tturnstile> wp B Q" .
with uP uQ drBC
show "\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> wp C (Q o \<phi>)" by(blast intro:drefinesD)
qed
lemma dr_pr_trans[trans]:
assumes drAB: "drefines \<phi> G A B"
assumes prBC: "B \<sqsubseteq> C"
shows "drefines \<phi> G A C"
proof(rule drefinesI)
fix P and Q
assume uP: "unitary P"
and uQ: "unitary Q"
and wpA: "P \<tturnstile> wp A Q"
with drAB have "\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> wp B (Q o \<phi>)" by(blast intro:drefinesD)
also from uQ prBC have "... \<tturnstile> wp C (Q o \<phi>)" by(blast)
finally show "\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> ..." .
qed
text \<open>If the projection @{term \<phi>} commutes with the transformer, then data refinement is
reflexive:\<close>
lemma dr_refl:
assumes wa: "well_def a"
and comm: "\<And>Q. unitary Q \<Longrightarrow> wp a Q o \<phi> \<tturnstile> wp a (Q o \<phi>)"
shows "drefines \<phi> G a a"
proof(intro drefinesI2 wa)
fix P and Q and s
assume wp: "P \<tturnstile> wp a Q"
assume uQ: "unitary Q"
have "(P o \<phi>) s = P (\<phi> s)" by(simp)
also from wp have "... \<le> wp a Q (\<phi> s)" by(blast)
also {
from comm uQ have "wp a Q o \<phi> \<tturnstile> wp a (Q o \<phi>)" by(blast)
hence "(wp a Q o \<phi>) s \<le> wp a (Q o \<phi>) s" by(blast)
hence "wp a Q (\<phi> s) \<le> ..." by(simp)
}
finally show "(P o \<phi>) s \<le> wp a (Q o \<phi>) s" .
qed
text \<open>Correspondence implies data refinement\<close>
lemma pcorres_drefine:
assumes corres: "pcorres \<phi> G A C"
and wC: "well_def C"
shows "drefines \<phi> G A C"
proof
fix P and Q
assume uP: "unitary P" and uQ: "unitary Q"
and wpA: "P \<tturnstile> wp A Q"
from wpA have "P o \<phi> \<tturnstile> wp A Q o \<phi>" by(simp add:o_def le_fun_def)
hence "\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> \<guillemotleft>G\<guillemotright> && (wp A Q o \<phi>)"
by(rule exp_conj_mono_right)
also from corres uQ
have "... = \<guillemotleft>G\<guillemotright> && (wp C (Q o \<phi>))" by(rule pcorresD)
also
have "... \<tturnstile> wp C (Q o \<phi>)"
proof(rule le_funI)
fix s
from uQ have "unitary (Q o \<phi>)" by(rule unitary_intros)
with well_def_wp_healthy[OF wC] have nn_wpC: "0 \<le> wp C (Q o \<phi>) s" by(blast)
show "(\<guillemotleft>G\<guillemotright> && wp C (Q o \<phi>)) s \<le> wp C (Q o \<phi>) s"
proof(cases "G s")
case True
with nn_wpC show ?thesis by(simp add:exp_conj_def)
next
case False note this
moreover {
from uQ have "unitary (Q o \<phi>)" by(simp)
with well_def_wp_healthy[OF wC] have "wp C (Q o \<phi>) s \<le> 1" by(auto)
}
moreover note nn_wpC
ultimately show ?thesis by(simp add:exp_conj_def)
qed
qed
finally show "\<guillemotleft>G\<guillemotright> && (P o \<phi>) \<tturnstile> wp C (Q o \<phi>)" .
qed
text \<open>Any \emph{data} refinement of a deterministic program is in fact a correspondence. This is
the analogue of the result relating program refinement and equivalence.\<close>
lemma drefines_determ:
fixes a::"'a prog" and b::"'b prog"
assumes da: "determ (wp a)"
and wa: "well_def a"
and wb: "well_def b"
and dr: "drefines \<phi> G a b"
shows "pcorres \<phi> G a b"
txt \<open>The proof follows exactly the same form
as that for program refinement: Assuming that correspondence
\emph{doesn't} hold, we show that @{term "wp b"} is not feasible,
and thus not healthy, contradicting the assumption.\<close>
proof(rule pcorresI, rule contrapos_pp)
from wb show "feasible (wp b)" by(auto)
note ha = well_def_wp_healthy[OF wa]
note hb = well_def_wp_healthy[OF wb]
from wb have "sublinear (wp b)" by(auto)
moreover from hb have "feasible (wp b)" by(auto)
ultimately have sab: "sub_add (wp b)" by(rule sublinear_subadd)
fix Q::"'a \<Rightarrow> real"
assume uQ: "unitary Q"
hence uQ\<phi>: "unitary (Q o \<phi>)" by(auto)
assume ne: "\<guillemotleft>G\<guillemotright> && (wp a Q o \<phi>) \<noteq> \<guillemotleft>G\<guillemotright> && wp b (Q o \<phi>)"
hence ne': "wp a Q o \<phi> \<noteq> wp b (Q o \<phi>)" by(auto)
txt \<open>From refinement, @{term "\<guillemotleft>G\<guillemotright> && (wp a Q o \<phi>)"}
lies below @{term "\<guillemotleft>G\<guillemotright> && wp b (Q o \<phi>)"}.\<close>
from ha uQ
have gle: "\<guillemotleft>G\<guillemotright> && (wp a Q o \<phi>) \<tturnstile> wp b (Q o \<phi>)" by(blast intro!:drefinesD[OF dr])
have le: "\<guillemotleft>G\<guillemotright> && (wp a Q o \<phi>) \<tturnstile> \<guillemotleft>G\<guillemotright> && wp b (Q o \<phi>)"
unfolding exp_conj_def
proof(rule le_funI)
fix s
from gle have "\<guillemotleft>G\<guillemotright> s .& (wp a Q o \<phi>) s \<le> wp b (Q o \<phi>) s"
unfolding exp_conj_def by(auto)
hence "\<guillemotleft>G\<guillemotright> s .& (\<guillemotleft>G\<guillemotright> s .& (wp a Q o \<phi>) s) \<le> \<guillemotleft>G\<guillemotright> s .& wp b (Q o \<phi>) s"
by(auto intro:pconj_mono)
moreover from uQ ha have "wp a Q (\<phi> s) \<le> 1"
by(auto dest:healthy_bounded_byD)
moreover from uQ ha have "0 \<le> wp a Q (\<phi> s)"
by(auto)
ultimately
show "\<guillemotleft> G \<guillemotright> s .& (wp a Q \<circ> \<phi>) s \<le> \<guillemotleft> G \<guillemotright> s .& wp b (Q \<circ> \<phi>) s"
by(simp add:pconj_assoc)
qed
txt \<open>If the programs do not correspond, the terms must differ somewhere, and given the previous
result, the second must be somewhere strictly larger than the first:\<close>
have nle: "\<exists>s. (\<guillemotleft>G\<guillemotright> && (wp a Q o \<phi>)) s < (\<guillemotleft>G\<guillemotright> && wp b (Q o \<phi>)) s"
proof(rule contrapos_np[OF ne], rule ext, rule antisym)
fix s
from le show "(\<guillemotleft>G\<guillemotright> && (wp a Q o \<phi>)) s \<le> (\<guillemotleft>G\<guillemotright> && wp b (Q o \<phi>)) s"
by(blast)
next
fix s
assume "\<not> (\<exists>s. (\<guillemotleft>G\<guillemotright> && (wp a Q \<circ> \<phi>)) s < (\<guillemotleft>G\<guillemotright> && wp b (Q \<circ> \<phi>)) s)"
thus " (\<guillemotleft>G\<guillemotright> && (wp b (Q \<circ> \<phi>))) s \<le> (\<guillemotleft>G\<guillemotright> && (wp a Q \<circ> \<phi>)) s"
by(simp add:not_less)
qed
from this obtain s where less_s:
"(\<guillemotleft>G\<guillemotright> && (wp a Q \<circ> \<phi>)) s < (\<guillemotleft>G\<guillemotright> && wp b (Q \<circ> \<phi>)) s"
by(blast)
txt \<open>The transformers themselves must differ at this point:\<close>
hence larger: "wp a Q (\<phi> s) < wp b (Q \<circ> \<phi>) s"
proof(cases "G s")
case True
moreover from ha uQ have "0 \<le> wp a Q (\<phi> s)"
by(blast)
moreover from hb uQ\<phi> have "0 \<le> wp b (Q o \<phi>) s"
by(blast)
moreover note less_s
ultimately show ?thesis by(simp add:exp_conj_def)
next
case False
moreover from ha uQ have "wp a Q (\<phi> s) \<le> 1"
by(blast)
moreover {
from uQ have "bounded_by 1 (Q o \<phi>)"
by(blast)
moreover from unitary_sound[OF uQ]
have "sound (Q o \<phi>)" by(auto)
ultimately have "wp b (Q o \<phi>) s \<le> 1"
using hb by(auto)
}
moreover note less_s
ultimately show ?thesis by(simp add:exp_conj_def)
qed
from less_s have "(\<guillemotleft>G\<guillemotright> && (wp a Q \<circ> \<phi>)) s \<noteq> (\<guillemotleft>G\<guillemotright> && wp b (Q \<circ> \<phi>)) s"
by(force)
txt \<open>@{term G} must also hold, as otherwise both would be zero.\<close>
hence G_s: "G s"
proof(rule contrapos_np)
assume nG: "\<not> G s"
moreover from ha uQ have "wp a Q (\<phi> s) \<le> 1"
by(blast)
moreover {
from uQ have "bounded_by 1 (Q o \<phi>)"
by(blast)
moreover from unitary_sound[OF uQ]
have "sound (Q o \<phi>)" by(auto)
ultimately have "wp b (Q o \<phi>) s \<le> 1"
using hb by(auto)
}
ultimately
show "(\<guillemotleft>G\<guillemotright> && (wp a Q \<circ> \<phi>)) s = (\<guillemotleft>G\<guillemotright> && wp b (Q \<circ> \<phi>)) s"
by(simp add:exp_conj_def)
qed
txt \<open>Take a carefully constructed expectation:\<close>
let ?Qc = "\<lambda>s. bound_of Q - Q s"
have bQc: "bounded_by 1 ?Qc"
proof(rule bounded_byI)
fix s
from uQ have "bound_of Q \<le> 1" and "0 \<le> Q s" by(auto)
thus "bound_of Q - Q s \<le> 1" by(auto)
qed
have sQc: "sound ?Qc"
proof(rule soundI)
from bQc show "bounded ?Qc" by(auto)
show "nneg ?Qc"
proof(rule nnegI)
fix s
from uQ have "Q s \<le> bound_of Q" by(auto)
thus "0 \<le> bound_of Q - Q s" by(auto)
qed
qed
txt \<open>By the maximality of @{term "wp a"}, @{term "wp b"} must violate feasibility, by mapping
@{term s} to something strictly greater than @{term "bound_of Q"}.\<close>
from uQ have "0 \<le> bound_of Q" by(auto)
with da have "bound_of Q = wp a (\<lambda>s. bound_of Q) (\<phi> s)"
by(simp add:maximalD determ_maximalD)
also have "wp a (\<lambda>s. bound_of Q) (\<phi> s) = wp a (\<lambda>s. Q s + ?Qc s) (\<phi> s)"
by(simp)
also {
from da have "additive (wp a)" by(blast)
with uQ sQc
have "wp a (\<lambda>s. Q s + ?Qc s) (\<phi> s) =
wp a Q (\<phi> s) + wp a ?Qc (\<phi> s)" by(subst additiveD, blast+)
}
also {
from ha and sQc and bQc
have "\<guillemotleft>G\<guillemotright> && (wp a ?Qc o \<phi>) \<tturnstile> wp b (?Qc o \<phi>)"
by(blast intro!:drefinesD[OF dr])
hence "(\<guillemotleft>G\<guillemotright> && (wp a ?Qc o \<phi>)) s \<le> wp b (?Qc o \<phi>) s"
by(blast)
moreover from sQc and ha
have "0 \<le> wp a (\<lambda>s. bound_of Q - Q s) (\<phi> s)"
by(blast)
ultimately
have "wp a ?Qc (\<phi> s) \<le> wp b (?Qc o \<phi>) s"
using G_s by(simp add:exp_conj_def)
hence "wp a Q (\<phi> s) + wp a ?Qc (\<phi> s) \<le> wp a Q (\<phi> s) + wp b (?Qc o \<phi>) s"
by(rule add_left_mono)
also with larger
have "wp a Q (\<phi> s) + wp b (?Qc o \<phi>) s <
wp b (Q o \<phi>) s + wp b (?Qc o \<phi>) s"
by(auto)
finally
have "wp a Q (\<phi> s) + wp a ?Qc (\<phi> s) <
wp b (Q o \<phi>) s + wp b (?Qc o \<phi>) s" .
}
also from sab and unitary_sound[OF uQ] and sQc
have "wp b (Q o \<phi>) s + wp b (?Qc o \<phi>) s \<le>
wp b (\<lambda>s. (Q o \<phi>) s + (?Qc o \<phi>) s) s"
by(blast)
also have "... = wp b (\<lambda>s. bound_of Q) s"
by(simp)
finally
show "\<not> feasible (wp b)"
proof(rule contrapos_pn)
assume fb: "feasible (wp b)"
have "bounded_by (bound_of Q) (\<lambda>s. bound_of Q)" by(blast)
hence "bounded_by (bound_of Q) (wp b (\<lambda>s. bound_of Q))"
using uQ by(blast intro:feasible_boundedD[OF fb])
hence "wp b (\<lambda>s. bound_of Q) s \<le> bound_of Q" by(blast)
thus "\<not> bound_of Q < wp b (\<lambda>s. bound_of Q) s" by(simp)
qed
qed
subsection \<open>Structural Rules for Correspondence\<close>
lemma pcorres_Skip:
"pcorres \<phi> G Skip Skip"
by(simp add:pcorres_def wp_eval)
text \<open>Correspondence composes over sequential composition.\<close>
lemma pcorres_Seq:
fixes A::"'b prog" and B::"'c prog"
and C::"'b prog" and D::"'c prog"
and \<phi>::"'c \<Rightarrow> 'b"
assumes pcAB: "pcorres \<phi> G A B"
and pcCD: "pcorres \<phi> H C D"
and wA: "well_def A" and wB: "well_def B"
and wC: "well_def C" and wD: "well_def D"
and p3p2: "\<And>Q. unitary Q \<Longrightarrow> \<guillemotleft>I\<guillemotright> && wp B Q = wp B (\<guillemotleft>H\<guillemotright> && Q)"
and p1p3: "\<And>s. G s \<Longrightarrow> I s"
shows "pcorres \<phi> G (A;;C) (B;;D)"
proof(rule pcorresI)
fix Q::"'b \<Rightarrow> real"
assume uQ: "unitary Q"
with well_def_wp_healthy[OF wC] have uCQ: "unitary (wp C Q)" by(auto)
from uQ well_def_wp_healthy[OF wD] have uDQ: "unitary (wp D (Q o \<phi>))"
by(auto dest:unitary_comp)
have p3p1: "\<And>R S. \<lbrakk> unitary R; unitary S; \<guillemotleft>I\<guillemotright> && R = \<guillemotleft>I\<guillemotright> && S \<rbrakk> \<Longrightarrow>
\<guillemotleft>G\<guillemotright> && R = \<guillemotleft>G\<guillemotright> && S"
proof(rule ext)
fix R::"'c \<Rightarrow> real" and S::"'c \<Rightarrow> real" and s::'c
assume a3: "\<guillemotleft>I\<guillemotright> && R = \<guillemotleft>I\<guillemotright> && S"
and uR: "unitary R" and uS: "unitary S"
show "(\<guillemotleft>G\<guillemotright> && R) s = (\<guillemotleft>G\<guillemotright> && S) s"
proof(simp add:exp_conj_def, cases "G s")
case False note this
moreover from uR have "R s \<le> 1" by(blast)
moreover from uS have "S s \<le> 1" by(blast)
ultimately show "\<guillemotleft>G\<guillemotright> s .& R s = \<guillemotleft>G\<guillemotright> s .& S s"
by(simp)
next
case True note p1 = this
with p1p3 have "I s" by(blast)
with fun_cong[OF a3, where x=s] have "1 .& R s = 1 .& S s"
by(simp add:exp_conj_def)
with p1 show "\<guillemotleft>G\<guillemotright> s .& R s = \<guillemotleft>G\<guillemotright> s .& S s"
by(simp)
qed
qed
show "\<guillemotleft>G\<guillemotright> && (wp (A;;C) Q o \<phi>) = \<guillemotleft>G\<guillemotright> && wp (B;;D) (Q o \<phi>)"
proof(simp add:wp_eval)
from uCQ pcAB have "\<guillemotleft>G\<guillemotright> && (wp A (wp C Q) \<circ> \<phi>) =
\<guillemotleft>G\<guillemotright> && wp B ((wp C Q) \<circ> \<phi>)"
by(auto dest:pcorresD)
also have "\<guillemotleft>G\<guillemotright> && wp B ((wp C Q) \<circ> \<phi>) =
\<guillemotleft>G\<guillemotright> && wp B (wp D (Q \<circ> \<phi>))"
proof(rule p3p1)
from uCQ well_def_wp_healthy[OF wB] show "unitary (wp B (wp C Q \<circ> \<phi>))"
by(auto intro:unitary_comp)
from uDQ well_def_wp_healthy[OF wB] show "unitary (wp B (wp D (Q \<circ> \<phi>)))"
by(auto)
from uQ have "\<guillemotleft> H \<guillemotright> && (wp C Q \<circ> \<phi>) = \<guillemotleft> H \<guillemotright> && wp D (Q \<circ> \<phi>)"
by(blast intro:pcorresD[OF pcCD])
thus "\<guillemotleft> I \<guillemotright> && wp B (wp C Q \<circ> \<phi>) = \<guillemotleft> I \<guillemotright> && wp B (wp D (Q \<circ> \<phi>))"
by(simp add:p3p2 uCQ uDQ)
qed
finally show "\<guillemotleft>G\<guillemotright> && (wp A (wp C Q) \<circ> \<phi>) = \<guillemotleft>G\<guillemotright> && wp B (wp D (Q \<circ> \<phi>))" .
qed
qed
subsection \<open>Structural Rules for Data Refinement\<close>
lemma dr_Skip:
fixes \<phi>::"'c \<Rightarrow> 'b"
shows "drefines \<phi> G Skip Skip"
proof(intro drefinesI2 wd_intros)
fix P::"'b \<Rightarrow> real" and Q::"'b \<Rightarrow> real" and s::'c
assume "P \<tturnstile> wp Skip Q"
hence "(P o \<phi>) s \<le> wp Skip Q (\<phi> s)" by(simp, blast)
thus "(P o \<phi>) s \<le> wp Skip (Q o \<phi>) s" by(simp add:wp_eval)
qed
lemma dr_Abort:
fixes \<phi>::"'c \<Rightarrow> 'b"
shows "drefines \<phi> G Abort Abort"
proof(intro drefinesI2 wd_intros)
fix P::"'b \<Rightarrow> real" and Q::"'b \<Rightarrow> real" and s::'c
assume "P \<tturnstile> wp Abort Q"
hence "(P o \<phi>) s \<le> wp Abort Q (\<phi> s)" by(auto)
thus "(P o \<phi>) s \<le> wp Abort (Q o \<phi>) s" by(simp add:wp_eval)
qed
lemma dr_Apply:
fixes \<phi>::"'c \<Rightarrow> 'b"
assumes commutes: "f o \<phi> = \<phi> o g"
shows "drefines \<phi> G (Apply f) (Apply g)"
proof(intro drefinesI2 wd_intros)
fix P::"'b \<Rightarrow> real" and Q::"'b \<Rightarrow> real" and s::'c
assume wp: "P \<tturnstile> wp (Apply f) Q"
hence "P \<tturnstile> (Q o f)" by(simp add:wp_eval)
hence "P (\<phi> s) \<le> (Q o f) (\<phi> s)" by(blast)
also have "... = Q ((f o \<phi>) s)" by(simp)
also with commutes
have "... = ((Q o \<phi>) o g) s" by(simp)
also have "... = wp (Apply g) (Q o \<phi>) s"
by(simp add:wp_eval)
finally show "(P o \<phi>) s \<le> wp (Apply g) (Q o \<phi>) s" by(simp)
qed
lemma dr_Seq:
assumes drAB: "drefines \<phi> P A B"
and drBC: "drefines \<phi> Q C D"
and wpB: "\<guillemotleft>P\<guillemotright> \<tturnstile> wp B \<guillemotleft>Q\<guillemotright>"
and wB: "well_def B"
and wC: "well_def C"
and wD: "well_def D"
shows "drefines \<phi> P (A;;C) (B;;D)"
proof
fix R and S
assume uR: "unitary R" and uS: "unitary S"
and wpAC: "R \<tturnstile> wp (A;;C) S"
from uR
have "\<guillemotleft>P\<guillemotright> && (R o \<phi>) = \<guillemotleft>P\<guillemotright> && (\<guillemotleft>P\<guillemotright> && (R o \<phi>))"
by(simp add:exp_conj_assoc)
also {
from well_def_wp_healthy[OF wC] uR uS
and wpAC[unfolded eval_wp_Seq o_def]
have "\<guillemotleft>P\<guillemotright> && (R o \<phi>) \<tturnstile> wp B (wp C S o \<phi>)"
by(auto intro:drefinesD[OF drAB])
with wpB well_def_wp_healthy[OF wC] uS
sublinear_sub_conj[OF well_def_wp_sublinear, OF wB]
have "\<guillemotleft>P\<guillemotright> && (\<guillemotleft>P\<guillemotright> && (R o \<phi>)) \<tturnstile> wp B (\<guillemotleft>Q\<guillemotright> && (wp C S o \<phi>))"
by(auto intro!:entails_combine dest!:unitary_sound)
}
also {
from uS well_def_wp_healthy[OF wC]
have "\<guillemotleft>Q\<guillemotright> && (wp C S o \<phi>) \<tturnstile> wp D (S o \<phi>)"
by(auto intro!:drefinesD[OF drBC])
with well_def_wp_healthy[OF wB] well_def_wp_healthy[OF wC]
well_def_wp_healthy[OF wD] and unitary_sound[OF uS]
have "wp B (\<guillemotleft>Q\<guillemotright> && (wp C S o \<phi>)) \<tturnstile> wp B (wp D (S o \<phi>))"
by(blast intro!:mono_transD)
}
finally
show "\<guillemotleft>P\<guillemotright> && (R o \<phi>) \<tturnstile> wp (B;;D) (S o \<phi>)"
unfolding wp_eval o_def .
qed
lemma dr_repeat:
fixes \<phi> :: "'a \<Rightarrow> 'b"
assumes dr_ab: "drefines \<phi> G a b"
and Gpr: "\<guillemotleft>G\<guillemotright> \<tturnstile> wp b \<guillemotleft>G\<guillemotright>"
and wa: "well_def a"
and wb: "well_def b"
shows "drefines \<phi> G (repeat n a) (repeat n b)" (is "?X n")
proof(induct n)
show "?X 0" by(simp add:dr_Skip)
fix n
assume IH: "?X n"
thus "?X (Suc n)" by(auto intro!:dr_Seq Gpr assms wd_intros)
qed
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/pGCL/Algebra.thy"}
|
#
# io_disc.py
# Contains helpful dictionaries and other global variables for plotting
# Also contains functions to read output files and plot data
#
import numpy as np
import matplotlib.pyplot as plt
import filefinder as ff
from multigraph import multigraph, multigraph_legend,multigraph_legend_points
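# Column counts for the profile, layer and log output files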
nprofcol = 11
nlayercol = 11
nlogcol = 11
# Variables for profile file
profilekeys = ['r','sigma','cs','kappa','gamma','mu', 'T', 'tau', 'nu','alpha', 'Q']
profilelabels = [r'r (AU)',r'$\Sigma$ (g cm $^{-2}$)',r' $c_s$ (cm s$^{-1}$)',r'$\kappa$ (cm$^{2}$ g$^{-1}$)',
r'$\gamma$',r'$\mu$',r'$T_c$ (K)',r'$ \tau $',r'$\nu_g$',r'$\alpha_{g}$',r'$ Q $']
profilexlabel = profilelabels[0]
# Log the y axis? True/False
profileylog=[True, True, True,True,False,False,True,True,True,True,True]
# y limits - set defaults first
profileymin=[]
profileymax=[]
for i in range(nprofcol):
profileymin.append(0.0)
profileymax.append(0.0)
# Now define any non-default limits
# sigma
profileymin[1] = 1.0e1
profileymax[1] = 1.0e6
#cs
profileymin[2] = 1.0e1
profileymax[2] = 1.0e7
#kappa
profileymin[3] = 1.0e-10
profileymax[3] = 1.0e5
# alpha
profileymin[9] = 1.0e-5
profileymax[9] = 1.0e0
# Q
profileymin[10] = 1.0e0
profileymax[10] = 1.0e2
# Variables for log data
logkeys = ['t', 'dt','mdisc', 'tot_lumin', 'sig_max', 'mgrav', 'mmag', 'grav_max', 'mag_max', 'mdot_grav', 'mdot_mag','mdot_wind' ]
loglabels = [r't (yr)', r'dt (yr)', r'$M_{disc}$ ($M_{\odot}$)',
r'$L_{tot}$ ($L_{\odot}$)',r' $\Sigma_{grav,max}$ (g cm$^{-2}$)',
r'$M_{grav}$ ($M_{\odot}$)',r'$M_{MRI}$ ($M_{\odot}$)',
r' $\Sigma_{grav,max}$ (g cm$^{-2}$)', r' $\Sigma_{MRI,max}$ (g cm$^{-2}$)',
r'$\dot{M}_{grav}$ ($M_{\odot} yr^{-1}$)', r'$\dot{M}_{mag}$ $(M_{\odot} yr^{-1}$)',r'$\dot{M}_{wind}$ $(M_{\odot} yr^{-1}$)']
logxlabel = loglabels[0]
# Log the y axis? True/False
logylog=[False, False, False,True,True,True,True,True,True,True,True,True]
# y limits - set defaults first
logymin=[]
logymax=[]
for i in range(nlogcol):
logymin.append(0.0)
logymax.append(0.0)
# Some global variables for planet plotting
mearth = 0.00314 # Earth mass in Jupiter masses
# Minimum and maximum planet sizes (in pixels)
minplanetsize = 50
maxplanetsize = 200
# Colours for different classes
earthcolour = '#0099ff'
neptunecolour = '#60b28c'
jupitercolour = 'red'
BDcolour = '#663300'
################################
# END OF VARIABLE DEFINITIONS
################################
def read_profile(profilefile):
'''Reads profile data from file'''
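    # Assumed format (inferred from the code): the first line holds the time
    # (yr) and the number of grid points; each remaining line holds one grid
    # point, with nprofcol columns ordered as in profilekeys.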
f = open(profilefile,'r')
line = f.readline()
arr = np.fromstring(line.strip(), dtype=float, sep=" ")
f.close()
time = arr[0]
ngrid = int(arr[1])
    print('Reading file', profilefile)
    print('Time: ' + str(time) + ' yr')
    profdata = np.genfromtxt(profilefile, skip_header=1)
    profdata = profdata.reshape(profdata.size // nprofcol, nprofcol)
return time, profdata
def read_planets(planetfile,verbose=True):
'''Reads planetary data from file'''
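    # Assumed format (inferred from the code): the first line holds the time,
    # the number of planets and the number active; each remaining line holds
    # one planet: active flag, mass (mp), semi-major axis (ap) and migration
    # timescale (tmig).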
f = open(planetfile, 'r')
line = f.readline()
arr = np.fromstring(line.strip(), dtype=float, sep=" ")
time = arr[0]
nplanet = int(arr[1])
nactive = int(arr[2])
    if verbose:
        print('Number of planets: ', nplanet)
        print('Number of those active: ', nactive)
active = np.zeros(nplanet)
mp = np.zeros(nplanet)
ap = np.zeros(nplanet)
tmig = np.zeros(nplanet)
for i in range(nplanet):
line = f.readline()
arr = np.fromstring(line.strip(), sep=" ")
active[i] = arr[0]
mp[i] = arr[1]
ap[i] = arr[2]
tmig[i] = arr[3]
        if verbose:
            print(active[i], mp[i], ap[i], tmig[i])
return time,nplanet,nactive, active,mp,ap,tmig
def get_planet_size_and_colour(nplanet,mp):
'''Given a planet mass, returns a colour and size'''
planetcolours = []
planetsizes = 100*mp[:]
for i in range(nplanet):
if(planetsizes[i]<minplanetsize):
planetsizes[i]=minplanetsize
if(planetsizes[i]>maxplanetsize):
planetsizes[i]=maxplanetsize
for i in range(nplanet):
        if mp[i] < 1.5*mearth:
            planetcolours.append(earthcolour)
        elif mp[i] < 5*mearth:
            planetcolours.append(neptunecolour)
        elif mp[i] < 13.0:
            planetcolours.append(jupitercolour)
        else:
            planetcolours.append(BDcolour)
return planetsizes,planetcolours
def read_log(logfile):
'''Reads the .log file'''
return np.genfromtxt(logfile)
def plot_profile_multifiles_variable(prefix, add_planets=False):
'''Reads multiple profile files and plots a specific variable'''
filenames = ff.find_sorted_local_input_fileset(prefix+"*profile*")
nfiles = len(filenames)
    initial = int(input('Starting filenumber? '))
    final = int(input('Final filenumber? '))
    print('Now select variable to plot: here are the choices')
    for i in range(len(profilekeys)):
        print(str(i+1) + ': ' + profilekeys[i])
    var = int(input('Which variable (1-' + str(len(profilekeys)) + ')? '))
    var = var - 1
    if final > nfiles:
        print('Limiting count to available files')
final = nfiles
fig1 = plt.figure()
ax = fig1.add_subplot(111)
if(add_planets):
planetfiles = ff.find_sorted_local_input_fileset(prefix+"*planets*")
initial = initial-1
final = final - 1
for i in range(initial,final):
time,profdata = read_profile(filenames[i])
if(add_planets):
t,nplanet,nactive, active,mp,ap,tmig = read_planets(planetfiles[i],verbose=False)
# Setup planet points for plotting
xpoints = np.zeros(nplanet)
ypoints = np.zeros(nplanet)
if profileymin[var]!=profileymax[var]:
ypoints[:] = 2.0*profileymin[var]
else:
ypoints[:] = 2.0*np.min(profdata[:,var])
planetsizes,planetcolours = get_planet_size_and_colour(nplanet,mp)
if(profileylog[var]):
ax.set_xscale('log')
ax.set_yscale('log')
if(profileymin[var]!=profileymax[var]):
ax.set_ylim(profileymin[var],profileymax[var])
line1 = ax.plot(profdata[:,0],profdata[:,var])
ax.set_xlabel(profilexlabel)
ax.set_ylabel(profilelabels[var])
ax.text(0.9, 0.9,'t = '+str(np.round(time,2))+' yr',
bbox=dict(edgecolor='black',facecolor='none'), horizontalalignment='center',
verticalalignment='center',transform = ax.transAxes)
if(add_planets):
ax.scatter(ap,ypoints,s=planetsizes,facecolor=planetcolours)
        outputfile = profilekeys[var] + '_' + filenames[i] + '.png'
        print('Saving to', outputfile)
plt.savefig(outputfile, format='png')
ax.clear()
def plot_profile_data(profilefile):
'''Reads a given profile file and plots all variables at that snapshot'''
time,profdata = read_profile(profilefile)
# Set up plot data for multigraph function
# Filenames
profileoutputstring=[]
for i in range(nprofcol):
profileoutputstring.append(profilekeys[i]+'_'+profilefile)
legendstring=[]
# Legend Label
for i in range(nprofcol):
        legendstring.append('t = ' + str(np.round(time, 2)) + ' yr')
multigraph_legend(profdata,nprofcol,profilexlabel,profilelabels,profileylog,profileymin,profileymax,profileoutputstring,legendstring)
return time,profdata
def plot_profile_data_planets(profilefile,planetfile):
'''Reads a given profile file and plots all variables (and planets) at that snapshot'''
time,profdata = read_profile(profilefile)
t,nplanet,nactive,active,mp,ap,tmig = read_planets(planetfile)
if(np.abs(time-t)>1.0e-30):
print "Warning: times of profile/planet files don't match"
# Set up plot data for multigraph function
# Filenames
profileoutputstring=[]
for i in range(nprofcol):
profileoutputstring.append(profilekeys[i]+'_'+profilefile)
legendstring=[]
# Legend Label
for i in range(nprofcol):
        legendstring.append('t = ' + str(np.round(time, 2)) + ' yr')
# Setup planet points for plotting
xpoints = np.zeros((nplanet,nprofcol))
ypoints = np.zeros((nplanet,nprofcol))
for i in range(nprofcol):
if profileymin[i]!=profileymax[i]:
ypoints[:,i] = 2.0*profileymin[i]
else:
ypoints[:,i] = 2.0*np.min(profdata[:,i])
planetsizes,planetcolours = get_planet_size_and_colour(nplanet,mp)
    print(ap, ypoints, planetcolours)
multigraph_legend_points(profdata,nprofcol,profilexlabel,profilelabels,profileylog,profileymin,profileymax,profileoutputstring,legendstring,ap,ypoints,planetsizes,planetcolours)
return time,profdata,nplanet,nactive,active,mp,ap
def plot_log_data(logfile):
''' Plots log file data'''
logdata = read_log(logfile)
logoutputstring = []
    for i in range(nlogcol):
        logoutputstring.append(logkeys[i] + '_' + logfile)
multigraph(logdata,nlogcol,logxlabel,loglabels,logylog,logymin,logymax,logoutputstring)
return logdata
def obtain_planet_tracks(prefix):
'''Reads all planetary data and creates tracks for each planet'''
planetfiles = ff.find_sorted_local_input_fileset(prefix+"*planets*")
time,nplanet,nactive,active,mp,ap,tmig = read_planets(planetfiles[1])
time_all = np.zeros(len(planetfiles)-1)
active_all = np.zeros((nplanet,len(planetfiles)-1))
ap_all = np.zeros((nplanet,len(planetfiles)-1))
mp_all = np.zeros((nplanet,len(planetfiles)-1))
tmig_all= np.zeros((nplanet,len(planetfiles)-1))
for i in range(len(planetfiles)-1):
time_all[i],nplanet,nactive,active, mp,ap,tmig = read_planets(planetfiles[i+1],verbose=False)
active_all[:,i] = active[:]
mp_all[:,i] = mp[:]
ap_all[:,i] = ap[:]
tmig_all[:,i] = tmig[:]
return time_all, nplanet,active_all,ap_all, mp_all,tmig_all
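
# ----------------------------------------------------------------------
# Minimal usage sketch (hypothetical filenames - adjust to your run).
# Plots every variable of one profile snapshot, then the log-file data.
if __name__ == '__main__':
    snapshot_time, profdata = plot_profile_data('disc_profile_001.dat')
    logdata = plot_log_data('disc.log')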
|
{"hexsha": "02f8d3fe9f30b66158e266e5c544548d64cfeca9", "size": 10242, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot/io_disc.py", "max_stars_repo_name": "dh4gan/visag", "max_stars_repo_head_hexsha": "bf3698459c0b41c9097807f9baf32eee3ca0bdbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plot/io_disc.py", "max_issues_repo_name": "dh4gan/visag", "max_issues_repo_head_hexsha": "bf3698459c0b41c9097807f9baf32eee3ca0bdbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plot/io_disc.py", "max_forks_repo_name": "dh4gan/visag", "max_forks_repo_head_hexsha": "bf3698459c0b41c9097807f9baf32eee3ca0bdbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2148760331, "max_line_length": 181, "alphanum_fraction": 0.628392892, "include": true, "reason": "import numpy", "num_tokens": 2900}
|
\cutname{herd.html}
The tool \herd{} is a memory model simulator.
Users may write simple, single-event axiomatic models of their own
and run litmus tests on top of them.
The \herd{} distribution already includes some models.
The authors of~\herd{} are Jade Alglave and Luc Maranget.
\section{Writing simple models}
This section introduces \cat{}, our domain-specific language for
describing and executing memory models. As a language, \cat{} is
loosely inspired by OCaml: it is a functional language, with similar
syntax and constructs.
The basic values of \cat{} are sets of events, which include memory events
but also additional events such as fence events,
and relations over events.
\subsection{Sequential consistency}
The simulator \herd{} accepts models written in text files.
For instance here is \afile{sc.cat},
the definition of the sequentially consistent (SC) model in the partial-order
style:
\verbatiminput{sc.cat}
The model above illustrates some features of model definitions:
\begin{enumerate}
\item A model file starts with a tag (here \verb+SC+), which can also be a
string (in double quotes) in case the tag includes special characters or spaces.
\item Pre-defined bindings. Here \verb+po+ (program order)
and \texttt{rf} (read from) are pre-defined.
The remaining two communication relations (\texttt{co} and~\texttt{fr})
are computed by the included file \verb+cos.cat+, which we describe later
--- See Sec.~\ref{sec:cos}.
For simplicity, we may as well assume that \verb+co+
and~\verb+fr+ are pre-defined.
\item The computation of new relations from other relations,
and their binding to a name with the \verb+let+ construct.
Here, a new relation \verb+com+ is the union ``\texttt{|}'' of
the three pre-defined communication relations.
\item The performance of some checks. Here the relation ``\verb+po | com+''
(\emph{i.e.} the union of program order \textrel{po} and of communication
relations) is required to be acyclic.
Checks can be given names by suffixing them with
``\texttt{as~}\textit{name}''.
This last feature will be used in Sec.~\ref{name:check}.
\end{enumerate}
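For readers who do not have the distribution at hand, here is a sketch of
what such a partial-order SC model may look like; it is reconstructed from
the four points above and is not necessarily a verbatim copy of \afile{sc.cat}:
\begin{verbatim}
SC
include "cos.cat" (* computes co and fr *)
let com = rf | co | fr
acyclic po | com as sc
\end{verbatim}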
%We postpone the discussion of the \verb+show+ instruction, see
%Sec.~\ref{sec:show}.
One can then run some litmus test, for instance \atest{SB}
(for \emph{Store Buffering},
see also Sec.~\ref{litmus:simple}), on top of the SC model:
\begin{verbatim}
% herd7 -model ./sc.cat SB.litmus
Test SB Allowed
States 3
0:EAX=0; 1:EAX=1;
0:EAX=1; 1:EAX=0;
0:EAX=1; 1:EAX=1;
No
Witnesses
Positive: 0 Negative: 3
Condition exists (0:EAX=0 /\ 1:EAX=0)
Observation SB Never 0 3
Hash=7dbd6b8e6dd4abc2ef3d48b0376fb2e3
\end{verbatim}
The output of \herd{} mainly consists of
the list of final states that are allowed by the simulated model.
Additional output relates to the test condition.
One sees that the test condition does not validate on top of SC,
as ``\texttt{No}'' appears just after the list of final states
and as there is no ``Positive'' witness.
Namely, the condition ``\verb+exists (0:EAX=0 /\ 1:EAX=0)+''
reflects a non-SC behaviour, see Sec.~\ref{intro:candidate}.
\label{intro:candidate}%
The simulator \herd{} works by generating all candidate executions
of a given test.
By ``candidate execution'' we mean a choice of events,
of the program order~\textrel{po}, of the read-from relation~\texttt{rf},
and of the final writes to memory
(the last write to each location)\footnote{Alternatively,
we may adopt the simpler view that
a candidate execution includes a choice of all communication relations.}.
In the case of the \ltest{SB} example, we get the following four executions:
\begin{center}
\img{SB-00}\quad\quad
\img{SB-01}\quad\quad
\img{SB-02}\quad\quad
\img{SB-03}
\end{center}
Indeed, there is no choice for the program order \textrel{po}, as there are no
conditional jumps in this example; and no choice for the final
writes either, as there is only one store per location, which
must be \textrel{co}-after the initial stores (pictured as small red dots).
Then, there are two read events from locations $x$ and~$y$ respectively,
which take their values either from the initial stores or from
the stores in program. As a result, there are four possible executions.
The model \afile{sc.cat} gets executed on each of the four
candidate executions. The first three executions
are accepted and the last one is rejected, as it exhibits a cycle
in \texttt{po | fr}.
In the following diagram,
the cycle is obvious:
\begin{center}\img{SB+SC}\end{center}
\subsection{Total Store Order (TSO)}
However, the non-SC execution \ahrefloc{x86:classic}{shows up} on x86 machines,
whose memory model is TSO. As TSO relaxes the write-to-read order, we attempt
to write a TSO model \afile{tso-00.cat}, by simply removing write-to-read
pairs from the acyclicity check:
\verbatiminput{tso-00.cat}
This model illustrates several features
of model definitions:
\begin{itemize}
\item New predefined sets: \verb+W+, \verb+R+ and~\verb+M+, which are
the sets of write events, read events and memory events, respectively.
\item The cartesian product operator ``\verb+*+'' that returns the cartesian
product of two event sets as a relation.
\item The intersection operator ``\verb+&+'' that operates on sets and
relations.
\end{itemize}
As a result, the effect of the declaration
\verb+let po-tso = po & (W*W | R*M)+ is to define \verb+po-tso+
as the program order on memory events minus write-to-read pairs.
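As a sketch (again, the distributed \afile{tso-00.cat} may differ in its details),
the whole model may read:
\begin{verbatim}
TSO-00
include "cos.cat"
let po-tso = po & (W*W | R*M)
let com-tso = rf | co | fr
let ghb = po-tso | com-tso
acyclic ghb
\end{verbatim}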
We run \atest{SB} on top of the tentative TSO model:
\begin{verbatim}
% herd7 -model tso-00.cat SB.litmus
Test SB Allowed
States 4
0:EAX=0; 1:EAX=0;
0:EAX=0; 1:EAX=1;
0:EAX=1; 1:EAX=0;
0:EAX=1; 1:EAX=1;
Ok
Witnesses
Positive: 1 Negative: 3
...
\end{verbatim}
\label{sb:image}The non-SC behaviour is now accepted, as write-to-read \textrel{po}-pairs
no longer participate in the acyclicity check. In effect, this allows
the \ahref{SB-03.png}{last execution} above,
as $\textrel{ghb}$ (\emph{i.e.}
\verb+po-tso | com-tso+) is acyclic.
\begin{center}\img{SB+TSO}\end{center}
However,
our model \afile{tso-00.cat} is flawed: it is still too strict,
forbidding some behaviours that the TSO model should accept.
Consider the test \atest{SB+rfi-pos},
which is test \atest{STFW-PPC} for X86 from Sec.~\ref{stfw} with a normalised name (see Sec.~\ref{sec:names}).
This test targets the following execution:
\begin{center}\img{SB+rfi-pos}\end{center}
Namely the test condition
\verb+exists (0:EAX=1 /\ 0:EBX=0 /\ 1:EAX=1 /\ 1:EBX=0)+
specifies that Thread~$0$ writes~$1$ into location~$x$,
reads the value $1$~from the location~$x$ (possibly by store forwarding) and
then reads the value~$0$ from the location~$y$;
while Thread~$1$ writes~$1$ into~$y$,
reads~$1$ from~$y$ and then reads~$0$ from~$x$.
Hence, this test derives from the previous~\atest{SB}
by adding loads in the middle, those loads
being satisfied from local stores.
As can be seen by running the test on top of the \afile{tso-00.cat}
model, the target execution is forbidden:
\begin{verbatim}
% herd7 -model tso-00.cat SB+rfi-pos.litmus
Test SB+rfi-pos Allowed
States 15
0:EAX=0; 0:EBX=0; 1:EAX=0; 1:EBX=0;
...
0:EAX=1; 0:EBX=1; 1:EAX=1; 1:EBX=1;
No
Witnesses
Positive: 0 Negative: 15
..
\end{verbatim}
However, running the test with litmus demonstrates that the behaviour
is observed on some X86 machine:
\begin{verbatim}
% arch
x86_64
% litmus7 -mach x86 SB+rfi-pos.litmus
...
Test SB+rfi-pos Allowed
Histogram (4 states)
11589 *>0:EAX=1; 0:EBX=0; 1:EAX=1; 1:EBX=0;
3993715:>0:EAX=1; 0:EBX=1; 1:EAX=1; 1:EBX=0;
3994308:>0:EAX=1; 0:EBX=0; 1:EAX=1; 1:EBX=1;
388 :>0:EAX=1; 0:EBX=1; 1:EAX=1; 1:EBX=1;
Ok
Witnesses
Positive: 11589, Negative: 7988411
Condition exists (0:EAX=1 /\ 0:EBX=0 /\ 1:EAX=1 /\ 1:EBX=0) is validated
...
\end{verbatim}
As a conclusion, our tentative TSO model is too strong.
The following diagram pictures its \textrel{ghb} relation:
\begin{center}\img{SB+rfi-pos+TER}\end{center}
One easily sees that \textrel{ghb} is cyclic, whereas it should not be.
Namely, the internal read-from relation~\textrel{rfi} does
not create global order in the TSO model.
Hence, \textrel{rfi} is not included in \textrel{ghb}.
We rephrase our tentative TSO model, resulting in the new model
\afile{tso-01.cat}:
\verbatiminput{tso-01.cat}
As can be observed above, \texttt{rfi} (internal read-from) is no longer
included in \textrel{ghb}. However, \texttt{rfe} (external read-from)
still is. Notice that \texttt{rfe} and~\texttt{rfi} are pre-defined.
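Sketching the key change with respect to the previous model, the definitions
may now read (our reconstruction, not necessarily the distributed file):
\begin{verbatim}
let po-tso = po & (W*W | R*M)
let com-tso = rfe | co | fr   (* rfi is no longer global *)
let ghb = po-tso | com-tso
acyclic ghb
\end{verbatim}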
As intended, this new tentative TSO model allows the behaviour of test~\atest{SB+rfi-pos}:
\begin{verbatim}
% herd7 -model tso-01.cat SB+rfi-pos.litmus
Test SB+rfi-pos Allowed
States 16
...
0:EAX=1; 0:EBX=1; 1:EAX=1; 1:EBX=0;
...
Ok
Witnesses
Positive: 1 Negative: 15
...
\end{verbatim}
And indeed, the global-happens-before relation is no longer cyclic:
\begin{center}\img{SB+rfi-pos+BIS}\end{center}
We are not done yet, as our model is too weak in two respects.
First, it has no semantics for fences.
As a result, the test \atest{SB+mfences} is allowed, whereas it should
be forbidden, as forbidding it is the very purpose of the fence \texttt{mfence}.
\begin{center}\img{SB+mfences}\end{center}
One easily solves this issue by first defining the relation \verb+mfence+
that relates events with an \verb+MFENCE+ event \texttt{po}-in-between them;
and then by adding \verb+mfence+ to the definition of \verb+po-tso+:
\begin{verbatim}
let mfence = po & (_ * MFENCE) ; po
let po-tso = po & (W*W | R*M) | mfence
\end{verbatim}
Notice how the relation \verb+mfence+ is defined from two pre-defined sets:
``\verb+_+'', the universal set of all events, and \verb+MFENCE+, the set
of fence events generated by the X86 \texttt{mfence} instruction.
An alternative, more precise definition is possible:
\begin{verbatim}
let mem-to-mfence = po & M * MFENCE
let mfence-to-mem = po & MFENCE * M
let mfence = mem-to-mfence; mfence-to-mem
\end{verbatim}
This alternative definition of \texttt{mfence},
although yielding a smaller relation, is equivalent to the original one
for our purpose of checking \texttt{ghb} acyclicity.
But the resulting model is still too weak,
as it allows some behaviours that any model must
reject for the sake of single-thread correctness.
The following test \atest{CoRWR} illustrates the issue:
\verbatiminput{CoRWR.litmus}
The test final condition targets the following execution candidate:
\begin{center}\img{CoRWR}\end{center}
The TSO check ``\verb+acyclic po-tso|com-tso+'' does not suffice to reject
two absurd behaviours pictured in the execution diagram above:
(1) the read~$a$ is allowed to
read from the \textrel{po}-after write~$b$, as \textrel{rfi} is not included
in \textrel{com-tso}; and~(2)
the read~$c$ is allowed to read the initial value of location~$x$
although the initial write~$d$ is \textrel{co}-before the write~$b$,
since \verb+po & (W * R)+ is not in \textrel{po-tso}.
\label{defuniproc}For any model, we rule out such absurd
behaviours by the so-called
\textsc{uniproc}
check, which states that executions projected on the events that access a
single variable are SC.
In practice, having defined \verb+po-loc+ as \verb+po+ restricted to
events that touch the same address (\emph{i.e.}
as \verb+po & loc+), we further require the acyclicity
of the relation \verb+po-loc|fr|rf|co+.
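Written in \cat{}, the generic check is a direct transcription of the prose
above (the check name \texttt{uniproc} is our choice):
\begin{verbatim}
let po-loc = po & loc
acyclic po-loc | fr | rf | co as uniproc
\end{verbatim}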
In the TSO case, the \textsc{uniproc}~check can be
somewhat simplified by considering only
the cycles in \verb+po-loc|fr|rf|co+ that
are not already rejected by the main check of the model.
This amounts to designing specific checks for the two relations that are
not global in TSO: \verb+rfi+ and \verb+po & (W*R)+.
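One possible phrasing of those two specific checks is sketched below;
the actual \afile{tso-02.cat} may well phrase them differently:
\begin{verbatim}
(* no read from a po-later write *)
irreflexive rfi ; po-loc
(* no read of a value overwritten by a po-earlier write *)
irreflexive fr ; (po & (W*R))
\end{verbatim}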
Doing so, we finally produce a correct model for TSO \afile{tso-02.cat}:
\verbatiminput{tso-02.cat}
This last model illustrates another feature of \cat{}:
\herd{} may also perform irreflexivity checks with the keyword
``\verb+irreflexive+''.
\subsection{Sequential consistency, total order definition}
We now illustrate another style of model.
We consider the original definition of sequential consistency~\cite{lam79}.
An execution is SC when there exists a total (strict) order~\verb+S+ on events such that:
\begin{enumerate}
\item $S$ includes the program order~\verb+po+;
\item \label{rfcond}and read events read from the most recent write events in the past,
\emph{i.e.} a read~$r$ from location~$x$ reads the value stored by
the \verb+S+-maximal write amongst those writes to location~$x$
that \verb+S+-precede~$r$.
\end{enumerate}
So we could just generate all total strict orders amongst events,
and filter those ``scheduling order candidates'' according to the two rules
above.
\label{sec:final:initial}Things are a bit more complex in~\herd{}, due to the presence of initial and final writes. Up to now we have ignored those writes;
we are now going to integrate them explicitly.
Initial writes are write events that initialise the memory locations.
Initial writes are not generated by the instructions of the test.
Instead, they are created by \herd{} machinery, and are available
from model text as the set \verb+IW+.
Final writes may be generated by program instructions, and, when they are,
they must be ordered by~$S$.
A final write is a write whose value is observed by a phantom read performed once
program execution is over.
The constraint on final writes
originates from the technique \herd{} uses to enumerate execution candidates:
actual execution candidates also include a choice of final writes for
the locations that are observed in the test final condition\footnote{Doing
so permits pruning executions that are irrelevant to the test final condition,
see \herd{} option \ahrefloc{speedcheck:opt}{\opt{-speedcheck}}}.
As the test outcome (\emph{i.e.} the final values of observed locations) is
settled before executing the model, it is important \emph{not} to accept
executions that yield a different outcome. Doing so may validate outcomes
that should be rejected.
In practice, the final write $w_f$
to location~$x$ must follow all other writes to~$x$ in~$S$.
Considering that the set of final writes is available to \cat{}~models
as the pre-defined set~\verb+FW+,
the constraint on final writes
can be expressed as a relation:
\begin{verbatim}
let preSC = loc & (W \ FW) * FW
\end{verbatim}
Where \verb+loc+ is a predefined relation that relates all events
that access the same location.
By contrast with final writes, initial writes are not generated
by program instructions, and it is possible not to order them completely.
In particular, it is not useful to order initial writes to different locations,
nor the initial write to location~$x$ with any access to location~$y$.
Notice that we could include initial writes in~$S$ as we did for
final writes. Not doing so slightly improves efficiency.
\label{intro:linearisations}%
Finally, the strict order~$S$ is not just any order on events:
it is a topological order of the events generated by threads
(implemented as the set \verb+~IW+, \emph{i.e.} the complement of the set
of initial writes) that extends the pre-order~\verb+preSC+.
We can generate all such topological orders with the \cat{}~primitive
\verb+linearisations+:
\begin{verbatim}
let allS = linearisations(~IW,preSC)
\end{verbatim}
The call \texttt{linearisations($E$,$r$)}, where $E$ is a set of events
and~$r$ is a relation on events, returns the set of all total
strict orders defined on~$E$ that extend~$r$. Notice that if~$r$ is cyclic,
the empty set is returned.
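For instance, writing \texttt{0} for the empty relation, a two-event set
with no ordering constraint yields both possible orders:
$$
\texttt{linearisations}(\{e_1,e_2\},\texttt{0}) =
\left\{\, \{(e_1,e_2)\},\; \{(e_2,e_1)\} \,\right\}
$$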
\label{intro:with}We now need to iterate over the set~\verb+allS+.
We do so with the \verb+with+ construct:
\begin{verbatim}
with S from allS
\end{verbatim}
It is important to notice that the construct above extends the current
execution candidate (\emph{i.e.} a choice of events, plus a choice of
two relations~\texttt{po} and~\texttt{rf}) with a candidate strict order~$S$.
In other words, the scope of the iteration is the remainder of the model text.
Once model execution terminates for a choice of~$S$
(some element of~\texttt{allS}), model execution restarts just
after the \texttt{with} construct, with variable~$S$ bound to
the next choice picked in~\texttt{allS}.
As a first consistency check, we check that $S$ includes the program order:
\begin{verbatim}
empty po \ S as PoCons
\end{verbatim}
Notice that, to check for inclusion, we test the emptiness of relation
difference (operator ``\verb+\+'').
It remains to check that the \texttt{rf} relation of the execution candidate
is the same as the one defined by condition~\ref{rfcond}.
To that aim, we complement~$S$ with the constraint that initial
writes must precede all other events to the same location:
\begin{verbatim}
let S = S | loc & IW * (M \ IW)
\end{verbatim}
Observe that $S$ is no longer a total strict order. However, it is still a total
strict order when restricted to events that access a given location,
which is all that matters for condition~\ref{rfcond} to give a value
to all reads. As regards our SC model, we define \texttt{rf-S}
the read-from relation induced by~$S$ as follows:
\begin{verbatim}
let WRS = W * R & S & loc (* Writes from the past, same location *)
let rf-S = WRS \ (S;WRS) (* Most recent amongst them *)
\end{verbatim}
The definition is a two-step process: we first define
a relation~\texttt{WRS} from writes to reads (to the same location)
that follow them in~$S$. Observe that,
by complementing~$S$ with initial writes, we ensure that for any read~$r$
there exists at least one write~$w$ such that $(w,r) \in \texttt{WRS}$.
It then remains to filter out non-maximal writes in \texttt{WRS},
as we do in the definition of \texttt{rf-S}, by means of
the difference operator ``\verb+\+''.
We then check the equality of \texttt{rf} (pre-defined as part of the candidate
execution) and of \texttt{rf-S} by double inclusion:
\begin{verbatim}
empty rf \ rf-S as RfCons
empty rf-S \ rf as RfCons
\end{verbatim}
As an example, we show six attempts at \texttt{po}-compatible $S$~orders
for the non-SC outcome of the test~\atest{SB} in figure~\ref{sblamport}.
\begin{figure}[htp]
\caption{\label{sblamport}Failed attempts of SC scheduling orders~$S$.}
\begin{center}
\img{SB+L-00}\quad\img{SB+L-01}\quad\img{SB+L-02}\\
\img{SB+L-03}\quad\img{SB+L-04}\quad\img{SB+L-05}
\end{center}
\end{figure}
Observe that all attempts fail as \texttt{rf} and \texttt{rf-S}
are different in all diagrams.
We also show all successful SC scheduling orders in figure~\ref{sbok}.
\begin{figure}[htp]
\caption{\label{sbok}SC executions of test~\atest{SB}.}
\begin{center}
\img{SB+OK-00}\quad\img{SB+OK-01}\quad\img{SB+OK-02}\\
\img{SB+OK-03}\quad\img{SB+OK-04}\quad\img{SB+OK-05}
\end{center}
\end{figure}
For reference, we provide our complete model~\afile{lamport.cat}:
\verbatiminput{lamport.cat}
\subsection{Computing \label{sec:cos}coherence orders}
All the models seen so far include the file \afile{cos.cat}, which defines
the ``coherence relations'', written~\texttt{co}.
This section describes the file~\texttt{cos.cat}.
It can be skipped on first reading, as users may find it sufficient
to simply include the file.
For a given location~$x$ the coherence order is a total strict order on the
write events to location~$x$. The coherence relation~\texttt{co} is the union
of those orders for all locations.
In this section, we show how to compute all possible coherence orders for
a candidate execution.
We seize the opportunity to introduce advanced features of the \cat{}
language, such as functions and pattern matching over sets.
Possible coherence orders for a given location~$x$
are not totally arbitrary, in two respects:
\begin{enumerate}
\item The write events to location~$x$ include
the initial write event to location~$x$. The initial write to~$x$ must come
first in any coherence order for~$x$.
\item One of the writes to~$x$ performed by the test may have been declared
to be final by \herd{} machinery prior to model execution.
In that case, the final write to~$x$ must come last in any coherence order
for~$x$.
\end{enumerate}
See Sec.~\ref{sec:final:initial} for details on initial and final writes.
We can express the two conditions above for all locations of the program
as a relation \texttt{co0}:
\begin{verbatim}
let co0 = loc & (IW*(W\IW)|(W\FW)*FW)
\end{verbatim}
Where the pre-defined sets \texttt{IW} and~\texttt{FW} are the sets
of all initial and final writes respectively.
%TODO exemple of co0 on 2+2W
Then, assuming that $W_x$ is the set of all writes to location~$x$, one
can compute the set of all possible coherence orders for~$x$ with
the \texttt{linearisations} primitive as \texttt{linearisations($W_x$,co0)}.
In practice, we define a function that takes the set~$W_x$ as an argument:
\begin{verbatim}
let makeCoX(Wx) = linearisations(Wx,co0)
\end{verbatim}
The \texttt{linearisations} primitive is introduced in
Sec.~\ref{intro:linearisations}. It returns all topological sorts
of the events of the set~\texttt{Wx} that are compatible
with the relation~\texttt{co0}.
In fact, we want to compute the set of all possible \texttt{co} relations,
\emph{i.e.} all the unions of all the possible coherence orders for all
locations~$x$. To that end we use another \cat{} primitive:
\texttt{partition($S$)}, which takes a set of events as argument and
returns a set of sets of events $T = \{S_1,\ldots,S_n\}$, where each
$S_i$ is the set of all events in $S$ that act on location $L_i$,
and, of course, $S$ is the union $\bigcup_{i=1}^{i=n} S_i$.
Hence we shall compute the set of all \texttt{Wx} sets
as \texttt{partition(W)},
where \texttt{W} is the pre-defined set of all writes (including initial
writes).
For combining the effect of the \texttt{partition} and \texttt{linearisations}
primitives, we first define a \texttt{map} function that, given a set~$S=
\{e_1,\ldots,e_n\}$ and a function $f$, returns the set
$\{f(e_1),\ldots,f(e_n)\}$:
\begin{verbatim}
let map f =
let rec do_map S = match S with
|| {} -> {}
|| e ++ S -> f e ++ do_map S
end in
do_map
\end{verbatim}
The \texttt{map} function is written in curried style.
That is, one calls it as \texttt{map~$f$~$S$}, parsed
as \texttt{(map~$f$)~$S$}. More precisely, the left-most function
call~\texttt{(map~$f$)} returns a function.
Here it returns~\texttt{do\_map} with free variable \texttt{f} being bound
to the argument~$f$.
The definition of~\texttt{map} illustrates several new features:
\begin{enumerate}
\item The empty set constant~``\verb+{}+'',
and the set addition operator \texttt{$e$ ++ $S$} that returns the set~$S$
augmented with element~$e$.
\item Recursive function definitions. The function~\verb+do_map+
is recursive as it calls itself.
\item Pattern matching on sets.
This construct, similar to OCaml pattern matching on lists, discriminates
between empty (\verb+|| {} ->+~$e_0$) and non-empty
(\verb!|| e ++ es ->!~$e_1$) sets.
In the second case of a non-empty set, the expression~$e_1$ is evaluated
in a context extended with two bindings: a binding from the variable~\texttt{e}
to an arbitrary element of the matched set, and a binding from
the variable~\texttt{es} to the matched set minus the arbitrary element.
\end{enumerate}
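As a small additional illustration of set matching (our own example, not
part of \afile{cos.cat}), here is a function that computes the union of a
set of relations:
\begin{verbatim}
let rec union_all S = match S with
|| {} -> 0 (* the empty relation *)
|| r ++ rs -> r | union_all rs
end
\end{verbatim}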
Then, we generate the set of all possible coherence orders
for all locations~$x$ as follows:
\begin{verbatim}
let allCoX = map makeCoX (partition(W))
\end{verbatim}
Notice that \texttt{allCoX} is a set of sets of relations,
each element being the set of all possible coherence orders
for a specific~$x$.
We still need to generate all possible \texttt{co} relations,
that is, all unions of the possible coherence orders for
all locations~$x$. This can be done by another \cat{} function:
\texttt{cross}, which takes a set of sets $S = \{S_1, S_2, \ldots, S_n\}$ as
argument and returns all possible unions built by picking elements from each of
the $S_i$:
$$
\left\{\, e_1 \cup e_2 \cup \cdots \cup e_n \mid
e_1 \in S_1, e_2 \in S_2, \ldots, e_n \in S_n \,\right\}
$$
One may notice that if $S$ is empty, then \texttt{cross} should
return one relation exactly: the empty relation, \emph{i.e.} the neutral
element of the union operator.
This choice for \texttt{cross($\emptyset$)} is natural
when we define \texttt{cross} inductively:
$$
\texttt{cross}(S_1 \mathop{\texttt{++}} S) =
\bigcup_{e_1 \in S_1, t \in \texttt{cross}(S)} \left\{ e_1 \cup t \right\}
$$
In the definition above, we simply build
\texttt{cross($S_1 \mathop{\texttt{++}} S$)} by building the set
of all unions of one relation~$e_1$ picked in~$S_1$
and of one relation~$t$ picked in $\texttt{cross}(S)$.
So as to write~\texttt{cross},
we first define a classical \texttt{fold} function over sets:
given a set $S = \{ e_1, e_2, \ldots, e_n\}$, an initial value~$y_0$
and a function $f$~that takes a pair $(e,y)$ as argument,
\texttt{fold} computes:
$$
f (e_{i_1},f (e_{i_2}, \ldots, f(e_{i_n},y_0)))
$$
where $i_1, i_2, \ldots, i_n$ defines a permutation
of the indices $1, 2, \ldots, n$.
\begin{verbatim}
let fold f =
let rec fold_rec (es,y) = match es with
|| {} -> y
|| e ++ es -> fold_rec (es,f (e,y))
end in
fold_rec
\end{verbatim}
The function~\texttt{fold} is written in the same curried style as~\texttt{map}.
Notice that the inner function~\verb+fold_rec+ takes one argument.
However, this argument is a pair.
As a gentle example of \texttt{fold} usage, we could have
defined~\texttt{map} as:
\begin{verbatim}
let map f = fun S -> fold (fun (e,y) -> f e ++ y) (S,{})
\end{verbatim}
This example also introduces ``anonymous'' functions.
As a more involved example of \texttt{fold} usage, we
write the function~\texttt{cross}.
\begin{verbatim}
let rec cross S = match S with
|| {} -> { 0 } (* 0 is the empty relation *)
|| S1 ++ S ->
let ts = cross S in
fold
(fun (e1,r) -> map (fun t -> e1 | t) ts | r)
(S1,{})
end
\end{verbatim}
The function~\texttt{cross} is a recursive function over a set (of sets).
Its code follows the inductive definition given above.
Finally, we generate all possible \texttt{co} relations by:
\begin{verbatim}
let allCo = cross allCoX
\end{verbatim}
The file~\afile{cos.cat} goes on by iterating over \texttt{allCo} using
the \texttt{with $x$ from~$S$} construct:
\begin{verbatim}
with co from allCo
\end{verbatim}
See Sec.~\ref{intro:with} for details on this construct.
Once~\texttt{co} has been defined, one defines~\texttt{fr}, as well as
the internal and external variants of both relations:
\begin{verbatim}
(* From now, co is a coherence relation *)
let coi = co & int
let coe = co & ext
(* Compute fr *)
let fr = rf^-1 ; co
let fri = fr & int
let fre = fr & ext
\end{verbatim}
The pre-defined relation \texttt{ext} (resp. \texttt{int}) relates
events generated by different (resp. the same) threads.
\section{Producing pictures of executions}
The simulator \herd{} can be instructed to produce pictures of
executions.
Those pictures are instrumental in understanding and
debugging models.
It is important to understand that \herd{} does not produce pictures
by default. To get pictures one must instruct \herd{} to produce
pictures of some executions with the \opt{-show} option.
This option accepts specific keywords, its default being ``\opt{none}'',
instructing \herd{} not to produce any picture.
A frequently used keyword is ``\opt{prop}'' that means ``show the executions
that validate the proposition in the final condition''.
Namely, the final condition of a litmus test is a quantified
boolean proposition, as for instance ``\verb+exists (0:EAX=0 /\ 1:EAX=0)+'' at the end of test \atest{SB}.
But this is not enough: users also have to specify what to do with the picture:
save it in a file in the DOT format of the
\ahref{http://graphviz.org/}{\prog{graphviz} graph visualization software}, or
display the image,\footnote{This option requires
the Postscript visualiser \ahref{\urlgv}{\prog{gv}}.} or both.
One instructs \herd{} to save images with the \opt{-o }\textit{dirname} option,
where \textit{dirname} is the name of a directory, which must exist.
Then, when processing the file \textit{name}\texttt{.litmus},
\herd{} will create a file \textit{name}\texttt{.dot} into the
directory~\textit{dirname}.
For displaying images, one uses the \opt{-gv} option.
\label{sec:show}As an example,
so as to display the image of the non-SC behaviour of \atest{SB}, one
should invoke \herd{} as:
\begin{verbatim}
% herd7 -model tso-02.cat -show prop -gv SB.litmus
\end{verbatim}
\aname{sb:cluster}{As}
a result, users should see a window pop up and display this image:
\begin{center}\img{SB+CLUSTER}\end{center}
Notice that we got the PNG version of this image as follows:
\begin{verbatim}
% herd7 -model tso-02.cat -show prop -o /tmp SB.litmus
% dot -Tpng /tmp/SB.dot -o SB+CLUSTER.png
\end{verbatim}
That is, we applied the \prog{dot} tool from the
\ahref{\urlgraphviz}{\prog{graphviz}} package, using the appropriate option
to produce a PNG image.
One may observe that there are \verb+ghb+ arrows in the diagram.
This results from the \verb+show ghb+ instruction
at the end of the model file~\afile{tso-02.cat}.
\subsection{Graph modes}
The image \ahrefloc{sb:cluster}{above} differs markedly from
the one in Sec.~\ref{sb:image}, which describes the same execution
and is reproduced in Fig.~\ref{fig:sb}.
\begin{figure}
\caption{\label{fig:sb}The non-SC behaviour of \atest{SB} is allowed by TSO}
\begin{center}
\img{SB+TSO}
\end{center}
\end{figure}
\label{mode:example}In effect, \herd{} can produce three styles
of pictures: \prog{dot} clustered pictures, \prog{dot} free pictures,
and \prog{neato} pictures with explicit placement of the
events of one thread as a column.
The style is commanded by the \opt{-graph} option that accepts three
possible arguments: \opt{cluster} (default), \opt{free} and~\opt{columns}.
The following pictures show
the effect of graph styles on the \atest{SB}~example:
\begin{center}
\begin{tabular}{*{3}{p{.25\linewidth}}}
\multicolumn{1}{c}{\opt{-graph cluster}} &
\multicolumn{1}{c}{\opt{-graph free}} &
\multicolumn{1}{c}{\opt{-graph columns}}\\
\img{SB+SQUISHED} \qquad &
\img{SB+FREE}\qquad &
\img{SB+COLUMNS}
\end{tabular}
\end{center}
Notice that we used another option, \opt{-squished true}, which greatly reduces
the information displayed in nodes. Also notice that
the first two pictures are formatted by \prog{dot},
while the rightmost picture is formatted by \prog{neato}.
One may also observe that the ``\opt{-graph columns}'' picture does not
look exactly like Fig.~\ref{fig:sb}. For instance the
\textrel{ghb} arrows are thicker in the figure.
There are many parameters to control \prog{neato} (and~\prog{dot}),
many of which are accessible to \herd{} users by the means of appropriate
options. We do not intend to describe them all.
However, users can reproduce the style of the diagrams of this manual using
yet another feature of \herd: \ahrefloc{herd:configfile}{configuration files},
which contain settings for \herd{} options and are loaded with the
\opt{-conf~}\textit{name} option.
In this manual we mostly used the \afile{doc.cfg} configuration file.
As this file is present in the \herd{} distribution, users
can use the diagram style of this manual:
\begin{verbatim}
% herd7 -conf doc.cfg ...
\end{verbatim}
\subsection{\label{show:forbidden}Showing forbidden executions}
Images are produced or displayed once the model has been executed.
As a consequence,
forbidden executions won't appear by default.
Consider for instance the test \atest{SB+mfences},
where the \texttt{mfence} instruction is used to forbid
\atest{SB} non-SC execution. Running \herd{} as
\begin{verbatim}
% herd7 -model tso-02.cat -conf doc.cfg -show prop -gv SB+mfences.litmus
\end{verbatim}
will produce no picture, as the TSO model forbids the target execution
of~\textsf{SB+mfences}.
To get a picture, we can run \textsf{SB+mfences} on top of the minimal
model, a pre-defined model that allows all executions:
\begin{verbatim}
% herd7 -model minimal -conf doc.cfg -show prop -gv SB+mfences.litmus
\end{verbatim}
And we get the picture:
\begin{center}\img{SB+mfences}\end{center}
It is worth mentioning again that although the minimal model allows all
executions, the final condition
selects the displayed picture, as we have specified the
\opt{-show prop} option.
\label{name:check}The picture above shows \verb+mfence+ arrows, as all
fence relations are displayed by the minimal model.
However, it does not show the \verb+ghb+ relation, as the minimal
model knows nothing of it.
To display~\verb+ghb+ we could write another model file that would be just like
\afile{tso-02.cat}, with the checks erased.
The simulator \herd{} provides a simpler technique:
one can instruct \herd{} to ignore
either all checks (\opt{-through invalid}), or a selection of checks
(\opt{-skipchecks~\textit{name$_1$},\ldots,\textit{name$_n$}}).
Thus, either of the following two commands
\begin{verbatim}
% herd7 -through invalid -model tso-02.cat -conf doc.cfg -show prop -gv SB+mfences.litmus
% herd7 -skipchecks tso -model tso-02.cat -conf doc.cfg -show prop -gv SB+mfences.litmus
\end{verbatim}
will produce the picture we wish:
\begin{center}\img{SB+mfences+GHB}\end{center}
Notice that \verb+mfence+ and~\verb+ghb+ are displayed because
of the instruction ``\verb+show mfence ghb+'' (fence relations are not shown
by default);
while \opt{-skipchecks tso} works because the \afile{tso-02.cat} model
names its main check with ``\verb+as tso+''.
The image above is barely readable.
For such graphs with many relations, the \verb+cluster+ and~\verb+free+ modes
are worth a try. The commands:
\begin{verbatim}
% herd7 -skipchecks tso -model tso-02.cat -conf doc.cfg -show prop -graph cluster -gv SB+mfences.litmus
% herd7 -skipchecks tso -model tso-02.cat -conf doc.cfg -show prop -graph free -gv SB+mfences.litmus
\end{verbatim}
will produce the images:
\begin{center}
\begin{tabular}{p{.33\linewidth}@{\hspace*{6em}}p{.33\linewidth}}
\img{SB+mfences+CLUSTER}
&
\img{SB+mfences+FREE}
\end{tabular}
\end{center}
Namely, command line options are scanned left-to-right,
so that most of the settings of \afile{doc.cfg} are kept\footnote{The setting of \opt{showthread} is also changed, by the omitted \opt{-showthread true} command line option.}
(for instance thick \verb+ghb+ arrows), while the graph mode is overridden.
\section{\label{herd:language}Model definitions}
We describe our \cat{}~language for defining models.
The syntax of the language is given in BNF-like notation. Terminal
symbols are set in typewriter font (\synt{\T{like} \T{this}}).
Non-terminal symbols are set in italic font (\synt{\NT{like} \NT{that}}).
A~vertical bar \synt{\ldots\orelse\ldots}
denotes alternatives.
Square brackets \synt{\boption{}\ldots\eoption{}} denote optional components. Curly brackets
\synt{\brepet{}\ldots\erepet{}} denote zero,
one or several repetitions of the enclosed
components.
Parentheses \synt{\bparen{}\ldots\eparen{}} denote grouping.
Model source files may contain comments in the OCaml style
(\verb+(*+\ldots \verb+*)+, which can be nested), or line comments starting with
``\verb+//+'' and running until the end of the line.
\subsection{\label{overview}Overview}
The \cat{} language is much inspired by OCaml, featuring immutable bindings,
first-class functions, pattern matching, etc.
However, \cat{} is a domain specific language, with important differences
from OCaml.
\begin{enumerate}
\item
Base values are specialised: they are sets of events and relations
over events. There are also tags, akin to C~enumerations or OCaml
``constant'' constructors, and first-class functions. Moreover, events
can be extracted from sets, and pairs of events (elements of relations)
from relations.
There are two structured values: tuples of values and sets of values.
One should notice that primitive sets of events and structured sets of events are not the same
thing. In fact, the language prevents the construction of structured sets of events.
Similarly, there are no structured sets of elements of relations; there are only relations.
\item There is a distinction between expressions that evaluate
to some value, and instructions that are executed for their effect.
\end{enumerate}
A model, or \cat{} program, is a sequence of instructions.
At startup, pre-defined identifiers are bound to event sets and relations
over events.
Those pre-defined identifiers describe a candidate execution
(in the sense of the memory model).
Executing the model means allowing or forbidding that candidate
execution.
\subsection{\label{language:identifier}Identifiers}
\begin{syntax}
\NT{letter} \is \T{a} \ldots\T{z}
\orelse \T{A} \ldots\T{Z}
\sep
\NT{digit} \is \T{0} \ldots\T{9}
\sep
\NT{id} \is \NT{letter} \brepet{} \NT{letter} \orelse \NT{digit}
\orelse \T{\_} \orelse \T{.} \orelse \T{-} \erepet
\end{syntax}
Identifiers are rather standard: they are a sequence of letters, digits,
``\texttt{\_}'' (the underscore character), ``\texttt{.}'' (the dot character)
and ``\texttt{-}'' (the minus character),
starting with a letter.
Using the minus character inside identifiers may look a bit surprising.
We did so as to allow identifiers such as \texttt{po-loc}.
\label{sec:predef}At startup, pre-defined identifiers are bound to
event sets and to relations
between events.
Those pre-defined identifiers first describe the events of
the candidate execution as various sets, as described by the first table
of figure~\ref{predefset}.
\begin{figure}[htp]
\caption{\label{predefset}Pre-defined event sets.}
\begin{idtable}
\textrel{emptyset} & empty set of events \\
\textrel{W} & write events \\
\textrel{R} & read events \\
\textrel{M} & memory events &
we have $\textrel{M} = \textrel{W} \cup \textrel{R}$\\
\textrel{IW} & initial writes &
feed reads that read from the initial state\\
\textrel{FW} & final writes & writes that are observed at the end of test execution\\
\textrel{B} & branch events\\
\textrel{RMW} & read-modify-write events\\
\textrel{F} & fence events\\
\textit{NAME} & specific fence events & those depend on the test architecture\\
\end{idtable}
\begin{desctable}{architecture}{fence sets}
\textrel{X86} & \textrel{MFENCE}, \textrel{SFENCE}, \textrel{LFENCE}\\
\textrel{PPC} & \textrel{SYNC}, \textrel{LWSYNC}, \textrel{EIEIO}, \textrel{ISYNC}\\
\textrel{ARM} & \textrel{DMB}, \textrel{DMB.ST}, \textrel{DSB}, \textrel{DSB.ST}, \textrel{ISB}\\
\textrel{MIPS} & \textrel{SYNC}\\
\textrel{AArch64} & \textrel{DMB.SY}, \textrel{DMB.ST}, \textrel{DMB.LD}, \ldots
\textrel{DSB.SY}, \textrel{DSB.ST}, \textrel{DSB.LD}, \ldots\\
\end{desctable}
\end{figure}
Specific fence event sets depend on the test architecture;
their names are always uppercase and derive from the mnemonics of
the instructions that generate them.
The second table of figure~\ref{predefset} shows
a (non-exhaustive) list.
Other pre-defined identifiers are relations.
Most of those are the program order~\tid{po} and its refinements:
\begin{idtable}
$\po$ & program order & instruction order lifted to events \\
$\addr$ & address dependency & the address of the second event depends on
the value loaded by the first (read) event\\
$\data$ & data dependency & the value stored by the second (write)
event depends on
the value loaded by the first (read) event\\
$\ctrl$ & control dependency &
the second event is in a branch controlled by the value loaded by the
first (read) event\\
$\rmwr$ & read-exclusive write-exclusive pair &
relates the read and write events emitted
by matching successful load-reserve/store-conditional instructions,
or by atomic read-modify-write instructions.\\
$\amo$ & atomic modify &
relates the read and write events emitted
by atomic read-modify-write instructions.
\end{idtable}
Finally, a few pre-defined relations describe the execution
candidate structure and write-to-read communication:
\begin{idtable}
$\id$ & identity & relates each event to itself\\
$\locr$ & same location & events that touch the same address\\
$\extr$ & external & events from different threads\\
$\intr$ & internal & events from the same thread\\
$\rf$ & read-from & links a write $w$ to a read $r$ taking its value from $w$ \\
\end{idtable}
Some additional relations are defined by library files written in the \cat{}
language, see Sec.~\ref{sec:library}.
\subsection{\label{language:expression}Expressions}
Expressions are evaluated by \herd, yielding a value.
\begin{syntax}
\NT{expr} \is{} \T{0}
\alt \NT{id}
\alt \NT{tag}
\alt \T{(}\T{)} \orelse \T{(} \NT{expr} \T{,} \NT{expr} \brepet{} \T{,} \NT{expr} \erepet \T{)}
\alt \T{\{}\T{\}} \orelse \T{\{} \NT{expr} \brepet{} \T{,} \NT{expr} \erepet \T{\}}
\alt \NT{expr}\T{*} \orelse \NT{expr}\T{+} \orelse \NT{expr}\T{?}
\orelse \NT{expr}\T{\textasciicircum-1}
\alt \T{\textasciitilde}\NT{expr}
\alt \T{[}\NT{expr}\T{]}
\alt \NT{expr}\T{|}\NT{expr} \orelse
\NT{expr}\T{++}\NT{expr} \orelse
\NT{expr}\T{;}\NT{expr} \orelse
\NT{expr}\T{\textbackslash}\NT{expr} \orelse
\NT{expr}\T{\&}\NT{expr} \orelse
\NT{expr} \T{*} \NT{expr}
\alt \NT{expr} \NT{expr}
\alt \T{fun} \NT{pat} \T{->} \NT{expr}
\alt \T{let} \boption{} \T{rec} \eoption \NT{binding} \brepet{} \T{and} \NT{binding} \erepet{} \T{in} \NT{expr}
\alt \T{match} \NT{expr} \T{with} \NT{clauses} \T{end}
\alt \T{(}\NT{expr}\T{)} \orelse \T{begin} \NT{expr} \T{end}
\alt \T{instructions} \NT{id}\T{[}\NT{taglist}\T{]}
\sep
\sep
\NT{tag} \is \T{'} \NT{id}
\sep
\NT{taglist} \is \NT{tag} \orelse \NT{tag} \T{,} \NT{taglist}
\sep
\NT{pat} \is \NT{id} \orelse \T{(}\T{)} \orelse \T{(} \NT{id} \brepet{} \T{,} \NT{id} \erepet \T{)}
\sep
\NT{binding} \is \NT{valbinding} \orelse \NT{funbinding}
\sep
\NT{valbinding} \is \NT{id} \T{=} \NT{expr}
\sep
\NT{funbinding} \is \NT{id} \NT{pat} \T{=} \NT{expr}
\sep
\sep
\NT{clauses} \is \NT{tagclauses} \orelse \NT{setclauses}
\sep
\NT{tagclauses} \is \boption{} \T{||} \eoption \NT{tag} \T{->} \NT{expr}
\brepet{} \T{||} \NT{tag} \T{->} \NT{expr} \erepet
\boption \T{\_} \T{->} \NT{expr} \eoption
\sep
\NT{setclauses} \is \boption{} \T{||} \eoption \T{\{}\T{\}} \T{->} \NT{expr}
\T{||} \NT{id} \T{++} \NT{id} \T{->} \NT{expr}
\end{syntax}
\subsubsection*{Simple expressions}
Simple expressions are the empty relation (keyword~\synt{\T{0}}),
identifiers~\synt{\NT{id}} and tags~\synt{\NT{tag}}. Identifiers are bound to
values, either before the execution (see pre-defined identifiers in
Sec.~\ref{sec:predef}), or by the model itself. Tags are constants similar to
C enum values or OCaml constant constructors. Tags must be declared with the
\T{enum} instruction. We go back to \T{enum} and tags in Sec.~\ref{sec:enum}
and \ref{sec:bell}.
\subsubsection*{Tuples}
Tuples include a constant, the empty tuple \synt{\T{(}\T{)}},
and constructed tuples
\synt{\T{(} \NT{expr}_1 \T{,} \NT{expr}_2 \T{,}\ldots \T{,} \NT{expr}_n\T{)}},
with $n \geq 2$. In other words, there is no tuple of size one.
Syntax \synt{\T{(} \NT{expr} \T{)}} denotes grouping and has the same
value as~\synt{\NT{expr}}.
\subsubsection*{Explicit sets of values}
Explicit sets are written as the comma separated
list of their elements between curly braces:
\synt{\T{\{} \NT{expr}_1 \T{,} \NT{expr}_2 \T{,}\ldots \T{,} \NT{expr}_n\T{\}}},
with $n \geq 0$.
Sets are homogeneous, in the sense that a set holds elements of the same type.
In case the values \synt{\NT{expr}_k} are events, the result will be a primitive event set.
In case they are elements of relations, the result
will be a relation (and not a set of event pairs).
The empty set~\synt{\T{\{}\T{\}}} serves as both the empty set of events and the empty relation.
\subsubsection*{Operator expressions}
Most operators are overloaded and apply to event sets, relations and explicit sets.
However, by nature, some operators apply to relations only (such as sequence and transitive closure
below) or to sets only (such as the cartesian product). Additionally, an event in a context where
an event set is expected will be promoted to a singleton event set. The situation of an
elementary relation in a relation context is similar.
The transitive and reflexive-transitive closure of an expression are performed
by the postfix operators \T{+} and~\T{*}.
The postfix operator \T{\textasciicircum-1} performs relation inversion.
The construct \synt{\NT{expr}\T{?}} (option) evaluates to the union
of \NT{expr} value and of the identity relation.
Notice that postfix operators operate on relations only.
There is one prefix operator~\T{\textasciitilde} that performs
relation and set complement.
Finally, there is one last unary operator: \synt{\T{[}\NT{expr}\T{]}},
which evaluates \synt{\NT{expr}} to an event set and returns the identity
relation over this set.
Infix operators are
\T{|} (union), \T{++} (set addition),
\T{;} (sequence), \T{\&} (intersection), \T{\textbackslash} (set difference),
and~\T{*} (cartesian product).
Infix operators are listed in order of increasing precedence,
while postfix and prefix operators bind tighter than infix operators.
All infix operators are right-associative,
except set difference, which is left-associative, and cartesian product,
which is non-associative.
The union, intersection and difference operators apply to relations
and all kinds of sets. The cartesian product takes two event sets as arguments and returns
a relation.
The addition operator \synt{\NT{expr}_1 \T{++} \NT{expr}_2} operates on
sets: the value of \synt{\NT{expr}_2} must be a set of values~$S$,
and the operator returns the set~$S$ augmented with the value of
\synt{\NT{expr}_1}.
By exception, the arguments to the addition operator can also be an event
and an event set, or an elementary relation and a relation;
the result is then a new event set or a new relation, respectively.
For the record, given two relations $r_1$ and~$r_2$,
the sequence $r_1; r_2$ is defined
as $\{ (x,y) \mid \exists z, (x,z) \in r_1 \wedge (z,y) \in r_2\}$.
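As an illustration combining the bracket operator and sequence, the
following definition (the name \texttt{write-to-read-loc} is ours), assuming
the relation \texttt{po-loc} of Sec.~\ref{defuniproc}, relates each write to
the \texttt{po}-later reads of the same location:
\begin{verbatim}
let write-to-read-loc = [W] ; po-loc ; [R]
\end{verbatim}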
\subsubsection*{Function calls}
Function calls are written \synt{\NT{expr}_1 \NT{expr}_2}.
That is, functions are of arity one and the application operator
is left implicit. Notice that function application binds tighter
than all binary operators and looser than postfix operators.
Furthermore the implicit application operator is left-associative.
The \cat{} language has call-by-value semantics. That is,
the effective parameter
\synt{\NT{expr}_2} is evaluated before being bound to the
function formal parameter(s).
N-ary functions can be encoded either using tuples as arguments
or by currying (\emph{i.e.} as functions that return functions).
Considering binary functions, in the former case,
a function call is written
\synt{\NT{expr}_1 \T{(} \NT{expr}_2 \T{,} \NT{expr}_3\T{)}};
while in the latter case, a function call is written
\synt{\NT{expr}_1~\NT{expr}_2~\NT{expr}_3}
(which by left-associativity, is to be understood
as \synt{\T{(}\NT{expr}_1~\NT{expr}_2\T{)}~\NT{expr}_3}).
The two forms of function call are not interchangeable, using one or the
other depends on the definition of the function.
\subsubsection*{Functions}
Functions are first class values, as reflected by the anonymous
function construct \synt{\T{fun} \NT{pat} \T{->} \NT{expr}}.
A function takes one argument only.
In the case where this argument is a tuple, it may be destructured
by means of a tuple pattern. That is, \synt{\NT{pat}}
above is \synt{\T{(} \NT{id}_1 \T{,} \ldots \NT{id}_n\T{)}}.
For instance, here is a function that takes a tuple of
relations (or sets) as argument and returns their symmetric difference:
\begin{verbatim}
fun (a,b) -> (a\b)|(b\a)
\end{verbatim}
Functions have the usual static scoping semantics:
variables that appear free in a function body
(\synt{\NT{expr}} above) are bound to
the values they have at function creation time.
As a result one may also write the symmetric difference function
as follows:
\begin{verbatim}
fun a -> fun b -> (a\b)|(b\a)
\end{verbatim}
\subsubsection*{\label{bindings}Local bindings}
The local binding construct
\synt{\T{let} \boption{} \T{rec} \eoption{} \nt{bindings} \T{in} \NT{expr}}
binds the names defined by \nt{bindings}
for evaluating the expression \NT{expr}.
Both non-recursive and recursive bindings are allowed.
The function binding
\synt{\NT{id} \NT{pat} \T{=} \NT{expr}} is syntactic sugar
for \synt{\NT{id} \T{=} \T{fun} \NT{pat} \T{->} \NT{expr}}.
The construct
\begin{center}
\synt{\T{let} \NT{pat}_1 \T{=} \NT{expr}_1 \T{and} \ldots \T{and} \NT{pat}_n \T{=} \NT{expr}_n} \T{in} \NT{expr}
\end{center}
evaluates \synt{\nt{expr}_1,\ldots, \nt{expr}_n},
and binds the names in the patterns
\synt{\nt{pat}_1,\ldots, \nt{pat}_n} to the resulting values.
The bindings for \synt{\nt{pat} \T{=} \nt{expr}} are as follows:
if \nt{pat} is \T{(}\T{)}, then \nt{expr} must evaluate to the empty
tuple;
if \nt{pat} is \synt{\nt{id}} or \synt{\T{(}\nt{id}\T{)}},
then \nt{id} is bound to the value of~\synt{\nt{expr}};
if \nt{pat} is a proper tuple pattern
\synt{\T{(}\nt{id}_1\T{,}\ldots \T{,}\nt{id}_n\T{)}} with $n \geq 2$,
then \synt{\nt{expr}} must evaluate to a tuple value of size~$n$
$(v_1,\ldots,v_n)$ and the names $\nt{id}_1,\ldots,\nt{id}_n$ are
bound to the values $v_1,\ldots,v_n$.
By exception, in the case $n=2$, the expression \synt{\nt{expr}} may evaluate to
an elementary relation. If so, the value behaves as a tuple of arity two.
\aname{letrec}{The} construct
\begin{center}
\synt{\T{let} \T{rec} \NT{pat}_1 \T{=} \NT{expr}_1 \T{and} \ldots \T{and} \NT{pat}_n \T{=} \NT{expr}_n} \T{in} \NT{expr}
\end{center}
computes the least fixpoint of the equations
$\nt{pat}_1 = \nt{expr}_1$,\ldots, $\nt{pat}_n = \nt{expr}_n$.
It then binds the names in the patterns
\synt{\nt{pat}_1,\ldots, \nt{pat}_n} to the
resulting values.
The least fixpoint computation applies to set and relation values
(using inclusion for ordering), and to
functions (using the usual definition ordering).
\subsubsection*{Pattern matching over tags}
The syntax for pattern matching over tags is:
\begin{center}
\synt{\T{match} \NT{expr} \T{with} \NT{tag}_1 \T{->} \NT{expr}_1
\T{||} \cdots \T{||} \NT{tag}_n \T{->} \NT{expr}_n
\T{||} \T{\_} \T{->} \NT{expr}_d
\T{end}}
\end{center}
The value of the match expression is computed as follows: first evaluate
\synt{\nt{expr}} to some value~$v$, which must be a tag~$t$.
Then $v$ is compared with the tags \synt{\nt{tag}_1,\ldots,\nt{tag}_n},
in that order.
If some tag pattern~\synt{\nt{tag}_i} equals~$t$, then the value of the
match is the value of the corresponding expression~\synt{\nt{expr}_i}.
Otherwise, the value of the match is the value of the default
expression~\synt{\nt{expr}_d}.
As the default clause~\synt{\T{\_} \T{->} \NT{expr}_d} is optional,
the match construct may fail.
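As a hypothetical illustration (the \T{enum} construct is detailed in
Sec.~\ref{sec:enum}), assuming the tags \texttt{'acq} and \texttt{'rel}
declared below, one may map tags to event sets:
\begin{verbatim}
enum kinds = 'acq || 'rel
let events-of k = match k with
|| 'acq -> R
|| 'rel -> W
|| _ -> M
end
\end{verbatim}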
\subsubsection*{Pattern matching over sets}
The syntax for pattern matching over sets is:
\begin{center}
\synt{\T{match} \NT{expr} \T{with}
\T{\{}\T{\}} \T{->} \NT{expr}_1
\T{||} \NT{id}_1 \T{++} \NT{id}_2 \T{->} \NT{expr}_2
\T{end}}
\end{center}
The value of the match expression is computed as follows: first evaluate
\synt{\nt{expr}} to some value~$v$, which must be a set of values.
If $v$ is the empty set, then the value of the match is the
value of the corresponding expression~\synt{\nt{expr}_1}.
Otherwise, $v$ is a non-empty set; let $v_e$ be some element in~$v$
and $v_r$ be the set~$v$ minus the element~$v_e$.
The value of the match is then the value of \synt{\nt{expr}_2} in a context
where \synt{\nt{id}_1} is bound to~$v_e$ and \synt{\nt{id}_2} is bound
to~$v_r$.
The construct also applies to primitive event sets and relations.
If the matched expression is non-empty, then the bound element is an event
or an element of a relation, respectively. One easily rebuilds an event set
(or a relation) for instance by using the singleton construct as
\synt{\T{\{} \NT{id}_1 \T{\}}}.
\subsubsection*{Parenthesised expressions}
The expression \synt{\T{(}\NT{expr}\T{)}}
has the same value as \synt{\NT{expr}}.
Notice that a parenthesised expression
can also be written as \synt{\T{begin} \NT{expr} \T{end}}.
\subsection{\label{language:instruction}Instructions}
Instructions are executed for their effect.
There are three kinds of effects: adding new bindings,
checking a condition, and specifying relations that are shown in pictures.
\begin{syntax}
\NT{instruction} \is{} \T{let} \boption{} \T{rec} \eoption \NT{binding} \brepet{} \T{and} \NT{binding} \erepet{}
\alt \boption \T{flag} \eoption \NT{check} \NT{expr} \boption \T{as} \NT{id}\eoption
\alt \T{enum} \NT{id} \T{=} \boption \T{||} \NT{tag}
\brepet \T{||} \NT{tag} \erepet
\alt \T{procedure} \NT{id} \NT{pat} \T{=} \brepet \NT{instruction} \erepet \T{end}
\alt \T{call} \NT{id} \NT{expr} \boption \T{as} \NT{id}\eoption
\alt \T{show} \NT{expr} \T{as} \NT{id}
\alt \T{show} \NT{id} \brepet \T{,} \NT{id} \erepet
\alt \T{unshow} \NT{id} \brepet \T{,} \NT{id} \erepet
\alt \T{forall} \NT{id} \T{in} \NT{expr} \T{do} \brepet{} \NT{instruction} \erepet \T{end}
\alt \T{with} \NT{id} \T{from} \NT{expr}
\alt \T{include} \nt{string}
\alt \T{if} \T{variant} \nt{string} \brepet \NT{instruction} \erepet \boption \T{else} \brepet \NT{instruction} \erepet \eoption \T{end}
\sep
\NT{check} \is \NT{checkname} \orelse \T{\textasciitilde} \NT{checkname}
\sep
\NT{checkname} \is \T{acyclic} \orelse \T{irreflexive} \orelse \T{empty}
\end{syntax}
\subsubsection*{Bindings}
The \T{let} and \T{let}~\T{rec} constructs bind value names for the rest
of model execution.
See the subsection on \ahrefloc{bindings}{bindings}
in Section~\ref{language:expression}
for additional information on the syntax and semantics of bindings.
Recursive definitions compute fixpoints of relations.
For instance, the following fragment computes the transitive closure of
all communication relations:
\begin{verbatim}
let com = rf | co | fr
let rec complus = com | (complus ; complus)
\end{verbatim}
Notice that the instruction \verb-let complus = (rf|co|fr)+- is equivalent.
Notice that \herd{} assumes that recursive definitions are well-formed,
\emph{i.e.} that they yield an increasing functional.
The result of ill-formed definitions is undefined.
Although \herd{} features recursive functions, those cannot be used
to compute a transitive closure, due to the lack of a construct
to, say, test relation equality. Nevertheless, one can
write a generic transitive closure
function by using a local recursive binding:
\begin{verbatim}
let tr(r) = let rec t = r | (t;t) in t
\end{verbatim}
Again, notice that the instruction \verb-let tr (r) = r+- is equivalent.
Thanks to pattern matching constructs,
recursive functions are useful to compute over sets (and tags).
For instance, here is the definition of a function \texttt{power} that computes
power sets:
\begin{verbatim}
let rec power S = match S with
|| {} -> { {} }
|| e ++ S ->
let rec add_e RR = match RR with
|| {} -> { }
|| R ++ RR -> R ++ (e ++ R) ++ add_e RR
end in
add_e (power S)
end
\end{verbatim}
\subsubsection*{\label{sec:check}Checks}
The construct
\begin{center}\synt{\NT{check} \NT{expr}}\end{center}
evaluates \nt{expr} and applies the check \nt{check}.
There are six checks: the three basic ones, namely acyclicity (keyword~\T{acyclic}),
irreflexivity (keyword~\T{irreflexive})
and emptiness (keyword~\T{empty}); and their
negations.
If the check succeeds, execution goes on. Otherwise, execution stops.
\label{name:check:def}The performance of a
check can optionally be named by appending
\synt{\T{as} \NT{id}} after it.
This feature permits skipping some checks at the user's will,
thanks to the \ahrefloc{skipchecks}{\opt{-skipchecks~}\nt{id}}
command line~option.
A check can also be flagged, by prefixing it with the \T{flag}
keyword. Flagged checks must be named with the \T{as} construct.
Failed flagged checks do \emph{not} stop execution.
Instead, successful flagged checks are recorded under their name,
for \herd{} machinery to handle flagged executions later.
Flagged checks are useful for models that define conditions
over executions that impact the semantics of the whole program.
This is typically the case of data races.
Let us assume that some relation \verb+race+ has been defined,
such that a non-empty \verb+race+ relation in some execution
would make the whole program undefined. We would then write:
\begin{verbatim}
flag ~empty race as undefined
\end{verbatim}
Then, \herd{} will indicate in its output that some
executions have been flagged as \verb+undefined+.
\subsubsection*{Procedure definition and call}
Procedures are similar to functions except that they have no results:
the body of a procedure is a list of instructions
and the procedure will be called for the effect of executing
those instructions. Intended usage of procedures is to define checks
that are executed later. However, the body of a procedure may
consist of any kind of instructions.
Notice that procedure calls can be named with the \T{as} keyword.
The intention is to control the performance of procedure calls
from the command line, exactly as for checks (see
\ahrefloc{name:check:def}{above}).
As an example of a procedure,
one may define the following \verb+uniproc+ procedure with
no arguments:
\begin{verbatim}
procedure uniproc() =
let com = fr | rf | co in
acyclic com | po
end
\end{verbatim}
Then one can perform the acyclicity check (see
\ahrefloc{sec:check}{previous section}) by executing the instruction:
\begin{verbatim}
call uniproc()
\end{verbatim}
As a result the execution will stop if the acyclicity check fails,
or continue otherwise.
Procedures are lexically scoped as functions are.
Additionally, the bindings performed during the execution of a procedure call
are discarded when the procedure returns; all other effects performed
(namely flags and shows) are retained.
\subsubsection*{Show (and unshow) directives}
\label{show:def}The constructs:
\begin{center}
\synt{\T{show} \NT{id} \brepet \T{,} \NT{id} \erepet}\quad{and}\quad\synt{\T{unshow} \NT{id} \brepet \T{,} \NT{id} \erepet}
\end{center}
take (non-empty, comma-separated) lists of identifiers as arguments.
The \T{show} construct adds the present values of the identifiers to the
relations shown in pictures.
The \T{unshow} construct removes the identifiers from the shown relations.
The more sophisticated construct
\begin{center}\synt{\T{show} \NT{expr} \T{as} \NT{id}}\end{center}
evaluates \nt{expr} to a relation, which will be shown in pictures with
label~\nt{id}.
Hence \synt{\T{show} \nt{id}} can be viewed as a shorthand
for \synt{\T{show} \nt{id} \T{as} \nt{id}}.
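For instance, one may show the union of the communication relations under a
single label (\verb+fr+, \verb+rf+ and~\verb+co+ being predefined):
\begin{verbatim}
show fr | rf | co as com
\end{verbatim}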
\subsubsection*{Iteration over sets}
The \T{forall} iteration construct
permits the iteration of checks (in fact, of any kind of instructions)
over a set. Syntax is:
\begin{center}
\T{forall} \NT{id} \T{in} \NT{expr} \T{do} \nt{instructions} \T{end}
\end{center}
The expression \synt{\nt{expr}} must evaluate to a set~$S$.
Then, the list of instructions \nt{instructions} is executed
for all bindings of the name~\nt{id} to some element of~$S$.
In practice, as failed checks stop execution, this amounts
to checking the conjunction of the checks performed by \nt{instructions}
for all the elements of~$S$.
Similarly to procedure calls,
the bindings performed during the execution of an iteration
are discarded when the iteration ends; all other effects performed are
retained.
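As an illustration, here is a sketch that iterates an emptiness check over
the tags of a hypothetical enumeration \verb+kinds+ (see the \T{enum}
construct below), assuming a function \verb+events_of+ defined elsewhere in
the model:
\begin{verbatim}
forall k in kinds do
  empty (events_of k) & W as no_tagged_write
end
\end{verbatim}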
\subsubsection*{Candidate execution extension}
This construct permits the extension of the current candidate
execution by one binding.
Syntax is \synt{\T{with} \NT{id} \T{from} \NT{expr}}.
The expression \nt{expr} is evaluated to a set~$S$.
Then the remainder of the model is executed for each choice
of an element~$e$ in~$S$, in a context extended by a binding
of the name~\nt{id} to~$e$.
An example of usage of this construct is described in Sec.~\ref{intro:with}.
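As a sketch, assuming a set \verb+candidates+ of candidate relations computed
earlier in the model, the instruction
\begin{verbatim}
with co from candidates
\end{verbatim}
executes the remainder of the model once for each element of
\verb+candidates+, with \verb+co+ bound to that element.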
\subsubsection*{Model inclusion}
The construct \synt{\T{include} \T{"}\nt{filename}\T{"}} is interpreted as
the inclusion of the model contained in the file whose name is given as
an argument to the \synt{\T{include}} instruction.
In practice, the list of instructions defined by the included model file
is executed.
The string argument is delimited by double quotes ``\verb+"+'',
which, of course, are not part of the filename.
Files are searched according to \herd{} rules --- see Sec.~\ref{herd:searchpath}.
Inclusion is performed only \emph{once}. Subsequent \synt{\T{include}} instructions
are not executed and a warning is issued.
\subsubsection*{Conditional execution}
The conditional instruction allows some control over model execution by setting variants.
\ahrefloc{opt:variant}{Variants} are predefined tags set on the
command line or in configuration files.
The
construct \synt{\T{if} \T{variant} \T{"}\nt{tag}\T{"} \nt{instructions}_1 \T{else} \nt{instructions}_2 \T{end}}
behaves as follows: if the variant \textit{tag} is set, then the
(possibly empty) list of instructions \synt{\nt{instructions}_1} is
executed, otherwise the (optional, possibly empty) list of
instructions \synt{\nt{instructions}_2} is executed. If \textit{tag}
is not a recognised variant tag, a warning is issued and the
non-existent variant is assumed to~be~unset.
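For instance, a model could select between two checks depending on a
hypothetical variant tag \texttt{sc}:
\begin{verbatim}
if variant "sc"
  acyclic po | rf | co | fr as strong_sc
else
  acyclic po-loc | rf | co | fr as sc_per_location
end
\end{verbatim}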
\subsection*{Bell extensions}
Users can attain more genericity in their models by defining a {\tt bell} file,
as an addendum, or rather a preamble, to a {\tt cat} file.
\subsubsection*{\label{sec:enum}Enumerations}
The \T{enum} construct defines a set of enumerated values or tags. Syntax is
\begin{center}
\synt{\T{enum} \NT{id} \T{=} \NT{tag}_1 \T{||} \cdots \T{||} \NT{tag}_n}
\end{center}
The construct has two main effects.
It first defines the tags \synt{\nt{tag}_1,\ldots,\nt{tag}_n}.
Notice that tags do not exist before being defined; that is,
evaluating the expression \nt{tag} is an error without a prior
\T{enum} that defines the tag~\nt{tag}. Tags are typed, in the sense
that they belong to the tag type \nt{id} and that tags from
different types cannot be members of the same set.
The second effect of the construct is to define a set of tags~\nt{id}
as the set of all tags listed in the construct.
That is, the \T{enum} construct performs the binding of~\nt{id}
to \synt{\T{\{} \nt{tag}_1,\ldots,\nt{tag}_n\T{\}}}.
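For instance, the following hypothetical declaration defines two tags of
type \texttt{strength}:
\begin{verbatim}
enum strength = 'weak || 'strong
\end{verbatim}
After this declaration, \verb+strength+ is bound to the
set \verb+{'weak, 'strong}+.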
\emph{Scopes} are a special case of enumeration: the construct {\tt enum
scopes} must be used to define hierarchical models such as Nvidia GPUs.
% (see e.g.~\cite{abd15}).
An {\tt enum scopes} declaration must be paired with two functions {\tt narrower} and {\tt wider} that implement the hierarchy amongst scopes. For example:
\begin{verbatim}
enum scopes = 'discography || 'I || 'II || 'III || 'IV
let narrower(t) = match t with
|| 'discography -> {'I, 'II, 'III, 'IV}
end
let wider(t) = match t with
|| 'I -> 'discography
|| 'II -> 'discography
|| 'III -> 'discography
|| 'IV -> 'discography
end
\end{verbatim}
Here we define five scopes, where the first one, {\tt discography}, is wider
than all the others.
\subsubsection*{Instructions}
The predefined sets of events \textrel{W}, \textrel{R}, \textrel{RMW},
\textrel{F}, and \textrel{B} can be \emph{annotated} with user-defined tags
(see Sec.~\ref{sec:enum}).
The construct
\begin{center}
\T{instructions} \NT{id} \T{[} \NT{taglist} \T{]}
\end{center}
takes the identifier of a pre-defined set and a possibly empty, square-bracketed
list of tags.
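For instance, building on the hypothetical \texttt{strength} enumeration
above, one may declare that fence events can bear those annotations (a
sketch):
\begin{verbatim}
instructions F[{'weak,'strong}]
\end{verbatim}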
The primitive {\tt tag2instrs} yields, given a tag {\tt 't}, the set of
instructions bearing the annotation {\tt 't}, provided the tag was previously
declared in an enumeration.
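As a sketch, still assuming the hypothetical \texttt{strength} enumeration
above:
\begin{verbatim}
let strong_instrs = tag2instrs 'strong
\end{verbatim}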
The primitive {\tt tag2scope} yields, given a tag {\tt 't}, the relation
between instructions TODO
\subsection{\label{language:model}Models}
\begin{syntax}
\NT{model} \is \boption \NT{id} \eoption \boption \NT{model-comment} \eoption \brepet \NT{instruction} \erepet
\sep
\NT{model-comment} \is \NT{id} \orelse \nt{string}
\end{syntax}
A model is a list of instructions preceded by an optional architecture
specification and an optional comment.
Architecture specification is a name that follows \herd{} conventions for identifiers; valid architecture names are, \emph{e.g.}, \texttt{AArch64}, \texttt{PPC}, etc. Identifiers that are not valid architecture names are silently ignored.
The following \synt{\NT{model-comment}} can be either a name or a string enclosed in double quotes~``\verb+"+''.
If only one name is present, it will act both as tentative architecture specification and as a comment.
When present, model architecture specifications will be checked against test architectures, see also option~\ahrefloc{opt:archcheck}{\opt{-archcheck}}.
Models operate on candidate executions
(see Sec.~\ref{sec:predef}):
instructions are executed in sequence,
until one instruction stops execution, or until the end of the instruction
list is reached. In the latter case, the model accepts the execution.
The accepted execution is then passed over to the rest of the \herd{}
engine, in order to collect final states of locations
and to display pictures. Notice that the \synt{\NT{model-comment}} will appear
in picture legends.
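For instance, a minimal model file might read as follows (a hypothetical
sketch, where \texttt{PPC} acts as the architecture specification and the
string as the comment that will appear in picture legends):
\begin{verbatim}
PPC "A minimal Power model"
include "cos.cat"
acyclic po-loc | rf | fr | co as uniproc
\end{verbatim}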
\iffalse
\subsubsection*{Model options}
Model options control some experimental features of \herd.
More precisely, by default, \herd{} includes a complete
coherence order relation in every candidate execution,
and does not represent initial writes by plain memory
write events. Said otherwise, by default,
model files have options \T{withco} and \T{withoutinit}.
The generation of all possible coherence orders by \herd{} engine
is a source of inefficiency that can be alleviated by having the
model itself compute the sub-relation of \texttt{co} that is really useful.
Such models must have option \T{withoutco}, so as to
prevent \herd{} engine from generating all coherence orders.
Instead, \herd{} will represent initial writes as plain write events
(\emph{i.e.} option \T{withoutco} implies \T{withinit}),
identify last writes in coherence orders, and pass the model a
reduced~\texttt{co} relation, named \texttt{co0}, that will,
for any memory location~$x$,
relate the initial write to~$x$ to all writes to~$x$, and all writes
to~$x$ to the final write to~$x$.
It is then the model's responsibility to compute the remainder
of \texttt{co} from the program read events.
The model \ahref{uniproccat.txt}{\texttt{uniproccat.cat}} from the distribution
gives an example of such an advanced model.
The option \T{withinit} can also be given alone so as to instruct
\herd{} engine to represent initial writes as plain write events.
In such a situation, \herd{} will compute complete coherence
orders~\texttt{co} that include those explicit initial writes as
minimal elements.
Observe that the representation of initial writes as events
can be also controlled from the
command-line (see option~\ahrefloc{opt:initwrites}{\opt{-initwrites}})
and that command line settings override model options.
\fi
\subsection{\label{sec:primitive}Primitives}
TODO:
\subsection{\label{sec:library}Library}
\subsubsection*{Standard library}
The standard library is a \cat{} file~\texttt{stdlib.cat}
which all models include by default.
It defines a few convenient relations that are thus
available to all models.
\begin{idtable}
$\poloc$ & \po{} restricted to the same address &
events are in \po{} and touch the same address, namely $\po \cap \locr$\\
$\rfe$ & external read-from & read-from by different threads, namely $\rf \cap \extr$\\
$\rfi$ & internal read-from & read-from by the same thread, namely $\rf \cap \intr$\\
\end{idtable}
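For reference, these relations admit simple definitions in terms of the
predefined \verb+loc+, \verb+ext+ and~\verb+int+ relations; here is a sketch
consistent with the table above:
\begin{verbatim}
let po-loc = po & loc
let rfe = rf & ext
let rfi = rf & int
\end{verbatim}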
\subsubsection*{Coherence orders}
\bgroup\let\rln\tid
For most models, a complete list of communication relations would
also include \co{} and~\fr{}.
Those can be defined by including the file \texttt{cos.cat}
(see Sec.~\ref{sec:cos}).
\begin{idtable}
$\co$ & coherence & total strict order over writes to the same address \\
$\fr$ & from-read & links a read $r$ to a write $w'$ $\co$-after the write $w$ from which $r$ takes its value \\
\coi, \fri & internal communications &
communication between events of the same thread\\
\coe, \fre & external communications &
communication between events of different threads
\end{idtable}
Notice that the internal and external sub-relations of \co{} and~\fr{}
are also defined.
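For reference, \fr{} and the sub-relations of the table admit the following
definitions (a sketch consistent with the descriptions above; the actual
definitions are those of \texttt{cos.cat}):
\begin{verbatim}
let fr = rf^-1 ; co
let coi = co & int
let coe = co & ext
let fri = fr & int
let fre = fr & ext
\end{verbatim}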
\egroup
\subsubsection*{Fences}
\bgroup\let\rln\tid
Fence relations denote the presence of a specific
fence (or barrier) in-between two events.
Those can be defined by including architecture specific files.
\begin{desctable}{file}{relations}
x86fences.cat & \mfence{}, \sfence, \lfence{}\\
ppcfences.cat & \sync, \lwsync, \eieio, \isync, \ctrlisync{}\\
armfences.cat & \dsb, \dmb, \dsbst, \dmbst, \isb{}, \ctrlisb{}\\
mipsfences.cat & \sync\\
aarch64fences.cat & \ldots
\end{desctable}
In other words, models for, say, ARM machines should include the following
instruction:
\begin{verbatim}
include "armfences.cat"
\end{verbatim}
Notice that for the Power (PPC) (resp.\ ARM) architecture,
an additional relation \ctrlisync{} (resp.\ \ctrlisb) is defined.
The relation \ctrlisync{} reads ``control + \isync{}''.
It means that the branch to the instruction that generates the
second event additionally contains
an \texttt{isync} fence preceding that instruction.
For reference, here is a possible definition of \ctrlisync:
\begin{verbatim}
let ctrlisync = ctrl & (_ * ISYNC); po
\end{verbatim}
One may define all fence relations by including the file
\texttt{fences.cat}. As a result, fence relations that are
relevant to the architecture of the test being simulated are properly defined,
while irrelevant fence relations are the empty relation.
This feature proves convenient for writing generic models that apply
to several concrete architectures.
\egroup
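For instance, a generic model might include \texttt{fences.cat} and build a
cross-architecture strong-fence relation; on architectures that lack some of
these fences, the corresponding relations are simply empty (a sketch):
\begin{verbatim}
include "fences.cat"
let strong-fence = mfence | sync | dmb
\end{verbatim}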
\section{Usage of \herd}
\subsection{Arguments}
The command \herd{} handles its arguments like \litmus{} does.
That is, \herd{} interprets its arguments as file names.
Those files contain either a single litmus test,
when they have the extension \file{.litmus}, or a list of file names,
when prefixed by \file{@}.
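For instance (the test and list file names are hypothetical):
\begin{verbatim}
% herd7 SB.litmus
% herd7 @all-tests
\end{verbatim}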
\subsection{Options}
There are many command line options.
We describe the most useful ones:
\paragraph*{General behaviour}
\begin{description}
\item[{\tt -version}] Show version number and exit.
\item[{\tt -libdir}] Show installation directory and exit.
\item[{\tt -I <name>}] Add directory~\opt{name} at the end of
\prog{herd} \ahrefloc{herd:searchpath}{search path}. If the given directory
starts with \texttt{+} it is taken relative to \herd{} library directory.
\item[{\tt -v}] Be verbose, can be repeated to increase verbosity.
\item[{\tt -q}] Be quiet, suppress any diagnostic message.
\item[{\tt -conf <name>}] Read configuration file~\opt{name}.
\ahrefloc{herd:configfile}{Configuration files} have a very simple syntax:
a line ``\textit{opt}\texttt{ }\textit{arg}'' has the same effect as
the command-line option ``\texttt{-}\textit{opt} \textit{arg}''.
\item[{\tt -o <dest>}] Output files into directory \opt{<dest>}.
Notice that \opt{<dest>} must exist.
At the moment \herd{} may output one \texttt{.dot} file per processed test:
the file for test \textit{base}\texttt{.litmus}
is named \textit{base}\texttt{.dot}.
By default \herd{} does not generate \texttt{.dot} files.
\item[{\tt -suffix <suf>}] Change the name of \texttt{.dot} files
into \textit{base}\textit{suf}\texttt{.dot}. Useful when several \texttt{.dot} files derive from the same test. Default is the empty string (no suffix).
\item[{\tt \aname{opt:gv}{-gv}}] Fork the \ahref{\urlgv}{\prog{gv} Postscript viewer} to display execution diagrams.
\item[{\tt \aname{opt:evince}{-evince}}] Fork the evince document viewer to display execution diagrams. This option provides an alternative to the
{\tt gv} viewer.
\item[{\tt -dumpes <bool>}]
Dump generated event structures and exit. Default is \opt{false}.
Event structures will be dumped in a \texttt{.dot} file whose
name is determined as usual --- See options \opt{-o} and \opt{-suffix} above.
Optionally the event structures can be displayed with the \opt{-gv} option.
\item[{\tt -unroll <int>}] The setting \opt{-unroll }$n$ performs backward
jumps $n$ times. This is a workaround for one of the main limitations of
\herd{}: \herd{} does not really handle loops. Default is~\opt{2}.
\item[{\tt -hexa <bool>}] Print numbers in hexadecimal. Default is \opt{false}
(numbers are printed in decimal).
\end{description}
\paragraph{Engine control}
The main purpose of \herd{} is to run tests on top of memory models.
For a given test, \herd{} performs a three stage process:
\begin{enumerate}
\item Generate candidate executions.
\item For each candidate execution, run the model.
The model may reject or accept the execution.
\item For each candidate execution that the model accepts,
record observed locations and, if so instructed,
a diagram of the execution.
\end{enumerate}
We now describe options that control those three stages.
\begin{description}
\item[{\tt -model (cav12|minimal|uniproc|<filename>.cat)}]
Select the model; this option accepts one tag or one file name
with extension~\texttt{.cat}.
Tags instruct \herd{} to select an internal model,
while file names are read for a model definition.
Documented model tags are:
\begin{itemize}
\item \opt{cav12}, the model of~\cite{mms12} (Power);
\item \opt{minimal}, the minimal model that allows all executions;
\item \opt{uniproc}, the uniproc model that checks single-thread correctness.
\end{itemize}
In fact, \herd{} accepts potentially infinitely many models,
as models can be given in text files, in an ad-hoc language described in
Sec.~\ref{herd:language}.
The \herd{} distribution includes several such models:
\afile{minimal.cat} and \afile{uniproc.cat}
are the text file versions of the homonymous internal models, but may
produce pictures that show different relations.
The \herd{} distribution also includes models for a variety of architectures
and several models for the C~language.
Model files are searched according to the same
\ahrefloc{herd:searchpath}{rules}
as configuration files.
Some architectures have a default model:
the \opt{arm.cat} model for ARM, the \opt{ppc.cat} model for PPC,
\opt{x86tso.cat} for X86, and \opt{aarch64.cat} for AArch64 (ARMv8).
\item[{\tt -through (all|invalid|none)}]
Let additional executions reach the final stage of \herd{} engine.
This option permits users to generate pictures of forbidden executions, which
are otherwise rejected at an early stage of \herd{} engine --- see Sec.~\ref{show:forbidden}.
Namely, the default~``\opt{none}'' lets only valid (according to the
active model) executions through.
The behaviour of this option differs between internal and text file models:
\begin{itemize}
\item For internal models:
the tag~\opt{all} lets all executions go through,
while the tag~\opt{invalid} rejects executions that violate uniproc,
but lets other forbidden executions go through.
\item For text file models: the tags \opt{all} and~\opt{invalid} let all
executions go through. For such models, a more precise control over the
executions that reach \herd{} final phase can be achieved
with the option~\opt{-skipchecks} --- see the next option.
\end{itemize}
Default is~\opt{none}.
\item[{\tt -skipchecks <\textit{name}$_1$,\ldots,\textit{name}$_n$>}]
\aname{skipchecks}{This option}
applies to text file models. It instructs \herd{} to ignore
the outcomes of the given checks. For the option to operate, checks must
be named in the model file with the \texttt{as }\textit{name} construct ---
see Sec.~\ref{name:check:def}.
Notice that the arguments to \opt{-skipchecks} options cumulate.
That is, ``\opt{-skipchecks }\textit{name}$_1$ \opt{-skipchecks }\textit{name}$_2$'' acts like ``\opt{-skipchecks }\textit{name}$_1$\texttt{,}\textit{name}$_2$''.
\item [{\tt -strictskip <bool>}] Setting this option (\opt{-strictskip true})
changes the behaviour of the previous option \opt{-skipchecks}:
it lets executions go through when the skipped checks yield
false and the unskipped checks yield true. This option comes in handy
when one wants to observe the executions that fail one (or several) checks
while passing the others. Default is \opt{false}.
\item[{\tt -optace <bool>}] Optimise the axiomatic candidate execution stage.
When enabled by \opt{-optace true}, \herd{} does not generate candidate
executions that fail the uniproc test. The default is ``\opt{true}''
for internal models (except the minimal model), and ``\opt{false}'' for
text file models. Notice that \opt{-model uniproc.cat}
and \opt{-model minimal.cat -optace true} should yield identical results,
the second being faster.
Setting \opt{-optace true} can lower the execution time significantly,
but one should pay attention not to design models that forget the uniproc
condition.
\item[{\tt \aname{opt:archcheck}{-archcheck} <bool>}]
Control the compatibility check of test and cat file
architectures. More precisely, some cat files are specific to an architecture.
Running tests of a different architecture on them will result in
cryptic error messages. In that situation, the compatibility check will
yield a more understandable diagnostic. Default is~\opt{true}.
\item[{\tt \aname{opt:show}{-show} (prop|neg|all|cond|wit|none)}]
Select execution diagrams for picture display and generation.
Execution diagrams are shown according to
the final condition of the test. The final condition is a quantified boolean
proposition \verb+exists +$p$, \verb+~exists +$p$, or \verb+forall +$p$.
The semantics of recognised tags is as follows:
\begin{itemize}
\item \opt{prop} Picture executions for which $p$ is true.
\item \opt{neg} Picture executions for which $p$ is false.
\item \opt{all} Picture all executions.
\item \opt{cond} Picture executions that validate the condition,
\emph{i.e.} $p$ is true for \verb+exists+ and \verb+forall+, and false
for \verb+~exists+.
\item \opt{wit} Picture ``\emph{interesting}'' executions,
\emph{i.e.} $p$ is true for \verb+exists+ and \verb+~exists+,
and false for \verb+forall+.
\item \opt{none} Picture no execution.
\end{itemize}
Default is \opt{none}.
\item[{\tt \aname{opt:initwrites}{-initwrites} <bool>}]
Represent init writes as plain write events, default is \opt{false} except
for specifically tagged generic models --- see ``Model options''
in Sec.~\ref{language:model}.
\item[{\tt \aname{opt:variant}{-variant} <\textit{tag}$_1$,\ldots,\textit{tag}$_n$>}]
Activate variation(s) of models. Most variations are minor changes in instruction semantics;
they are used for experimental purposes and remain undocumented, with the exception of the tag
``\texttt{mixed}'', which commands mixed-size mode. Mixed-size mode is appropriate for mixed-size
tests that perform accesses of different sizes, as well as indexed accesses.
\end{description}
\paragraph*{Discard some observations}
Those options intentionally omit some of the final states that \herd{} would
normally generate.
\begin{description}
\item[{\tt -speedcheck (false|true|fast)}]
\aname{speedcheck:opt}{When} enabled by \opt{-speedcheck true}
or \opt{-speedcheck fast}, attempt to settle the test condition.
That is, \herd{} will
generate a subset of executions (those named ``\emph{interesting}'' above)
in place of all executions.
With setting \opt{-speedcheck fast},
\herd{} will additionally stop as soon as a condition \verb+exists +$p$ is validated, and as soon as a condition \verb+~exists +$p$ or
\verb+forall +$p$ is invalidated. Default is \opt{false}.
\item[{\tt -nshow <int>}]
Stop once \verb+<int>+ pictures have been collected. Default is to
collect all (specified, see option \ahrefloc{opt:show}{\opt{-show}}) pictures.
\end{description}
\paragraph*{Control \texttt{dot} pictures}
These options control the content of DOT images.
We first describe options that act at the general level.
\begin{description}
\item[{\tt -graph (cluster|free|columns)}] Select main mode for graphs.
See Sec.~\ref{mode:example}. The default is \opt{cluster}.
\item[{\tt -dotmode (plain|fig)}] The setting \opt{-dotmode fig}
produces output that includes the proper escape
sequence for translating \texttt{.dot} files
to \texttt{.fig} files (\emph{e.g.} with \texttt{dot -Tfig\ldots}).
Default is \opt{plain}.
\item[{\tt -dotcom (dot|neato|circo)}] Select the command that formats
graphs displayed by the \ahrefloc{opt:gv}{\opt{-gv}} option.
The default is \opt{dot} for the \opt{cluster} and~\opt{free} graph modes,
and \opt{neato} for the \opt{columns} graph mode.
\item[{\tt -showevents (all|mem|noregs)}] Control which events are
pictured:
\begin{itemize}
\item \opt{all} Picture all events.
\item \opt{mem} Picture memory events.
\item \opt{noregs} Picture all events except register events,
\emph{i.e.} memory, fences and branch events.
\end{itemize}
Default is \opt{noregs}.
\item[{\tt -showinitwrites <bool>}] Show initial write events
(when existing, see option~\ahrefloc{opt:initwrites}{-initwrites})
in pictures. Default is \opt{true}.
\item[{\tt -mono <bool>}] The setting \opt{-mono true} commands monochrome
pictures. This option acts upon default color selection. Thus, it
has no effect on colors given explicitly with the
\ahrefloc{opt:edgeattr}{\opt{-edgeattr}} option.
\item[{\tt -scale <float>}]
Global scale factor for graphs in \opt{columns} mode.
Default is \opt{1.0}.
\item[{\tt -xscale <float>}]
Global scale factor for graphs in \opt{columns} mode, x direction.
Default is \opt{1.0}.
\item[{\tt -yscale <float>}]
Global scale factor for graphs in \opt{columns} mode, y direction.
Default is \opt{1.0}.
\item[{\tt -showthread <bool>}] Show thread numbers in figures.
In \opt{cluster} mode, where the events of a thread are clustered,
thread clusters have a label.
In \opt{free} mode, \textrel{po} edges are suffixed by a thread number.
In \opt{columns} mode, columns have a header node that shows
the thread number. Default is~\opt{true}.
\item[{\tt -texmacros <bool>}] Use \LaTeX{} commands in some text of pictures.
If activated (\opt{-texmacros true}), thread numbers are shown as
\verb+\myth{+$n$\verb+}+, and assembler instructions and locations in nodes
are arguments to an \verb+\asm+ command. It is the user's responsibility
to define those commands in the \LaTeX{} documents that include the pictures.
Possible definitions are \verb+\newcommand{\myth}[1]{Thread~#1}+
and \verb+\newcommand{\asm}[1]{\texttt{#1}}+.
Default is~\opt{false}.
\end{description}
A few options control picture legends.
\begin{description}
\item[{\tt -showlegend <bool>}]
Add a legend to pictures. By default legends show the test name and
a comment from the executed model.
This comment is the first item
of the model syntax --- see Sec.~\ref{language:model}.
Default is~\opt{true}.
\item[{\tt -showkind <bool>}]
Show test kind in legend.
The kind derives from the quantifier of the test final condition,
kind \texttt{Allow} being \verb+exists+,
kind \texttt{Forbid} being \verb+~exists+,
and kind \texttt{Require} being \verb+forall+.
Default is~\opt{false}.
\item[{\tt -shortlegend <bool>}]
Limit legend to test name. Default is~\opt{false}.
\end{description}
A few options control what is shown in nodes
and their sizes, \emph{i.e.}
how events are pictured.
\begin{description}
\item[{\tt -squished <bool>}] The setting \opt{-squished true} drastically
limits the information displayed in graph nodes. This is usually what
is wanted in modes \opt{free} and~\opt{columns}. Default is~\opt{false}.
\item[{\tt -fixedsize <bool>}] This setting is meaningful in
\opt{columns} graph mode and for squished nodes. When set by
\opt{-fixedsize true}, it forces node width to be $65\%$ of the space
between columns. This may sometimes yield a nicer edge routing. Default is~\opt{false}.
\item[{\tt -extrachars <float>}] This setting is meaningful in
\opt{columns} graph mode and for squished nodes.
When the size of nodes is not fixed (\emph{i.e.} \opt{-fixedsize false} and default), \herd{} computes the width of nodes by counting characters in node
labels and scaling the result by the font size.
The setting \opt{-extrachars~}$v$ commands adding the value $v$ before scaling.
Negative values are of course accepted. Default is \opt{0.0}.
\item[{\tt -showobserved <bool>}] Highlight observed memory read events with
stars ``\texttt{*}''. A memory read is observed when the value it reads
is stored in a register that appears in final states.
Default is~\opt{false}.
\item[{\tt -brackets <bool>}] Show brackets around locations. Default
is~\opt{false}.
\end{description}
Then we list options that offer some control on which edges are shown.
We recall that the main controls over the shown and unshown edges are
the \verb+show+ and \verb+unshow+ directives in model definitions ---
see Sec.~\ref{show:def}.
However, some edges can be controlled only with options (or configuration
files), and the \opt{-unshow} option proves convenient.
\begin{description}
\item[{\tt -showpo <bool>}] Show program order (\textrel{po}) edges.
Default is~\opt{true}.
\item[{\tt -showinitrf <bool>}] Show read-from edges from initial state.
Default is~\opt{false}.
\item[{\tt -showfinalrf <bool>}] Show read-from edges to the final state,
\emph{i.e.} show the last store to locations. Default is~\opt{false}.
\item[{\tt -showfr <bool>}] Show from-read edges. Default is~\opt{true}.
\item[{\tt \aname{opt:doshow}{-doshow} <\textit{name}$_1$,\ldots,\textit{name}$_n$>}]
Do show edges labelled with \textit{name}$_1$,\ldots,\textit{name}$_n$.
This setting applies when names are bound in model definition.
\item[{\tt -unshow <\textit{name}$_1$,\ldots,\textit{name}$_n$>}]
Do not show edges labelled with \textit{name}$_1$,\ldots,\textit{name}$_n$.
This setting applies at the very last moment and thus cancels any
\verb+show+ directive in model definitions and any \opt{-doshow} command
line~option.
\end{description}
Other options offer some control over some of the attributes defined in
the \ahref{\urlgraphviz}{Graphviz software} documentation.
Notice that the controlled attributes are omitted from DOT files
when no setting is present.
For instance, in the absence of a \opt{-splines <tag>} option, \herd{}
will generate no definition for the \texttt{splines} attribute, thus
resorting to DOT tools' defaults.
Most of the following
options accept the \opt{none}~argument that restores their
default behaviour.
\begin{description}
\item[{\tt -splines (spline|true|line|false|polyline|ortho|curved|none)}]
Define the value of the \texttt{splines} attribute. Tags are replicated in
output files as the value of the attribute, except for \opt{none}.
\item[{\tt -margin <float|none>}] Specifies the \texttt{margin} attribute of graphs.
\item[{\tt -pad <float|none>}] Specifies the \texttt{pad} attribute of graphs.
\item[{\tt -sep <string|none>}] Specifies the \texttt{sep} attribute of graphs.
Notice that the argument is an arbitrary string, so as to allow DOT general
syntax for this attribute.
\item[{\tt -fontname <string|none>}] Specifies the graph fontname attribute.
\item[{\tt -fontsize <int|none>}] Specifies the fontsize attribute~$n$ of all
text in the graph.
\item[{\tt -edgefontsizedelta <int>}] The option \opt{-edgefontsizedelta }$m$ sets
the fontsize attributes of edges to $n+m$, where $n$ is the argument to
the \opt{-fontsize} option. Default is \opt{0}. This option has no effect if
fontsize is unset.
\item[{\tt -penwidth <float|none>}] Specifies the \texttt{penwidth} attribute of
edges.
\item[{\tt -arrowsize <float|none>}] Specifies the \texttt{arrowsize}
attribute of edges.
\item[{\tt \aname{opt:edgeattr}{-edgeattr} <label,attribute,value>}]
Give value \opt{value} to attribute \opt{attribute} of all edges labelled
\opt{label}. This powerful option permits alternative styles for edges.
For instance, the \textrel{ghb} edges of the diagrams of this document
are thick purple (blueviolet) arrows thanks to the settings:
\opt{-edgeattr ghb,color,blueviolet}
\opt{-edgeattr ghb,penwidth,3.0}
\opt{-edgeattr ghb,arrowsize,1.2}. Notice that the settings performed
by the \opt{-edgeattr} option override other settings.
This option has no default.
\end{description}
\paragraph*{Change input}
Those options are the same as the ones
of~\litmus{} --- see Sec.~\ref{change:input}.
\begin{description}
\item[{\tt -names <file>}] Run \herd{} only on tests whose names are
listed in \texttt{<file>}.
\item[{\tt -rename <file>}] Change test names.
\item[{\tt -kinds <file>}] Change test kinds.
This amounts to changing the quantifier of final conditions, with
kind \texttt{Allow} being \verb+exists+,
kind \texttt{Forbid} being \verb+~exists+
and kind \texttt{Require} being \verb+forall+.
\item[{\tt -conds <file>}] Change the final condition of tests.
This is by far the most useful of these options:
in combination with option \opt{-show prop}, it permits a fine-grained
selection of execution pictures.
% --- see Sec.~\ref{example:invalid}.
\end{description}
\subsection{\label{herd:configfile}{Configuration files}}
The syntax of configuration files is minimal:
lines ``\textit{key} \textit{arg}'' are interpreted
as setting the value of parameter~\textit{key} to \textit{arg}.
Each parameter has a corresponding option,
usually \opt{-}\textit{key}, except for the single-letter
option \opt{-v}, whose parameter is \opt{verbose}.
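For instance, a two-line configuration file (a hypothetical sketch) has the
same effect as the command-line options \opt{-model sc.cat -through invalid}:
\begin{verbatim}
model sc.cat
through invalid
\end{verbatim}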
As command-line options are processed left-to-right,
settings from a configuration file (option \opt{-conf})
can be overridden by a later command-line option.
Configuration files are used mostly for controlling pictures.
Some configuration files
are present in the distribution.
As an example, here is the configuration file \afile{apoil.cfg},
which can be used to display images in \opt{free} mode.
\verbatiminput{apoil.cfg}
The configuration above is commented with line comments that start
with ``\verb+#+''.
The above configuration file comes in handy to eye-proof model output,
even for relatively complex tests, such as \atest{IRIW+lwsyncs}
and \atest{IRIW+syncs}:
\begin{verbatim}
% herd7 -conf apoil.cfg -show prop -doshow prop -gv IRIW+lwsyncs.litmus
% herd7 -through invalid -conf apoil.cfg -show prop -doshow prop -gv IRIW+syncs.litmus
\end{verbatim}
In the command lines above, \ahrefloc{opt:show}{\opt{-show prop}}
instructs \herd{} to produce images of the executions that validate
the final test condition, while \ahrefloc{opt:doshow}{\opt{-doshow prop}}
instructs \herd{} to display the relation named ``\texttt{prop}''.
In spite of the unfortunate name clashes, those are not to be confused\ldots
We run the two tests on top of the default Power model that computes,
amongst others, a \texttt{prop} relation. The model rejects executions
with a cyclic \textrel{prop}.
One can then see that the relation \textrel{prop} is acyclic
for \ltest{IRIW+lwsyncs} and cyclic for \ltest{IRIW+syncs}:
\begin{center}\img{IRIW+lwsyncs+APOIL}\quad\quad\img{IRIW+syncs+APOIL}\end{center}
Notice that we used the option~\opt{-through invalid} in the case
of \ltest{IRIW+syncs} as we would otherwise have no image.
\subsection{\label{herd:searchpath}File searching}
Configuration and model files are searched first in the current directory;
then in the search path;
then in any directory specified
by setting the shell environment variable \texttt{HERDLIB};
and then in the \herd{} installation directory, which is defined
when compiling~\herd.
%\section{Extensions to Herd}
%
%\begin{quote}\it
%This section describes several extensions to \herd{} that have been implemented by Tyler Sorensen and John Wickerson in collaboration with the original authors. In due course, the contents of this section will probably be merged into the previous sections to form a cohesive manual.
%\end{quote}
%
%\subsection{Additional command line options}
%
%\begin{description}
%\item[{\tt \aname{opt:dumplem}{-dumplem}}] Convert the given herd model to Lem format, and exit. The resultant Lem file is generated on {\tt stdout}.
%\begin{quote}\emph{Note.} It is necessary to provide a litmus test when invoking \herd{} in this way, even though the litmus test will not be examined. This is due to a minor technical problem.\end{quote}
%\end{description}
%
%
%
%\subsection{Curried function application}
%
%A test-instruction is used to enforce a contract between the programming language and the programmer. There are two types: \emph{provided conditions} and \emph{undefined-unless conditions}. The latter are indicated with the \synt{\T{undefined\_unless}} keyword. A provided condition is an obligation on the programming language; that is, the programmer can assume that every execution of their program will satisfy every provided condition. An undefined-unless condition is an obligation on the programmer; that is, the compiler-writer can assume that every execution of the program will satisfy every undefined-unless condition. In other words, if a program has an execution that violates an undefined-unless condition, then its behaviour is completely undefined.
%The syntax for instructions thus becomes:
%
%\begin{syntax}
%\nt{instruction} \is{} \ldots
%\alt \T{undefined\_unless} \NT{check} \NT{expr} \boption \T{as} \NT{id}\eoption
%\end{syntax}
%\subsection{Additional identifiers}
%
%\begin{quote}\it
%This is an appendix to Section~\ref{language:identifier}.
%\end{quote}
%\paragraph*{Pre-defined relations}
%
%Here are some more pre-defined relations.
%\begin{idtable}
%{\tt unv} & universal relation & relates every event in the structure to every other event %\\
%{\tt int-$s$} & internal at given scope & \emph{(applicable only to scoped memory models)} relates events that are in the same part of the execution hierarchy \\
%{\tt ext-$s$} & external at given scope & \emph{(applicable only to scoped memory models)} relates events that are in different parts of the execution hierarchy \\
%\end{idtable}
%Here, $s$ ranges over the following values:
%
%\begin{center}
%\begin{tabular}{ll}
%value of $s$ & description (in OpenCL terminology) \\
%\hline
%{\tt wi}, {\tt thread} & work-item \\
%{\tt sg}, {\tt warp} & sub-group \\
%{\tt wg}, {\tt block}, {\tt cta} & work-group \\
%{\tt kernel} & kernel \\
%{\tt dev} & device \\
%\end{tabular}
%\end{center}
%
%For example, {\tt int-cta} relates all events that are in the same work-group, while {\tt ext-wi} relates all events that are in different work-items (threads).
%
%We provide the following additional fence relations: \verb"membar.cta", \verb"membar.gl", \verb"membar.sys" (PTX). C++ and OpenCL fences do not appear in this list because those fences are modelled as events rather than relations. By modelling these fences as events, we are better able to attach parameters to them, such as the memory-order (C++ and OpenCL) and the memory-scope (OpenCL).
%
%In C++ models, the following relations are pre-defined.
%\begin{idtable}
%{\tt asw} & additional synchronises-with & links every initial write to every event that is not an initial write \\
%{\tt lo} & lock order & a total order on all mutex operations (similar to {\tt co}, but for mutexes instead of writes) \\
%\end{idtable}
%\paragraph*{Pre-defined sets}
%Here are some pre-defined sets, available in all models.
%\begin{idtable}
%{\tt \_} & universal set & the set of all events in the structure \\
%{\tt R} & read events & set of all reads from memory \\
%{\tt W} & write events & set of all writes to memory \\
%{\tt M} & memory events & set of all reads from and writes to memory \\
%{\tt B} & barrier events & is a barrier \\
%{\tt A} & atomic events & is an atomic event \\
%{\tt P} & plain events & is not an atomic event \\
%{\tt I} & initial writes & is an initial write event \\
%\end{idtable}
%Having defined these sets, it is now possible to write expressions of the form {\tt RW($e$)} as {\tt [R * W] \& $e$}. However, the simulation in the latter case may be less efficient, owing to the need to construct the intermediate relation {\tt [R * W]}.
%In C++ and OpenCL models, the following sets are pre-defined.
%
%\begin{idtable}
%{\tt rmw} & read-modify-writes & the set of all read-modify-write events \\
%{\tt brmw} & blocked read-modify-writes & events representing an attempted read-modify-write operation that has become stuck \\
%{\tt F} & fences & the set of all fences \\
%{\tt acq} & acquire & atomic events with ``acquire'' memory-order \\
%{\tt rel} & release & atomic events with ``release'' memory-order \\
%{\tt acq\_rel} & acquire/release & atomic events with ``acquire/release'' memory-order \\
%{\tt rlx} & relaxed & atomic events with ``relaxed'' memory-order \\
%{\tt sc} & sequentially consistent & atomic events with ``sequentially consistent'' memory-order \\
%{\tt na} & non-atomics & non-atomic events \\
%\end{idtable}
%In C++ models, the following sets are pre-defined.
%
%\begin{idtable}
%{\tt lk} & locks & the set of all lock events \\
%{\tt ls} & successful locks & the set of all successful lock events \\
%{\tt ul} & unlocks & the set of all unlock events \\
%{\tt con} & consume & atomic events with ``consume'' memory-order \\
%{\tt atomicloc} & atomic locations & events acting on atomic locations \\
%{\tt nonatomicloc} & non-atomic locations & events acting on non-atomic locations \\
%{\tt mutexloc} & mutex locations & events acting on mutex locations \\
%\end{idtable}
%In OpenCL models, the following sets are pre-defined.
%
%\begin{idtable}
%{\tt gF} & global fences & the set of all global fences \\
%{\tt lF} & local fences & the set of all local fences \\
%{\tt wi} & work-item scope & events with ``work-item'' memory-scope \\
%{\tt sg} & sub-group scope & events with ``sub-group'' memory-scope \\
%{\tt wg} & work-group scope & events with ``work-group'' memory-scope \\
%{\tt dev} & device scope & events with ``device'' memory-scope \\
%{\tt all\_dev} & all-devices scope & events with ``all\_svm\_devices'' memory-scope \\
%\end{idtable}
from Model import create_model
from tensorflow.keras.datasets.mnist import load_data
import numpy as np
from Layers import *

(x_train, y_train), (x_test, y_test) = load_data()
#---------------------------------------------
# The following method would create the model
#---------------------------------------------
model = create_model()

epochs = 100
losses = []
loss_function = MSE
lr = 0.1  # NOTE: unused below; parameter updates are assumed to happen inside model.backward

for epoch in range(epochs):
    print('Epoch {}'.format(epoch + 1))
    for i, (image, label) in enumerate(zip(x_train, y_train)):
        prediction = model.forward(image)
        # Build a one-hot target vector matching the prediction shape
        classes = prediction.shape[0]
        one_hot = np.zeros(classes)
        one_hot[label] = 1.0
        # Compare the prediction against the one-hot target
        loss = loss_function(prediction, one_hot)
        error = loss.forward()
        error_gradient = loss.backward()
        losses.append(error)
        gradient = model.backward(error_gradient)
        if i % 10 == 0:
            print('Loss for step {}: {}'.format(i + 1, error))
SUBROUTINE HDOTS ( ix, iy, ilwid, iret )
C************************************************************************
C* HDOTS - PS *
C* *
C* This subroutine draws a dot on a graphics device. *
C* *
C* HDOTS ( IX, IY, ILWID, IRET ) *
C* *
C* Input parameters: *
C* IX INTEGER X coordinates *
C* IY INTEGER Y coordinates *
C* ILWID INTEGER Line width *
C* *
C* Output parameters: *
C* IRET INTEGER Return code *
C** *
C* Log: *
C* M. desJardins/GSFC 2/91 *
C* M. desJardins/NMC 4/91 Added psplot *
C* J. Whistler/SSAI 6/91 Fixed bad placement of psplot *
C* A. Chang/EAI 2/94 Modified to call C routine *
C* S. Maxwell/GSC 6/97 Documentation changes *
C************************************************************************
C------------------------------------------------------------------------
CALL PDOTS ( ix, iy, ilwid, iret )
C*
RETURN
END