source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
cluster_list_impl.h | /* The MIT License (MIT)
*
* (c) Jürgen Simon 2014 (juergen.simon@uni-bonn.de)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef M3D_CLUSTERLIST_IMPL_H
#define M3D_CLUSTERLIST_IMPL_H
#include <meanie3D/defines.h>
#include <meanie3D/namespaces.h>
#include <meanie3D/clustering/cluster.h>
#include <meanie3D/utils/set_utils.h>
#include <algorithm>
#include <sstream>
#include <stdlib.h>
#include <netcdf>
#include <vector>
#include <set>
#include <stdexcept>
#include <boost/tokenizer.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include "cluster_list.h"
namespace m3D {
using namespace std;
using namespace netCDF;
using namespace utils;
#if WITH_VTK
using utils::VisitUtils;
#endif
#pragma mark -
#pragma mark Macros
// Maximum/minimum of two values.
// Arguments are fully parenthesized so that invocations with compound
// expressions (e.g. sup(a + b, c)) expand with the intended precedence.
#define sup(v1, v2) (((v1) > (v2)) ? (v1) : (v2))
#define inf(v1, v2) (((v1) < (v2)) ? (v1) : (v2))
#pragma mark -
#pragma mark Constructors/Destructors et. Al.
template<typename T>
ClusterList<T>::ClusterList()
        : file(NULL),
          tracking_performed(false),
          highest_id(0),
          highest_uuid(0) {
    // Default construction: empty cluster list, no backing NetCDF file,
    // id/uuid counters start at zero.
}
// Constructs an empty cluster list carrying the given meta-data.
//
// @param source              path of the NetCDF file the clusters were
//                            (or will be) detected in.
// @param variables           names of the value variables.
// @param dimensions          names of the spatial dimensions.
// @param dimension_variables names of the coordinate variables.
// @param timestamp           timestamp of the data set.
// @param ti                  time index within the source file.
// @param orig_pts            when true, restrict to points present in the
//                            original data set.
template<typename T>
ClusterList<T>::ClusterList(const string &source,
                            const vector<string> &variables,
                            const vector<string> &dimensions,
                            const vector<string> &dimension_variables,
                            long timestamp,
                            int ti,
                            bool orig_pts)
        : file(NULL), tracking_performed(false), highest_id(0), highest_uuid(0), variables(variables),
          dimensions(dimensions), dimension_variables(dimension_variables), source_file(source), time_index(ti),
          timestamp(timestamp), m_use_original_points_only(orig_pts) {};
// Constructs a cluster list from an existing list of clusters plus the
// given meta-data. Same parameters as the meta-data-only constructor,
// with the cluster list prepended.
//
// @param list     clusters to take over (pointers are copied, not cloned).
// @param source   path of the NetCDF source file.
// @param vars     names of the value variables.
// @param dims     names of the spatial dimensions.
// @param dim_vars names of the coordinate variables.
// @param timestamp timestamp of the data set.
// @param ti       time index within the source file.
// @param orig_pts when true, restrict to points present in the original
//                 data set.
template<typename T>
ClusterList<T>::ClusterList(
        const typename Cluster<T>::list &list,
        const string &source,
        const vector<string> &vars,
        const vector<string> &dims,
        const vector<string> &dim_vars,
        long timestamp,
        int ti,
        bool orig_pts)
        : file(NULL), tracking_performed(false), highest_id(0), highest_uuid(0), variables(vars), dimensions(dims),
          dimension_variables(dim_vars), source_file(source), time_index(ti), timestamp(timestamp),
          m_use_original_points_only(orig_pts), clusters(list) {};
// Copy constructor. Copies all meta-data and the cluster pointer list
// (clusters themselves are shared, not cloned).
//
// BUG FIX: 'dimensions' was previously initialized from itself
// ('dimensions(dimensions)'), i.e. from an uninitialized member, which
// is undefined behaviour and left the copy's dimensions garbage/empty.
// It is now correctly copied from 'o.dimensions'.
template<typename T>
ClusterList<T>::ClusterList(const ClusterList &o)
        : file(o.file), filename(o.filename), variables(o.variables), dimensions(o.dimensions),
          dimension_variables(o.dimension_variables), source_file(o.source_file), clusters(o.clusters),
          tracking_performed(o.tracking_performed), tracking_time_difference(o.tracking_time_difference),
          tracked_ids(o.tracked_ids), dropped_ids(o.dropped_ids), new_ids(o.new_ids), splits(o.splits),
          merges(o.merges), highest_id(o.highest_id), highest_uuid(o.highest_uuid), timestamp(o.timestamp),
          time_index(o.time_index), m_use_original_points_only(o.m_use_original_points_only) {};
#pragma mark -
#pragma mark Accessing the list
template<typename T>
size_t
ClusterList<T>::size() const {
    // Number of clusters currently held by the list.
    return this->clusters.size();
}
template<typename T>
typename Cluster<T>::ptr
ClusterList<T>::operator[](size_t index) {
    // Bounds-checked access: vector::at throws std::out_of_range for
    // invalid indices, same as the original implementation.
    typename Cluster<T>::ptr cluster = clusters.at(index);
    return cluster;
}
template<typename T>
void
ClusterList<T>::clear(bool deletion_flag) {
    // Disposes of every cluster (forwarding deletion_flag to
    // Cluster::clear, which decides about the points) and then empties
    // the list itself.
    typename Cluster<T>::list::const_iterator it = clusters.begin();
    while (it != clusters.end()) {
        typename Cluster<T>::ptr cluster = *it;
        cluster->clear(deletion_flag);
        delete cluster;
        ++it;
    }
    clusters.clear();
}
#pragma mark -
#pragma mark Adding / Removing points
template<typename T>
void
ClusterList<T>::apply_size_threshold(unsigned int min_cluster_size, const bool &show_progress) {
    // Removes all clusters with fewer than min_cluster_size points.
    //
    // @param min_cluster_size minimum number of points a cluster needs
    //        to survive; values <= 1 make this a no-op.
    // @param show_progress when true, prints progress and timing info.
    //
    // NOTE(review): erased clusters are only removed from the list; the
    // cluster objects themselves are not deleted here. Presumably their
    // points still belong to the feature space — confirm whether the
    // cluster objects leak.
    boost::progress_display *progress = NULL;
    if (show_progress) {
        cout << endl << "Applying size threshold of " << min_cluster_size << " ... ";
        start_timer();
        progress = new boost::progress_display(clusters.size());
    }
    size_t axe_count = 0;
    if (min_cluster_size > 1) {
        typename Cluster<T>::list::iterator it;
        for (it = clusters.begin(); it != clusters.end();) {
            // BUG FIX: the increment was previously unconditional, which
            // dereferenced a NULL 'progress' when show_progress is false.
            if (progress != NULL) {
                progress->operator++();
            }
            typename Cluster<T>::ptr sc = *it;
            if (sc->size() < min_cluster_size) {
                it = clusters.erase(it);
                axe_count++;
            } else {
                it++;
            }
        }
    }
    if (show_progress) {
        cout << "done. (Removed " << axe_count << " objects in " << stop_timer() << "s)" << endl;
        delete progress;
    }
}
#pragma mark -
#pragma mark Writing/Reading
template<typename T>
void
ClusterList<T>::save() {
    // Re-writes the list to the file it was last written to or read from.
    // Requires a prior write() or read() so that 'filename' is set.
    if (filename.empty()) {
        throw std::runtime_error("Can not use save() because cluster list was not written or read before.");
    }
    this->write(filename);
}
template<typename T>
void
ClusterList<T>::write(const std::string &path) {
    // Serializes this cluster list to a NetCDF file at <path>.
    //
    // If a file already exists at <path>, the data is written to
    // <path>-new first and the original is replaced at the end (see the
    // comment inside: this->file may still be open from tracking, so the
    // original cannot simply be overwritten in place). Dimension sizes
    // and coordinate data are copied from either the previous cluster
    // file (overwrite case) or the original source file.
    //
    // Fixes in this revision:
    //  - the malloc'd point buffer is released with free() (was delete,
    //    which is undefined behaviour),
    //  - the source NcFile is deleted after use (was leaked),
    //  - the catch-block rethrows with 'throw;' (was 'throw e;', which
    //    slices the exception down to std::exception).
    using namespace utils::vectors;
    try {
        NcFile *file = NULL;
        this->filename = std::string(path);
        bool file_existed = boost::filesystem::exists(path);
        std::string filename = file_existed ? path + "-new" : path;
        // Make sure the new file is deleted if it exists.
        // This can happen if a previous run didn't fully complete.
        if (boost::filesystem::exists(filename)) {
            boost::filesystem::remove(filename);
        }
        // We need to get the size of the dimensions and other
        // data from somewhere. For this we have to rely on either
        // the source being present, or a previous instance of the
        // cluster file (in case of overwriting).
        string source_path = file_existed ? path : source_file;
        NcFile *sourcefile = new NcFile(source_path, NcFile::read);
        if (sourcefile == NULL || sourcefile->isNull()) {
            cerr << "FATAL:could not open file '" << source_path
                 << "' for obtaining dimension data" << endl;
            exit(EXIT_FAILURE);
        }
        try {
            // Be aware of the fact that this->ncFile is probably
            // open from the tracking at this point. It also needs
            // to be open, because the dimensions etc. are referencing
            // it. This creates a paradoxical situation, which is solved
            // by creating a new file with an altered name first, writing
            // it off and finally deleting the original when done, replacing
            // the original in that way.
            file = new NcFile(filename, NcFile::replace);
        } catch (const netCDF::exceptions::NcException &e) {
            cerr << "FATAL:exception opening file " << filename
                 << " for writing : " << e.what() << endl;
            exit(EXIT_FAILURE);
        }
        // write version attribute
        file->putAtt("version", m3D::VERSION);
        // Create feature-space variables
        vector<string> featurespace_variables = dimension_variables;
        for (size_t i = 0; i < variables.size(); i++) {
            featurespace_variables.push_back(variables[i]);
        }
        // This is one dimension of the clusters and also the rank
        // (spatial rank + value rank) of the featurespace
        NcDim dim = file->addDim("rank", (int) featurespace_variables.size());
        // Record the individual ranks as well
        file->putAtt("spatial_rank", ncInt, (int) dimensions.size());
        file->putAtt("value_rank", ncInt, (int) variables.size());
        // General dimension/variable info
        file->putAtt("variables", to_string(variables));
        file->putAtt("dimensions", to_string(dimensions));
        file->putAtt("dimension_variables", to_string(dimension_variables));
        // The actual variables composing the featurespace
        file->putAtt("featurespace_variables", to_string(featurespace_variables));
        // Add 'time' information
        netcdf::add_time(file, this->timestamp, true);
        file->putAtt("time_index", ncInt, time_index);
        // copy dimensions
        vector<NcDim> ncDimensions
                = netcdf::copy_dimensions(dimensions, sourcefile, file);
        // Create dummy variables, attributes and other meta-info
        file->putAtt("num_clusters", ncInt, (int) clusters.size());
        file->putAtt("source", this->source_file);
        // Save highest ID
        unsigned long long hid = boost::numeric_cast<unsigned long long>(this->highest_id);
        file->putAtt("highest_id", boost::lexical_cast<std::string>(hid));
        // Save highest UUID
        unsigned long long huuid = boost::numeric_cast<unsigned long long>(this->highest_uuid);
        file->putAtt("highest_uuid", boost::lexical_cast<std::string>(huuid));
        // Record IDs in attribute
        id_set_t cluster_ids;
        for (size_t ci = 0; ci < clusters.size(); ci++)
            cluster_ids.insert(clusters[ci]->id);
        file->putAtt("cluster_ids", sets::to_string(cluster_ids));
        // Add tracking meta-info
        if (this->tracking_performed) {
            file->putAtt("tracking_performed", "yes");
            file->putAtt("tracking_time_difference", ncInt, this->tracking_time_difference);
            file->putAtt("tracked_ids", sets::to_string(this->tracked_ids));
            file->putAtt("new_ids", sets::to_string(this->new_ids));
            file->putAtt("dropped_ids", sets::to_string(this->dropped_ids));
            file->putAtt("merges", maps::id_map_to_string(this->merges));
            file->putAtt("splits", maps::id_map_to_string(this->splits));
        }
        // Copy dimension variables including data. This is required
        // so that on reading a coordinate system can be constructed
        for (size_t i = 0; i < dimension_variables.size(); i++) {
            string var = dimension_variables[i];
            netcdf::copy_variable<T>(var, sourcefile, file, true);
        }
        // Copy other variables without data
        for (size_t i = 0; i < variables.size(); i++) {
            string var = variables[i];
            netcdf::copy_variable<T>(var, sourcefile, file, false);
        }
        // Done copying from the source: close it to avoid leaking the
        // file handle (it was previously never deleted).
        delete sourcefile;
        // Featurespace Variables
        std::string fvarnames = to_string(variables);
        // Add cluster dimensions and variables
        for (size_t ci = 0; ci < clusters.size(); ci++) {
            typename Cluster<T>::ptr cluster = clusters.at(ci);
            // NOTE: some problem exists with the normal id_t used
            // everywhere else and NetCDF. Using unsigned long long
            // produces a compiler warning but also correct results.
            unsigned long long cid = (unsigned long long) cluster->id;
            unsigned long long uuid = (unsigned long long) cluster->uuid;
            // Create a dimension
            stringstream dim_name(stringstream::in | stringstream::out);
            dim_name << "cluster_dim_" << cid;
            NcDim cluster_dim;
            try {
                cluster_dim = file->addDim(dim_name.str(), cluster->size());
            } catch (const netCDF::exceptions::NcException &e) {
                cerr << "ERROR:exception creating dimension " << dim_name.str()
                     << ":" << e.what() << endl;
                exit(EXIT_FAILURE);
            }
            // Create variable
            stringstream var_name(stringstream::in | stringstream::out);
            var_name << "cluster_" << cid;
            vector<NcDim> dims(2);
            dims[0] = cluster_dim;
            dims[1] = dim;
            NcVar var;
            try {
                var = file->addVar(var_name.str(), ncDouble, dims);
                var.setCompression(false, true, 3);
            } catch (const netCDF::exceptions::NcException &e) {
                cerr << "ERROR:exception creating dimension " << var_name.str()
                     << ":" << e.what() << endl;
                continue;
            }
            // size
            var.putAtt("size", ncInt, (int) cluster->size());
            // margin flag
            std::string flag = (cluster->has_margin_points() ? "Y" : "N");
            var.putAtt("has_margin_points", flag);
            // check if there's any merge
            id_map_t::iterator mi = this->merges.find(cluster->id);
            if (mi != this->merges.end()) {
                std::string merged_from = utils::sets::to_string(mi->second);
                var.putAtt("merged_from", merged_from);
            }
            // check if there's any split
            for (mi = this->splits.begin(); mi != this->splits.end(); mi++) {
                id_set_t csplits = mi->second;
                if (csplits.find(cluster->id) != csplits.end()) {
                    std::string split_from = boost::lexical_cast<string>(mi->first);
                    var.putAtt("split_from", split_from);
                    break;
                }
            }
            // id
            var.putAtt("id", boost::lexical_cast<string>(cid));
            // uuid
            var.putAtt("uuid", boost::lexical_cast<string>(uuid));
            // mode
            string mode = to_string(cluster->mode);
            var.putAtt("mode", mode);
            // displacement
            string displacement = to_string(cluster->displacement);
            var.putAtt("displacement", displacement);
            // bounding box min
            string bound_min = to_string(cluster->get_bounding_box_min());
            var.putAtt("bounding_box_min", bound_min);
            // bounding box max
            string bound_max = to_string(cluster->get_bounding_box_max());
            var.putAtt("bounding_box_max", bound_max);
            // Write cluster away
            size_t numElements = cluster->size() * cluster->rank();
            T *data = (T *) malloc(sizeof(T) * numElements);
            if (data == NULL) {
                cerr << "FATAL:out of memory" << endl;
                exit(EXIT_FAILURE);
            }
            for (size_t pi = 0; pi < cluster->size(); pi++) {
                typename Point<T>::ptr p = cluster->at(pi);
                for (size_t di = 0; di < dim.getSize(); di++) {
                    size_t index = pi * cluster->rank() + di;
                    data[index] = p->values.at(di);
                }
            }
            var.putVar(data);
            // BUG FIX: buffer was obtained with malloc(), so it must be
            // released with free() ('delete data' is undefined behaviour).
            free(data);
        }
        if (file_existed) {
            // close the original and delete it
            if (this->file != NULL) {
                delete this->file;
                this->file = NULL;
            }
            if (boost::filesystem::remove(path)) {
                boost::system::error_code ec;
                boost::filesystem::rename(path + "-new", path, ec);
                if (ec.value() != boost::system::errc::success) {
                    cerr << "ERROR: could not rename " << (path + "-new")
                         << " to " << path << ":" << ec.message() << endl;
                }
            } else {
                // for some reason, the old file could not be removed. In
                // this case, just move it aside and try again
                cerr << "ERROR: could not delete " << path << endl;
                std::string moved_path = path + "-moved";
                cerr << "renaming " << path << " to " << moved_path << endl;
                boost::system::error_code ec;
                boost::filesystem::rename(path, moved_path, ec);
                if (ec.value() == boost::system::errc::success) {
                    boost::filesystem::rename(path + "-new", path, ec);
                    if (ec.value() != boost::system::errc::success) {
                        cerr << "ERROR: could not rename " << (path + "-new") << " to " << path << endl;
                        cerr << "REASON: " << ec.message() << endl;
                    }
                } else {
                    cerr << "ERROR: could not move " << path << " to " << moved_path << endl;
                    cerr << "REASON: " << ec.message() << endl;
                }
            }
        }
        this->file = file;
    } catch (const std::exception &e) {
        std::cerr << "ERROR:exception while writing cluster file: " << e.what() << endl;
        // BUG FIX: rethrow the original exception. 'throw e' copies and
        // slices it down to std::exception, losing the concrete type.
        throw;
    }
}
template<typename T>
typename ClusterList<T>::ptr
ClusterList<T>::read(const std::string &path, CoordinateSystem<T> **cs_ptr) {
    // Reads a cluster list back from a NetCDF file produced by write().
    //
    // @param path   path of the cluster file.
    // @param cs_ptr if non-NULL, receives a newly allocated coordinate
    //               system built from the file (ownership passes to the
    //               caller); otherwise the coordinate system is deleted
    //               again after reading.
    // @return newly allocated cluster list (caller takes ownership).
    //
    // Fixes in this revision:
    //  - the malloc'd point buffer is released with free() (was delete),
    //  - the catch-block rethrows with 'throw;' (was 'throw e;'),
    //  - the ClusterList constructor is called with all arguments in the
    //    right positions (timestamp was previously omitted, see below).
    // meta-info
    vector<string> variables;
    vector<string> dimensions;
    vector<string> dimension_variables;
    vector<string> featurespace_variables;
    string source_file;
    id_set_t cluster_ids;
    bool tracking_performed = false;
    id_set_t tracked_ids;
    id_set_t new_ids;
    id_set_t dropped_ids;
    id_map_t merges;
    id_map_t splits;
    int tracking_time_difference = NO_TIME;
    int time_index = NO_TIME;
    timestamp_t timestamp = 0;
    m3D::id_t highest_id = NO_ID;
    m3D::uuid_t highest_uuid = NO_UUID;
    typename Cluster<T>::list list;
    NcFile *file = NULL;
    file = new NcFile(path, NcFile::read);
    try {
        // Read the dimensions
        NcDim fs_dim = file->getDim("rank");
        // variables
        string buffer;
        file->getAtt("variables").getValues(buffer);
        variables = vectors::from_string<string>(buffer);
        // dimensions
        file->getAtt("dimensions").getValues(buffer);
        dimensions = vectors::from_string<string>(buffer);
        // dimension variables
        file->getAtt("dimension_variables").getValues(buffer);
        dimension_variables = vectors::from_string<string>(buffer);
        // featurespace variables
        file->getAtt("featurespace_variables").getValues(buffer);
        featurespace_variables = vectors::from_string<string>(buffer);
        // Read time index
        file->getAtt("time_index").getValues(&time_index);
        // Read time
        timestamp = netcdf::get_time_checked<timestamp_t>(path, 0);
        // Source file
        file->getAtt("source").getValues(source_file);
        int number_of_clusters;
        file->getAtt("num_clusters").getValues(&number_of_clusters);
        std::string value;
        file->getAtt("cluster_ids").getValues(value);
        cluster_ids = sets::from_string<m3D::id_t>(value);
        // Tracking-related
        try {
            NcGroupAtt tracked = file->getAtt("tracking_performed");
            if (!tracked.isNull()) {
                tracked.getValues(value);
                tracking_performed = (value == "yes");
            }
            if (tracking_performed) {
                file->getAtt("tracked_ids").getValues(value);
                tracked_ids = sets::from_string<id_t>(value);
                file->getAtt("new_ids").getValues(value);
                new_ids = sets::from_string<id_t>(value);
                file->getAtt("dropped_ids").getValues(value);
                dropped_ids = sets::from_string<id_t>(value);
                file->getAtt("merges").getValues(value);
                merges = utils::maps::id_map_from_string(value);
                file->getAtt("splits").getValues(value);
                splits = utils::maps::id_map_from_string(value);
                file->getAtt("highest_id").getValues(value);
                highest_id = boost::lexical_cast<m3D::id_t>(value);
                file->getAtt("tracking_time_difference").getValues(&tracking_time_difference);
            }
            file->getAtt("highest_uuid").getValues(value);
            highest_uuid = boost::lexical_cast<m3D::uuid_t>(value);
        } catch (netCDF::exceptions::NcException &e) {
            // Tracking attributes are optional (files written before
            // tracking ran don't have them) — missing ones are ignored
            // deliberately and the defaults above remain in effect.
        }
        // Read the feature-variables
        file->getAtt("featurespace_variables").getValues(value);
        featurespace_variables = vectors::from_string<string>(value);
        // Coordinate system wanted?
        CoordinateSystem<T> *cs = new CoordinateSystem<T>(file, dimensions, dimension_variables);
        if (cs_ptr != NULL) {
            *cs_ptr = cs;
        }
        // Read clusters one by one
        id_set_t::iterator cid_iter;
        for (cid_iter = cluster_ids.begin(); cid_iter != cluster_ids.end(); cid_iter++) {
            // Identifier
            m3D::id_t cid = *cid_iter;
            // cluster dimension
            stringstream dim_name(stringstream::in | stringstream::out);
            dim_name << "cluster_dim_" << cid;
            NcDim cluster_dim = file->getDim(dim_name.str().c_str());
            size_t cluster_size = cluster_dim.getSize();
            // Read the variable
            stringstream var_name(stringstream::in | stringstream::out);
            var_name << "cluster_" << cid;
            NcVar var = file->getVar(var_name.str().c_str());
            // mode
            std::string mode_str;
            var.getAtt("mode").getValues(mode_str);
            vector<T> mode = vectors::from_string<T>(mode_str);
            var.getAtt("uuid").getValues(value);
            m3D::uuid_t uuid = boost::lexical_cast<m3D::uuid_t>(value);
            // displacement
            std::string displacement_str;
            var.getAtt("displacement").getValues(displacement_str);
            vector<T> displacement = vectors::from_string<T>(displacement_str);
            std::string bounds_min_str;
            var.getAtt("bounding_box_min").getValues(bounds_min_str);
            vector<T> bounds_min = vectors::from_string<T>(bounds_min_str);
            std::string bounds_max_str;
            var.getAtt("bounding_box_max").getValues(bounds_max_str);
            vector<T> bounds_max = vectors::from_string<T>(bounds_max_str);
            // margin flag
            std::string margin_char;
            var.getAtt("has_margin_points").getValues(margin_char);
            bool margin_flag = margin_char == "Y";
            // Create a cluster object
            typename Cluster<T>::ptr cluster = new Cluster<T>(mode, dimensions.size());
            cluster->id = cid;
            cluster->uuid = uuid;
            cluster->mode = mode;
            cluster->displacement = displacement;
            cluster->set_bounding_box_min(bounds_min);
            cluster->set_bounding_box_max(bounds_max);
            cluster->set_has_margin_points(margin_flag);
            // Read the cluster
            size_t numElements = cluster_size * cluster->rank();
            T *data = (T *) malloc(sizeof(T) * numElements);
            if (data == NULL) {
                cerr << "FATAL:out of memory" << endl;
                exit(EXIT_FAILURE);
            }
            var.getVar(data);
            for (size_t pi = 0; pi < cluster_size; pi++) {
                vector<T> values(cluster->rank(), 0.0);
                // copy point from data
                for (size_t di = 0; di < cluster->rank(); di++) {
                    values[di] = data[pi * cluster->rank() + di];
                }
                // get coordinate subvector
                vector<T> coordinate(values.begin(), values.begin() + cs->rank());
                // transform to gridpoint
                try {
                    vector<int> gp(cs->rank(), 0);
                    cs->reverse_lookup(coordinate, gp);
                    // only when this succeeds do we have the complete
                    // set of data for the point
                    typename Point<T>::ptr p = PointFactory<T>::get_instance()->create();
                    p->values = values;
                    p->coordinate = coordinate;
                    p->gridpoint = gp;
                    // add to cluster
                    cluster->add_point(p);
                } catch (std::out_of_range &e) {
                    cerr << "ERROR:reverse coordinate transformation failed for coordinate=" << coordinate << endl;
                }
            }
            // BUG FIX: buffer was obtained with malloc(), so release it
            // with free() ('delete data' is undefined behaviour).
            free(data);
            list.push_back(cluster);
        }
        if (cs_ptr == NULL) {
            delete cs;
        }
    } catch (const std::exception &e) {
        cerr << "ERROR:exception " << e.what() << endl;
        // BUG FIX: rethrow the original exception. 'throw e' slices it
        // down to std::exception, losing the concrete type.
        throw;
    }
    // When reading, use the cluster file itself as source
    // path so that the timestamp can be read. Set the real
    // source path up afterwards.
    // BUG FIX: the constructor takes (list, source, vars, dims, dim_vars,
    // timestamp, ti, orig_pts). The previous call omitted the timestamp
    // argument, so time_index was passed as the timestamp and 'false' as
    // the time index. All eight arguments are now passed explicitly.
    ClusterList<T>::ptr cl = new ClusterList(list,
                                             source_file,
                                             variables,
                                             dimensions,
                                             dimension_variables,
                                             timestamp,
                                             time_index,
                                             false);
    cl->timestamp = timestamp;
    cl->highest_id = highest_id;
    cl->highest_uuid = highest_uuid;
    cl->tracking_time_difference = tracking_time_difference;
    if (tracking_performed) {
        cl->tracked_ids = tracked_ids;
        cl->new_ids = new_ids;
        cl->dropped_ids = dropped_ids;
        cl->merges = merges;
        cl->splits = splits;
    }
    cl->filename = path;
    cl->file = file;
    return cl;
}
template<typename T>
bool sortBySize(const typename Cluster<T>::ptr c1, const typename Cluster<T>::ptr c2) {
    // Comparator: orders clusters by ascending point count.
    size_t left = c1->size();
    size_t right = c2->size();
    return left < right;
}
template<typename T>
void
ClusterList<T>::print(bool includePoints) {
    // Sorts the clusters by size (ascending) and prints each of them,
    // optionally including their individual points.
    std::sort(clusters.begin(), clusters.end(), sortBySize < T >);
    typename Cluster<T>::list::iterator it;
    for (it = clusters.begin(); it != clusters.end(); ++it) {
        (*it)->print(includePoints);
    }
}
#pragma mark -
#pragma mark Clustering by Graph Theory
template<typename T>
T
ClusterList<T>::weight_function_tendency(typename Point<T>::ptr p,
                                         const WeightFunction<T> *weight_function,
                                         const typename Point<T>::list &neighbours,
                                         ArrayIndex<T> &index) {
    // Sums the differences between each neighbour's weight-function
    // response and the response at p itself (p is skipped if it appears
    // in its own neighbour list). A positive result means the
    // neighbourhood responds more strongly than p.
    T tendency = 0;
    if (neighbours.empty()) {
        return tendency;
    }
    const T own_response = weight_function->operator()(p);
    for (size_t i = 0; i < neighbours.size(); i++) {
        Point<T> *neighbour = neighbours.at(i);
        if (neighbour != p) {
            tendency += weight_function->operator()(neighbour) - own_response;
        }
    }
    return tendency;
}
template<typename T>
void
ClusterList<T>::aggregate_zeroshifts(FeatureSpace<T> *fs,
                                     const WeightFunction<T> *weight_function,
                                     ArrayIndex<T> &index,
                                     bool coalesceWithStrongestNeighbour,
                                     bool show_progress) {
    // Collects all points whose spatial mean-shift vector is zero and
    // merges adjacent zero-shift points into clusters (appended to
    // this->clusters). These clusters serve as seeds for the subsequent
    // graph-based aggregation.
    //
    // @param fs              feature space whose points are examined.
    // @param weight_function only used by the optional
    //                        ADD_STRONGEST_NEIGHBOUR code path below.
    // @param index           array index used for neighbourhood lookups.
    // @param coalesceWithStrongestNeighbour not referenced in this body —
    //                        presumably kept for signature symmetry with
    //                        aggregate_cluster_graph; TODO confirm.
    // @param show_progress   when true, prints progress/timing info.
    using namespace utils::vectors;
    boost::progress_display *progress = NULL;
    // find the zero-shift points
    typedef set<typename Point<T>::ptr> pset_t;
    pset_t zeroshifts;
#if WITH_OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
    for (size_t i = 0; i < fs->points.size(); i++) {
        Point<T> *current_point = fs->points[i];
#if REPLACE_ZEROSHIFT_VECTORS
        //
        // #209
        //
        // replace the zero-shift vectors with the average of their neighbours
        typename Point<T>::list neighbours = find_neighbours(current_point->gridpoint, index);
        if (!neighbours.empty()) {
            vector<T> m(current_point->values.size(), 0.0);
            for (size_t ni = 0; ni < neighbours.size(); ni++) {
                M3DPoint<T> *n = (M3DPoint<T> *) neighbours.at(ni);
                if (n == current_point) continue;
                m += n->shift;
            }
            // average, rounded to grid
            m /= ((T) neighbours.size());
            current_point->shift = fs->coordinate_system->round_to_grid(m);
            vector<T> spatial_shift = fs->spatial_component(m);
            current_point->gridded_shift = fs->coordinate_system->rounded_gridpoint(spatial_shift);
        }
#endif
        // If the vector is (still) zero, add to the list of zeroshift points
        // (set insertion is serialized via the critical section below when
        // the loop runs under OpenMP).
        if (vector_norm(fs->spatial_component(current_point->shift)) == 0) {
#if WITH_OPENMP
#pragma omp critical
#endif
            zeroshifts.insert(current_point);
        }
    }
    if (show_progress) {
        cout << endl << "Clustering zero-shift areas ...";
        progress = new boost::progress_display(zeroshifts.size());
        start_timer();
    }
    // Merge neighbouring zero-shift points. The four branches below cover
    // every combination of "already has a cluster" for the current point
    // and its neighbour: create / join / absorb / merge.
    for (typename pset_t::iterator pi = zeroshifts.begin(); pi != zeroshifts.end(); pi++) {
        if (show_progress) {
            progress->operator++();
        }
        Point<T> *current_point = *pi;
        typename Point<T>::list neighbours = index.find_neighbours(current_point->gridpoint);
        for (size_t ni = 0; ni < neighbours.size(); ni++) {
            Point<T> *n = neighbours.at(ni);
            if (n == current_point) continue;
            // only consider neighbours that are zero-shift themselves
            if (vector_norm(fs->spatial_component(n->shift)) == 0) {
                if (current_point->cluster == NULL && n->cluster == NULL) {
                    // Neither current point nor neighbour have cluster
                    // => create new cluster
                    size_t spatial_dims = fs->coordinate_system->rank();
                    typename Cluster<T>::ptr c = new Cluster<T>(current_point->values, spatial_dims);
                    c->add_point(current_point);
                    c->add_point(n);
                    clusters.push_back(c);
                } else if (current_point->cluster == NULL && n->cluster != NULL) {
                    // neighbour has cluster
                    // => add current point to neighbour's cluster
                    n->cluster->add_point(current_point);
                } else if (current_point->cluster != NULL && n->cluster == NULL) {
                    // current point has cluster
                    // => add neighbour to current point's cluster
                    current_point->cluster->add_point(n);
                } else if ((current_point->cluster != NULL && n->cluster != NULL)
                           && (current_point->cluster != n->cluster)) {
                    // current point's cluster and neighbour's cluster
                    // => merge current point's cluster into neighbour's cluster
                    typename Cluster<T>::ptr c = current_point->cluster;
                    n->cluster->add_points(c->get_points(), false);
                    clusters.erase(find(clusters.begin(), clusters.end(), c));
                    delete c;
                }
            }
        }
    }
#if ADD_STRONGEST_NEIGHBOUR
    // Find neighbours that are not part of the clusters yet
    // and have a stronger weight function response. Assign those
    // to the zero-shift cluster as 'crystallization' points
    // TODO: if it works, incorporate into above loop to save time
    for (size_t i = 0; i < clusters.size(); i++) {
        typename Cluster<T>::ptr c = clusters.at(i);
        typename Point<T>::list::iterator pi;
        T strongest_response = numeric_limits<T>::min();
        T strongest_own_response = numeric_limits<T>::min();
        typename Point<T>::ptr strongest_point = NULL;
        for (pi = c->points.begin(); pi != c->points.end(); pi++) {
            typename Point<T>::ptr p = *pi;
            // track own response
            T own_response = weight_function->operator()(p);
            if (own_response > strongest_own_response) {
                strongest_own_response = own_response;
            }
            // Find the neighbour with the strongest response
            typename Point<T>::list neighbours = find_neighbours(p->gridpoint, index);
            for (size_t ni = 0; ni < neighbours.size(); ni++) {
                M3DPoint<T> *n = (M3DPoint<T> *) neighbours.at(ni);
                T response = weight_function->operator()(n);
                if (response > strongest_response) {
                    strongest_response = response;
                    strongest_point = n;
                }
            }
        }
        if (strongest_response > strongest_own_response && strongest_point != NULL) {
            // found a higher point in the vicinity
            c->add_point(strongest_point);
        }
    }
#endif
    // Assign ID
    if (show_progress) {
        cout << "done (found " << clusters.size() << " zero-shift clusters in " << stop_timer() << "s)." << endl;
        delete progress;
    }
}
template<typename T>
void
ClusterList<T>::aggregate_cluster_graph(FeatureSpace<T> *fs,
const WeightFunction<T> *weight_function,
bool coalesceWithStrongestNeighbour,
bool show_progress) {
using namespace utils::vectors;
// PointIndex<T>::write_index_searches = true;
boost::progress_display *progress = NULL;
#if DEBUG_GRAPH_AGGREGATION
for (size_t i = 0; i < fs->points.size(); i++) {
Point<T> *p = fs->points[i];
if (p->coordinate.size() != p->gridpoint.size() || p->gridpoint.size() > 5) {
cerr << "ERROR: bogus point " << p << endl;
}
}
#endif
vector<size_t> dimensions = fs->coordinate_system->get_dimension_sizes();
ArrayIndex<T> index(dimensions, fs->points, false);
#if DEBUG_GRAPH_AGGREGATION
for (size_t i = 0; i < fs->points.size(); i++) {
Point<T> *p = fs->points[i];
if (p->coordinate.size() != p->gridpoint.size()) {
cerr << "ERROR: bogus point " << p << endl;
}
}
#endif
this->aggregate_zeroshifts(fs, weight_function, index, coalesceWithStrongestNeighbour, show_progress);
#if DEBUG_GRAPH_AGGREGATION
for (size_t i = 0; i < fs->points.size(); i++) {
Point<T> *p = fs->points[i];
if (p->coordinate.size() != p->gridpoint.size()) {
cerr << "ERROR: bogus point " << p << endl;
}
}
#endif
#if WRITE_ZEROSHIFT_CLUSTERS
typename Cluster<T>::list::iterator ci;
size_t id = 0;
for (ci = clusters.begin(); ci != clusters.end(); ci++) {
typename Cluster<T>::ptr c = *ci;
c->id = id++;
}
NetCDFDataStore<T> *ds = (NetCDFDataStore<T> *) fs->data_store();
boost::filesystem::path path(ds->filename());
std::string basename = path.stem().generic_string() + "-zeroshift";
VisitUtils<T>::write_clusters_vtu(this, fs->coordinate_system, basename);
#endif
// Sanity checking
// this->check_clusters(fs,index);
size_t cluster_id = this->clusters.size();
if (show_progress) {
cout << endl << "Analysing meanshift vector graph ...";
start_timer();
progress = new boost::progress_display(fs->points.size());
}
// #if WITH_OPENMP
// #pragma omp parallel for schedule(dynamic)
// #endif
for (size_t i = 0; i < fs->points.size(); i++) {
if (show_progress) {
// #if WITH_OPENMP
// #pragma omp critical
// #endif
progress->operator++();
}
Point<T> *current_point = fs->points[i];
// skip zeroshift and non-original points
if (vector_norm(fs->spatial_component(current_point->shift)) == 0 || !current_point->isOriginalPoint)
continue;
// Find the predecessor through gridded shift
vector<int> gridpoint = current_point->gridpoint + current_point->gridded_shift;
Point<T> *predecessor = NULL;
try {
predecessor = index.get(gridpoint);
} catch (std::invalid_argument &e) {
#if DEBUG_GRAPH_AGGREGATION
cout << "gridpoint=" << current_point->gridpoint
<< " + gridded_shift = " << current_point->gridded_shift
<< " = " << gridpoint
<< " which caused exception " << e.what() << endl;
#endif
}
// Start testing
if (predecessor != NULL) {
// we're pointing to somebody?
current_point->isBoundary = true;
// whoever we're pointing to, he's
// not a boundary point.
predecessor->isBoundary = false;
#if DEBUG_GRAPH_AGGREGATION
cout << endl;
cout << "current point : " << current_point << " @ " << current_point->gridpoint << " (" << current_point->cluster << ")" << endl;
cout << "predecessor : " << predecessor << " @ " << predecessor->gridpoint << " (" << predecessor->cluster << ")" << endl;
// cout << "(reverse lookup of " << x << " = " << gp << ")" << endl;
#endif
if (current_point->cluster == NULL && predecessor->cluster == NULL) {
// Neither point has a cluster
// => create new one
// #if WITH_OPENMP
// #pragma omp critical
// #endif
{
typename Cluster<T>::ptr c = new Cluster<T>(current_point->values,
fs->coordinate_system->rank());
c->id = cluster_id++;
c->add_point(current_point);
c->add_point(predecessor);
clusters.push_back(c);
#if DEBUG_GRAPH_AGGREGATION
cout << "created new cluster " << c
<< " (" << c->size()
<< " points)" << endl;
#endif
}
} else if (current_point->cluster == NULL && predecessor->cluster != NULL) {
// current point has no cluster, but predecessor has one
// => add current point to predecessor's cluster
// #if WITH_OPENMP
// #pragma omp critical
// #endif
predecessor->cluster->add_point(current_point);
#if DEBUG_GRAPH_AGGREGATION
cout << "added current point to cluster "
<< predecessor->cluster << " ("
<< predecessor->cluster->size()
<< " points)" << endl;
#endif
} else if (current_point->cluster != NULL && predecessor->cluster == NULL) {
// current point has a cluster, but predecessor has none
// => add predecessor to current point's cluster
// #if WITH_OPENMP
// #pragma omp critical
// #endif
current_point->cluster->add_point(predecessor);
#if DEBUG_GRAPH_AGGREGATION
cout << "added predecessor to cluster "
<< current_point->cluster
<< " (" << current_point->cluster->size()
<< " points)" << endl;
#endif
} else if (current_point->cluster != predecessor->cluster) {
// both points have different clusters
// => merge current cluster's points to predecessor's cluster
// and delete current cluster
// Save a little time by merging the smaller into the bigger cluster
typename Cluster<T>::ptr merged;
typename Cluster<T>::ptr mergee;
if (current_point->cluster->size() >= predecessor->cluster->size()) {
merged = current_point->cluster;
mergee = predecessor->cluster;
} else {
merged = predecessor->cluster;
mergee = current_point->cluster;
}
#if DEBUG_GRAPH_AGGREGATION
cout << "merging cluster " << mergee << " (" << mergee->size() << " points)"
<< "into " << merged << " (" << merged->size() << " points)"
<< endl;
#endif
// #if WITH_OPENMP
// #pragma omp critical
// #endif
{
// absorb predecessor
merged->add_points(mergee->get_points(), false);
// remove it
typename Cluster<T>::list::iterator fi = find(clusters.begin(), clusters.end(), mergee);
clusters.erase(fi);
delete mergee;
}
} else {
// both points are already part of the same cluster
// => do nothing
#if DEBUG_GRAPH_AGGREGATION
cout << "Both points are part of the same cluster. Skip." << endl;
#endif
}
}
}
if (show_progress) {
cout << "done. (Found " << clusters.size() << " clusters in " << stop_timer() << "s)" << endl;
delete progress;
}
// TODO: parallelize
if (coalesceWithStrongestNeighbour) {
if (show_progress) {
cout << endl << "Running coalescence post-procesing ";
start_timer();
}
for (size_t i = 0; i < clusters.size(); i++) {
typename Cluster<T>::ptr c = clusters.at(i);
T strongest_response = numeric_limits<T>::min();
T strongest_own_response = numeric_limits<T>::min();
typename Cluster<T>::ptr strongest_cluster = NULL;
typename Point<T>::list::iterator pi;
for (pi = c->get_points().begin(); pi != c->get_points().end(); pi++) {
typename Point<T>::ptr p = *pi;
// track own response
T own_response = weight_function->operator()(p);
if (own_response > strongest_own_response) {
strongest_own_response = own_response;
}
// Find the neighbour with the strongest response
typename Point<T>::list neighbours = index.find_neighbours(p->gridpoint);
for (size_t ni = 0; ni < neighbours.size(); ni++) {
Point<T> *n = neighbours.at(ni);
// only interested in different clusters here
if (n->cluster == c) {
continue;
}
// figure out the response
T response = weight_function->operator()(n);
if (response > strongest_response) {
strongest_response = response;
strongest_cluster = n->cluster;
}
}
}
if (strongest_response >= strongest_own_response && strongest_cluster != NULL) {
// found a higher ranking cluster in the direct
// vicinity. Merge!
c->add_points(strongest_cluster->get_points(), false);
typename Cluster<T>::list::iterator cfi = find(clusters.begin(), clusters.end(), strongest_cluster);
if (cfi != clusters.end()) {
clusters.erase(cfi);
delete strongest_cluster;
}
// start over!
// TODO: this could be done a little smarter, probably
// by remembering the clusters to be deleted and skip
// them in the above procedure, then remove them later
// in bulk?
cout << ".";
i = 0;
}
}
if (show_progress) {
cout << "done. (Coalesced " << clusters.size() << " clusters in " << stop_timer() << "s)" << endl;
}
}
// Finally remove all points from all clusters, that were
// not part of the original data set, as well as make their
// modes the arithmetic mean of the remaining points
if (show_progress) {
cout << endl << "Erasing non-original points ...";
start_timer();
progress = new boost::progress_display(clusters.size());
}
for (typename Cluster<T>::list::iterator clit = clusters.begin(); clit != clusters.end();) {
if (show_progress) {
progress->operator++();
}
typename Cluster<T>::ptr c = *clit;
vector<T> mode = vector<T>(fs->rank(), 0.0);
typename Point<T>::list keepers;
// Make pointers unique
typedef std::set<typename Point<T>::ptr> point_set_t;
point_set_t point_set;
point_set.insert(c->get_points().begin(), c->get_points().end());
// Iterate over the unique set
for (typename point_set_t::iterator si = point_set.begin(); si != point_set.end(); ++si) {
typename Point<T>::ptr p = *si;
if (p->isOriginalPoint) {
keepers.push_back(p);
mode += p->values;
}
}
if (keepers.empty()) {
// removed them all? Kill cluster
clusters.erase(clit);
delete c;
} else {
mode /= ((T) keepers.size());
c->set_points(keepers);
c->mode = mode;
clit++;
}
}
if (show_progress) {
cout << "done. (Result: " << clusters.size() << " clusters in " << stop_timer() << "s)" << endl;
delete progress;
}
// PointIndex<T>::write_index_searches = false;
}
template<typename T>
typename Cluster<T>::list
ClusterList<T>::neighbours_of(typename Cluster<T>::ptr cluster,
                              ArrayIndex<T> &index) {
    // Collects the list of clusters that share a grid boundary with
    // the given cluster, i.e. clusters owning at least one direct
    // grid neighbour of one of this cluster's points.
    typename Cluster<T>::list neighbouring_clusters;
    typename Point<T>::list::const_iterator pi;
    for (pi = cluster->points.begin(); pi != cluster->points.end(); pi++) {
        typename Point<T>::ptr p = *pi;
        typename Point<T>::list neighbours = this->find_neighbours(index, p->gridpoint);
        typename Point<T>::list::const_iterator ni;
        // BUGFIX: 'neighbours' is a value, not a pointer; the original
        // dereferenced it with '->', which cannot compile on instantiation.
        for (ni = neighbours.begin(); ni != neighbours.end(); ni++) {
            Point<T> *n = *ni;
            // Exclude points, that have not been clustered.
            // This can happen because scale-space filtering
            // creates new points, but those are not associated
            // with clusters in later steps
            if (n->cluster == NULL) continue;
            if (n->cluster != p->cluster) {
                // Only add each neighbouring cluster once.
                typename Cluster<T>::list::const_iterator fi = find(neighbouring_clusters.begin(),
                        neighbouring_clusters.end(), n->cluster);
                if (fi == neighbouring_clusters.end()) {
                    neighbouring_clusters.push_back(n->cluster);
                }
            }
        }
    }
    return neighbouring_clusters;
}
template<typename T>
typename Point<T>::list
ClusterList<T>::get_boundary_points(typename Cluster<T>::ptr c1,
                                    typename Cluster<T>::ptr c2,
                                    ArrayIndex<T> &index) {
    // Collects the points along the common boundary of c1 and c2:
    // every point of one cluster that has a direct grid neighbour
    // belonging to the other cluster, together with that neighbour.
    // Each point is added at most once.
    typename Point<T>::list boundary_points;
    typename Point<T>::list::const_iterator pi;
    // Scan from c1's side.
    for (pi = c1->points.begin(); pi != c1->points.end(); pi++) {
        typename Point<T>::ptr p = *pi;
        // BUGFIX: 'neighbours' is a value, not a pointer; the original
        // dereferenced it with '->', which cannot compile on instantiation.
        typename Point<T>::list neighbours = find_neighbours(index, p->gridpoint);
        typename Point<T>::list::const_iterator ni;
        for (ni = neighbours.begin(); ni != neighbours.end(); ni++) {
            typename Point<T>::ptr n = *ni;
            if (n->cluster == c2) {
                // check every time to avoid double adding
                typename Point<T>::list::const_iterator fi = find(boundary_points.begin(), boundary_points.end(),
                        n);
                if (fi == boundary_points.end()) {
                    boundary_points.push_back(n);
                }
                fi = find(boundary_points.begin(), boundary_points.end(), p);
                if (fi == boundary_points.end()) {
                    boundary_points.push_back(p);
                }
            }
        }
    }
    // Symmetric scan from c2's side.
    for (pi = c2->points.begin(); pi != c2->points.end(); pi++) {
        typename Point<T>::ptr p = *pi;
        typename Point<T>::list neighbours = find_neighbours(index, p->gridpoint);
        typename Point<T>::list::const_iterator ni;
        for (ni = neighbours.begin(); ni != neighbours.end(); ni++) {
            typename Point<T>::ptr n = *ni;
            if (n->cluster == c1) {
                // check every time to avoid double adding
                typename Point<T>::list::const_iterator fi = find(boundary_points.begin(), boundary_points.end(),
                        n);
                if (fi == boundary_points.end()) {
                    boundary_points.push_back(n);
                }
                fi = find(boundary_points.begin(), boundary_points.end(), p);
                if (fi == boundary_points.end()) {
                    boundary_points.push_back(p);
                }
            }
        }
    }
    // BUGFIX: the original fell off the end of a non-void function
    // (undefined behavior); return the collected boundary points.
    return boundary_points;
}
template<typename T>
void
ClusterList<T>::write_boundaries(const WeightFunction<T> *weight_function,
                                 FeatureSpace<T> *fs,
                                 PointIndex<T> *index,
                                 const vector<T> &resolution) {
    // For each pair of neighbouring clusters, collects the boundary
    // points and writes per-pair correlation statistics to
    // "<filename>_boundary_correlations.txt" (and, with VTK enabled,
    // one VTK point list file per boundary).
    typedef vector<typename Point<T>::list> boundaries_t;
    boundaries_t boundaries;
    // Each cluster pair is identified by the key "<lower id>-<higher id>"
    // so every unordered pair is processed exactly once.
    typedef vector<std::string> boundary_key_t;
    boundary_key_t boundary_keys;
    vector<T> var_c1, var_c2, var_boundary;
    vector<T> range_factor_c1, range_factor_c2;
    vector<typename Cluster<T>::id_t> cluster_index_1, cluster_index_2;
    typename Cluster<T>::list::const_iterator ci;
    for (ci = clusters.begin(); ci != clusters.end(); ci++) {
        typename Cluster<T>::ptr c = *ci;
        typename Cluster<T>::list neighbours = neighbours_of(c, index, resolution, weight_function);
        if (neighbours.size() > 0) {
            // go over the list of neighbours and find candidates for merging
            typename Cluster<T>::list::const_iterator ni;
            for (ni = neighbours.begin(); ni != neighbours.end(); ni++) {
                typename Cluster<T>::ptr n = *ni;
                std::string key = boost::lexical_cast<string>(inf(c->id, n->id)) + "-" +
                                  boost::lexical_cast<string>(sup(c->id, n->id));
                typename boundary_key_t::const_iterator fi = find(boundary_keys.begin(), boundary_keys.end(), key);
                if (fi == boundary_keys.end()) {
                    boundary_keys.push_back(key);
                    typename Point<T>::list boundary_points;
                    this->get_boundary_points(c, n, boundary_points, index, resolution);
                    // Pairs without common boundary points carry no information.
                    if (boundary_points.size() == 0) continue;
                    boundaries.push_back(boundary_points);
                    var_boundary.push_back(relative_variability(weight_function, boundary_points));
                    var_c1.push_back(relative_variability(weight_function, c->points));
                    var_c2.push_back(relative_variability(weight_function, n->points));
                    range_factor_c1.push_back(dynamic_range_factor(c, boundary_points, weight_function));
                    range_factor_c2.push_back(dynamic_range_factor(n, boundary_points, weight_function));
                    cluster_index_1.push_back(c->id);
                    cluster_index_2.push_back(n->id);
                }
            }
        }
    }
#if WITH_VTK
    // BUGFIX: the loop variable was named 'index', shadowing the
    // PointIndex<T>* parameter of the same name. Renamed to 'bi'.
    for (size_t bi = 0; bi < boundaries.size(); bi++) {
        typename Point<T>::list b = boundaries[bi];
        std::string fn = fs->filename() + "_boundary_" + boost::lexical_cast<string>(bi) + ".vtk";
        boost::replace_all(fn, "/", "_");
        boost::replace_all(fn, "..", "");
        VisitUtils<T>::write_pointlist_vtk(fn, &b, fs->coordinate_system->rank());
    }
#endif
    std::string fn = fs->filename() + "_boundary_correlations.txt";
    std::ofstream f(fn.c_str());
    f << "#\t"
      << "c1\t"
      << "c2\t"
      // << "var_c1\t"
      // << "var_c2\t"
      // << "var_b\t"
      << "drf_1\t"
      << "drf_2\t"
      << std::endl;
    // BUGFIX: same parameter shadowing as above; renamed to 'bi'.
    for (size_t bi = 0; bi < boundaries.size(); bi++) {
        f << bi << "\t"
          << cluster_index_1[bi] << "\t"
          << cluster_index_2[bi] << "\t"
          // << var_c1[bi] << "\t"
          // << var_c2[bi] << "\t"
          // << var_boundary[bi] << "\t"
          << range_factor_c1[bi] << "\t"
          << range_factor_c2[bi] << std::endl;
    }
}
template<typename T>
typename Cluster<T>::ptr
ClusterList<T>::merge_clusters(typename Cluster<T>::ptr c1, typename Cluster<T>::ptr c2) {
    // Combines two clusters into a freshly allocated one. The new mode
    // is the midpoint between the parents' modes, the point sets are
    // concatenated, and the identity of the first parent is kept.
    // Caller owns the returned cluster.
    vector<T> midpoint_mode = (T) 0.5 * (c1->mode + c2->mode);
    typename Cluster<T>::ptr result = new Cluster<T>(midpoint_mode, this->dimensions.size());
    result->add_points(c1->points);
    result->add_points(c2->points);
    result->id = c1->id;
    // Carry the weight range over only when both parents computed it.
    bool both_have_range = c1->m_weight_range_calculated && c2->m_weight_range_calculated;
    if (both_have_range) {
        result->m_min_weight = inf(c1->m_min_weight, c2->m_min_weight);
        result->m_max_weight = sup(c1->m_max_weight, c2->m_max_weight);
        result->m_weight_range_calculated = true;
    }
    return result;
}
template<typename T>
void
ClusterList<T>::erase_identifiers() {
    // Resets every cluster's identifier back to the 'unset' marker.
    typename Cluster<T>::list::iterator ci;
    for (ci = clusters.begin(); ci != clusters.end(); ++ci) {
        (*ci)->id = m3D::NO_ID;
    }
}
template<typename T>
struct clear_cluster
{
void operator()(void *p) {
static_cast<typename Point<T>::ptr> (p)->cluster = NULL;
};
};
template<typename T>
void
ClusterList<T>::reset_clustering(FeatureSpace<T> *fs) {
    // Detaches every point of the feature space from its cluster,
    // leaving the feature space ready for a fresh clustering run.
    clear_cluster<T> detach;
    for_each(fs->points.begin(), fs->points.end(), detach);
}
template<typename T>
void
ClusterList<T>::sanity_check(const FeatureSpace<T> *fs) {
    // Every point of the feature space must be accounted for, so the
    // cluster sizes have to add up to the feature space's point count.
    size_t total = 0;
    typename Cluster<T>::list::const_iterator ci;
    for (ci = clusters.begin(); ci != clusters.end(); ++ci) {
        total += (*ci)->points.size();
    }
    assert(total == fs->size());
}
#pragma mark -
#pragma mark Coalescence Merging
template<typename T>
bool
ClusterList<T>::are_neighbours(const Cluster<T> *c1,
                               const Cluster<T> *c2,
                               ArrayIndex<T> &index) {
    // Two clusters are neighbours if any point of c1 has a direct
    // grid neighbour that belongs to c2.
    typename Point<T>::list::const_iterator pi;
    for (pi = c1->points.begin(); pi != c1->points.end(); pi++) {
        typename Point<T>::ptr p = *pi;
        // BUGFIX: the original called find_neighbours(c1, index), which
        // ignores the current point entirely; every sibling method looks
        // up the neighbours of the point's grid position. Also fixed the
        // '->' dereference of the value-typed result list.
        typename Point<T>::list neighbours = this->find_neighbours(index, p->gridpoint);
        typename Point<T>::list::const_iterator ni;
        for (ni = neighbours.begin(); ni != neighbours.end(); ni++) {
            Point<T> *n = *ni;
            if (n->cluster == c2) {
                // BUGFIX: the original 'break' only left the inner loop,
                // so the outer scan kept running after the answer was
                // known. Return as soon as a neighbour is found.
                return true;
            }
        }
    }
    return false;
}
// Sort all clusters in ascending order by weight response
template<typename T>
class ModalWeightComparator
{
private:
const WeightFunction<T> *m_weight;
public:
ModalWeightComparator(const WeightFunction<T> *w) {
m_weight = w;
}
bool
operator()(const Cluster<T> *c1, const Cluster<T> *c2) {
T w1 = c1->modal_weight_response(m_weight);
T w2 = c2->modal_weight_response(m_weight);
return w1 < w2;
}
};
} //namespace
#endif
|
omp_bug6.c | /******************************************************************************
* FILE: omp_bug6.c
* DESCRIPTION:
* Fails compilation in most cases.
* Compare to omp_orphan.c.
* AUTHOR: Blaise Barney 6/05
* LAST REVISED: 06/30/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100
float a[VECLEN], b[VECLEN];
/* Computes the dot product of the global vectors a and b.
 * NOTE(review): this is the LLNL "omp_bug6" teaching example and is
 * intentionally broken:
 *  - 'sum' is a local (hence thread-private) variable, yet the orphaned
 *    '#pragma omp for reduction(+:sum)' requires the reduction list item
 *    to be shared in the enclosing parallel region, so most compilers
 *    reject this translation unit ("fails compilation", per the header).
 *  - 'sum' is never initialized before being accumulated into.
 *  - the function is declared to return float but falls off the end
 *    without a return statement.
 * A proper fix must change both this function and main() together
 * (e.g. return the per-call sum and reduce over calls in main). */
float dotprod ()
{
int i,tid;
float sum; /* uninitialized and private to each calling thread */
tid = omp_get_thread_num();
#pragma omp for reduction(+:sum)
for (i=0; i < VECLEN; i++)
{
sum = sum + (a[i]*b[i]);
printf(" tid= %d i=%d\n",tid,i);
}
}
/* Driver: fills the global vectors, then calls dotprod() from inside a
 * parallel region and prints 'sum'.
 * NOTE(review): this 'sum' is a different variable from the local 'sum'
 * reduced inside dotprod(), so even if the file compiled, the value
 * printed here would remain 0.0 — part of the intentional bug this
 * example demonstrates. */
int main (int argc, char *argv[]) {
int i;
float sum;
for (i=0; i < VECLEN; i++)
a[i] = b[i] = 1.0 * i;
sum = 0.0;
#pragma omp parallel shared(sum)
dotprod();
printf("Sum = %f\n",sum);
}
|
pdgetrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzgetrf.c, normal z -> d, Fri Sep 28 17:38:11 2018
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) (double*)plasma_tile_addr(A, m, n)
/******************************************************************************/
/* Parallel tile LU factorization with partial pivoting (double precision),
 * generated from the complex-double template pzgetrf.c.
 *
 * For each diagonal tile k: (1) factor the panel (column of tiles k..mt-1)
 * with a multithreaded getrf using a software barrier, (2) apply the row
 * swaps and triangular solve to the trailing columns and update them with
 * GEMMs, and finally (3) apply all pivots to the part of the matrix left
 * of each panel. All phases are expressed as OpenMP tasks whose ordering
 * is enforced purely through depend() clauses; several "dummy" tasks exist
 * only to stitch the dependency DAG together.
 *
 * @param A        descriptor of the tiled matrix to factor (in/out)
 * @param ipiv     pivot index vector, 1-based global row numbers (out)
 * @param sequence sequence the tasks belong to; checked for prior failure
 * @param request  request handle used to report errors asynchronously
 */
void plasma_pdgetrf(plasma_desc_t A, int *ipiv,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
// Read parameters from the context.
plasma_context_t *plasma = plasma_context_self();
// Set tiling parameters.
int ib = plasma->ib;
// Number of diagonal tiles = number of panels to factor.
int minmtnt = imin(A.mt, A.nt);
for (int k = 0; k < minmtnt; k++) {
// a00 = diagonal tile of the panel, a20 = last tile of the panel
// column; their array sections below stand in for the whole panel
// in the dependency clauses.
double *a00, *a20;
a00 = A(k, k);
a20 = A(A.mt-1, k);
// Create fake dependencies of the whole panel on its individual tiles.
// These tasks are inserted to generate a correct DAG rather than
// doing any useful work.
for (int m = k+1; m < A.mt-1; m++) {
double *amk = A(m, k);
#pragma omp task depend (in:amk[0]) \
depend (inout:a00[0]) \
priority(1)
{
// Do some funny work here. It appears so that the compiler
// might not insert the task if it is completely empty.
int l = 1;
l++;
}
}
// Extents used to describe the panel's array sections and leading
// dimensions (mvak/nvak are the "view" sizes of the possibly
// short/narrow last tile row/column).
int ma00k = (A.mt-k-1)*A.mb;
int na00k = plasma_tile_nmain(A, k);
int lda20 = plasma_tile_mmain(A, A.mt-1);
int nvak = plasma_tile_nview(A, k);
int mvak = plasma_tile_mview(A, k);
int ldak = plasma_tile_mmain(A, k);
// Cap the number of panel workers by the remaining panel count.
int num_panel_threads = imin(plasma->max_panel_threads,
minmtnt-k);
// panel
#pragma omp task depend(inout:a00[0:ma00k*na00k]) \
depend(inout:a20[0:lda20*nvak]) \
depend(out:ipiv[k*A.mb:mvak]) \
priority(1)
{
// Scratch space for the per-thread pivot search; 'volatile'
// because the workers communicate through it under a barrier.
// NOTE(review): after plasma_request_fail() on a failed malloc,
// execution continues and may dereference NULL — confirm whether
// the request-fail path is expected to be fatal upstream.
volatile int *max_idx = (int*)malloc(num_panel_threads*sizeof(int));
if (max_idx == NULL)
plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);
volatile double *max_val =
(double*)malloc(num_panel_threads*sizeof(
double));
if (max_val == NULL)
plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);
volatile int info = 0;
plasma_barrier_t barrier;
plasma_barrier_init(&barrier);
if (sequence->status == PlasmaSuccess) {
// If nesting would not be expensive on architectures such as
// KNL, this would resolve the issue with deadlocks caused by
// tasks expected to run are in fact not launched.
//#pragma omp parallel for shared(barrier)
// schedule(dynamic,1)
// num_threads(num_panel_threads)
#pragma omp taskloop untied shared(barrier) \
num_tasks(num_panel_threads) \
priority(2)
for (int rank = 0; rank < num_panel_threads; rank++) {
{
// Each rank factors its share of the panel view,
// synchronizing with the others via 'barrier'.
plasma_desc_t view =
plasma_desc_view(A,
k*A.mb, k*A.nb,
A.m-k*A.mb, nvak);
plasma_core_dgetrf(view, &ipiv[k*A.mb], ib,
rank, num_panel_threads,
max_idx, max_val, &info,
&barrier);
if (info != 0)
plasma_request_fail(sequence, request, k*A.mb+info);
}
}
}
#pragma omp taskwait
free((void*)max_idx);
free((void*)max_val);
// Convert the panel-local pivot indices to global row numbers.
for (int i = k*A.mb+1; i <= imin(A.m, k*A.mb+nvak); i++)
ipiv[i-1] += k*A.mb;
}
// update
for (int n = k+1; n < A.nt; n++) {
// a01/a11/a21 stand in for the trailing column n in the
// dependency clauses (top tile, interior block, last tile).
double *a01, *a11, *a21;
a01 = A(k, n);
a11 = A(k+1, n);
a21 = A(A.mt-1, n);
int ma11k = (A.mt-k-2)*A.mb;
int na11n = plasma_tile_nmain(A, n);
int lda21 = plasma_tile_mmain(A, A.mt-1);
int nvan = plasma_tile_nview(A, n);
// The column closest to the panel (n == k+1) is prioritized so
// the next panel factorization can start as early as possible.
#pragma omp task depend(in:a00[0:ma00k*na00k]) \
depend(in:a20[0:lda20*nvak]) \
depend(in:ipiv[k*A.mb:mvak]) \
depend(inout:a01[0:ldak*nvan]) \
depend(inout:a11[0:ma11k*na11n]) \
depend(inout:a21[0:lda21*nvan]) \
priority(n == k+1)
{
if (sequence->status == PlasmaSuccess) {
// geswp: apply the panel's row swaps to column n.
int k1 = k*A.mb+1;
int k2 = imin(k*A.mb+A.mb, A.m);
plasma_desc_t view =
plasma_desc_view(A, 0, n*A.nb, A.m, nvan);
plasma_core_dgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);
// trsm: solve the unit-lower triangular system on the top tile.
plasma_core_dtrsm(PlasmaLeft, PlasmaLower,
PlasmaNoTrans, PlasmaUnit,
mvak, nvan,
1.0, A(k, k), ldak,
A(k, n), ldak);
// gemm: rank-nb update of the tiles below the top tile.
for (int m = k+1; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
#pragma omp task priority(n == k+1)
{
plasma_core_dgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvam, nvan, A.nb,
-1.0, A(m, k), ldam,
A(k, n), ldak,
1.0, A(m, n), ldam);
}
}
}
// Wait for the nested GEMM tasks before the column task completes.
#pragma omp taskwait
}
}
}
// Multidependency of the whole ipiv on the individual chunks
// corresponding to tiles.
for (int m = 0; m < minmtnt; m++) {
// insert dummy task
#pragma omp task depend (in:ipiv[m*A.mb]) \
depend (inout:ipiv[0])
{
int l = 1;
l++;
}
}
// pivoting to the left
for (int k = 0; k < minmtnt-1; k++) {
double *a10, *a20;
a10 = A(k+1, k);
a20 = A(A.mt-1, k);
int ma10k = (A.mt-k-2)*A.mb;
int na00k = plasma_tile_nmain(A, k);
int lda20 = plasma_tile_mmain(A, A.mt-1);
int nvak = plasma_tile_nview(A, k);
// Apply all later pivots to the already-factored panel column k.
#pragma omp task depend(in:ipiv[0:imin(A.m,A.n)]) \
depend(inout:a10[0:ma10k*na00k]) \
depend(inout:a20[0:lda20*nvak])
{
if (sequence->status == PlasmaSuccess) {
plasma_desc_t view =
plasma_desc_view(A, 0, k*A.nb, A.m, A.nb);
int k1 = (k+1)*A.mb+1;
int k2 = imin(A.m, A.n);
plasma_core_dgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);
}
}
// Multidependency of individual tiles on the whole panel.
for (int m = k+2; m < A.mt-1; m++) {
double *amk = A(m, k);
#pragma omp task depend (in:a10[0]) \
depend (inout:amk[0])
{
// Do some funny work here. It appears so that the compiler
// might not insert the task if it is completely empty.
int l = 1;
l++;
}
}
}
}
|
Analyzer.h | #ifndef ANALYZER_H
#define ANALYZER_H
/*************************************************************
* Copyright: (C) 2012 by Markus Schordan *
* Author : Markus Schordan *
* License : see file LICENSE in the CodeThorn distribution *
*************************************************************/
#include <iostream>
#include <fstream>
#include <set>
#include <string>
#include <sstream>
#include <list>
#include <vector>
#include <omp.h>
#include <boost/unordered_set.hpp>
#include <boost/unordered_map.hpp>
#include "Timer.h"
#include "AstTerm.h"
#include "Labeler.h"
#include "CFAnalysis.h"
#include "RoseAst.h"
#include "SgNodeHelper.h"
#include "ExprAnalyzer.h"
#include "StateRepresentations.h"
#include "TransitionGraph.h"
#include "PropertyValueTable.h"
#include "CTIOLabeler.h"
#include "VariableValueMonitor.h"
// we use INT_MIN, INT_MAX
#include "limits.h"
#include "AstNodeInfo.h"
namespace CodeThorn {
/*!
* \author Markus Schordan
* \date 2012.
*/
typedef std::list<const EState*> EStateWorkList;
typedef std::pair<int, const EState*> FailedAssertion;
typedef std::pair<PState, std::list<int> > PStatePlusIOHistory;
enum AnalyzerMode { AM_ALL_STATES, AM_LTL_STATES };
/*!
* \author Markus Schordan
* \date 2012.
*/
class Analyzer {
friend class Visualizer;
friend class VariableValueMonitor;
public:
Analyzer();
~Analyzer();
protected:
static Sawyer::Message::Facility logger;
public:
static void initDiagnostics();
static std::string nodeToString(SgNode* node);
void initAstNodeInfo(SgNode* node);
bool isInExplicitStateMode();
bool isActiveGlobalTopify();
void initializeSolver1(std::string functionToStartAt,SgNode* root, bool oneFunctionOnly);
void initializeTraceSolver(std::string functionToStartAt,SgNode* root);
void continueAnalysisFrom(EState* newStartEState);
PState analyzeAssignRhs(PState currentPState,VariableId lhsVar, SgNode* rhs,ConstraintSet& cset);
EState analyzeVariableDeclaration(SgVariableDeclaration* nextNodeToAnalyze1,EState currentEState, Label targetLabel);
void addToWorkList(const EState* estate);
const EState* addToWorkListIfNew(EState estate);
const EState* takeFromWorkList();
bool isInWorkList(const EState* estate);
bool isEmptyWorkList();
const EState* topWorkList();
const EState* popWorkList();
void swapWorkLists();
size_t memorySizeContentEStateWorkLists();
void recordTransition(const EState* sourceEState, Edge e, const EState* targetEState);
void printStatusMessage(bool);
bool isStartLabel(Label label);
// determines whether lab is a function call label of a function
// call of the form 'x=f(...)' and returns the varible-id of the
// lhs, if a valid pointer is provided
bool isFunctionCallWithAssignment(Label lab,VariableId* varId=0);
std::list<EState> transferEdgeEState(Edge edge, const EState* estate);
std::list<EState> transferFunctionCall(Edge edge, const EState* estate);
std::list<EState> transferFunctionCallLocalEdge(Edge edge, const EState* estate);
std::list<EState> transferFunctionCallExternal(Edge edge, const EState* estate);
std::list<EState> transferFunctionCallReturn(Edge edge, const EState* estate);
std::list<EState> transferFunctionExit(Edge edge, const EState* estate);
std::list<EState> transferReturnStmt(Edge edge, const EState* estate);
std::list<EState> transferVariableDeclaration(SgVariableDeclaration* decl,Edge edge, const EState* estate);
std::list<EState> transferExprStmt(SgNode* nextNodeToAnalyze1, Edge edge, const EState* estate);
std::list<EState> transferIdentity(Edge edge, const EState* estate);
std::list<EState> transferAssignOp(SgAssignOp* assignOp, Edge edge, const EState* estate);
std::list<EState> transferIncDecOp(SgNode* nextNodeToAnalyze2, Edge edge, const EState* estate);
std::list<EState> transferTrueFalseEdge(SgNode* nextNodeToAnalyze2, Edge edge, const EState* estate);
SgNode* getCond(SgNode* node);
void generateAstNodeInfo(SgNode* node);
bool checkEStateSet();
bool isConsistentEStatePtrSet(std::set<const EState*> estatePtrSet);
bool checkTransitionGraph();
//! requires init
void runSolver4();
void runSolver5();
void runSolver8();
void runSolver9();
void runSolver10();
void runSolver11();
void runSolver12();
void runSolver();
//! The analyzer requires a CFAnalysis to obtain the ICFG.
void setCFAnalyzer(CFAnalysis* cf) { cfanalyzer=cf; }
CFAnalysis* getCFAnalyzer() const { return cfanalyzer; }
//void initializeVariableIdMapping(SgProject* project) { variableIdMapping.computeVariableSymbolMapping(project); }
// access functions for computed information
VariableIdMapping* getVariableIdMapping() { return &variableIdMapping; }
// Returns the labeler owned by the associated CFAnalysis, downcast to
// CTIOLabeler. Asserts (rather than returning NULL) if the installed
// labeler is not a CTIOLabeler, so callers may use the result directly.
CTIOLabeler* getLabeler() const {
CTIOLabeler* ioLabeler=dynamic_cast<CTIOLabeler*>(cfanalyzer->getLabeler());
ROSE_ASSERT(ioLabeler);
return ioLabeler;
}
Flow* getFlow() { return &flow; }
PStateSet* getPStateSet() { return &pstateSet; }
EStateSet* getEStateSet() { return &estateSet; }
TransitionGraph* getTransitionGraph() { return &transitionGraph; }
ConstraintSetMaintainer* getConstraintSetMaintainer() { return &constraintSetMaintainer; }
//private: TODO
Flow flow;
SgNode* startFunRoot;
CFAnalysis* cfanalyzer;
enum ExplorationMode { EXPL_DEPTH_FIRST, EXPL_BREADTH_FIRST, EXPL_LOOP_AWARE, EXPL_LOOP_AWARE_SYNC, EXPL_RANDOM_MODE1 };
void eventGlobalTopifyTurnedOn();
bool isIncompleteSTGReady();
bool isPrecise();
PropertyValueTable reachabilityResults;
int reachabilityAssertCode(const EState* currentEStatePtr);
void setExplorationMode(ExplorationMode em) { _explorationMode=em; }
ExplorationMode getExplorationMode() { return _explorationMode; }
void setSkipSelectedFunctionCalls(bool defer) {
_skipSelectedFunctionCalls=true;
exprAnalyzer.setSkipSelectedFunctionCalls(true);
}
void setSkipArrayAccesses(bool skip) {
exprAnalyzer.setSkipArrayAccesses(skip);
}
bool getSkipArrayAccesses() {
return exprAnalyzer.getSkipArrayAccesses();
}
ExprAnalyzer* getExprAnalyzer();
std::list<FailedAssertion> getFirstAssertionOccurences(){return _firstAssertionOccurences;}
// Bumps the iteration counter for the current analysis mode:
// precise runs count into _iterations, approximated (topified) runs
// into _approximated_iterations. The atomic pragmas make the
// increments safe when solvers call this from parallel threads.
void incIterations() {
if(isPrecise()) {
#pragma omp atomic
_iterations+=1;
} else {
#pragma omp atomic
_approximated_iterations+=1;
}
}
bool isLoopCondLabel(Label lab);
int getApproximatedIterations() { return _approximated_iterations; }
int getIterations() { return _iterations; }
string getVarNameByIdCode(int varIdCode) {return variableIdMapping.variableName(variableIdMapping.variableIdFromCode(varIdCode));};
void mapGlobalVarInsert(std::string name, int* addr);
//! compute the VariableIds of variable declarations
VariableIdMapping::VariableIdSet determineVariableIdsOfVariableDeclarations(set<SgVariableDeclaration*> decls);
//! compute the VariableIds of SgInitializedNamePtrList
VariableIdMapping::VariableIdSet determineVariableIdsOfSgInitializedNames(SgInitializedNamePtrList& namePtrList);
std::set<std::string> variableIdsToVariableNames(VariableIdMapping::VariableIdSet);
typedef std::list<SgVariableDeclaration*> VariableDeclarationList;
VariableDeclarationList computeUnusedGlobalVariableDeclarationList(SgProject* root);
VariableDeclarationList computeUsedGlobalVariableDeclarationList(SgProject* root);
bool isFailedAssertEState(const EState* estate);
bool isVerificationErrorEState(const EState* estate);
//! adds a specific code to the io-info of an estate which is checked by isFailedAsserEState and determines a failed-assert estate. Note that the actual assert (and its label) is associated with the previous estate (this information can therefore be obtained from a transition-edge in the transition graph).
EState createFailedAssertEState(const EState estate, Label target);
EState createVerificationErrorEState(const EState estate, Label target);
//! list of all asserts in a program
std::list<SgNode*> listOfAssertNodes(SgProject *root);
//! rers-specific error_x: assert(0) version
std::list<std::pair<SgLabelStatement*,SgNode*> > listOfLabeledAssertNodes(SgProject *root);
void initLabeledAssertNodes(SgProject* root) {
_assertNodes=listOfLabeledAssertNodes(root);
}
size_t getNumberOfErrorLabels();
// Returns the name of the C++ label statement associated with the given
// assert label, or the empty string if no entry in _assertNodes matches.
// NOTE(review): the loop keeps scanning after a match, so if several
// labeled asserts map to the same label the LAST match wins — confirm
// this is intended before relying on it.
std::string labelNameOfAssertLabel(Label lab) {
std::string labelName;
for(std::list<std::pair<SgLabelStatement*,SgNode*> >::iterator i=_assertNodes.begin();i!=_assertNodes.end();++i)
if(lab==getLabeler()->getLabel((*i).second))
labelName=SgNodeHelper::getLabelName((*i).first);
//assert(labelName.size()>0);
return labelName;
}
// True iff the given label has an associated (non-empty) assert label name.
bool isCppLabeledAssertLabel(Label lab) {
    return !labelNameOfAssertLabel(lab).empty();
}
InputOutput::OpType ioOp(const EState* estate) const;
void setDisplayDiff(int diff) { _displayDiff=diff; }
void setResourceLimitDiff(int diff) { _resourceLimitDiff=diff; }
void setSolver(int solver) { _solver=solver; }
int getSolver() { return _solver;}
void setSemanticFoldThreshold(int t) { _semanticFoldThreshold=t; }
void setNumberOfThreadsToUse(int n) { _numberOfThreadsToUse=n; }
int getNumberOfThreadsToUse() { return _numberOfThreadsToUse; }
std::list<std::pair<SgLabelStatement*,SgNode*> > _assertNodes;
VariableId globalVarIdByName(std::string varName) { return globalVarName2VarIdMapping[varName]; }
void setTreatStdErrLikeFailedAssert(bool x) { _treatStdErrLikeFailedAssert=x; }
boost::unordered_map <std::string,int*> mapGlobalVarAddress;
boost::unordered_map <int*,std::string> mapAddressGlobalVar;
void setCompoundIncVarsSet(set<VariableId> ciVars);
void setSmallActivityVarsSet(set<VariableId> ciVars);
void setAssertCondVarsSet(set<VariableId> acVars);
enum GlobalTopifyMode {GTM_IO, GTM_IOCF, GTM_IOCFPTR, GTM_COMPOUNDASSIGN, GTM_FLAGS};
void setGlobalTopifyMode(GlobalTopifyMode mode);
void setExternalErrorFunctionName(std::string externalErrorFunctionName);
// enables external function semantics
void enableExternalFunctionSemantics();
void disableExternalFunctionSemantics();
bool isUsingExternalFunctionSemantics() { return _externalFunctionSemantics; }
void setModeLTLDriven(bool ltlDriven) { transitionGraph.setModeLTLDriven(ltlDriven); }
bool getModeLTLDriven() { return transitionGraph.getModeLTLDriven(); }
long analysisRunTimeInSeconds();
void setVariableValueThreshold(int threshold) { variableValueMonitor.setThreshold(threshold); }
void set_finished(std::vector<bool>& v, bool val);
bool all_false(std::vector<bool>& v);
private:
GlobalTopifyMode _globalTopifyMode;
set<VariableId> _compoundIncVarsSet;
set<VariableId> _smallActivityVarsSet;
set<VariableId> _assertCondVarsSet;
set<int> _inputVarValues;
std::list<int> _inputSequence;
std::list<int>::iterator _inputSequenceIterator;
ExprAnalyzer exprAnalyzer;
VariableIdMapping variableIdMapping;
EStateWorkList* estateWorkListCurrent;
EStateWorkList* estateWorkListNext;
EStateWorkList estateWorkListOne;
EStateWorkList estateWorkListTwo;
EStateSet estateSet;
PStateSet pstateSet;
ConstraintSetMaintainer constraintSetMaintainer;
TransitionGraph transitionGraph;
TransitionGraph backupTransitionGraph;
set<const EState*> transitionSourceEStateSetOfLabel(Label lab);
int _displayDiff;
int _resourceLimitDiff;
int _numberOfThreadsToUse;
int _semanticFoldThreshold;
VariableIdMapping::VariableIdSet _variablesToIgnore;
int _solver;
AnalyzerMode _analyzerMode;
set<const EState*> _newNodesToFold;
long int _maxTransitions;
long int _maxIterations;
long int _maxBytes;
long int _maxSeconds;
long int _maxTransitionsForcedTop;
long int _maxIterationsForcedTop;
long int _maxBytesForcedTop;
long int _maxSecondsForcedTop;
PState _startPState;
std::list<EState> elistify();
std::list<EState> elistify(EState res);
// only used in LTL-driven mode
void setStartEState(const EState* estate);
/*! if state exists in stateSet, a pointer to the existing state is returned otherwise
a new state is entered into stateSet and a pointer to it is returned.
*/
const PState* processNew(PState& s);
const PState* processNewOrExisting(PState& s);
const EState* processNew(EState& s);
const EState* processNewOrExisting(EState& s);
const EState* processCompleteNewOrExisting(const EState* es);
void topifyVariable(PState& pstate, ConstraintSet& cset, VariableId varId);
bool isTopified(EState& s);
EStateSet::ProcessingResult process(EState& s);
EStateSet::ProcessingResult process(Label label, PState pstate, ConstraintSet cset, InputOutput io);
const ConstraintSet* processNewOrExisting(ConstraintSet& cset);
EState createEState(Label label, PState pstate, ConstraintSet cset);
EState createEState(Label label, PState pstate, ConstraintSet cset, InputOutput io);
VariableValueMonitor variableValueMonitor;
bool _treatStdErrLikeFailedAssert;
bool _skipSelectedFunctionCalls;
ExplorationMode _explorationMode;
bool _topifyModeActive;
bool _explicitArrays;
// loop-aware mode
int _swapWorkListsCount; // currently only used for debugging purposes
int _iterations;
int _approximated_iterations;
int _curr_iteration_cnt;
int _next_iteration_cnt;
bool _externalFunctionSemantics;
string _externalErrorFunctionName; // the call of this function causes termination of analysis
string _externalNonDetIntFunctionName;
string _externalNonDetLongFunctionName;
string _externalExitFunctionName;
Timer _analysisTimer;
// =======================================================================
// ========================== LTLAnalyzer ================================
// =======================================================================
public:
bool isTerminationRelevantLabel(Label label);
bool isLTLRelevantEState(const EState* estate);
bool isLTLRelevantLabel(Label label);
bool isStdIOLabel(Label label);
std::set<const EState*> nonLTLRelevantEStates();
std::string generateSpotSTG();
// reduces all states different to stdin and stdout.
void stdIOFoldingOfTransitionGraph();
void semanticFoldingOfTransitionGraph();
// bypasses and removes all states that are not standard I/O states
// (old version, works correctly, but has a long execution time)
void removeNonIOStates();
// bypasses and removes all states that are not stdIn/stdOut/stdErr/failedAssert states
void reduceToObservableBehavior();
// erases transitions that lead directly from one output state to another output state
void removeOutputOutputTransitions();
// erases transitions that lead directly from one input state to another input state
void removeInputInputTransitions();
// cuts off all paths in the transition graph that lead to leaves
// (recursively until only paths of infinite length remain)
void pruneLeavesRec();
// connects start, input, output and worklist states according to possible paths in the transition graph.
// removes all states and transitions that are not necessary for the graph that only consists of these new transitions. The two parameters allow to select input and/or output states to remain in the STG.
void reduceGraphInOutWorklistOnly(bool includeIn=true, bool includeOut=true, bool includeErr=false);
// extracts input sequences leading to each discovered failing assertion where discovered for the first time.
// stores results in PropertyValueTable "reachabilityResults".
// returns length of the longest of these sequences if it can be guaranteed that all processed traces are the
// shortest ones leading to the individual failing assertion (returns -1 otherwise).
int extractAssertionTraces();
// LTLAnalyzer
private:
//returns a list of transitions representing existing paths from "startState" to all possible input/output/error states (no output -> output)
// collection of transitions to worklist states currently disabled. the returned set has to be deleted by the calling function.
boost::unordered_set<Transition*>* transitionsToInOutErrAndWorklist( const EState* startState,
bool includeIn,
bool includeOut,
bool includeErr);
boost::unordered_set<Transition*>* transitionsToInOutErrAndWorklist( const EState* currentState,
const EState* startState,
boost::unordered_set<Transition*>* results,
boost::unordered_set<const EState*>* visited,
bool includeIn, bool includeOut, bool includeErr);
// adds a string representation of the shortest input path from start state to assertEState to reachabilityResults. returns the length of the
// counterexample input sequence.
int addCounterexample(int assertCode, const EState* assertEState);
// returns a list of EStates from source to target. Target has to come before source in the STG (reversed trace).
std::list<const EState*>reverseInOutSequenceBreadthFirst(const EState* source, const EState* target, bool counterexampleWithOutput = false);
// returns a list of EStates from source to target (shortest input path).
// please note: target has to be a predecessor of source (reversed trace)
std::list<const EState*> reverseInOutSequenceDijkstra(const EState* source, const EState* target, bool counterexampleWithOutput = false);
std::list<const EState*> filterStdInOutOnly(std::list<const EState*>& states, bool counterexampleWithOutput = false) const;
std::string reversedInOutRunToString(std::list<const EState*>& run);
//returns the shortest possible number of input states on the path leading to "target".
int inputSequenceLength(const EState* target);
// begin of solver 9 functions
bool searchForIOPatterns(PState* startPState, int assertion_id, std::list<int>& inputSuffix, std::list<int>* partialTrace = NULL, int* inputPatternLength=NULL);
bool containsPatternTwoRepetitions(std::list<int>& sequence);
bool containsPatternTwoRepetitions(std::list<int>& sequence, int startIndex, int endIndex);
bool computePStateAfterInputs(PState& pState, std::list<int>& inputs, int thread_id, std::list<int>* iOSequence=NULL);
bool computePStateAfterInputs(PState& pState, int input, int thread_id, std::list<int>* iOSequence=NULL);
bool searchPatternPath(int assertion_id, PState& pState, std::list<int>& inputPattern, std::list<int>& inputSuffix, int thread_id,std::list<int>* iOSequence=NULL);
std::list<int> inputsFromPatternTwoRepetitions(std::list<int> pattern2r);
string convertToCeString(std::list<int>& ceAsIntegers, int maxInputVal);
int pStateDepthFirstSearch(PState* startPState, int maxDepth, int thread_id, std::list<int>* partialTrace, int maxInputVal, int patternLength, int PatternIterations);
// end of solver 9 functions
void generateSpotTransition(std::stringstream& ss, const Transition& t);
//less than comarisions on two states according to (#input transitions * #output transitions)
bool indegreeTimesOutdegreeLessThan(const EState* a, const EState* b);
public:
//stores a backup of the created transitionGraph
void storeStgBackup();
//load previous backup of the transitionGraph, storing the current version as a backup instead
void swapStgWithBackup();
//solver 8 becomes the active solver used by the analyzer. Deletion of previous data iff "resetAnalyzerData" is set to true.
void setAnalyzerToSolver8(EState* startEState, bool resetAnalyzerData);
// first: list of new states (worklist), second: set of found existing states
typedef pair<EStateWorkList,EStateSet> SubSolverResultType;
SubSolverResultType subSolver(const EState* currentEStatePtr);
PropertyValueTable* loadAssertionsToReconstruct(string filePath);
void insertInputVarValue(int i) { _inputVarValues.insert(i); }
void addInputSequenceValue(int i) { _inputSequence.push_back(i); }
void resetToEmptyInputSequence() { _inputSequence.clear(); }
void resetInputSequenceIterator() { _inputSequenceIterator=_inputSequence.begin(); }
const EState* getEstateBeforeMissingInput() {return _estateBeforeMissingInput;}
const EState* getLatestErrorEState() {return _latestErrorEState;}
int numberOfInputVarValues() { return _inputVarValues.size(); }
std::set<int> getInputVarValues() { return _inputVarValues; }
// Sets the file name used for STG trace output and (re)creates that file,
// truncating any existing content. A single "START" marker line is written;
// subsequent trace output is expected to reopen the file in append mode.
void setStgTraceFileName(std::string filename) {
    _stg_trace_filename=filename;
    std::ofstream fout;
    fout.open(_stg_trace_filename.c_str()); // create new file/overwrite existing file
    fout<<"START"<<endl;
    fout.close(); // close. Will be used with append.
}
private:
std::string _stg_trace_filename;
public:
// only used temporarily for binary-binding prototype
std::map<std::string,VariableId> globalVarName2VarIdMapping;
std::vector<bool> binaryBindingAssert;
void setAnalyzerMode(AnalyzerMode am) { _analyzerMode=am; }
void setMaxTransitions(size_t maxTransitions) { _maxTransitions=maxTransitions; }
void setMaxIterations(size_t maxIterations) { _maxIterations=maxIterations; }
void setMaxTransitionsForcedTop(size_t maxTransitions) { _maxTransitionsForcedTop=maxTransitions; }
void setMaxIterationsForcedTop(size_t maxIterations) { _maxIterationsForcedTop=maxIterations; }
void setMaxBytes(long int maxBytes) { _maxBytes=maxBytes; }
void setMaxBytesForcedTop(long int maxBytesForcedTop) { _maxBytesForcedTop=maxBytesForcedTop; }
void setMaxSeconds(long int maxSeconds) { _maxSeconds=maxSeconds; }
void setMaxSecondsForcedTop(long int maxSecondsForcedTop) { _maxSecondsForcedTop=maxSecondsForcedTop; }
void setStartPState(PState startPState) { _startPState=startPState; }
void setReconstructMaxInputDepth(size_t inputDepth) { _reconstructMaxInputDepth=inputDepth; }
void setReconstructMaxRepetitions(size_t repetitions) { _reconstructMaxRepetitions=repetitions; }
void setReconstructPreviousResults(PropertyValueTable* previousResults) { _reconstructPreviousResults = previousResults; };
void setPatternSearchMaxDepth(size_t iODepth) { _patternSearchMaxDepth=iODepth; }
void setPatternSearchRepetitions(size_t patternReps) { _patternSearchRepetitions=patternReps; }
void setPatternSearchMaxSuffixDepth(size_t suffixDepth) { _patternSearchMaxSuffixDepth=suffixDepth; }
void setPatternSearchAssertTable(PropertyValueTable* patternSearchAsserts) { _patternSearchAssertTable = patternSearchAsserts; };
void setPatternSearchExploration(ExplorationMode explorationMode) { _patternSearchExplorationMode = explorationMode; };
private:
int _reconstructMaxInputDepth;
int _reconstructMaxRepetitions;
PropertyValueTable* _reconstructPreviousResults;
PropertyValueTable* _patternSearchAssertTable;
int _patternSearchMaxDepth;
int _patternSearchRepetitions;
int _patternSearchMaxSuffixDepth;
ExplorationMode _patternSearchExplorationMode;
std::list<FailedAssertion> _firstAssertionOccurences;
const EState* _estateBeforeMissingInput;
const EState* _latestOutputEState;
const EState* _latestErrorEState;
}; // end of class Analyzer
} // end of namespace CodeThorn
#include "RersSpecialization.h"
#endif
|
main.c | #include <omp.h>
#include <stdio.h>
/*
 * Prints 0..N-1 from each of 10 OpenMP threads.
 *
 * BUG FIX: the original declared a single `int i` outside the parallel
 * region. Variables declared before a `#pragma omp parallel` region are
 * shared by default, so all 10 threads raced on the same counter
 * (undefined output: skipped or duplicated iterations, possible
 * non-termination). Declaring the counter inside the parallel region
 * makes it private to each thread.
 */
int main() {
    int N = 10;              /* iterations printed per thread */
    omp_set_num_threads(10); /* request a team of 10 threads */
#pragma omp parallel
    {
        int i; /* private: declared inside the parallel region */
        for (i = 0; i < N; i++) {
            printf("%d\n", i);
        }
    }
    return 0;
}
|
pr80853.c | /* PR middle-end/80853 */
/* { dg-do run } */
/* Accumulates four arithmetic series into p[0..3]. The array section
   p[:4] is declared as a reduction target (OpenMP 4.5 array-section
   reduction), so when foo is called from a parallel region each thread
   works on a private copy of the four elements and the copies are
   summed at the end of the worksharing loop — no data races.
   noinline/noclone keep the compiler from optimizing this regression
   test (PR middle-end/80853) away. */
__attribute__((noinline, noclone)) void
foo (int *p)
{
  #pragma omp for reduction(+:p[:4])
  for (int i = 0; i < 64; i++)
    {
      p[0] += i;       /* sum of i for i=0..63            */
      p[1] += i / 2;   /* sum of floor(i/2) for i=0..63   */
      p[2] += 2 * i;   /* twice the sum of i              */
      p[3] += 3 * i;   /* three times the sum of i        */
    }
}
/* Invokes foo() from a parallel region (the `omp for` inside foo binds
   to this region's thread team) and checks the reduced results against
   their closed-form values. Aborts on mismatch, as required for a
   dg-do-run compiler testcase. */
int
main ()
{
  int p[4] = { 0, 0, 0, 0 };
#pragma omp parallel
  foo (p);
  /* Expected values: sums over i = 0..63 (see foo). */
  if (p[0] != 63 * 64 / 2
      || p[1] != 31 * 32
      || p[2] != 63 * 64
      || p[3] != 3 * 63 * 64 / 2)
    __builtin_abort ();
  return 0;
}
|
test.c | #include <stdio.h>
#include <math.h>
#define NUM_DIVS 1000000
#define NUM_ITS 100
/*
 * Approximates pi by midpoint-rule quadrature of 4/(1+x^2) over [0,1];
 * the exact value of that integral is 4*atan(1) = pi.
 *
 * BUG FIX: the original integrand was 4.0/(1 + sin(curx)), which does
 * NOT integrate to pi over [0,1] (despite the function's name and the
 * "Pi:" output in main). The correct integrand is 4/(1 + x*x).
 *
 * Returns: the pi estimate (error ~1/(24*NUM_DIVS^2), far below 1e-9).
 */
#ifndef NUM_DIVS
#define NUM_DIVS 1000000 /* fallback so this unit is self-contained */
#endif
double calc_pi()
{
    double div_width = 1.0 / NUM_DIVS; /* width of each sub-interval */
    double sum = 0.0;
    int i;
    /* Iterations are independent; this loop could be parallelized with
       an OpenMP `parallel for` + reduction(+: sum) if desired. */
    for (i = 0; i < NUM_DIVS; i++)
    {
        /* midpoint of the i-th sub-interval */
        double curx = ((double)i + 0.5) * div_width;
        double cury = 4.0 / (1.0 + curx * curx);
        sum += div_width * cury;
    }
    return sum;
}
/* Driver: evaluates calc_pi() NUM_ITS times (a crude repetition loop,
   e.g. for timing runs) and prints the final estimate. */
int main(int argc, char** argv)
{
    double pi = 0.0;
    int iter;
    for (iter = 0; iter < NUM_ITS; iter++)
        pi = calc_pi();
    printf("Pi: %1.16f\n", pi);
    return 0;
}
|
archive_blake2sp_ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "archive_platform.h"
#include "archive_blake2.h"
#include "archive_blake2_impl.h"
#define PARALLELISM_DEGREE 8
/*
blake2sp_init_param defaults to setting the expecting output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
/* Initialise a leaf blake2s state from P, overriding the recorded
   output length with P->inner_length: leaf nodes emit inner_length-byte
   chaining values for the root, not the final digest length. */
static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P )
{
  const int rc = blake2s_init_param( S, P );
  S->outlen = P->inner_length;
  return rc;
}
/* Build the parameter block for leaf number `offset` of the depth-2,
   PARALLELISM_DEGREE-ary blake2sp tree and initialise S from it. */
static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint32_t offset )
{
  blake2s_param param[1];
  memset( param->salt, 0, sizeof( param->salt ) );
  memset( param->personal, 0, sizeof( param->personal ) );
  param->digest_length = (uint8_t)outlen;
  param->key_length    = (uint8_t)keylen;
  param->fanout        = PARALLELISM_DEGREE;
  param->depth         = 2;
  param->node_depth    = 0;                /* leaves sit at depth 0 */
  param->inner_length  = BLAKE2S_OUTBYTES; /* leaves emit full-size chaining values */
  store32( &param->leaf_length, 0 );       /* unlimited leaf length */
  store32( &param->node_offset, offset );  /* index of this leaf */
  store16( &param->xof_length, 0 );
  return blake2sp_init_leaf_param( S, param );
}
/* Build the parameter block for the root node of the depth-2 tree and
   initialise S from it. Unlike the leaves, the root reports the real
   digest length, so plain blake2s_init_param is used. */
static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen )
{
  blake2s_param param[1];
  memset( param->salt, 0, sizeof( param->salt ) );
  memset( param->personal, 0, sizeof( param->personal ) );
  param->digest_length = (uint8_t)outlen;
  param->key_length    = (uint8_t)keylen;
  param->fanout        = PARALLELISM_DEGREE;
  param->depth         = 2;
  param->node_depth    = 1;          /* root sits one level above the leaves */
  param->inner_length  = BLAKE2S_OUTBYTES;
  store32( &param->leaf_length, 0 );
  store32( &param->node_offset, 0 ); /* root is node 0 of its layer */
  store16( &param->xof_length, 0 );
  return blake2s_init_param( S, param );
}
/* Initialise an unkeyed blake2sp state producing an outlen-byte digest.
   Returns 0 on success, -1 on invalid digest size or init failure. */
int blake2sp_init( blake2sp_state *S, size_t outlen )
{
  size_t lane;
  if( outlen == 0 || outlen > BLAKE2S_OUTBYTES ) return -1;
  S->outlen = outlen;
  S->buflen = 0;
  memset( S->buf, 0, sizeof( S->buf ) );
  /* Root node combines the leaf digests into the final hash. */
  if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;
  /* One unkeyed leaf state per lane. */
  for( lane = 0; lane < PARALLELISM_DEGREE; ++lane )
    if( blake2sp_init_leaf( S->S[lane], outlen, 0, (uint32_t)lane ) < 0 ) return -1;
  /* Mark the last node of each tree layer. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Keyed initialisation: like blake2sp_init, then every leaf absorbs one
   zero-padded BLAKE2S_BLOCKBYTES block containing the key.
   Returns 0 on success, -1 on invalid digest/key size or init failure. */
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen )
{
  size_t i;
  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
  if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;
  if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S->S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;
  /* Mark the last node of each tree layer. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  {
    /* Each leaf is primed with the key padded to a full block. */
    uint8_t block[BLAKE2S_BLOCKBYTES];
    memset( block, 0, BLAKE2S_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}
/* Absorb `inlen` bytes of input, striping consecutive
   BLAKE2S_BLOCKBYTES-sized blocks round-robin across the
   PARALLELISM_DEGREE leaf states. Bytes that do not fill a complete
   stripe are buffered in S->buf for the next call. Always returns 0. */
int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  size_t left = S->buflen;               /* bytes already buffered */
  size_t fill = sizeof( S->buf ) - left; /* remaining buffer capacity */
  size_t i;
  /* If the buffer can be completed, flush one full block to each leaf. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
    in += fill;
    inlen -= fill;
    left = 0;
  }
  /* With OpenMP, each thread processes the lane matching its thread id;
     otherwise a serial loop walks all lanes. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num(); /* intentionally shadows the outer i */
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2S_BLOCKBYTES; /* lane i starts at block i of the stripe */
    /* Consume every complete stripe of PARALLELISM_DEGREE blocks. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }
  }
  /* Buffer the trailing partial stripe for the next update/final call. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );
  S->buflen = left + inlen;
  return 0;
}
/* Finalise: flush any buffered tail bytes into their owning leaves,
   finalise each leaf, feed the PARALLELISM_DEGREE leaf digests to the
   root node, and write the root digest to `out` (S->outlen bytes).
   Returns -1 if out is NULL or outlen is too small. */
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  size_t i;
  if(out == NULL || outlen < S->outlen) {
    return -1;
  }
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    /* Lane i owns buffered block i; it receives at most one block. */
    if( S->buflen > i * BLAKE2S_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;
      if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
      blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
    }
    blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
  }
  /* Root node hashes the concatenated leaf digests. */
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
  return blake2s_final( S->R, out, S->outlen );
}
/* One-shot blake2sp: hash `inlen` bytes of `in` (optionally keyed) into
   an outlen-byte digest at `out`. Equivalent to init(_key)/update/final
   but keeps all leaf states on the stack. Returns 0 on success, -1 on
   invalid parameters or init failure. */
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  blake2s_state S[PARALLELISM_DEGREE][1];
  blake2s_state FS[1];
  size_t i;
  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;
  if ( NULL == out ) return -1;
  if ( NULL == key && keylen > 0) return -1;
  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
  if( keylen > BLAKE2S_KEYBYTES ) return -1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;
  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
  if( keylen > 0 )
  {
    /* Keyed mode: every leaf absorbs one zero-padded key block first. */
    uint8_t block[BLAKE2S_BLOCKBYTES];
    memset( block, 0, BLAKE2S_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }
  /* With OpenMP, each thread handles one lane; otherwise loop serially. */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num(); /* intentionally shadows the outer i */
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2S_BLOCKBYTES; /* lane i starts at block i of the stripe */
    /* Full stripes: each lane consumes one block per stripe. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }
    /* Tail: lane i may own one final (possibly partial) block. */
    if( inlen__ > i * BLAKE2S_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES;
      const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
      blake2s_update( S[i], in__, len );
    }
    blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
  }
  /* Root pass over the concatenated leaf digests. */
  if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
    return -1;
  FS->last_node = 1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
  return blake2s_final( FS, out, outlen );
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self test (compiled only with BLAKE2SP_SELFTEST): checks
   both the one-shot and the streaming keyed blake2sp APIs against the
   blake2sp_keyed_kat vectors. Prints "ok" and returns 0 on success;
   prints "error" and returns -1 on the first mismatch or API failure. */
int main( void )
{
  uint8_t key[BLAKE2S_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;
  /* Deterministic fixtures: key and message are bytes 0,1,2,... */
  for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;
  /* Test simple API */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2S_OUTBYTES];
    blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );
    if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
    {
      goto fail;
    }
  }
  /* Test streaming API */
  for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
    /* Feed each message in step-byte pieces to exercise buffering. */
    for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
      uint8_t hash[BLAKE2S_OUTBYTES];
      blake2sp_state S;
      uint8_t * p = buf;
      size_t mlen = i;
      int err = 0;
      if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
        goto fail;
      }
      while (mlen >= step) {
        if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
          goto fail;
        }
        mlen -= step;
        p += step;
      }
      if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
        goto fail;
      }
      if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
        goto fail;
      }
      if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
        goto fail;
      }
    }
  }
  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
|
seq_multivector.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "seq_multivector.h"
#include "_hypre_utilities.h"
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorCreate
*--------------------------------------------------------------------------*/
hypre_Multivector *
hypre_SeqMultivectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   /* Allocate an empty multivector descriptor; the data array and the
      active-vector mask are created later by hypre_SeqMultivectorInitialize. */
   hypre_Multivector *mv = (hypre_Multivector *) hypre_MAlloc(sizeof(hypre_Multivector));
   hypre_MultivectorSize(mv)       = size;
   hypre_MultivectorNumVectors(mv) = num_vectors;
   hypre_MultivectorOwnsData(mv)   = 1;    /* this object will own its storage */
   hypre_MultivectorData(mv)       = NULL; /* no storage yet */
   mv->num_active_vectors = 0;
   mv->active_indices     = NULL;          /* mask created on first init */
   return mv;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInitialize
*--------------------------------------------------------------------------*/
/* Allocate the data array (size * num_vectors entries) if not yet
   present and build the default "all vectors active" index mask.
   Returns 0. */
HYPRE_Int
hypre_SeqMultivectorInitialize( hypre_Multivector *mvector )
{
   HYPRE_Int ierr = 0, i, size, num_vectors;
   size = hypre_MultivectorSize(mvector);
   num_vectors = hypre_MultivectorNumVectors(mvector);
   if (NULL==hypre_MultivectorData(mvector))
      hypre_MultivectorData(mvector) =
         (HYPRE_Complex *) hypre_MAlloc(sizeof(HYPRE_Complex)*size*num_vectors);
   /* now we create a "mask" of "active" vectors; initially all active */
   if (NULL==mvector->active_indices)
   {
      mvector->active_indices=hypre_CTAlloc(HYPRE_Int, num_vectors);
      for (i=0; i<num_vectors; i++) mvector->active_indices[i] = i;
      mvector->num_active_vectors=num_vectors;
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetDataOwner(hypre_Multivector *mvector, HYPRE_Int owns_data)
{
   /* Record whether Destroy should free the data array. Returns 0. */
   hypre_MultivectorOwnsData(mvector) = owns_data;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorDestroy(hypre_Multivector *mvector)
{
   /* Free the data array (only if owned), the active-index mask, and
      the descriptor itself. A NULL argument is a no-op. Returns 0. */
   if (mvector == NULL)
      return 0;
   if (hypre_MultivectorOwnsData(mvector) && hypre_MultivectorData(mvector) != NULL)
      hypre_TFree( hypre_MultivectorData(mvector) );
   if (mvector->active_indices != NULL)
      hypre_TFree(mvector->active_indices);
   hypre_TFree(mvector);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetMask
* (this routine accepts mask in "zeros and ones format, and converts it to
the one used in the structure "hypre_Multivector")
*-------------------------------------------------------------------------*/
/* Replace the multivector's active-vector mask. `mask` uses the
   "zeros and ones" convention (mask[i] != 0 means vector i is active);
   a NULL mask activates all vectors. The previous mask is freed.
   Returns 0. */
HYPRE_Int
hypre_SeqMultivectorSetMask(hypre_Multivector *mvector, HYPRE_Int * mask)
{
   HYPRE_Int i, num_vectors = mvector->num_vectors;
   if (mvector->active_indices != NULL) hypre_TFree(mvector->active_indices);
   mvector->active_indices=hypre_CTAlloc(HYPRE_Int, num_vectors);
   mvector->num_active_vectors=0;
   if (mask!=NULL)
      for (i=0; i<num_vectors; i++)
      {
         if ( mask[i] )
            mvector->active_indices[mvector->num_active_vectors++]=i;
      }
   else
      for (i=0; i<num_vectors; i++)
         mvector->active_indices[mvector->num_active_vectors++]=i;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetConstantValues
*--------------------------------------------------------------------------*/
/* Set every entry of the active vectors of v to `value`. Returns 0. */
HYPRE_Int
hypre_SeqMultivectorSetConstantValues(hypre_Multivector *v, HYPRE_Complex value)
{
   HYPRE_Int i, j, start_offset, end_offset;
   HYPRE_Int size = hypre_MultivectorSize(v);
   HYPRE_Complex *vector_data = hypre_MultivectorData(v);
   if (v->num_active_vectors == v->num_vectors)
   {
      /* All vectors active: fill the whole contiguous data block. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < v->num_vectors*size; j++) vector_data[j] = value;
   }
   else
   {
      /* Fill only the storage range of each active vector. */
      for (i = 0; i < v->num_active_vectors; i++)
      {
         start_offset = v->active_indices[i]*size;
         end_offset = start_offset+size;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = start_offset; j < end_offset; j++) vector_data[j]= value;
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetRandomValues
*
* returns vector of values randomly distributed between -1.0 and +1.0
*--------------------------------------------------------------------------*/
/* Fill the active vectors of v with values distributed in (-1, +1),
   using hypre's serial RNG seeded with `seed`. Returns 0. */
HYPRE_Int
hypre_SeqMultivectorSetRandomValues(hypre_Multivector *v, HYPRE_Int seed)
{
   HYPRE_Int i, j, start_offset, end_offset;
   HYPRE_Int size = hypre_MultivectorSize(v);
   HYPRE_Complex *vector_data = hypre_MultivectorData(v);
   hypre_SeedRand(seed);
   /* comment from vector.c: RDF: threading this loop may cause problems
      because of hypre_Rand() */
   if (v->num_active_vectors == v->num_vectors)
   {
      /* All vectors active: fill the whole contiguous data block. */
      for (j = 0; j < v->num_vectors*size; j++)
         vector_data[j] = 2.0 * hypre_Rand() - 1.0;
   }
   else
   {
      /* Fill only the storage range of each active vector. */
      for (i = 0; i < v->num_active_vectors; i++)
      {
         start_offset = v->active_indices[i]*size;
         end_offset = start_offset+size;
         for (j = start_offset; j < end_offset; j++)
            vector_data[j]= 2.0 * hypre_Rand() - 1.0;
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorCopy
* copies data from x to y
* y should have already been initialized at the same size as x
*--------------------------------------------------------------------------*/
/* Copy the active vectors of x into the active vectors of y (pairwise,
   in mask order). y must already be initialized with the same vector
   size and the same number of active vectors. Returns 0. */
HYPRE_Int
hypre_SeqMultivectorCopy(hypre_Multivector *x, hypre_Multivector *y)
{
   HYPRE_Int i, size, num_bytes, num_active_vectors, *x_active_ind, * y_active_ind;
   HYPRE_Complex *x_data, *y_data, *dest, * src;
   hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);
   num_active_vectors = x->num_active_vectors;
   size = x->size;
   x_data = x->data;
   y_data = y->data;
   x_active_ind=x->active_indices;
   y_active_ind=y->active_indices;
   if (x->num_active_vectors == x->num_vectors &&
       y->num_active_vectors == y->num_vectors)
   {
      /* All vectors active on both sides: one bulk memcpy. */
      num_bytes = x->num_vectors * size * sizeof(HYPRE_Complex);
      memcpy(y_data, x_data, num_bytes);
   }
   else
   {
      /* Copy vector-by-vector through the active index maps. */
      num_bytes = size*sizeof(HYPRE_Complex);
      for (i=0; i < num_active_vectors; i++)
      {
         src=x_data + size * x_active_ind[i];
         dest = y_data + size * y_active_ind[i];
         memcpy(dest,src,num_bytes);
      }
   }
   return 0;
}
HYPRE_Int
hypre_SeqMultivectorCopyWithoutMask(hypre_Multivector *x ,
                                    hypre_Multivector *y)
{
   /* Raw bulk copy of every vector, ignoring the active-vector masks.
      x and y must match in vector size and count. Returns 0. */
   hypre_assert (x->size == y->size && x->num_vectors == y->num_vectors);
   memcpy(y->data, x->data, sizeof(HYPRE_Complex) * x->size * x->num_vectors);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorAxpy
*--------------------------------------------------------------------------*/
/* y <- alpha * x + y over the active vectors of x and y.
   Returns 0.
   BUG FIX: in the all-vectors-active fast path the original code read
   through `src` and `dest` before they were ever assigned (they are
   only set inside the masked branch), i.e. it dereferenced
   uninitialized pointers — undefined behavior. The fast path now
   operates directly on the contiguous data arrays x_data/y_data. */
HYPRE_Int
hypre_SeqMultivectorAxpy(HYPRE_Complex alpha, hypre_Multivector *x,
                         hypre_Multivector *y)
{
   HYPRE_Int i, j, size, num_active_vectors, *x_active_ind, *y_active_ind;
   HYPRE_Complex *x_data, *y_data, *src, *dest;
   hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);
   x_data = x->data;
   y_data = y->data;
   size = x->size;
   num_active_vectors = x->num_active_vectors;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   if (x->num_active_vectors == x->num_vectors &&
       y->num_active_vectors == y->num_vectors)
   {
      /* All vectors active: update the whole contiguous data block. */
      for(i = 0; i < x->num_vectors*size; i++) y_data[i] += alpha * x_data[i];
   }
   else
   {
      /* Update vector-by-vector through the active index maps. */
      for(i = 0; i < num_active_vectors; i++)
      {
         src = x_data + x_active_ind[i]*size;
         dest = y_data + y_active_ind[i]*size;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < size; j++) dest[j] += alpha * src[j];
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorByDiag: " y(<y_mask>) = alpha(<mask>) .* x(<x_mask>) "
*--------------------------------------------------------------------------*/
/* y(<y_mask>) = alpha(<mask>) .* x(<x_mask>): scale each active vector
   of x by the corresponding active entry of the diagonal `alpha`
   (selected via the zeros-and-ones `mask` of length n) and store it in
   the matching active vector of y. Returns 0. */
HYPRE_Int
hypre_SeqMultivectorByDiag(hypre_Multivector *x, HYPRE_Int *mask, HYPRE_Int n,
                           HYPRE_Complex *alpha, hypre_Multivector *y)
{
   HYPRE_Int i, j, size, num_active_vectors, *x_active_ind, *y_active_ind;
   HYPRE_Int *al_active_ind, num_active_als;
   HYPRE_Complex *x_data, *y_data, *dest, *src, current_alpha;
   hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);
   /* build list of active indices in alpha */
   al_active_ind = hypre_TAlloc(HYPRE_Int,n);
   num_active_als = 0;
   if (mask!=NULL)
      for (i=0; i<n; i++)
      {
         if (mask[i])
            al_active_ind[num_active_als++]=i;
      }
   else
      for (i=0; i<n; i++)
         al_active_ind[num_active_als++]=i;
   /* exactly one alpha entry is required per active vector */
   hypre_assert (num_active_als==x->num_active_vectors);
   x_data = x->data;
   y_data = y->data;
   size = x->size;
   num_active_vectors = x->num_active_vectors;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   for(i = 0; i < num_active_vectors; i++)
   {
      src = x_data + x_active_ind[i]*size;
      dest = y_data + y_active_ind[i]*size;
      current_alpha=alpha[ al_active_ind[i] ];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < size; j++)
         dest[j] = current_alpha*src[j];
   }
   hypre_TFree(al_active_ind);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInnerProd
*--------------------------------------------------------------------------*/
/* Compute all pairwise inner products between the active vectors of x
   and y, writing them column-wise into `results` (x-index varies
   fastest). Returns 0. */
HYPRE_Int hypre_SeqMultivectorInnerProd(hypre_Multivector *x, hypre_Multivector *y,
                                        HYPRE_Real *results )
{
   HYPRE_Int i, j, k, size, *x_active_ind, *y_active_ind;
   HYPRE_Int x_num_active_vectors, y_num_active_vectors;
   HYPRE_Complex *x_data, *y_data, *y_ptr, *x_ptr;
   HYPRE_Real current_product;
   hypre_assert (x->size==y->size);
   x_data = x->data;
   y_data = y->data;
   size = x->size;
   x_num_active_vectors = x->num_active_vectors;
   y_num_active_vectors = y->num_active_vectors;
   /* we assume that "results" points to contiguous array of (x_num_active_vectors X
      y_num_active_vectors) doubles */
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   for(j = 0; j < y_num_active_vectors; j++)
   {
      y_ptr = y_data + y_active_ind[j]*size;
      for (i = 0; i < x_num_active_vectors; i++)
      {
         x_ptr = x_data + x_active_ind[i]*size;
         current_product = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
#endif
         for(k = 0; k < size; k++)
            current_product += x_ptr[k] * hypre_conj(y_ptr[k]);
         /* column-wise storage for results */
         *results++ = current_product;
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInnerProdDiag
*--------------------------------------------------------------------------*/
/* Compute only the "diagonal" inner products: diagResults[i] =
   <x_i, conj(y_i)> for each pair of active vectors of x and y.
   Returns 0. */
HYPRE_Int hypre_SeqMultivectorInnerProdDiag(hypre_Multivector *x,
                                            hypre_Multivector *y, HYPRE_Real *diagResults)
{
   HYPRE_Complex *x_data, *y_data, *y_ptr, *x_ptr;
   HYPRE_Real current_product;
   HYPRE_Int i, k, size, num_active_vectors, *x_active_ind, *y_active_ind;
   hypre_assert(x->size==y->size && x->num_active_vectors == y->num_active_vectors);
   x_data = x->data;
   y_data = y->data;
   size = x->size;
   num_active_vectors = x->num_active_vectors;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   for (i=0; i<num_active_vectors; i++)
   {
      x_ptr = x_data + x_active_ind[i]*size;
      y_ptr = y_data + y_active_ind[i]*size;
      current_product = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
#endif
      for(k=0; k<size; k++)
         current_product += x_ptr[k] * hypre_conj(y_ptr[k]);
      *diagResults++ = current_product;
   }
   return 0;
}
/* y(active) = x(active) * R, where R is an rHeight x rWidth coefficient
   matrix stored column-wise in rVal with allocated column height
   rGHeight (so gap = rGHeight - rHeight trailing entries are skipped
   after each column). Overwrites y. Returns 0. */
HYPRE_Int
hypre_SeqMultivectorByMatrix(hypre_Multivector *x, HYPRE_Int rGHeight, HYPRE_Int rHeight,
                             HYPRE_Int rWidth, HYPRE_Complex* rVal, hypre_Multivector *y)
{
   HYPRE_Int i, j, k, size, gap, *x_active_ind, *y_active_ind;
   HYPRE_Complex *x_data, *y_data, *x_ptr, *y_ptr, current_coef;
   hypre_assert(rHeight>0);
   hypre_assert (rHeight==x->num_active_vectors && rWidth==y->num_active_vectors);
   x_data = x->data;
   y_data = y->data;
   size = x->size;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   gap = rGHeight - rHeight;
   for (j=0; j<rWidth; j++)
   {
      y_ptr = y_data + y_active_ind[j]*size;
      /* ------ set current "y" to first member in a sum ------ */
      x_ptr = x_data + x_active_ind[0]*size;
      current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
      for (k=0; k<size; k++)
         y_ptr[k] = current_coef * x_ptr[k];
      /* ------ now add all other members of a sum to "y" ----- */
      for (i=1; i<rHeight; i++)
      {
         x_ptr = x_data + x_active_ind[i]*size;
         current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
         for (k=0; k<size; k++)
            y_ptr[k] += current_coef * x_ptr[k];
      }
      /* skip the unused rows at the bottom of this column */
      rVal += gap;
   }
   return 0;
}
/* y(active) += x(active) * R: like hypre_SeqMultivectorByMatrix but
   accumulates into y instead of overwriting it. R is rHeight x rWidth,
   stored column-wise in rVal with allocated column height rGHeight.
   Returns 0. */
HYPRE_Int
hypre_SeqMultivectorXapy (hypre_Multivector *x, HYPRE_Int rGHeight, HYPRE_Int rHeight,
                          HYPRE_Int rWidth, HYPRE_Complex* rVal, hypre_Multivector *y)
{
   HYPRE_Complex *x_data, *y_data, *x_ptr, *y_ptr, current_coef;
   HYPRE_Int i, j, k, size, gap, *x_active_ind, *y_active_ind;
   hypre_assert (rHeight==x->num_active_vectors && rWidth==y->num_active_vectors);
   x_data = x->data;
   y_data = y->data;
   size = x->size;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   gap = rGHeight - rHeight;
   for (j=0; j<rWidth; j++)
   {
      y_ptr = y_data + y_active_ind[j]*size;
      for (i=0; i<rHeight; i++)
      {
         x_ptr = x_data + x_active_ind[i]*size;
         current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
         for (k=0; k<size; k++)
            y_ptr[k] += current_coef * x_ptr[k];
      }
      /* skip the unused rows at the bottom of this column */
      rVal += gap;
   }
   return 0;
}
|
opencl_keychain_fmt_plug.c | /*
* Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_keychain);
#else
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "options.h"
#include "common-opencl.h"
#define FORMAT_LABEL "keychain-opencl"
#define FORMAT_NAME "Mac OS X Keychain"
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define SWAP(n) \
(((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(*salt_struct)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN MEM_ALIGN_WORD
#define SALTLEN 20
#define IVLEN 8
#define CTLEN 48
#define uint8_t unsigned char
#define uint16_t unsigned short
#define uint32_t ARCH_WORD_32
#define OCL_CONFIG "keychain"
/* Host-to-device transfer record: one candidate password. */
typedef struct {
	uint32_t length;
	uint8_t v[PLAINTEXT_LENGTH];
} keychain_password;

/* Device-to-host transfer record: 32 bytes of PBKDF2 output.  Only the
 * first 24 bytes (the 3DES key, see currentsalt.outlen) are consumed by
 * kcdecrypt(). */
typedef struct {
	uint32_t v[32/4];
} keychain_hash;

/* Salt/settings record copied to the device once per salt (set_salt). */
typedef struct {
	uint8_t length;
	uint8_t salt[SALTLEN];
	uint32_t iterations;
	uint32_t outlen;
} keychain_salt;
static int *cracked;
static int any_cracked;
static struct fmt_tests keychain_tests[] = {
{"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"},
// these were generated with pass_gen.pl. NOTE, they ALL have the data (which gets encrypted) which was decrypted from the above hash.
{"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"},
{"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"},
{"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"},
{"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"},
{NULL}
};
/* Decoded keychain blob: PBKDF2 salt, 3DES IV, and the encrypted keyblob
 * that kcdecrypt() tries to open with each derived key. */
static struct custom_salt {
	unsigned char salt[SALTLEN];
	unsigned char iv[IVLEN];
	unsigned char ct[CTLEN];
} *salt_struct;
static cl_int cl_error;
static keychain_password *inbuffer;
static keychain_hash *outbuffer;
static keychain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
size_t insize, outsize, settingsize, cracked_size;
#define MIN(a, b) (((a) > (b)) ? (b) : (a))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define OCL_CONFIG "keychain"
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Upper bound on the work-group size the auto-tuner may try for crypt_kernel. */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/* Maximum global work size; 0 means "no cap", letting the auto-tuner decide. */
static size_t get_task_max_size()
{
	return 0;
}
/* Default local work size: 64 on GPUs; on CPU devices, 8 for Intel's
 * platform and 1 for everything else. */
static size_t get_default_workgroup()
{
	if (!cpu(device_info[gpu_id]))
		return 64;
	if (get_platform_vendor_id(platform_id) == DEV_INTEL)
		return 8;
	return 1;
}
/* Allocate the host transfer buffers and device buffers for gws work items
 * and bind the device buffers to crypt_kernel's three arguments.
 * Called (and re-called) by the auto-tuner with varying gws. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(keychain_password) * gws;
	outsize = sizeof(keychain_hash) * gws;
	settingsize = sizeof(keychain_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Release the device buffers and host transfer buffers; inverse of
 * create_clobj(). */
static void release_clobj(void)
{
	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
	MEM_FREE(inbuffer);
	MEM_FREE(outbuffer);
	MEM_FREE(cracked);
}
/* Tear down everything init()/create_clobj() built: buffers, kernel, program. */
static void done(void)
{
	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}
/* Build the PBKDF2-HMAC-SHA1 kernel with buffer sizes baked in as compile-time
 * macros, then hand control to the shared auto-tuner (max 1000 ms per
 * crypt_all call during tuning). */
static void init(struct fmt_main *self)
{
	char build_opts[64];

	snprintf(build_opts, sizeof(build_opts),
	         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
	         PLAINTEXT_LENGTH,
	         (int)sizeof(currentsalt.salt),
	         (int)sizeof(outbuffer->v));
	opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
	                gpu_id, build_opts);

	crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
	HANDLE_CLERROR(cl_error, "Error creating kernel");

	// Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, 0, NULL,
	                       warn, 1, self, create_clobj, release_clobj,
	                       sizeof(keychain_password), 0);

	// Auto tune execution from shared/included code.
	autotune_run(self, 1, 0, 1000);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
if (strncmp(ciphertext, "$keychain$*", 11) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 11;
if ((p = strtok(ctcopy, "*")) == NULL) /* salt */
goto err;
if(strlen(p) != SALTLEN * 2)
goto err;
if ((p = strtok(NULL, "*")) == NULL) /* iv */
goto err;
if(strlen(p) != IVLEN * 2)
goto err;
if ((p = strtok(NULL, "*")) == NULL) /* ciphertext */
goto err;
if(strlen(p) != CTLEN * 2)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/* Decode "$keychain$*salt*iv*ct" (hex fields) into a custom_salt.
 * Field presence and lengths are trusted here because the hash has already
 * passed valid(); the strtok() results are therefore not re-checked. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	salt_struct = mem_calloc_tiny(sizeof(struct custom_salt),
		MEM_ALIGN_WORD);
	ctcopy += 11;	/* skip over "$keychain$*" */
	p = strtok(ctcopy, "*");
	/* two hex chars -> one byte, for each of the three fields */
	for (i = 0; i < SALTLEN; i++)
		salt_struct->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	for (i = 0; i < IVLEN; i++)
		salt_struct->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	for (i = 0; i < CTLEN; i++)
		salt_struct->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)salt_struct;
}
static void set_salt(void *salt)
{
salt_struct = (struct custom_salt *)salt;
memcpy((char*)currentsalt.salt, salt_struct->salt, 20);
currentsalt.length = 20;
currentsalt.iterations = 1000;
currentsalt.outlen = 24;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, ¤tsalt, 0, NULL, NULL),
"Copy salt to gpu");
}
#undef set_key
/* Store a candidate password in the host-side transfer buffer.
 * The length is clamped to PLAINTEXT_LENGTH *before* any narrowing: the old
 * code assigned strlen() straight into a uint8_t, so a key longer than 255
 * characters wrapped around and was silently mis-truncated. */
static void set_key(char *key, int index)
{
	size_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}
/* Return the candidate stored at index as a NUL-terminated C string.
 * The buffer is static, so the result is only valid until the next call. */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	const int length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}
/* Try to decrypt the 48-byte keyblob with 3DES-EDE-CBC, keyed by the first
 * 24 bytes of the PBKDF2 output.  Returns 0 when the trailing padding looks
 * consistent (candidate password is presumed correct), -1 otherwise. */
static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data)
{
	unsigned char out[CTLEN];
	int pad, n, i;
	DES_cblock key1, key2, key3;
	DES_cblock ivec;
	DES_key_schedule ks1, ks2, ks3;
	memset(out, 0, sizeof(out));
	/* split the 24-byte key into the three single-DES subkeys */
	memcpy(key1, key, 8);
	memcpy(key2, key + 8, 8);
	memcpy(key3, key + 16, 8);
	DES_set_key((C_Block *) key1, &ks1);
	DES_set_key((C_Block *) key2, &ks2);
	DES_set_key((C_Block *) key3, &ks3);
	memcpy(ivec, iv, 8);
	DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
	// now check padding
	pad = out[47];	/* last byte holds the pad length (PKCS#7-style) */
	if(pad > 8)
		// "Bad padding byte. You probably have a wrong password"
		return -1;
	if(pad != 4) /* possible bug here, is this assumption always valid? */
		return -1;
	n = CTLEN - pad;
	/* every pad byte must equal the pad length */
	for(i = n; i < CTLEN; i++)
		if(out[i] != pad)
			// "Bad padding. You probably have a wrong password"
			return -1;
	return 0;
}
#if 0
//#ifdef DEBUG
/* Debug helper: dump len bytes as lowercase hex plus a trailing newline.
 * (Compiled out by the surrounding #if 0.) */
static void print_hex(unsigned char *str, int len)
{
	int i = 0;

	while (i < len) {
		printf("%02x", str[i]);
		++i;
	}
	printf("\n");
}
#endif
/* Derive one PBKDF2 key per candidate on the GPU, then verify each derived
 * key on the CPU by attempting the 3DES decryption of the salt's keyblob.
 * Results land in cracked[]/any_cracked for the cmp_* callbacks. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;

	/* round the global size up to a whole number of work-groups */
	global_work_size = (count + local_work_size - 1) / local_work_size * local_work_size;

	/* clear results from the previous batch */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, &local_work_size, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking, so outbuffer is complete below)
	HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (!kcdecrypt((unsigned char*)outbuffer[index].v,
			salt_struct->iv, salt_struct->ct))
		{
			cracked[index] = 1;	/* each thread writes its own slot */
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	return count;
}
/* Quick reject: nonzero iff at least one candidate in this batch cracked. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Per-candidate result, computed in crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* The 3DES padding check in crypt_all() is the only verification available;
 * nothing stronger can be done here (hence FMT_NOT_EXACT). */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Format descriptor registered with the John core (field order per formats.h). */
struct fmt_main fmt_opencl_keychain = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_NOT_EXACT: cmp_exact() always returns 1 (see above) */
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		keychain_tests
	}, { /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
displacement_criteria.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_DISPLACEMENT_CRITERIA )
#define KRATOS_DISPLACEMENT_CRITERIA
/* System includes */
/* External includes */
/* Project includes */
#include "includes/model_part.h"
#include "includes/define.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class DisplacementCriteria
* @ingroup KratosCore
* @brief This is a convergence criteria that employes the increment on the solution as criteria
* @details The reactions from the RHS are not computed in the solution
* @author Riccardo Rossi
*/
template<class TSparseSpace,
         class TDenseSpace
         >
class DisplacementCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION( DisplacementCriteria );

    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;

    typedef TSparseSpace SparseSpaceType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef std::size_t IndexType;

    typedef std::size_t SizeType;

    ///@}
    ///@name Life Cycle
    ///@{

    /** Constructor.
     * @param NewRatioTolerance Relative tolerance: |Dx| / |x| below it means convergence
     * @param AlwaysConvergedNorm Absolute tolerance: convergence is accepted below it regardless of the ratio
     */
    explicit DisplacementCriteria(
        TDataType NewRatioTolerance,
        TDataType AlwaysConvergedNorm)
        : ConvergenceCriteria< TSparseSpace, TDenseSpace >()
    {
        mRatioTolerance = NewRatioTolerance;
        mAlwaysConvergedNorm = AlwaysConvergedNorm;
    }

    /** Copy constructor.
     */
    explicit DisplacementCriteria( DisplacementCriteria const& rOther )
        :BaseType(rOther)
        ,mRatioTolerance(rOther.mRatioTolerance)
        ,mAlwaysConvergedNorm(rOther.mAlwaysConvergedNorm)
        ,mReferenceDispNorm(rOther.mReferenceDispNorm)
    {
    }

    /** Destructor.
     */
    ~DisplacementCriteria() override {}

    ///@}
    ///@name Operators
    ///@{

    /**
     * Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param A System matrix (unused)
     * @param Dx Vector of results (variations on nodal variables)
     * @param b RHS vector (residual + reactions)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& A,
        const TSystemVectorType& Dx,
        const TSystemVectorType& b
        ) override
    {
        const TDataType approx_zero_tolerance = std::numeric_limits<TDataType>::epsilon();
        const SizeType size_Dx = Dx.size();

        if (size_Dx != 0) { //if we are solving for something
            SizeType size_solution;
            TDataType final_correction_norm = CalculateFinalCorrectionNorm(size_solution, rDofSet, Dx);

            TDataType ratio = 0.0;

            CalculateReferenceNorm(rDofSet);
            // Guard against dividing by a (numerically) zero reference norm
            if (mReferenceDispNorm < approx_zero_tolerance) {
                KRATOS_WARNING("DisplacementCriteria") << "NaN norm is detected. Setting reference to convergence criteria" << std::endl;
                mReferenceDispNorm = final_correction_norm;
            }

            if(final_correction_norm < approx_zero_tolerance) {
                ratio = 0.0;
            } else {
                ratio = final_correction_norm/mReferenceDispNorm;
            }

            // NOTE(review): if every dof is fixed, size_solution is 0 and this
            // division yields NaN; the ratio test still decides convergence then.
            const TDataType float_size_solution = static_cast<TDataType>(size_solution);

            const TDataType absolute_norm = (final_correction_norm/std::sqrt(float_size_solution));

            KRATOS_INFO_IF("DISPLACEMENT CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << " :: [ Obtained ratio = " << ratio << "; Expected ratio = " << mRatioTolerance << "; Absolute norm = " << absolute_norm << "; Expected norm = " << mAlwaysConvergedNorm << "]" << std::endl;

            rModelPart.GetProcessInfo()[CONVERGENCE_RATIO] = ratio;
            rModelPart.GetProcessInfo()[RESIDUAL_NORM] = absolute_norm;

            // Converged when either the relative OR the absolute test passes
            if ( ratio <= mRatioTolerance || absolute_norm<mAlwaysConvergedNorm ) { // || (final_correction_norm/x.size())<=1e-7)
                KRATOS_INFO_IF("DISPLACEMENT CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << "Convergence is achieved" << std::endl;
                return true;
            } else {
                return false;
            }
        } else { //in this case all the displacements are imposed!
            return true;
        }
    }

    /**
     * This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the problem. (unused)
     */
    void Initialize(
        ModelPart& rModelPart
        ) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;
    }

    /**
     * This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param A System matrix (unused)
     * @param Dx Vector of results (variations on nodal variables)
     * @param b RHS vector (residual + reactions)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& A,
        const TSystemVectorType& Dx,
        const TSystemVectorType& b
        ) override
    {
        BaseType::InitializeSolutionStep(rModelPart, rDofSet, A, Dx, b);
    }

    /**
     * This function finalizes the solution step
     * @param rModelPart Reference to the ModelPart containing the problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param A System matrix (unused)
     * @param Dx Vector of results (variations on nodal variables)
     * @param b RHS vector (residual + reactions)
     */
    void FinalizeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& A,
        const TSystemVectorType& Dx,
        const TSystemVectorType& b
        ) override
    {
        BaseType::FinalizeSolutionStep(rModelPart, rDofSet, A, Dx, b);
    }

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "DisplacementCriteria";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}

private:
    ///@name Member Variables
    ///@{

    TDataType mRatioTolerance;      /// The ratio threshold for the norm of the residual
    TDataType mAlwaysConvergedNorm; /// The absolute value threshold for the norm of the residual
    TDataType mReferenceDispNorm;   /// The norm at the beginning of the iterations

    ///@}
    ///@name Private Operators
    ///@{

    /**
     * @brief This method computes the reference norm (L2 norm of the free dofs' current values)
     * @details It checks if the dof is fixed.
     * FIX: the per-dof temporary used to be declared outside the OpenMP
     * parallel loop and was therefore shared between threads — a data race.
     * It is now local to each iteration (cf. CalculateFinalCorrectionNorm,
     * which already did this correctly).
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     */
    void CalculateReferenceNorm(DofsArrayType& rDofSet)
    {
        TDataType reference_disp_norm = TDataType();

        #pragma omp parallel for reduction(+:reference_disp_norm)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = rDofSet.begin() + i;

            if (it_dof->IsFree()) {
                const TDataType dof_value = it_dof->GetSolutionStepValue();
                reference_disp_norm += dof_value * dof_value;
            }
        }
        mReferenceDispNorm = std::sqrt(reference_disp_norm);
    }

    /**
     * @brief This method computes the final norm (L2 norm of the free dofs' corrections in Dx)
     * @details It checks if the dof is fixed
     * @param rDofNum The number of DoFs (output: count of free dofs)
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param Dx Vector of results (variations on nodal variables)
     */
    TDataType CalculateFinalCorrectionNorm(
        SizeType& rDofNum,
        DofsArrayType& rDofSet,
        const TSystemVectorType& Dx
        )
    {
        // Initialize
        TDataType final_correction_norm = TDataType();
        SizeType dof_num = 0;

        // Loop over Dofs
        #pragma omp parallel for reduction(+:final_correction_norm,dof_num)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = rDofSet.begin() + i;

            IndexType dof_id;
            TDataType variation_dof_value;

            if (it_dof->IsFree()) {
                dof_id = it_dof->EquationId();
                variation_dof_value = Dx[dof_id];
                final_correction_norm += std::pow(variation_dof_value, 2);
                dof_num++;
            }
        }

        rDofNum = dof_num;
        return std::sqrt(final_correction_norm);
    }

    ///@}
}; /* Class DisplacementCriteria */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_DISPLACEMENT_CRITERIA defined */
|
nested.c | // RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
#include <unistd.h>
int main()
{
int condition=0;
omp_set_nested(1);
print_frame(0);
#pragma omp parallel num_threads(4)
{
print_frame_from_outlined_fn(1);
print_ids(0);
print_ids(1);
print_frame(0);
//get all implicit task events before starting nested:
#pragma omp barrier
#pragma omp parallel num_threads(4)
{
print_frame_from_outlined_fn(1);
print_ids(0);
print_ids(1);
print_ids(2);
print_frame(0);
OMPT_SIGNAL(condition);
OMPT_WAIT(condition,16);
#pragma omp barrier
print_fuzzy_address(1);
print_ids(0);
}
print_fuzzy_address(2);
print_ids(0);
}
print_fuzzy_address(3);
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
// CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end!
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// THREADS: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// THREADS: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=0x{{[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
// nested parallel masters
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
// THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
// THREADS: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=0x{{[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[NESTED_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[NESTED_EXIT:0x[0-f]+]]
// THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_EXIT]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=0x{{[0-f]+}}
// THREADS: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
// THREADS: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[NESTED_REENTER:0x[0-f]+]]
// THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
// explicit barrier
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], codeptr_ra=[[BARRIER_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_EXIT]], reenter_frame=0x{{[0-f]+}}
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[BARRIER_RETURN_ADDRESS]]
// THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NESTED_EXIT]], reenter_frame=[[NULL]]
// implicit barrier
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}
// THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]], codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]], codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}
// THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]]
// THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// implicit barrier
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
// THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
// THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// nested parallel worker threads
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// can't reliably tell which parallel region is the parent...
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}
// THREADS: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
return 0;
}
|
Graph.h | /*
* Graph.h
*
* Created on: 01.06.2014
* Author: Christian Staudt (christian.staudt@kit.edu), Klara Reichard
* (klara.reichard@gmail.com), Marvin Ritter (marvin.ritter@gmail.com)
*/
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <functional>
#include <queue>
#include <stack>
#include <stdexcept>
#include <unordered_set>
#include <utility>
#include <vector>
#include "../Globals.h"
#include "../auxiliary/FunctionTraits.h"
#include "../auxiliary/Log.h"
#include "../auxiliary/Random.h"
#include "../viz/Point.h"
#include "Coordinates.h"
namespace NetworKit {
/**
* A weighted edge used for the graph constructor with
* initializer list syntax.
*/
struct WeightedEdge {
node u, v;
edgeweight weight;
WeightedEdge(node u, node v, edgeweight w) : u(u), v(v), weight(w) {}
};
/// Orders weighted edges ascending by weight only; endpoints are ignored.
inline bool operator<(const WeightedEdge &lhs, const WeightedEdge &rhs) {
    return lhs.weight < rhs.weight;
}
/**
 * An unweighted edge. With @a sorted == true the endpoints are normalized
 * so that u <= v, which makes (a, b) and (b, a) compare equal.
 */
struct Edge {
    node u, v;  //!< endpoints of the edge

    /// Construct the edge (u, v); normalize endpoint order iff @a sorted.
    Edge(node _u, node _v, bool sorted = false) {
        u = sorted ? std::min(_u, _v) : _u;
        v = sorted ? std::max(_u, _v) : _v;
    }
};
/// Two edges are equal iff both endpoints match position-wise (order-sensitive).
inline bool operator==(const Edge &lhs, const Edge &rhs) {
    return lhs.u == rhs.u && lhs.v == rhs.v;
}
} // namespace NetworKit
namespace std {
/**
 * std::hash support for NetworKit::Edge so it can be used as a key in
 * unordered containers.
 * NOTE(review): XOR combining is symmetric, so (u, v) and (v, u) collide
 * and self-loops (u, u) hash to 0 — presumably acceptable here since sorted
 * edges are normalized; confirm before changing.
 */
template <> struct hash<NetworKit::Edge> {
    hash<NetworKit::node> hash_node;  // per-endpoint hasher

    size_t operator()(const NetworKit::Edge &e) const {
        const size_t hu = hash_node(e.u);
        const size_t hv = hash_node(e.v);
        return hu ^ hv;
    }
};
} // namespace std
namespace NetworKit {
// forward declaration to randomization/CurveballImpl.h
namespace CurveballDetails {
class CurveballMaterialization;
}
/**
* @ingroup graph
* A graph (with optional weights) and parallel iterator methods.
*/
class Graph final {
friend class ParallelPartitionCoarsening;
friend class GraphBuilder;
friend class CurveballDetails::CurveballMaterialization;
private:
// graph attributes
count id; //!< unique graph id, starts at 0
std::string name; //!< name of the graph, initially G#ID
// scalars
count n; //!< current number of nodes
count m; //!< current number of edges
count storedNumberOfSelfLoops; //!< current number of self loops, edges which
//!< have the same origin and target
node
z; //!< current upper bound of node ids, z will be the id of the next node
edgeid omega; //!< current upper bound of edge ids, will be the id of the next
//!< edge
count t; //!< current time step
bool weighted; //!< true if the graph is weighted, false otherwise
bool directed; //!< true if the graph is directed, false otherwise
bool edgesIndexed; //!< true if edge ids have been assigned
// per node data
std::vector<bool> exists; //!< exists[v] is true if node v has not been
//!< removed from the graph
Coordinates<float> coordinates; //!< coordinates of nodes (if present)
std::vector<count> inDeg; //!< only used for directed graphs, number of edges
//!< incoming per node
std::vector<count>
outDeg; //!< degree of every node, zero if node was removed. For directed
//!< graphs only outgoing edges count
std::vector<std::vector<node>>
inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes
//!< u that have an edge (u, v)
std::vector<std::vector<node>>
outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in
//!< outEdges[u] and for undirected also u in outEdges[v]
std::vector<std::vector<edgeweight>>
inEdgeWeights; //!< only used for directed graphs, same schema as inEdges
std::vector<std::vector<edgeweight>>
outEdgeWeights; //!< same schema (and same order!) as outEdges
std::vector<std::vector<edgeid>>
inEdgeIds; //!< only used for directed graphs, same schema as inEdges
std::vector<std::vector<edgeid>>
outEdgeIds; //!< same schema (and same order!) as outEdges
/**
* Returns the next unique graph id.
*/
count getNextGraphId();
/**
* Returns the index of node u in the array of incoming edges of node v. (for
* directed graphs inEdges is searched, while for undirected outEdges is
* searched, which gives the same result as indexInOutEdgeArray).
*/
index indexInInEdgeArray(node v, node u) const;
/**
* Returns the index of node v in the array of outgoing edges of node u.
*/
index indexInOutEdgeArray(node u, node v) const;
/**
* Computes the weighted in/out degree of a graph.
*
* @param inDegree whether to compute the in degree or the out degree.
*/
edgeweight computeWeightedDegree(const node &v,
const bool inDegree = false) const;
/**
* Computes the maximum in/out degree of the graph.
*
* @param inDegree wheter to compute the in degree or the out degree.
*/
count computeMaxDegree(const bool inDegree = false) const;
/**
* Computes the maximum in/out weighted degree of the graph
*
* @param inDegree whether to compute the in degree or the out degree
*/
edgeweight computeMaxWeightedDegree(const bool inDegree = false) const;
/**
* Returns the edge weight of the outgoing edge of index i in the outgoing
* edges of node u
* @param u The node
* @param i The index
* @return The weight of the outgoing edge or defaultEdgeWeight if the graph
* is unweighted
*/
template <bool hasWeights>
inline edgeweight getOutEdgeWeight(node u, index i) const;
/**
* Returns the edge weight of the incoming edge of index i in the incoming
* edges of node u
*
* @param u The node
* @param i The index in the incoming edge array
* @return The weight of the incoming edge
*/
template <bool hasWeights>
inline edgeweight getInEdgeWeight(node u, index i) const;
/**
* Returns the edge id of the edge of index i in the outgoing edges of node u
*
* @param u The node
* @param i The index in the outgoing edges
* @return The edge id
*/
template <bool graphHasEdgeIds>
inline edgeid getOutEdgeId(node u, index i) const;
/**
* Returns the edge id of the edge of index i in the incoming edges of node u
*
* @param u The node
* @param i The index in the incoming edges of u
* @return The edge id
*/
template <bool graphHasEdgeIds>
inline edgeid getInEdgeId(node u, index i) const;
/**
* @brief Returns if the edge (u, v) shall be used in the iteration of all
* edgesIndexed
*
* @param u The source node of the edge
* @param v The target node of the edge
* @return If the node shall be used, i.e. if v is not none and in the
* undirected case if u >= v
*/
template <bool graphIsDirected>
inline bool useEdgeInIteration(node u, node v) const;
/**
* @brief Implementation of the for loop for outgoing edges of u
*
* Note: If all (valid) outgoing edges shall be considered, graphIsDirected
* needs to be set to true
*
* @param u The node
* @param handle The handle that shall be executed for each edge
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline void forOutEdgesOfImpl(node u, L handle) const;
/**
* @brief Implementation of the for loop for incoming edges of u
*
* For undirected graphs, this is the same as forOutEdgesOfImpl but u and v
* are changed in the handle
*
* @param u The node
* @param handle The handle that shall be executed for each edge
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline void forInEdgesOfImpl(node u, L handle) const;
/**
* @brief Implementation of the for loop for all edges, @see forEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline void forEdgeImpl(L handle) const;
/**
* @brief Parallel implementation of the for loop for all edges, @see
* parallelForEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline void parallelForEdgesImpl(L handle) const;
/**
* @brief Summation variant of the parallel for loop for all edges, @see
* parallelSumForEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
typename L>
inline double parallelSumForEdgesImpl(L handle) const;
/*
* In the following definition, Aux::FunctionTraits is used in order to only
* execute lambda functions with the appropriate parameters. The
* decltype-return type is used for determining the return type of the lambda
* (needed for summation) but also determines if the lambda accepts the
* correct number of parameters. Otherwise the return type declaration fails
* and the function is excluded from overload resolution. Then there are
* multiple possible lambdas with three (third parameter id or weight) and two
* (second parameter can be second node id or edge weight for neighbor
* iterators). This is checked using Aux::FunctionTraits and std::enable_if.
* std::enable_if only defines the type member when the given bool is true,
* this bool comes from std::is_same which compares two types. The function
* traits give either the parameter type or if it is out of bounds they define
* type as void.
*/
/**
 * Fallback overload that triggers a static_assert error when no other
 * edgeLambda overload is chosen. Because of the "..." parameter, the
 * priority of this overload is lower than that of the others, so it is only
 * selected when the user-supplied lambda has an unsupported signature. This
 * avoids ugly and unreadable template substitution error messages from the
 * other declarations.
 */
template <class F, void * = (void *)0>
typename Aux::FunctionTraits<F>::result_type edgeLambda(F &, ...) const {
// !std::is_same<F, F>::value is always false but depends on F, which delays
// the evaluation of the static_assert to the moment this overload is
// actually instantiated (a plain `false` would fire unconditionally).
static_assert(!std::is_same<F, F>::value,
"Your lambda does not support the required parameters or the "
"parameters have the wrong type.");
return std::declval<typename Aux::FunctionTraits<
F>::result_type>(); // use the correct return type (this won't compile)
}
/**
 * Calls the given function f if its fourth argument is of type edgeid and
 * its third argument is of type edgeweight, i.e. f(u, v, weight, id).
 * Note that the decltype check alone is not enough, as edgeweight can be
 * cast to node; the explicit std::is_same checks pin down the exact types.
 */
template <
class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 3) &&
std::is_same<edgeweight, typename Aux::FunctionTraits<
F>::template arg<2>::type>::value &&
std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<
3>::type>::value>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid id) const
-> decltype(f(u, v, ew, id)) {
return f(u, v, ew, id);
}
/**
 * Calls the given function f if its third argument is of type edgeid,
 * i.e. f(u, v, id); the edge weight is discarded.
 * Note that the decltype check alone is not enough, as edgeweight can be
 * cast to node.
 */
template <
class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 2) &&
std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<
2>::type>::value &&
std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::
type>::value /* prevent matching f(v, weight, eid) */
>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight, edgeid id) const
-> decltype(f(u, v, id)) {
return f(u, v, id);
}
/**
 * Calls the given function f if its third argument is of type edgeweight,
 * i.e. f(u, v, weight); the edge id is discarded.
 * Note that the decltype check alone is not enough, as node can be cast to
 * edgeweight.
 */
template <class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 2) &&
std::is_same<edgeweight,
typename Aux::FunctionTraits<F>::template arg<
2>::type>::value>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid /*id*/) const
-> decltype(f(u, v, ew)) {
return f(u, v, ew);
}
/**
 * Calls the given function f if it has only two arguments and the second
 * argument is of type node, i.e. f(u, v); edge weight and id are discarded.
 * Note that the decltype check alone is not enough, as edgeweight can be
 * cast to node.
 */
template <
class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 1) &&
std::is_same<node, typename Aux::FunctionTraits<F>::template arg<
1>::type>::value>::type * = (void *)0>
auto edgeLambda(F &f, node u, node v, edgeweight /*ew*/, edgeid /*id*/) const
-> decltype(f(u, v)) {
return f(u, v);
}
/**
 * Calls the given function f if it has only two arguments and the second
 * argument is of type edgeweight, i.e. f(v, weight); the first node and the
 * edge id are discarded.
 * Note that the decltype check alone is not enough, as edgeweight can be
 * cast to node.
 */
template <class F,
typename std::enable_if<
(Aux::FunctionTraits<F>::arity >= 1) &&
std::is_same<edgeweight,
typename Aux::FunctionTraits<F>::template arg<
1>::type>::value>::type * = (void *)0>
// The trailing return type now uses the same call expression as the body,
// f(v, ew). Previously it was decltype(f(u, ew)) — equivalent in effect
// because u and v are both node, but misleading to readers.
auto edgeLambda(F &f, node /*u*/, node v, edgeweight ew, edgeid /*id*/) const
-> decltype(f(v, ew)) {
return f(v, ew);
}
/**
* Calls the given function f if it has only one argument, discards the first
* node id, the edge weight and the edge id
*/
template <class F, void * = (void *)0>
auto edgeLambda(F &f, node, node v, edgeweight, edgeid) const
-> decltype(f(v)) {
return f(v);
}
/**
* Calls the given BFS handle with distance parameter
*/
template <class F>
auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u, dist)) {
return f(u, dist);
}
/**
* Calls the given BFS handle without distance parameter
*/
template <class F>
auto callBFSHandle(F &f, node u, count) const -> decltype(f(u)) {
return f(u);
}
public:
/**
* Create a graph of @a n nodes. The graph has assignable edge weights if @a
* weighted is set to <code>true</code>. If @a weighted is set to
* <code>false</code> each edge has edge weight 1.0 and any other weight
* assignment will be ignored.
* @param n Number of nodes.
* @param weighted If set to <code>true</code>, the graph has edge weights.
* @param directed If set to @c true, the graph will be directed.
*/
Graph(count n = 0, bool weighted = false, bool directed = false);
Graph(const Graph &G, bool weighted, bool directed);
/**
* Generate a weighted graph from a list of edges. (Useful for small
* graphs in unit tests that you do not want to read from a file.)
*
* @param[in] edges list of weighted edges
*/
Graph(std::initializer_list<WeightedEdge> edges);
/**
* Create a graph as copy of @a other.
* @param other The graph to copy.
*/
Graph(const Graph &other) = default;
/** Default move constructor */
Graph(Graph &&other) = default;
/** Default destructor */
~Graph() = default;
/** Default move assignment operator */
Graph &operator=(Graph &&other) = default;
/** Default copy assignment operator */
Graph &operator=(const Graph &other) = default;
/** EDGE IDS **/
/**
* Initially assign integer edge identifiers.
*
* @param force Force re-indexing of edges even if they have already been
* indexed
*/
void indexEdges(bool force = false);
/**
* Checks if edges have been indexed
*
* @return bool if edges have been indexed
*/
bool hasEdgeIds() const { return edgesIndexed; }
/**
* Get the id of the given edge.
*/
edgeid edgeId(node u, node v) const;
/**
 * Get an upper bound for the edge ids in the graph; omega is the id the
 * next indexed edge would receive.
 * @return An upper bound for the edge ids.
 */
index upperEdgeIdBound() const { return this->omega; }
/** GRAPH INFORMATION **/
/**
 * Get the ID of this graph. The ID is a unique unsigned integer given to
 * every graph on construction (see getNextGraphId()).
 */
count getId() const { return this->id; }
/**
* Return the type of the graph.
* Graph: not weighted, undirected
* WeightedGraph: weighted, undirected
* DirectedGraph: not weighted, directed
* WeightedDirectedGraph: weighted, directed
*/
std::string typ() const;
/**
* Try to save some memory by shrinking internal data structures of the graph.
* Only run this once you finished editing the graph. Otherwise it will cause
* unnecessary reallocation of memory.
*/
void shrinkToFit();
/**
* Compacts the adjacency arrays by re-using no longer needed slots from
* deleted edges.
*/
void compactEdges();
/**
* Sorts the adjacency arrays by node id. While the running time is linear
* this temporarily duplicates the memory.
*/
void sortEdges();
/**
 * Set name of graph to @a name.
 * @param name The new name; taken by value and moved into place, so callers
 *             passing a temporary avoid a second copy.
 */
void setName(std::string name) { this->name = std::move(name); }
/*
* Returns the name of the graph.
* @return The name of the graph.
*/
std::string getName() const { return name; }
/**
* Returns a string representation of the graph.
* @return A string representation.
*/
std::string toString() const;
/* COPYING */
/*
* Copies all nodes to a new graph
* @return graph with the same nodes.
*/
Graph copyNodes() const;
/* NODE MODIFIERS */
/**
* Add a new node to the graph and return it.
* @return The new node.
*/
node addNode();
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Add a new node to the graph with coordinates @a x and @a y and return it.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
node addNode(float x, float y);
/**
* Remove a node @a v and all incident edges from the graph.
*
* Incoming as well as outgoing edges will be removed.
*
* @param v Node.
*/
void removeNode(node v);
/**
 * Check if node @a v exists in the graph.
 *
 * @param v Node.
 * @return @c true if @a v exists, @c false otherwise.
 */
bool hasNode(node v) const {
    if (v >= z)
        return false; // id beyond the current upper bound, never assigned
    return exists[v]; // false if the node was removed
}
/**
* Restores a previously deleted node @a v with its previous id in the graph.
*
* @param v Node.
*
*/
void restoreNode(node v);
// SET OPERATIONS
/**
* Appends another graph to this graph as a new subgraph. Performs node
* id remapping.
* @param G [description]
*/
void append(const Graph &G);
/**
* Modifies this graph to be the union of it and another graph.
* Nodes with the same ids are identified with each other.
* @param G [description]
*/
void merge(const Graph &G);
// SUBGRAPHS
Graph subgraphFromNodes(const std::unordered_set<node> &nodes) const;
/** NODE PROPERTIES **/
/**
 * Returns the number of outgoing neighbors of @a v (out-degree for directed
 * graphs, plain degree for undirected ones).
 *
 * @param v Node.
 * @return The number of outgoing neighbors.
 */
count degree(node v) const { return this->outDeg[v]; }
/**
 * Get the number of incoming neighbors of @a v.
 *
 * @param v Node.
 * @return The number of incoming neighbors.
 * @note If the graph is not directed, the outgoing degree is returned.
 */
count degreeIn(node v) const {
    if (directed)
        return inDeg[v];
    return outDeg[v]; // undirected: in-degree equals out-degree
}
/**
 * Get the number of outgoing neighbors of @a v.
 *
 * @param v Node.
 * @return The number of outgoing neighbors.
 */
count degreeOut(node v) const { return this->outDeg[v]; }
/**
* Returns the maximum out-degree of the graph.
*
* @return The maximum out-degree of the graph.
*/
count maxDegree() const;
/**
* Returns the maximum in-degree of the graph.
*
* @return The maximum in-degree of the graph.
*/
count maxDegreeIn() const;
/**
 * Check whether @a v is isolated, i.e. its degree is 0 (for directed graphs
 * both the out-degree and the in-degree must be 0).
 * @param v Node.
 * @return @c true if the node is isolated (= degree is 0)
 */
bool isIsolated(node v) const {
    if (outDeg[v] != 0)
        return false;
    // Undirected graphs track only outDeg; directed ones must also have
    // no incoming edges.
    return directed ? inDeg[v] == 0 : true;
}
/**
* Returns the weighted degree of @a v.
*
* @param v Node.
* @return Weighted degree of @a v.
* @note For directed graphs this is the sum of weights of all outgoing edges.
* of @a v.
*/
edgeweight weightedDegree(const node &v) const;
/**
* Returns the maximum weighted degree of the graph.
*
* @return Maximum weighted degree of the graph.
* @note For directed graphs this is the sum of weights of all outgoing edges.
*/
edgeweight maxWeightedDegree() const;
/**
* Returns the maximum weighted in degree of the graph.
*
* @return Maximum weighted in degree of the graph.
* @note For directed graphs this is the sum of weights of all in-going edges.
*/
edgeweight maxWeightedDegreeIn() const;
/**
* Returns the weighted in-degree of @a v.
*
* @param v Node.
* @return Weighted in-degree of @a v.
* @note For directed graphs this is the sum of weights of all ingoing edges.
* of @a v.
*/
edgeweight weightedDegreeIn(const node &v) const;
/**
* Returns the volume of the @a v, which is the weighted degree with
* self-loops counted twice.
*
* @param v Node.
* @return The volume of the @a v.
*/
edgeweight volume(node v) const;
/**
* Returns a random node of the graph.
* @return A random node.
*/
node randomNode() const;
/**
* Returns a random neighbor of @a u and @c none if degree is zero.
*
* @param u Node.
* @return A random neighbor of @a u.
*/
node randomNeighbor(node u) const;
/* EDGE MODIFIERS */
/**
* Insert an edge between the nodes @a u and @a v. If the graph is weighted
* you can optionally set a weight for this edge. The default weight is 1.0.
* Note: Multi-edges are not supported and will NOT be handled consistently by
* the graph data structure.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @param weight Optional edge weight.
*/
void addEdge(node u, node v, edgeweight ew = defaultEdgeWeight);
/**
* Removes the undirected edge {@a u,@a v}.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
*/
void removeEdge(node u, node v);
/**
* Efficiently removes all the edges adjacent to a set of nodes that is not
* connected to the rest of the graph. This is meant to optimize the Kadabra
* algorithm.
* @param nodesInSet vector of nodes that form a connected component that is
* isolated from the rest of the graph.
*/
void removeEdgesFromIsolatedSet(const std::vector<node> &nodesInSet);
/**
* Removes all the edges in the graph.
*/
void removeAllEdges();
/**
* Removes all self-loops in the graph.
*/
void removeSelfLoops();
/**
* Changes the edges {@a s1, @a t1} into {@a s1, @a t2} and the edge {@a s2,
* @a t2} into {@a s2, @a t1}.
*
* If there are edge weights or edge ids, they are preserved. Note that no
* check is performed if the swap is actually possible, i.e. does not generate
* duplicate edges.
*
* @param s1 The first source
* @param t1 The first target
* @param s2 The second source
* @param t2 The second target
*/
void swapEdge(node s1, node t1, node s2,
node t2);
/**
* Checks if undirected edge {@a u,@a v} exists in the graph.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @return <code>true</code> if the edge exists, <code>false</code> otherwise.
*/
bool hasEdge(node u, node v) const;
/**
* Returns a random edge. By default a random node u is chosen and then some
* random neighbor v. So the probability of choosing (u, v) highly depends on
* the degree of u. Setting uniformDistribution to true, will give you a real
* uniform distributed edge, but will be very slow. So only use
* uniformDistribution for single calls outside of any loops.
*/
std::pair<node, node> randomEdge(bool uniformDistribution = false) const;
/**
* Returns a vector with nr random edges. The edges are chosen uniform random.
*/
std::vector<std::pair<node, node>> randomEdges(count nr) const;
/* GLOBAL PROPERTIES */
/**
* Returns <code>true</code> if this graph supports edge weights other
* than 1.0.
* @return <code>true</code> if this graph supports edge weights other
* than 1.0.
*/
bool isWeighted() const { return weighted; }
/**
* Return @c true if this graph supports directed edges.
* @return @c true if this graph supports directed edges.
*/
bool isDirected() const { return directed; }
/**
* Return <code>true</code> if graph contains no nodes.
* @return <code>true</code> if graph contains no nodes.
*/
bool isEmpty() const { return n == 0; }
/**
* Return the number of nodes in the graph.
* @return The number of nodes.
*/
count numberOfNodes() const { return n; }
/**
* Return the number of edges in the graph.
* @return The number of edges.
*/
count numberOfEdges() const { return m; }
/**
* @return a pair (n, m) where n is the number of nodes and m is the number of
* edges
*/
std::pair<count, count> const size() const { return {n, m}; };
/**
 * @return the density of the graph: the ratio of stored (non-loop) edges to
 * the maximum possible number of edges.
 * @note Returns 0.0 for graphs with fewer than two nodes; the unguarded
 * division would otherwise divide by zero and yield NaN/inf.
 */
double density() const {
    count n = numberOfNodes();
    count m = numberOfEdges();
    count loops = numberOfSelfLoops();
    // self-loops do not count towards density
    m -= loops;
    // a graph with 0 or 1 nodes admits no non-loop edges: n * (n - 1) == 0
    if (n < 2) {
        return 0.0;
    }
    double d;
    if (isDirected()) {
        // directed: up to n*(n-1) ordered pairs
        d = m / (double)(n * (n - 1));
    } else {
        // undirected: up to n*(n-1)/2 unordered pairs
        d = (2 * m) / (double)(n * (n - 1));
    }
    return d;
}
/**
* Return the number of loops {v,v} in the graph.
* @return The number of loops.
* @note This involves calculation, so store result if needed multiple times.
*/
count numberOfSelfLoops() const;
/**
* Get an upper bound for the node ids in the graph.
* @return An upper bound for the node ids.
*/
index upperNodeIdBound() const { return z; }
/**
* Check for invalid graph states, such as multi-edges.
* @return False if the graph is in invalid state.
*/
bool checkConsistency() const;
/* DYNAMICS */
/**
* Trigger a time step - increments counter.
*/
void timeStep() { t++; }
/**
* Get time step counter.
* @return Time step counter.
*/
count time() { return t; }
/* COORDINATES */
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Sets the coordinate of @a v to @a value.
*
* @param v Node.
* @param value The coordinate of @a v.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
void setCoordinate(node v, Point<float> value) {
coordinates.setCoordinate(v, value);
}
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get the coordinate of @a v.
* @param v Node.
* @return The coordinate of @a v.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
Point<float> &getCoordinate(node v) { return coordinates.getCoordinate(v); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get minimum coordinate of all coordinates with respect to dimension @a dim.
* @param dim The dimension to search for minimum.
* @return The minimum coordinate in dimension @a dim.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
float minCoordinate(count dim) { return coordinates.minCoordinate(dim); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get maximum coordinate of all coordinates with respect to dimension @a dim.
* @param dim The dimension to search for maximum.
* @return The maximum coordinate in dimension @a dim.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
float maxCoordinate(count dim) { return coordinates.maxCoordinate(dim); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Initializes the coordinates for the nodes in graph.
* @note This has to be called once and before you set coordinates. Call this
* method again if new nodes have been added.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like
// any other node attribute")]]
void initCoordinates() { coordinates.init(z); }
/* EDGE ATTRIBUTES */
/**
* Return edge weight of edge {@a u,@a v}. Returns 0 if edge does not exist.
* BEWARE: Running time is \Theta(deg(u))!
*
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @return Edge weight of edge {@a u,@a v} or 0 if edge does not exist.
*/
edgeweight weight(node u, node v) const;
/**
* Set the weight of an edge. If the edge does not exist,
* it will be inserted.
*
* @param[in] u endpoint of edge
* @param[in] v endpoint of edge
* @param[in] weight edge weight
*/
void setWeight(node u, node v, edgeweight ew);
/**
* Increase the weight of an edge. If the edge does not exist,
* it will be inserted.
*
* @param[in] u endpoint of edge
* @param[in] v endpoint of edge
* @param[in] weight edge weight
*/
void increaseWeight(node u, node v, edgeweight ew);
/* SUMS */
/**
* Returns the sum of all edge weights.
* @return The sum of all edge weights.
*/
edgeweight totalEdgeWeight() const;
/* Collections */
/**
* Get list of all nodes.
* @return List of all nodes.
*/
std::vector<node> nodes() const;
/**
* Get list of edges as node pairs.
* @return List of edges as node pairs.
*/
std::vector<std::pair<node, node>> edges() const;
/**
* Get list of neighbors of @a u.
*
* @param u Node.
* @return List of neighbors of @a u.
*/
std::vector<node> neighbors(node u) const;
/**
 * Get i-th (outgoing) neighbor of @a u.
 * WARNING: This function is deprecated or only temporary.
 *
 * @param u Node.
 * @param i index; should be in [0, degreeOut(u))
 * @return @a i -th (outgoing) neighbor of @a u, or @c none if no such
 * neighbor exists.
 * @note No bounds check is performed on @a i; an out-of-range index reads
 * past the adjacency vector (undefined behavior).
 */
template <bool graphIsDirected> node getIthNeighbor(node u, index i) const {
    node v = outEdges[u][i];
    // useEdgeInIteration filters slots that should not be reported
    // (e.g. deleted edge slots); see its specializations below.
    if (useEdgeInIteration<graphIsDirected>(u, v))
        return v;
    else
        return none;
}
/* Derivative Graphs */
/**
* Return an undirected version of this graph.
*
* @return undirected graph.
*/
Graph toUndirected() const;
/**
* Return an unweighted version of this graph.
*
* @return unweighted graph.
*/
Graph toUnweighted() const;
/**
* Return the transpose of this graph. The graph must be directed.
*
* @return transpose of the graph.
*/
Graph transpose() const;
/* NODE ITERATORS */
/**
* Iterate over all nodes of the graph and call @a handle (lambda closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void forNodes(L handle) const;
/**
* Iterate randomly over all nodes of the graph and call @a handle (lambda
* closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void parallelForNodes(L handle) const;
/** Iterate over all nodes of the graph and call @a handle (lambda closure) as
* long as @a condition remains true. This allows for breaking from a node
* loop.
*
* @param condition Returning <code>false</code> breaks the loop.
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename C, typename L>
void forNodesWhile(C condition, L handle) const;
/**
* Iterate randomly over all nodes of the graph and call @a handle (lambda
* closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void forNodesInRandomOrder(L handle) const;
/**
* Iterate in parallel over all nodes of the graph and call handler (lambda
* closure). Using schedule(guided) to remedy load-imbalances due to e.g.
* unequal degree distribution.
*
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void balancedParallelForNodes(L handle) const;
/**
* Iterate over all undirected pairs of nodes and call @a handle (lambda
* closure).
*
* @param handle Takes parameters <code>(node, node)</code>.
*/
template <typename L> void forNodePairs(L handle) const;
/**
* Iterate over all undirected pairs of nodes in parallel and call @a handle
* (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code>.
*/
template <typename L> void parallelForNodePairs(L handle) const;
/* EDGE ITERATORS */
/**
* Iterate over all edges of the const graph and call @a handle (lambda
* closure).
*
* @param handle Takes parameters <code>(node, node)</code>, <code>(node,
* node, edgweight)</code>, <code>(node, node, edgeid)</code> or <code>(node,
* node, edgeweight, edgeid)</code>.
*/
template <typename L> void forEdges(L handle) const;
/**
* Iterate in parallel over all edges of the const graph and call @a handle
* (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code> or <code>(node,
* node, edgweight)</code>, <code>(node, node, edgeid)</code> or <code>(node,
* node, edgeweight, edgeid)</code>.
*/
template <typename L> void parallelForEdges(L handle) const;
/* NEIGHBORHOOD ITERATORS */
/**
* Iterate over all neighbors of a node and call @a handle (lambda closure).
*
* @param u Node.
* @param handle Takes parameter <code>(node)</code> or <code>(node,
* edgeweight)</code> which is a neighbor of @a u.
* @note For directed graphs only outgoing edges from @a u are considered.
* A node is its own neighbor if there is a self-loop.
*
*/
template <typename L> void forNeighborsOf(node u, L handle) const;
/**
* Iterate over all incident edges of a node and call @a handle (lambda
* closure).
*
* @param u Node.
* @param handle Takes parameters <code>(node, node)</code>, <code>(node,
* node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node,
* node, edgeweight, edgeid)</code> where the first node is @a u and the
* second is a neighbor of @a u.
* @note For undirected graphs all edges incident to @a u are also outgoing
* edges.
*/
template <typename L> void forEdgesOf(node u, L handle) const;
/**
* Iterate over all neighbors of a node and call handler (lambda closure).
* For directed graphs only incoming edges from u are considered.
*/
template <typename L> void forInNeighborsOf(node u, L handle) const;
/**
* Iterate over all incoming edges of a node and call handler (lambda
* closure).
* @note For undirected graphs all edges incident to u are also incoming
* edges.
*
* Handle takes parameters (u, v) or (u, v, w) where w is the edge weight.
*/
template <typename L> void forInEdgesOf(node u, L handle) const;
/* REDUCTION ITERATORS */
/**
* Iterate in parallel over all nodes and sum (reduce +) the values returned
* by the handler
*/
template <typename L> double parallelSumForNodes(L handle) const;
/**
* Iterate in parallel over all edges and sum (reduce +) the values returned
* by the handler
*/
template <typename L> double parallelSumForEdges(L handle) const;
/* GRAPH SEARCHES */
/**
* Iterate over nodes in breadth-first search order starting from r until
* connected component of r has been visited.
*
* @param r Node.
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void BFSfrom(node r, L handle) const;
template <typename L>
void BFSfrom(const std::vector<node> &startNodes, L handle) const;
template <typename L> void BFSEdgesFrom(node r, L handle) const;
/**
* Iterate over nodes in depth-first search order starting from r until
* connected component of r has been visited.
*
* @param r Node.
* @param handle Takes parameter <code>(node)</code>.
*/
template <typename L> void DFSfrom(node r, L handle) const;
template <typename L> void DFSEdgesFrom(node r, L handle) const;
};
/* NODE ITERATORS */
template <typename L> void Graph::forNodes(L handle) const {
for (node v = 0; v < z; ++v) {
if (exists[v]) {
handle(v);
}
}
}
/**
 * Call @a handle for every existing node, with iterations distributed over
 * OpenMP threads. @a handle must be safe to invoke concurrently.
 */
template <typename L> void Graph::parallelForNodes(L handle) const {
#pragma omp parallel for
    for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
        if (exists[v]) { // skip ids of deleted nodes
            handle(v);
        }
    }
}
/**
 * Iterate over existing nodes and call @a handle as long as @a condition
 * returns true; a false condition breaks the loop.
 *
 * @note @a condition is evaluated only when an existing node is reached
 * (i.e. once per handle() call), not for deleted ids.
 */
template <typename C, typename L>
void Graph::forNodesWhile(C condition, L handle) const {
    for (node v = 0; v < z; ++v) {
        if (exists[v]) {
            if (!condition()) {
                break;
            }
            handle(v);
        }
    }
}
template <typename L> void Graph::forNodesInRandomOrder(L handle) const {
std::vector<node> randVec = nodes();
std::shuffle(randVec.begin(), randVec.end(), Aux::Random::getURNG());
for (node v : randVec) {
handle(v);
}
}
/**
 * Parallel node iteration with guided scheduling: chunk sizes shrink over
 * time, which smooths load imbalance when per-node work varies (e.g. skewed
 * degree distributions). @a handle must be thread-safe.
 */
template <typename L> void Graph::balancedParallelForNodes(L handle) const {
#pragma omp parallel for schedule( \
    guided) // TODO: define min block size (and test it!)
    for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
        if (exists[v]) { // skip deleted node ids
            handle(v);
        }
    }
}
/** Call @a handle for every unordered pair {u, v} of distinct existing
 *  nodes; each pair is visited exactly once with u < v. */
template <typename L> void Graph::forNodePairs(L handle) const {
    for (node u = 0; u < z; ++u) {
        if (!exists[u])
            continue;
        // only partners with a larger id, so each pair appears once
        for (node v = u + 1; v < z; ++v) {
            if (exists[v])
                handle(u, v);
        }
    }
}
/**
 * Parallel version of forNodePairs: the outer node is parallelized with
 * guided scheduling (inner-loop length shrinks with u, so chunks would be
 * imbalanced under static scheduling). @a handle must be thread-safe.
 */
template <typename L> void Graph::parallelForNodePairs(L handle) const {
#pragma omp parallel for schedule(guided)
    for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
        if (exists[u]) {
            // v > u ensures every unordered pair is handled exactly once
            for (node v = u + 1; v < z; ++v) {
                if (exists[v]) {
                    handle(u, v);
                }
            }
        }
    }
}
/* EDGE ITERATORS */
/* HELPERS */
// Weight of the i-th outgoing edge of u.
template <bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getOutEdgeWeight(node u, index i) const {
    return outEdgeWeights[u][i];
}
// implementation for weighted == false: every edge has the default weight,
// so the per-edge weight arrays are never touched.
template <>
inline edgeweight Graph::getOutEdgeWeight<false>(node, index) const {
    return defaultEdgeWeight;
}
// Weight of the i-th incoming edge of u (directed storage).
template <bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getInEdgeWeight(node u, index i) const {
    return inEdgeWeights[u][i];
}
// implementation for weighted == false: constant default weight, no lookup.
template <>
inline edgeweight Graph::getInEdgeWeight<false>(node, index) const {
    return defaultEdgeWeight;
}
// Edge id of the i-th outgoing edge of u.
template <bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getOutEdgeId(node u, index i) const {
    return outEdgeIds[u][i];
}
// implementation for hasEdgeIds == false: ids are not indexed, return a
// dummy 0 so edgeLambda call sites compile uniformly.
template <>
inline edgeid Graph::getOutEdgeId<false>(node, index) const {
    return 0;
}
// Edge id of the i-th incoming edge of u.
template <bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getInEdgeId(node u, index i) const {
    return inEdgeIds[u][i];
}
// implementation for hasEdgeIds == false: dummy 0, ids are not indexed.
template <>
inline edgeid Graph::getInEdgeId<false>(node, index) const {
    return 0;
}
// Decide whether adjacency slot (u, v) should be visited during iteration.
// Directed case: visit every slot whose target is not the 'none' sentinel
// (deleted edge slots store none).
template <bool graphIsDirected> // implementation for graphIsDirected == true
inline bool Graph::useEdgeInIteration(node /* u */, node v) const {
    return v != none;
}
// Undirected case: each edge {u, v} is stored in both endpoints' lists, so
// only visit it from the endpoint with the larger id (u >= v) to avoid
// handling the edge twice. Deleted slots are skipped too: when v == none,
// u >= v cannot hold (see comment in parallelSumForEdgesImpl).
template <> // implementation for graphIsDirected == false
inline bool Graph::useEdgeInIteration<false>(node u, node v) const {
    return u >= v;
}
// Core loop over u's outgoing adjacency. The compile-time flags select the
// weight/id lookup specializations; edgeLambda adapts the call to whatever
// arity @a handle accepts (with/without weight and edge id).
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline void Graph::forOutEdgesOfImpl(node u, L handle) const {
    for (index i = 0; i < outEdges[u].size(); ++i) {
        node v = outEdges[u][i];
        // skips deleted slots and, for undirected graphs, the duplicate
        // storage of each edge (see useEdgeInIteration)
        if (useEdgeInIteration<graphIsDirected>(u, v)) {
            edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i),
                          getOutEdgeId<graphHasEdgeIds>(u, i));
        }
    }
}
// Core loop over u's incoming edges. Directed graphs read the dedicated
// in-adjacency arrays; undirected graphs have no separate in-storage, so the
// out-adjacency is used instead. In both branches useEdgeInIteration<true>
// is used deliberately: it only filters deleted (none) slots, because every
// incident edge of u must be reported here, without pair-deduplication.
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline void Graph::forInEdgesOfImpl(node u, L handle) const {
    // graphIsDirected is a compile-time constant; dead branch is eliminated
    if (graphIsDirected) {
        for (index i = 0; i < inEdges[u].size(); i++) {
            node v = inEdges[u][i];
            if (useEdgeInIteration<true>(u, v)) {
                edgeLambda<L>(handle, u, v, getInEdgeWeight<hasWeights>(u, i),
                              getInEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    } else {
        for (index i = 0; i < outEdges[u].size(); ++i) {
            node v = outEdges[u][i];
            if (useEdgeInIteration<true>(u, v)) {
                edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i),
                              getOutEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    }
}
// Sequentially visit every edge of the graph by scanning each node's
// out-adjacency; duplicate visits of undirected edges are suppressed inside
// forOutEdgesOfImpl.
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline void Graph::forEdgeImpl(L handle) const {
    for (node u = 0; u < z; ++u) {
        forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u,
                                                                           handle);
    }
}
// Parallel edge visit: source nodes are distributed over OpenMP threads with
// guided scheduling (adjacency lengths vary per node). @a handle must be
// thread-safe.
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline void Graph::parallelForEdgesImpl(L handle) const {
#pragma omp parallel for schedule(guided)
    for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
        forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u,
                                                                           handle);
    }
}
// Parallel reduction over all edges: sums the double returned by @a handle
// for each edge, using an OpenMP '+' reduction on per-thread partial sums.
template <bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds,
          typename L>
inline double Graph::parallelSumForEdgesImpl(L handle) const {
    double sum = 0.0;
#pragma omp parallel for reduction(+ : sum)
    for (omp_index u = 0; u < static_cast<omp_index>(z); ++u) {
        for (index i = 0; i < outEdges[u].size(); ++i) {
            node v = outEdges[u][i];
            // undirected, do not iterate over edges twice
            // {u, v} instead of (u, v); if v == none, u > v is not fulfilled
            if (useEdgeInIteration<graphIsDirected>(u, v)) {
                sum += edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i),
                                     getOutEdgeId<graphHasEdgeIds>(u, i));
            }
        }
    }
    return sum;
}
// Dispatch to the matching forEdgeImpl instantiation. The switch key packs
// the three runtime flags into a 3-bit value: bit 0 = weighted,
// bit 1 = directed, bit 2 = edgesIndexed.
template <typename L> void Graph::forEdges(L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
    case 0: // unweighted, undirected, no edgeIds
        forEdgeImpl<false, false, false, L>(handle);
        break;
    case 1: // weighted, undirected, no edgeIds
        forEdgeImpl<false, true, false, L>(handle);
        break;
    case 2: // unweighted, directed, no edgeIds
        forEdgeImpl<true, false, false, L>(handle);
        break;
    case 3: // weighted, directed, no edgeIds
        forEdgeImpl<true, true, false, L>(handle);
        break;
    case 4: // unweighted, undirected, with edgeIds
        forEdgeImpl<false, false, true, L>(handle);
        break;
    case 5: // weighted, undirected, with edgeIds
        forEdgeImpl<false, true, true, L>(handle);
        break;
    case 6: // unweighted, directed, with edgeIds
        forEdgeImpl<true, false, true, L>(handle);
        break;
    case 7: // weighted, directed, with edgeIds
        forEdgeImpl<true, true, true, L>(handle);
        break;
    }
}
// Parallel counterpart of forEdges; same 3-bit flag dispatch
// (bit 0 = weighted, bit 1 = directed, bit 2 = edgesIndexed).
// @a handle must be thread-safe.
template <typename L> void Graph::parallelForEdges(L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
    case 0: // unweighted, undirected, no edgeIds
        parallelForEdgesImpl<false, false, false, L>(handle);
        break;
    case 1: // weighted, undirected, no edgeIds
        parallelForEdgesImpl<false, true, false, L>(handle);
        break;
    case 2: // unweighted, directed, no edgeIds
        parallelForEdgesImpl<true, false, false, L>(handle);
        break;
    case 3: // weighted, directed, no edgeIds
        parallelForEdgesImpl<true, true, false, L>(handle);
        break;
    case 4: // unweighted, undirected, with edgeIds
        parallelForEdgesImpl<false, false, true, L>(handle);
        break;
    case 5: // weighted, undirected, with edgeIds
        parallelForEdgesImpl<false, true, true, L>(handle);
        break;
    case 6: // unweighted, directed, with edgeIds
        parallelForEdgesImpl<true, false, true, L>(handle);
        break;
    case 7: // weighted, directed, with edgeIds
        parallelForEdgesImpl<true, true, true, L>(handle);
        break;
    }
}
/* NEIGHBORHOOD ITERATORS */
// Neighbors of u are exactly the targets of u's (out-)edges, so this simply
// forwards; edgeLambda adapts handle's arity inside forEdgesOf.
template <typename L> void Graph::forNeighborsOf(node u, L handle) const {
    forEdgesOf(u, handle);
}
// Iterate over all edges incident to u via the out-adjacency. The first
// template flag is always 'true' here on purpose: it makes
// forOutEdgesOfImpl visit every stored slot of u (filtering only deleted
// slots), which is correct for both directed and undirected graphs.
// Dispatch key: bit 0 = weighted, bit 1 = edgesIndexed.
template <typename L> void Graph::forEdgesOf(node u, L handle) const {
    switch (weighted + 2 * edgesIndexed) {
    case 0: // not weighted, no edge ids
        forOutEdgesOfImpl<true, false, false, L>(u, handle);
        break;
    case 1: // weighted, no edge ids
        forOutEdgesOfImpl<true, true, false, L>(u, handle);
        break;
    case 2: // not weighted, with edge ids
        forOutEdgesOfImpl<true, false, true, L>(u, handle);
        break;
    case 3: // weighted, with edge ids
        forOutEdgesOfImpl<true, true, true, L>(u, handle);
        break;
    }
}
// In-neighbors of u are the sources of u's incoming edges; forward to the
// in-edge iterator, which handles the undirected fallback.
template <typename L> void Graph::forInNeighborsOf(node u, L handle) const {
    forInEdgesOf(u, handle);
}
// Iterate over u's incoming edges; 3-bit dispatch as in forEdges
// (bit 0 = weighted, bit 1 = directed, bit 2 = edgesIndexed). For
// undirected graphs forInEdgesOfImpl falls back to the out-adjacency.
template <typename L> void Graph::forInEdgesOf(node u, L handle) const {
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
    case 0: // unweighted, undirected, no edge ids
        forInEdgesOfImpl<false, false, false, L>(u, handle);
        break;
    case 1: // weighted, undirected, no edge ids
        forInEdgesOfImpl<false, true, false, L>(u, handle);
        break;
    case 2: // unweighted, directed, no edge ids
        forInEdgesOfImpl<true, false, false, L>(u, handle);
        break;
    case 3: // weighted, directed, no edge ids
        forInEdgesOfImpl<true, true, false, L>(u, handle);
        break;
    case 4: // unweighted, undirected, with edge ids
        forInEdgesOfImpl<false, false, true, L>(u, handle);
        break;
    case 5: // weighted, undirected, with edge ids
        forInEdgesOfImpl<false, true, true, L>(u, handle);
        break;
    case 6: // unweighted, directed, with edge ids
        forInEdgesOfImpl<true, false, true, L>(u, handle);
        break;
    case 7: // weighted, directed, with edge ids
        forInEdgesOfImpl<true, true, true, L>(u, handle);
        break;
    }
}
/* REDUCTION ITERATORS */
// Sum handle(v) over all existing nodes, in parallel via an OpenMP '+'
// reduction. @a handle must be thread-safe; note that a parallel
// floating-point sum may differ from the sequential result in the last bits.
template <typename L> double Graph::parallelSumForNodes(L handle) const {
    double sum = 0.0;
#pragma omp parallel for reduction(+ : sum)
    for (omp_index v = 0; v < static_cast<omp_index>(z); ++v) {
        if (exists[v]) { // skip deleted node ids
            sum += handle(v);
        }
    }
    return sum;
}
// Sum handle(...) over all edges in parallel; 3-bit dispatch as in forEdges
// (bit 0 = weighted, bit 1 = directed, bit 2 = edgesIndexed).
template <typename L> double Graph::parallelSumForEdges(L handle) const {
    double sum = 0.0;
    switch (weighted + 2 * directed + 4 * edgesIndexed) {
    case 0: // unweighted, undirected, no edge ids
        sum = parallelSumForEdgesImpl<false, false, false, L>(handle);
        break;
    case 1: // weighted, undirected, no edge ids
        sum = parallelSumForEdgesImpl<false, true, false, L>(handle);
        break;
    case 2: // unweighted, directed, no edge ids
        sum = parallelSumForEdgesImpl<true, false, false, L>(handle);
        break;
    case 3: // weighted, directed, no edge ids
        sum = parallelSumForEdgesImpl<true, true, false, L>(handle);
        break;
    case 4: // unweighted, undirected, with edge ids
        sum = parallelSumForEdgesImpl<false, false, true, L>(handle);
        break;
    case 5: // weighted, undirected, with edge ids
        sum = parallelSumForEdgesImpl<false, true, true, L>(handle);
        break;
    case 6: // unweighted, directed, with edge ids
        sum = parallelSumForEdgesImpl<true, false, true, L>(handle);
        break;
    case 7: // weighted, directed, with edge ids
        sum = parallelSumForEdgesImpl<true, true, true, L>(handle);
        break;
    }
    return sum;
}
/* GRAPH SEARCHES */
/** Convenience overload: breadth-first search starting from the single
 *  node @a r; delegates to the multi-source variant. */
template <typename L> void Graph::BFSfrom(node r, L handle) const {
    BFSfrom(std::vector<node>{r}, handle);
}
template <typename L>
void Graph::BFSfrom(const std::vector<node> &startNodes, L handle) const {
std::vector<bool> marked(z);
std::queue<node> q, qNext;
count dist = 0;
// enqueue start nodes
for (node u : startNodes) {
q.push(u);
marked[u] = true;
}
do {
node u = q.front();
q.pop();
// apply function
callBFSHandle(handle, u, dist);
forNeighborsOf(u, [&](node v) {
if (!marked[v]) {
qNext.push(v);
marked[v] = true;
}
});
if (q.empty() && !qNext.empty()) {
q.swap(qNext);
++dist;
}
} while (!q.empty());
}
// Breadth-first traversal that reports tree edges: @a handle receives
// (u, v, weight, edgeid) for each edge first discovering node v.
// The do/while is safe here because the queue is seeded with r before the
// loop. Assumes r is a valid node id (< z).
template <typename L> void Graph::BFSEdgesFrom(node r, L handle) const {
    std::vector<bool> marked(z);
    std::queue<node> q;
    q.push(r); // enqueue root
    marked[r] = true;
    do {
        node u = q.front();
        q.pop();
        // apply function
        forNeighborsOf(u, [&](node, node v, edgeweight w, edgeid eid) {
            if (!marked[v]) {
                handle(u, v, w, eid);
                q.push(v);
                marked[v] = true;
            }
        });
    } while (!q.empty());
}
/** Depth-first traversal from @a r: invokes @a handle on every node of
 *  r's connected component, in iterative (stack-based) DFS order. */
template <typename L> void Graph::DFSfrom(node r, L handle) const {
    std::vector<bool> visited(z);
    std::stack<node> pending;
    visited[r] = true;
    pending.push(r); // start at the root
    while (!pending.empty()) {
        const node current = pending.top();
        pending.pop();
        handle(current); // apply function
        forNeighborsOf(current, [&](node next) {
            if (visited[next])
                return;
            visited[next] = true;
            pending.push(next);
        });
    }
}
// Depth-first traversal that reports tree edges: @a handle receives (u, v)
// for each edge first discovering node v. The do/while is safe because the
// stack is seeded with r before the loop. Assumes r is a valid node id.
template <typename L> void Graph::DFSEdgesFrom(node r, L handle) const {
    std::vector<bool> marked(z);
    std::stack<node> s;
    s.push(r); // enqueue root
    marked[r] = true;
    do {
        node u = s.top();
        s.pop();
        // apply function
        forNeighborsOf(u, [&](node v) {
            if (!marked[v]) {
                handle(u, v);
                s.push(v);
                marked[v] = true;
            }
        });
    } while (!s.empty());
}
} /* namespace NetworKit */
#endif /* GRAPH_H_ */
|
GB_unop__identity_bool_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_uint64
// op(A') function: GB_unop_tran__identity_bool_uint64
// C type: bool
// A type: uint64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator elementwise: cast each uint64_t entry of Ax to
// bool and store it in Cx. Aliasing Cx == Ax is allowed since iteration p
// touches only index p.
GrB_Info GB_unop_apply__identity_bool_uint64
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // static schedule: every iteration does identical work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        bool z = (bool) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while casting entries to bool (identity op). The actual loop
// body lives in the shared template GB_unop_transpose.c, which expands the
// GB_CAST_OP macro defined above; GB_PHASE_2_OF_2 selects the phase that
// writes the output values.
GrB_Info GB_unop_tran__identity_bool_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,      // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,   // slice boundaries of A
    int naslice                           // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
data.h | /*!
* Copyright (c) 2015-2022 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <xgboost/base.h>
#include <xgboost/host_device_vector.h>
#include <xgboost/linalg.h>
#include <xgboost/span.h>
#include <xgboost/string_view.h>
#include <algorithm>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4,
kStr = 5
};
enum class FeatureType : uint8_t { kNumerical = 0, kCategorical = 1 };
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of data fields in MetaInfo */
static constexpr uint64_t kNumField = 12;
/*! \brief number of rows in the data */
uint64_t num_row_{0}; // NOLINT
/*! \brief number of columns in the data */
uint64_t num_col_{0}; // NOLINT
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0}; // NOLINT
/*! \brief label of each instance */
linalg::Tensor<float, 2> labels;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_group_t> group_ptr_; // NOLINT
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_; // NOLINT
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
linalg::Tensor<float, 2> base_margin_; // NOLINT
/*!
* \brief lower bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT
/*!
* \brief upper bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT
/*!
* \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q"
*/
std::vector<std::string> feature_type_names;
/*!
* \brief Name for each feature.
*/
std::vector<std::string> feature_names;
/*
* \brief Type of each feature. Automatically set when feature_type_names is specified.
*/
HostDeviceVector<FeatureType> feature_types;
/*
* \brief Weight of each feature, used to define the probability of each feature being
* selected when using column sampling.
*/
HostDeviceVector<float> feature_weights;
/*! \brief default constructor */
MetaInfo() = default;
MetaInfo(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo const& that) = delete;
/*!
* \brief Validate all metainfo.
*/
void Validate(int32_t device) const;
MetaInfo Slice(common::Span<int32_t const> ridxs) const;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels.Data()->HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*/
void SetInfo(StringView key, StringView interface_str);
void GetInfo(char const* key, bst_ulong* out_len, DataType dtype,
const void** out_dptr) const;
void SetFeatureInfo(const char *key, const char **info, const bst_ulong size);
void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const;
/*
* \brief Extend with other MetaInfo.
*
* \param that The other MetaInfo object.
*
* \param accumulate_rows Whether rows need to be accumulated in this function. If
* client code knows number of rows in advance, set this
* parameter to false.
* \param check_column Whether the extend method should check the consistency of
* columns.
*/
void Extend(MetaInfo const& that, bool accumulate_rows, bool check_column);
private:
void SetInfoFromHost(StringView key, Json arr);
void SetInfoFromCUDA(StringView key, Json arr);
/*! \brief argsort of labels */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_feature_t index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor; members stay uninitialized so Entry remains trivial (POD) */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief compare entries by feature value, ascending.
   *  NOTE(review): the old comment said "reversely compare", but the
   *  implementation orders by `a.fvalue < b.fvalue` (ascending). */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  /*! \brief exact equality on index and value; uses floating-point ==, so
   *  entries holding NaN never compare equal. */
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};
/*!
* \brief Parameters for constructing batches.
*/
struct BatchParam {
  /*! \brief The GPU device to use. */
  int gpu_id {-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Hessian, used for sketching with future approx implementation. */
  common::Span<float> hess;
  /*! \brief Whether should DMatrix regenerate the batch.  Only used for GHistIndex. */
  bool regen {false};

  /*! \brief default constructor: invalid device (-1), no binning. */
  BatchParam() = default;
  /*! \brief Construct with device and bin count only (no hessian weighting). */
  BatchParam(int32_t device, int32_t max_bin)
      : gpu_id{device}, max_bin{max_bin} {}
  /**
   * \brief Get batch with sketch weighted by hessian. The batch will be regenerated if
   * the span is changed, so caller should keep the span for each iteration.
   */
  BatchParam(int32_t device, int32_t max_bin, common::Span<float> hessian,
             bool regenerate = false)
      : gpu_id{device}, max_bin{max_bin}, hess{hessian}, regen{regenerate} {}

  // NOTE(review): the hessian span is compared by its data pointer (identity),
  // not by contents — hence the "keep the span" requirement above.  `regen`
  // deliberately does not participate in the comparison, and no matching
  // operator== is provided.
  bool operator!=(const BatchParam& other) const {
    if (hess.empty() && other.hess.empty()) {
      return gpu_id != other.gpu_id || max_bin != other.max_bin;
    }
    return gpu_id != other.gpu_id || max_bin != other.max_bin || hess.data() != other.hess.data();
  }
};
struct HostSparsePageView {
  using Inst = common::Span<Entry const>;

  common::Span<bst_row_t const> offset;
  common::Span<Entry const> data;

  /*!
   * \brief Access instance (row) i as a non-owning span.
   *
   * In CSR layout row i occupies data[offset[i] .. offset[i+1]); no bounds
   * check is performed on i.
   */
  Inst operator[](size_t i) const {
    auto const* row = offset.data() + i;
    auto const begin = *row;
    auto const length = *(row + 1) - begin;
    return {data.data() + begin, static_cast<Inst::index_type>(length)};
  }

  /*! \brief Number of instances: one fewer than the offset array, or 0 when empty. */
  size_t Size() const {
    if (offset.size() == 0) {
      return 0;
    }
    return offset.size() - 1;
  }
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;

  // Row id of the first row in this page; nonzero for external-memory pages
  // that hold a slice of the full matrix.
  size_t base_rowid {0};

  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;

  /*! \brief Non-owning host-side CSR view over offset/data. */
  HostSparsePageView GetView() const {
    return {offset.ConstHostSpan(), data.ConstHostSpan()};
  }

  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }

  /*! \return Number of instances in the page.
   *  A non-empty page keeps Size()+1 offsets (standard CSR convention). */
  inline size_t Size() const {
    return offset.Size() == 0 ? 0 : offset.Size() - 1;
  }

  /*! \return estimation of memory cost of this page.
   *  NOTE(review): offsets are stored as bst_row_t but costed as
   *  sizeof(size_t); the estimate is off when the two types differ in
   *  width — confirm intent. */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }

  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    // Keep the leading sentinel 0 so the CSR invariant (Size()+1 offsets)
    // holds even for an empty page.
    offset_vec.push_back(0);
    data.HostVector().clear();
  }

  /*! \brief Set the base row id for this page. */
  inline void SetBaseRowId(size_t row_id) {
    base_rowid = row_id;
  }

  SparsePage GetTranspose(int num_columns, int32_t n_threads) const;

  /*! \brief Sort the entries of every segment in place by feature value
   *  (Entry::CmpValue).  The variable name `ncol` suggests this is meant to
   *  run on a column-major (CSC) page, where Size() counts columns. */
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
    dmlc::OMPException exc;  // captures exceptions thrown inside the parallel region
#pragma omp parallel for schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      exc.Run([&]() {
        if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
          std::sort(
              this->data.HostVector().begin() + this->offset.HostVector()[i],
              this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
              Entry::CmpValue);
        }
      });
    }
    exc.Rethrow();  // rethrow a captured exception, if any, on the calling thread
  }

  /**
   * \brief Pushes external data batch onto this page
   *
   * \tparam AdapterBatchT
   * \param batch
   * \param missing  Value treated as missing (such entries are skipped).
   * \param nthread
   *
   * \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
   */
  template <typename AdapterBatchT>
  uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);

  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
};
// Tag type: a SparsePage whose offset/data are interpreted in CSC
// (column-major) order.  Adds no state of its own.
class CSCPage: public SparsePage {
 public:
  CSCPage() : SparsePage() {}
  /*! \brief Wrap an existing page by moving its buffers in. */
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
// Tag type: a CSC-ordered SparsePage whose segments are additionally sorted
// by feature value (cf. SparsePage::SortRows).  Adds no state of its own.
class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() : SparsePage() {}
  /*! \brief Wrap an existing page by moving its buffers in. */
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
 public:
  /*!
   * \brief Default constructor.
   *
   * This is used in the external memory case. An empty ELLPACK page is constructed with its content
   * set later by the reader.
   */
  EllpackPage();

  /*!
   * \brief Constructor from an existing DMatrix.
   *
   * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
   * in CSR format.
   */
  explicit EllpackPage(DMatrix* dmat, const BatchParam& param);

  /*! \brief Destructor. */
  ~EllpackPage();

  // Movable; copying is implicitly suppressed by the unique_ptr member.
  // NOTE(review): no move-assignment operator is declared — confirm whether
  // that omission is intentional (Rule of Five).
  EllpackPage(EllpackPage&& that);

  /*! \return Number of instances in the page. */
  size_t Size() const;

  /*! \brief Set the base row id for this page. */
  void SetBaseRowId(size_t row_id);

  /*! \brief Access the device-side implementation (PImpl, see class comment). */
  const EllpackPageImpl* Impl() const { return impl_.get(); }
  EllpackPageImpl* Impl() { return impl_.get(); }

 private:
  std::unique_ptr<EllpackPageImpl> impl_;
};
class GHistIndexMatrix;
// Abstract iterator over pages of type T; concrete implementations are
// provided by the different DMatrix backends and wrapped by BatchIterator.
template<typename T>
class BatchIteratorImpl {
 public:
  using iterator_category = std::forward_iterator_tag;  // NOLINT
  virtual ~BatchIteratorImpl() = default;
  /*! \brief Dereference to the current page. */
  virtual const T& operator*() const = 0;
  /*! \brief Advance to the next page. */
  virtual BatchIteratorImpl& operator++() = 0;
  /*! \brief Whether iteration is exhausted. */
  virtual bool AtEnd() const = 0;
  /*! \brief Shared handle to the current page. */
  virtual std::shared_ptr<T const> Page() const = 0;
};
// Value-semantic wrapper around a shared BatchIteratorImpl, giving the
// forward-iterator surface needed for range-based for over a BatchSet.
template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;  // NOLINT
  /*! \brief Wrap a raw impl pointer, taking ownership of it. */
  explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
  /*! \brief Share an existing impl. */
  explicit BatchIterator(std::shared_ptr<BatchIteratorImpl<T>> impl) { impl_ = impl; }

  BatchIterator &operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
    return *this;
  }

  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }

  // NOTE(review): the right-hand side is ignored — this only answers "not at
  // end", which suffices for the begin()/end() sentinel pattern used by
  // BatchSet but is not a general-purpose comparison.
  bool operator!=(const BatchIterator&) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }

  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

  // NOTE(review): unlike the accessors above, no CHECK on impl_ here — a null
  // iterator dereferences a null shared_ptr.
  std::shared_ptr<T const> Page() const {
    return impl_->Page();
  }

 private:
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
// Lightweight range over pages of type T, enabling
// `for (auto const& page : dmat->GetBatches<T>())`.
template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
  BatchIterator<T> begin() { return begin_iter_; }  // NOLINT
  // End sentinel: a null iterator; see BatchIterator::operator!=, which only
  // consults the left-hand side's AtEnd().
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }  // NOLINT

 private:
  BatchIterator<T> begin_iter_;
};
struct XGBAPIThreadLocalEntry;
/*!
* \brief Internal data structured used by XGBoost during training.
*/
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief Set a meta-info field from a raw host array; forwards to MetaInfo::SetInfo. */
  virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
                       size_t num) {
    this->Info().SetInfo(key, dptr, dtype, num);
  }
  /*! \brief Set a meta-info field from a JSON array-interface string. */
  virtual void SetInfo(const char* key, std::string const& interface_str) {
    this->Info().SetInfo(key, StringView{interface_str});
  }
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;

  /*! \brief Get thread local memory for returning data from DMatrix. */
  XGBAPIThreadLocalEntry& GetThreadLocal() const;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   */
  template<typename T>
  BatchSet<T> GetBatches(const BatchParam& param = {});
  /*! \brief Whether a page of type T has already been materialized (see the
   *  specializations for SparsePage and EllpackPage below). */
  template <typename T>
  bool PageExists() const;

  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix();

  /*! \brief Whether the matrix is dense (no missing entries). */
  bool IsDense() const {
    return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
  }

  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *          By default "auto" will be able to load in both local binary file.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto");

  /**
   * \brief Creates a new DMatrix from an external data adapter.
   *
   * \tparam  AdapterT  Type of the adapter.
   * \param [in,out]  adapter       View onto an external data.
   * \param           missing       Values to count as missing.
   * \param           nthread       Number of threads for construction.
   * \param           cache_prefix  (Optional) The cache prefix for external memory.
   *
   * \return  a Created DMatrix.
   */
  template <typename AdapterT>
  static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
                         const std::string& cache_prefix = "");

  /**
   * \brief Create a new Quantile based DMatrix used for histogram based algorithm.
   *
   * \tparam DataIterHandle         External iterator type, defined in C API.
   * \tparam DMatrixHandle          DMatrix handle, defined in C API.
   * \tparam DataIterResetCallback  Callback for reset, prototype defined in C API.
   * \tparam XGDMatrixCallbackNext  Callback for next, prototype defined in C API.
   *
   * \param iter    External data iterator
   * \param proxy   A handle to ProxyDMatrix
   * \param reset   Callback for reset
   * \param next    Callback for next
   * \param missing Value that should be treated as missing.
   * \param nthread number of threads used for initialization.
   * \param max_bin Maximum number of bins.
   *
   * \return A created quantile based DMatrix.
   */
  template <typename DataIterHandle, typename DMatrixHandle,
            typename DataIterResetCallback, typename XGDMatrixCallbackNext>
  static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
                         DataIterResetCallback *reset,
                         XGDMatrixCallbackNext *next, float missing,
                         int nthread,
                         int max_bin);

  /**
   * \brief Create an external memory DMatrix with callbacks.
   *
   * \tparam DataIterHandle         External iterator type, defined in C API.
   * \tparam DMatrixHandle          DMatrix handle, defined in C API.
   * \tparam DataIterResetCallback  Callback for reset, prototype defined in C API.
   * \tparam XGDMatrixCallbackNext  Callback for next, prototype defined in C API.
   *
   * \param iter    External data iterator
   * \param proxy   A handle to ProxyDMatrix
   * \param reset   Callback for reset
   * \param next    Callback for next
   * \param missing Value that should be treated as missing.
   * \param nthread number of threads used for initialization.
   * \param cache   Prefix of cache file path.
   *
   * \return A created external memory DMatrix.
   */
  template <typename DataIterHandle, typename DMatrixHandle,
            typename DataIterResetCallback, typename XGDMatrixCallbackNext>
  static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
                         DataIterResetCallback *reset,
                         XGDMatrixCallbackNext *next, float missing,
                         int32_t nthread, std::string cache);

  /*! \brief Restrict to the given row indices; see also MetaInfo::Slice. */
  virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0;

  /*! \brief Number of rows per page in external memory.  Approximately 100MB per page for
   * dataset with 100 features.  (32UL << 12UL == 131072 rows.) */
  static const size_t kPageSize = 32UL << 12UL;

 protected:
  // Backend hooks that the GetBatches<T> specializations below dispatch to.
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
  virtual BatchSet<GHistIndexMatrix> GetGradientIndex(const BatchParam& param) = 0;

  virtual bool EllpackExists() const = 0;
  virtual bool SparsePageExists() const = 0;
};
// Explicit specializations routing DMatrix::GetBatches<T>() and
// DMatrix::PageExists<T>() to the corresponding protected virtual getters.
// Defined inline in the header so every translation unit shares them.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}

template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
  return this->EllpackExists();
}

template<>
inline bool DMatrix::PageExists<SparsePage>() const {
  return this->SparsePageExists();
}

template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}

template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetSortedColumnBatches();
}

// Only the ELLPACK and gradient-index page types consume the BatchParam.
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}

template<>
inline BatchSet<GHistIndexMatrix> DMatrix::GetBatches(const BatchParam& param) {
  return GetGradientIndex(param);
}
} // namespace xgboost
namespace dmlc {
// Entry is trivially copyable, so dmlc may serialize it as raw bytes.
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
namespace serializer {

template <>
struct Handler<xgboost::Entry> {
  /*! \brief Write index then value; field order must mirror Read() below. */
  inline static void Write(Stream* strm, const xgboost::Entry& data) {
    strm->Write(data.index);
    strm->Write(data.fvalue);
  }
  /*! \brief Read index then value; returns false as soon as either read fails. */
  inline static bool Read(Stream* strm, xgboost::Entry* data) {
    return strm->Read(&data->index) && strm->Read(&data->fvalue);
  }
};
}  // namespace serializer
}  // namespace dmlc
#endif // XGBOOST_DATA_H_
|
GB_unaryop__identity_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint8_uint32
// op(A') function:  GB_tran__identity_uint8_uint32

// C type:   uint8_t
// A type:   uint32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint32_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// reference to the pth entry of the output array
#define GB_CX(p) Cx [p]

// unary operator (identity: pass the value through)
#define GB_OP(z, x) \
    z = x ;

// casting (uint32_t -> uint8_t; truncates to the low 8 bits)
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_uint8_uint32
(
    uint8_t *restrict Cx,           // output array, size anz
    const uint32_t *restrict Ax,    // input array, size anz
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;         // operator compiled out; caller falls back to generic
    #else
    // each iteration is independent, so a static schedule is used
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_uint8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // NOTE(review): the sibling lnot file declares
                                    // this as int64_t *restrict * — confirm the
                                    // generator's intended signature
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body lives in the shared template, parameterized by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
|
GB_unaryop__lnot_uint8_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint8_int64
// op(A') function:  GB_tran__lnot_uint8_int64

// C type:   uint8_t
// A type:   int64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    int64_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// reference to the pth entry of the output array
#define GB_CX(p) Cx [p]

// unary operator (logical NOT: nonzero -> 0, zero -> 1)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (int64_t -> uint8_t; applied before the operator)
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint8_int64
(
    uint8_t *restrict Cx,           // output array, size anz
    const int64_t *restrict Ax,     // input array, size anz
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;         // operator compiled out; caller falls back to generic
    #else
    // each iteration is independent, so a static schedule is used
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body lives in the shared template, parameterized by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
|
ast-dump-openmp-parallel-for.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // single loop under "parallel for"; trailing comments only — new lines would shift the line/col numbers the CHECK lines assert
#pragma omp parallel for
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) { // nested loops, no collapse clause: only the outer loop is associated with the directive
#pragma omp parallel for
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) { // explicit collapse(1): same association as test_two
#pragma omp parallel for collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // collapse(2): both loops are associated with the directive
#pragma omp parallel for collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // collapse(2) over a triply nested loop: only the outer two loops are associated
#pragma omp parallel for collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:4:1, col:25>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:10:1, col:25>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:17:1, col:37>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:26, col:36>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:35> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:35> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:24:1, col:37>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:26, col:36>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:35> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:35> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPParallelForDirective {{.*}} <line:31:1, col:37>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:26, col:36>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:35> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:35> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
convolutiondepthwise_5x5_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k03 = _mm256_loadu_ps(k0 + 24);
__m256 _k04 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k03, _r03, _sum0);
_sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _k10 = _mm256_loadu_ps(k0);
__m256 _k11 = _mm256_loadu_ps(k0 + 8);
__m256 _k12 = _mm256_loadu_ps(k0 + 16);
__m256 _k13 = _mm256_loadu_ps(k0 + 24);
__m256 _k14 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k13, _r13, _sum0);
_sum0 = _mm256_fmadd_ps(_k14, _r14, _sum0);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
__m256 _k20 = _mm256_loadu_ps(k0);
__m256 _k21 = _mm256_loadu_ps(k0 + 8);
__m256 _k22 = _mm256_loadu_ps(k0 + 16);
__m256 _k23 = _mm256_loadu_ps(k0 + 24);
__m256 _k24 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
_sum0 = _mm256_fmadd_ps(_k23, _r23, _sum0);
_sum0 = _mm256_fmadd_ps(_k24, _r24, _sum0);
__m256 _r30 = _mm256_loadu_ps(r3);
__m256 _r31 = _mm256_loadu_ps(r3 + 8);
__m256 _r32 = _mm256_loadu_ps(r3 + 16);
__m256 _r33 = _mm256_loadu_ps(r3 + 24);
__m256 _r34 = _mm256_loadu_ps(r3 + 32);
__m256 _k30 = _mm256_loadu_ps(k0);
__m256 _k31 = _mm256_loadu_ps(k0 + 8);
__m256 _k32 = _mm256_loadu_ps(k0 + 16);
__m256 _k33 = _mm256_loadu_ps(k0 + 24);
__m256 _k34 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k30, _r30, _sum0);
_sum0 = _mm256_fmadd_ps(_k31, _r31, _sum0);
_sum0 = _mm256_fmadd_ps(_k32, _r32, _sum0);
_sum0 = _mm256_fmadd_ps(_k33, _r33, _sum0);
_sum0 = _mm256_fmadd_ps(_k34, _r34, _sum0);
__m256 _r40 = _mm256_loadu_ps(r4);
__m256 _r41 = _mm256_loadu_ps(r4 + 8);
__m256 _r42 = _mm256_loadu_ps(r4 + 16);
__m256 _r43 = _mm256_loadu_ps(r4 + 24);
__m256 _r44 = _mm256_loadu_ps(r4 + 32);
__m256 _k40 = _mm256_loadu_ps(k0);
__m256 _k41 = _mm256_loadu_ps(k0 + 8);
__m256 _k42 = _mm256_loadu_ps(k0 + 16);
__m256 _k43 = _mm256_loadu_ps(k0 + 24);
__m256 _k44 = _mm256_loadu_ps(k0 + 32);
k0 -= 160;
_sum0 = _mm256_fmadd_ps(_k40, _r40, _sum0);
_sum0 = _mm256_fmadd_ps(_k41, _r41, _sum0);
_sum0 = _mm256_fmadd_ps(_k42, _r42, _sum0);
_sum0 = _mm256_fmadd_ps(_k43, _r43, _sum0);
_sum0 = _mm256_fmadd_ps(_k44, _r44, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
outptr0 += 8;
}
r0 += 4 * 8;
r1 += 4 * 8;
r2 += 4 * 8;
r3 += 4 * 8;
r4 += 4 * 8;
}
}
}
static void convdw5x5s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 8;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k03 = _mm256_loadu_ps(k0 + 24);
__m256 _k04 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k03, _r03, _sum0);
_sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _k10 = _mm256_loadu_ps(k0);
__m256 _k11 = _mm256_loadu_ps(k0 + 8);
__m256 _k12 = _mm256_loadu_ps(k0 + 16);
__m256 _k13 = _mm256_loadu_ps(k0 + 24);
__m256 _k14 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k13, _r13, _sum0);
_sum0 = _mm256_fmadd_ps(_k14, _r14, _sum0);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
__m256 _k20 = _mm256_loadu_ps(k0);
__m256 _k21 = _mm256_loadu_ps(k0 + 8);
__m256 _k22 = _mm256_loadu_ps(k0 + 16);
__m256 _k23 = _mm256_loadu_ps(k0 + 24);
__m256 _k24 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
_sum0 = _mm256_fmadd_ps(_k23, _r23, _sum0);
_sum0 = _mm256_fmadd_ps(_k24, _r24, _sum0);
__m256 _r30 = _mm256_loadu_ps(r3);
__m256 _r31 = _mm256_loadu_ps(r3 + 8);
__m256 _r32 = _mm256_loadu_ps(r3 + 16);
__m256 _r33 = _mm256_loadu_ps(r3 + 24);
__m256 _r34 = _mm256_loadu_ps(r3 + 32);
__m256 _k30 = _mm256_loadu_ps(k0);
__m256 _k31 = _mm256_loadu_ps(k0 + 8);
__m256 _k32 = _mm256_loadu_ps(k0 + 16);
__m256 _k33 = _mm256_loadu_ps(k0 + 24);
__m256 _k34 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k30, _r30, _sum0);
_sum0 = _mm256_fmadd_ps(_k31, _r31, _sum0);
_sum0 = _mm256_fmadd_ps(_k32, _r32, _sum0);
_sum0 = _mm256_fmadd_ps(_k33, _r33, _sum0);
_sum0 = _mm256_fmadd_ps(_k34, _r34, _sum0);
__m256 _r40 = _mm256_loadu_ps(r4);
__m256 _r41 = _mm256_loadu_ps(r4 + 8);
__m256 _r42 = _mm256_loadu_ps(r4 + 16);
__m256 _r43 = _mm256_loadu_ps(r4 + 24);
__m256 _r44 = _mm256_loadu_ps(r4 + 32);
__m256 _k40 = _mm256_loadu_ps(k0);
__m256 _k41 = _mm256_loadu_ps(k0 + 8);
__m256 _k42 = _mm256_loadu_ps(k0 + 16);
__m256 _k43 = _mm256_loadu_ps(k0 + 24);
__m256 _k44 = _mm256_loadu_ps(k0 + 32);
k0 -= 160;
_sum0 = _mm256_fmadd_ps(_k40, _r40, _sum0);
_sum0 = _mm256_fmadd_ps(_k41, _r41, _sum0);
_sum0 = _mm256_fmadd_ps(_k42, _r42, _sum0);
_sum0 = _mm256_fmadd_ps(_k43, _r43, _sum0);
_sum0 = _mm256_fmadd_ps(_k44, _r44, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
outptr0 += 8;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
serial_teams.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt, multicpu
// UNSUPPORTED: gcc
// Compilation fails for icc
// XFAIL: icc
#include "callback.h"
int main() {
// Two teams, each limited to one thread, each running a serialized inner
// parallel region (num_threads(1)). The OMPT callbacks installed by
// callback.h print the event trace; the CHECK lines below verify it after
// the output is grouped per thread by %sort-threads (see the RUN line).
// NOTE: keep the pragmas and the printf string unchanged — the FileCheck
// expectations depend on the exact event sequence this code produces.
#pragma omp target teams num_teams(2) thread_limit(1)
#pragma omp parallel num_threads(1)
{ printf("In teams parallel\n"); }
return 0;
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null
// CHECK: {{^}}[[MASTER_0:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK:[0-9]+]], {{.*}}, index=1
// CHECK: {{^}}[[MASTER_0]]: ompt_event_teams_begin:
// CHECK-SAME: parent_task_id=[[INIT_TASK]]
// CHECK-SAME: {{.*}} requested_num_teams=2
// CHECK-SAME: {{.*}} invoker=[[TEAMS_FLAGS:[0-9]+]]
//
// team 0
//
// initial task in the teams construct
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_0:[0-9]+]], actual_parallelism=2, index=0
// parallel region forked by runtime
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0:[0-9]+]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[IMPL_TASK_0:[0-9]+]]
// user parallel region
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00:[0-9]+]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[INIT_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_0]], actual_parallelism=0, index=0
// CHECK: {{^}}[[MASTER_0]]: ompt_event_teams_end:
// CHECK-SAME: {{.*}} task_id=[[INIT_TASK]], invoker=[[TEAMS_FLAGS]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK]], {{.*}}, index=1
//
// team 1
//
// initial task in the teams construct
// CHECK: {{^}}[[MASTER_1:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_1:[0-9]+]], actual_parallelism=2, index=1
// parallel region forked by runtime
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_1]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1:[0-9]+]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1]], task_id=[[IMPL_TASK_1:[0-9]+]]
// user parallel region
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_1]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_11:[0-9]+]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_11]], task_id=[[IMPL_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1]], task_id=[[INIT_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_1]], actual_parallelism=0, index=1
|
GB_binop__iseq_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int8)
// A*D function (colscale): GB (_AxD__iseq_int8)
// D*A function (rowscale): GB (_DxB__iseq_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int8)
// C=scalar+B GB (_bind1st__iseq_int8)
// C=scalar+B' GB (_bind1st_tran__iseq_int8)
// C=A+scalar GB (_bind2nd__iseq_int8)
// C=A'+scalar GB (_bind2nd_tran__iseq_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij == bij)
// ---- macro plumbing for the ISEQ_INT8 operator ------------------------------
// These macros parameterize the generic template files #include'd below
// (auto-generated file: do not edit by hand).
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: ISEQ returns 1 when equal, 0 otherwise (as int8_t)
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags come from GB_control.h; every kernel below then
// returns GrB_NO_VALUE via GB_DISABLE)
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT8 || GxB_NO_ISEQ_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// This variant is compiled out for ISEQ: the dense C+=A+B kernel only exists
// for the accumulable operators listed below, which ISEQ is not.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template expands using GB_BINOP / GB_*TYPE macros defined above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C,
// using the precomputed slicing of B for parallelism.
GrB_Info GB (_Cdense_accumB__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped GB_void*) into a dense C.
GrB_Info GB (_Cdense_accumb__iseq_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns first;
// harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template writes directly into C's value array
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template writes directly into C's value array
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M or !M), set-union pattern.
// The C_to_* maps and TaskList describe the precomputed parallel schedule.
GrB_Info GB (_AaddB__iseq_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slicings allocated on demand by the template, freed below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with sparse/hypersparse C,
// set-intersection pattern; optional mask M or its complement.
GrB_Info GB (_AemultB_08__iseq_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full. flipxy requests the
// operands be swapped; GB_BINOP_FLIP is 0 for ISEQ (commutative), so the
// unflipped branch below is the one compiled in.
GrB_Info GB (_AemultB_02__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both
// A and B bitmap/full; parallelized over the slicing of M.
GrB_Info GB (_AemultB_04__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held in bitmap form,
// with optional mask M or its complement.
GrB_Info GB (_AemultB_bitmap__iseq_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every present entry of B: apply the binary
// operator with the scalar bound to the first argument.
GrB_Info GB (_bind1st__iseq_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// Bb is B's bitmap: skip positions holding no entry
if (!GBB (Bb, p)) continue ;
// GBX reads Bx [p]; the iso flag is false here (B is not iso-valued)
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every present entry of A: apply the binary
// operator with the scalar bound to the second argument.
GrB_Info GB (_bind2nd__iseq_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Ab is A's bitmap: skip positions holding no entry
if (!GBB (Ab, p)) continue ;
// GBX reads Ax [p]; the iso flag is false here (A is not iso-valued)
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// Cx [pC] = (x == A' entry at pA).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__iseq_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): the trailing re-definition below restores GB_ATYPE after the
// #undef above; it sits after the returns and is a generated-code artifact.
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by GB_unop_transpose.c:
// Cx [pC] = (A' entry at pA == y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tinyexr.h | /*
Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
// Version/feature flags parsed from the EXR version header word.
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
// One named header attribute: raw payload bytes plus its declared type string.
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t* — raw attribute payload, `size` bytes
int size;
int pad0; // padding for alignment; not meaningful data
} EXRAttribute;
// Per-channel description from the `channels` header attribute.
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type; // TINYEXR_PIXELTYPE_*
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3]; // padding for alignment
} EXRChannelInfo;
// One decoded tile of a tiled EXR image, with its position and mip/rip level.
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
// Parsed single-part EXR header: required attributes as typed fields, plus
// any remaining attributes kept verbatim in `custom_attributes`.
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes(excludes required attributes(e.g. `channels`,
// `compression`, etc))
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
} EXRHeader;
// Headers of all parts of a multi-part EXR file.
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers; // [num_headers]
} EXRMultiPartHeader;
// Decoded pixel data for one EXR part: either scanline (`images`) or tiled
// (`tiles`) storage — exactly one of the two is non-NULL.
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
// Decoded images of all parts of a multi-part EXR file.
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images; // [num_images]
} EXRMultiPartImage;
// Decoded deep (variable samples per pixel) EXR image.
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0; // padding for alignment; not meaningful data
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
// When the specified layer name is not found in the EXR file, the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name, const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by just looking at the header).
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Free's internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Free's internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Free's error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
faiiled
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's where designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies (zlib-compatible values for mz_deflateInit2()'s
// 'strategy' parameter; semantics mirror zlib's Z_* strategy constants).
enum {
  MZ_DEFAULT_STRATEGY = 0, // normal data
  MZ_FILTERED = 1,         // data produced by a filter or predictor
  MZ_HUFFMAN_ONLY = 2,     // Huffman coding only, no string matching
  MZ_RLE = 3,              // limit match distances to 1 (run-length encoding)
  MZ_FIXED = 4             // prevent the use of dynamic Huffman codes
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,      // collect more input before producing output
  MZ_PARTIAL_FLUSH = 1, // advanced use only (see zlib docs)
  MZ_SYNC_FLUSH = 2,    // advanced use only (see zlib docs)
  MZ_FULL_FLUSH = 3,    // advanced use only (see zlib docs)
  MZ_FINISH = 4,        // no more input coming; flush all remaining output
  MZ_BLOCK = 5          // advanced use only (see zlib docs)
};
// Return status codes for the zlib-style API (see the per-function comments
// below for exactly when each is returned). MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,              // success
  MZ_STREAM_END = 1,      // all input consumed and all output written
  MZ_NEED_DICT = 2,       // preset dictionary required (zlib compatibility)
  MZ_ERRNO = -1,          // file I/O error (zlib compatibility)
  MZ_STREAM_ERROR = -2,   // the stream state is bogus/inconsistent
  MZ_DATA_ERROR = -3,     // the input data is invalid or corrupt
  MZ_MEM_ERROR = -4,      // out of memory
  MZ_BUF_ERROR = -5,      // no forward progress possible (buffers empty/full)
  MZ_VERSION_ERROR = -6,  // incompatible version (zlib compatibility)
  MZ_PARAM_ERROR = -10000 // an input parameter is invalid
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
  MZ_NO_COMPRESSION = 0,    // store only, no compression
  MZ_BEST_SPEED = 1,        // fastest standard level
  MZ_BEST_COMPRESSION = 9,  // best standard (zlib-compatible) level
  MZ_UBER_COMPRESSION = 10, // best possible; not zlib compatible, may be slow
  MZ_DEFAULT_LEVEL = 6,     // the level MZ_DEFAULT_COMPRESSION resolves to
  MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct (the zlib z_stream equivalent).
// The caller supplies input via next_in/avail_in and drains output via
// next_out/avail_out; the codec updates the totals and the adler field.
typedef struct mz_stream_s {
  const unsigned char *next_in; // pointer to next byte to read
  unsigned int avail_in;        // number of bytes available at next_in
  mz_ulong total_in;            // total number of bytes consumed so far
  unsigned char *next_out;      // pointer to next byte to write
  unsigned int avail_out; // number of bytes that can be written to next_out
  mz_ulong total_out;     // total number of bytes produced so far
  char *msg;                       // error msg (unused)
  struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
  mz_alloc_func
      zalloc; // optional heap allocation function (defaults to malloc)
  mz_free_func zfree; // optional heap free function (defaults to free)
  void *opaque; // heap alloc function user pointer
  int data_type; // data_type (unused)
  mz_ulong adler; // adler32 of the source or uncompressed data
  mz_ulong reserved; // not used
} mz_stream;

// Convenience pointer typedef (zlib's z_streamp equivalent).
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
// Compile-time limits for the ZIP archive reader/writer.
enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,         // internal file I/O buffer size
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,     // max stored filename length
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256  // max stored per-file comment
};
// Detailed information about a single archive entry, filled in by
// mz_zip_reader_file_stat(). Field meanings follow the ZIP central
// directory record (see PKWARE's APPNOTE.TXT).
typedef struct {
  mz_uint32 m_file_index;       // index of this entry within the archive
  mz_uint32 m_central_dir_ofs;  // offset of this entry's central dir record
  mz_uint16 m_version_made_by;  // "version made by" central dir field
  mz_uint16 m_version_needed;   // minimum version needed to extract
  mz_uint16 m_bit_flag;         // general purpose bit flag
  mz_uint16 m_method;           // compression method (MZ_DEFLATED or 0=stored)
#ifndef MINIZ_NO_TIME
  time_t m_time;                // entry modification time
#endif
  mz_uint32 m_crc32;            // CRC-32 of the uncompressed data
  mz_uint64 m_comp_size;        // compressed size, in bytes
  mz_uint64 m_uncomp_size;      // uncompressed size, in bytes
  mz_uint16 m_internal_attr;    // internal file attributes
  mz_uint32 m_external_attr;    // external file attributes
  mz_uint64 m_local_header_ofs; // offset of the entry's local file header
  mz_uint32 m_comment_size;     // number of comment bytes actually stored
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; // entry filename
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; // entry comment, if any
} mz_zip_archive_file_stat;
// Pluggable archive I/O callbacks: transfer 'n' bytes at absolute archive
// offset 'file_ofs', returning the number of bytes processed. 'pOpaque' is
// the user pointer stored in mz_zip_archive::m_pIO_opaque.
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n);

// Opaque internal reader/writer state (private to the implementation).
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
// Current operating mode of an mz_zip_archive object.
typedef enum {
  MZ_ZIP_MODE_INVALID = 0,  // not initialized
  MZ_ZIP_MODE_READING = 1,  // initialized via a mz_zip_reader_init* function
  MZ_ZIP_MODE_WRITING = 2,  // initialized via a mz_zip_writer_init* function
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 // finalize called; only end valid
} mz_zip_mode;
// ZIP archive reader/writer object. The allocator and I/O callback members
// may be supplied by the user before calling an init function (NULL selects
// the defaults); treat the remaining fields as read-only/private.
typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;               // total archive size, in bytes
  mz_uint64 m_central_directory_file_ofs; // offset of the central directory
  mz_uint m_total_files;                  // number of files in the archive
  mz_zip_mode m_zip_mode;                 // current mode (see mz_zip_mode)
  mz_uint m_file_offset_alignment; // if non-zero, alignment for new entries
  mz_alloc_func m_pAlloc;          // heap allocation callback
  mz_free_func m_pFree;            // heap free callback
  mz_realloc_func m_pRealloc;      // heap reallocation callback
  void *m_pAlloc_opaque;           // user pointer passed to the alloc callbacks
  mz_file_read_func m_pRead;       // archive read callback
  mz_file_write_func m_pWrite;     // archive write callback
  void *m_pIO_opaque;              // user pointer passed to the I/O callbacks
  mz_zip_internal_state *m_pState; // private internal state
} mz_zip_archive;
// Flags accepted by the archive functions (OR'd into their 'flags' argument).
typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,  // filename matching is case sensitive
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,     // match filename only, ignore the path
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, // supplied data is already compressed
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 // keep central dir order
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forwardslash with empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
  TINFL_FLAG_PARSE_ZLIB_HEADER = 1, // input is a zlib stream (header+adler32)
  TINFL_FLAG_HAS_MORE_INPUT = 2,    // more input exists beyond this buffer
  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, // out buf fits whole stream
  TINFL_FLAG_COMPUTE_ADLER32 = 8    // always adler-32 the decompressed bytes
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status of tinfl_decompress(). Negative values are failures;
// zero/positive values mean the decompressor is done or can make progress.
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,        // invalid parameters were passed in
  TINFL_STATUS_ADLER32_MISMATCH = -2, // stream decoded but checksum mismatched
  TINFL_STATUS_FAILED = -1,           // the deflate stream is corrupt
  TINFL_STATUS_DONE = 0,              // decompression completed successfully
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,  // supply more input and call again
  TINFL_STATUS_HAS_MORE_OUTPUT = 2    // output buffer full; drain, call again
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
// Huffman table sizing constants; symbol counts come from the DEFLATE spec
// (RFC 1951): 288 literal/length codes, 32 distance codes, 19 code lengths.
enum {
  TINFL_MAX_HUFF_TABLES = 3,      // lit/len, distance, and code-length tables
  TINFL_MAX_HUFF_SYMBOLS_0 = 288, // max symbols in the literal/length table
  TINFL_MAX_HUFF_SYMBOLS_1 = 32,  // max symbols in the distance table
  TINFL_MAX_HUFF_SYMBOLS_2 = 19,  // max symbols in the code-length table
  TINFL_FAST_LOOKUP_BITS = 10,    // code bits resolved by the direct lookup
  TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
// One decoding Huffman table: per-symbol code sizes, a fast direct-lookup
// table for short codes, and a tree for codes longer than
// TINFL_FAST_LOOKUP_BITS.
typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Full decompressor state. m_state drives the coroutine-style resumable
// implementation of tinfl_decompress() (see tinfl_init(), which only resets
// m_state); the remaining members are the bit buffer, counters, Huffman
// tables, and scratch space carried across calls.
struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
      m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
// Probe-count presets for tdefl_init()'s flags word (its low 12 bits select
// the max dictionary probes per search).
enum {
  TDEFL_HUFFMAN_ONLY = 0,         // no LZ matching; Huffman coding only
  TDEFL_DEFAULT_MAX_PROBES = 128, // default probes per dictionary search
  TDEFL_MAX_PROBES_MASK = 0xFFF   // mask isolating the probe-count bits
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
// tdefl's compression state structure. All buffers are embedded, so the
// struct is large (several hundred KB) but requires no heap allocations.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;  // optional output callback (may be NULL)
  void *m_pPut_buf_user;                   // user pointer passed to the callback
  mz_uint m_flags, m_max_probes[2];        // TDEFL_* flags; probe limits per match length class
  int m_greedy_parsing;                    // nonzero: greedy (faster) instead of lazy parsing
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  // Saved match state carried across tdefl_compress() calls (lazy parsing).
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;       // result of the previous compress call
  const void *m_pIn_buf;                   // caller's current input buffer
  void *m_pOut_buf;                        // caller's current output buffer
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;                     // flush mode of the in-progress call
  const mz_uint8 *m_pSrc;                  // current read cursor into the input
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];  // LZ dictionary (+ wrap slack)
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];  // symbol frequencies
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];  // assigned codes
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];  // code lengths
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];  // buffered LZ codes awaiting a block flush
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];    // hash chain links
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];    // hash table heads
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];  // staging buffer for compressed output
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pBut_buf_func: If NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pBut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
// Adler-32 (RFC 1950): lo is the running byte sum, hi the running sum of lo,
// both modulo 65521. Pass MZ_ADLER32_INIT as the seed; a NULL ptr returns the
// initial value.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 k, lo = (mz_uint32)(adler & 0xffff), hi = (mz_uint32)(adler >> 16);
  // 5552 is the largest block length for which the 32-bit sums cannot
  // overflow, so the expensive modulo is deferred to once per block.
  size_t chunk = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    // Unrolled 8-at-a-time inner loop over the current block.
    for (k = 0; k + 7 < chunk; k += 8, ptr += 8) {
      lo += ptr[0]; hi += lo;
      lo += ptr[1]; hi += lo;
      lo += ptr[2]; hi += lo;
      lo += ptr[3]; hi += lo;
      lo += ptr[4]; hi += lo;
      lo += ptr[5]; hi += lo;
      lo += ptr[6]; hi += lo;
      lo += ptr[7]; hi += lo;
    }
    for (; k < chunk; ++k) {
      lo += *ptr++;
      hi += lo;
    }
    lo %= 65521U;
    hi %= 65521U;
    buf_len -= chunk;
    chunk = 5552;
  }
  return (hi << 16) + lo;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// Karl Malbrain's compact CRC-32 (polynomial 0xEDB88320), processing one
// nibble at a time through a 16-entry table. Pass MZ_CRC32_INIT as the seed;
// a NULL ptr returns the initial value.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
      0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 acc = (mz_uint32)crc;
  if (!ptr) return MZ_CRC32_INIT;
  acc = ~acc;  // CRC works on the inverted register
  for (; buf_len; --buf_len) {
    mz_uint8 byte = *ptr++;
    acc = (acc >> 4) ^ s_crc32[(acc ^ byte) & 0xF];         // low nibble
    acc = (acc >> 4) ^ s_crc32[(acc ^ (byte >> 4)) & 0xF];  // high nibble
  }
  return ~acc;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
// Default zalloc callback, installed when the caller leaves pStream->zalloc
// NULL. Ignores the opaque context and allocates straight from the heap.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque;
  (void)items;
  (void)size;
  return MZ_MALLOC(items * size);
}
// Default zfree callback, installed when the caller leaves pStream->zfree
// NULL. Ignores the opaque context and releases straight to the heap.
static void def_free_func(void *opaque, void *address) {
  (void)opaque;
  MZ_FREE(address);
  (void)address;
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
// zlib-compatible deflateInit(): zlib wrapper + adler-32, default window
// bits. mem_level is fixed at 9 for API compatibility only — tdefl's state
// size does not depend on it.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}
// zlib-compatible deflateInit2(). method must be MZ_DEFLATED; window_bits
// must be +/- MZ_DEFAULT_WINDOW_BITS (positive = zlib-wrapped output,
// negative = raw deflate); mem_level is only range-checked. Allocates the
// tdefl_compressor state via the stream's zalloc callback.
// Returns MZ_OK, MZ_STREAM_ERROR, MZ_PARAM_ERROR or MZ_MEM_ERROR.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream) return MZ_STREAM_ERROR;
  // Only the default window size is supported, in zlib or raw form.
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Install malloc/free-based callbacks if the caller supplied none.
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}
// zlib-compatible deflateReset(): re-initializes an already-allocated
// compressor for a new stream, preserving the flags it was created with.
int mz_deflateReset(mz_streamp pStream) {
  tdefl_compressor *pComp;
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pComp = (tdefl_compressor *)pStream->state;
  tdefl_init(pComp, NULL, NULL, pComp->m_flags);
  return MZ_OK;
}
// zlib-compatible deflate(). Compresses as much of next_in as fits into
// next_out, updating the avail/total counters and the running adler value.
// Returns MZ_OK, MZ_STREAM_END (stream complete), MZ_BUF_ERROR (no forward
// progress possible) or MZ_STREAM_ERROR (bad args / internal failure).
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;
  // tdefl has no partial-flush mode; downgrade it to a sync flush.
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  // Once the compressor reports DONE, only MZ_FINISH may be repeated.
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  // Remember the totals so we can detect a call that made no progress.
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    // Account for whatever the low-level compressor consumed and produced.
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}
// zlib-compatible deflateEnd(): releases the compressor state through the
// stream's zfree callback. Safe to call when state is already NULL.
int mz_deflateEnd(mz_streamp pStream) {
  if (pStream == NULL) return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// zlib-compatible deflateBound(): worst-case compressed size for source_len
// input bytes. Deliberately over-conservative — a tight bound is hard to
// compute given tdefl's blocking, so take the larger of a 110% expansion
// estimate and a stored-blocks estimate (5 bytes overhead per ~31KB block).
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  mz_ulong expand_est = 128 + (source_len * 110) / 100;
  mz_ulong stored_est = 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5;
  (void)pStream;
  return MZ_MAX(expand_est, stored_est);
}
// zlib-compatible compress2(): one-shot compression of pSource into pDest at
// the given level. On entry *pDest_len holds the destination capacity; on
// success it is updated to the compressed size. Returns MZ_OK or an MZ_*
// error (MZ_BUF_ERROR if the destination was too small).
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  int ret;
  mz_stream strm;
  memset(&strm, 0, sizeof(strm));
  // Guard against mz_ulong being 64 bits: the stream counters are 32-bit.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  ret = mz_deflateInit(&strm, level);
  if (ret != MZ_OK) return ret;
  ret = mz_deflate(&strm, MZ_FINISH);
  if (ret != MZ_STREAM_END) {
    // One-shot call didn't finish: clean up and report the failure.
    mz_deflateEnd(&strm);
    return (ret == MZ_OK) ? MZ_BUF_ERROR : ret;
  }
  *pDest_len = strm.total_out;
  return mz_deflateEnd(&strm);
}
// zlib-compatible compress(): one-shot compression at the default level.
// See mz_compress2() for the parameter contract.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}
// zlib-compatible compressBound(): worst-case compressed size for
// source_len bytes (the stream argument to mz_deflateBound is unused).
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}
// State for the zlib-style inflate API: couples the low-level tinfl
// decompressor with a circular LZ dictionary used as the streaming output
// window.
typedef struct {
  tinfl_decompressor m_decomp;  // low-level decompressor core
  // m_dict_ofs: current write offset into m_dict; m_dict_avail: decompressed
  // bytes still waiting to be copied out to the caller; m_first_call /
  // m_has_flushed: one-shot vs. streaming bookkeeping flags.
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;  // > 0: parse zlib header; otherwise raw deflate
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];  // circular output dictionary
  tinfl_status m_last_status;  // result of the most recent tinfl_decompress()
} inflate_state;
// zlib-compatible inflateInit2(). window_bits must be
// +/- MZ_DEFAULT_WINDOW_BITS: positive expects a zlib header/adler32
// wrapper, negative means raw deflate data. Allocates the inflate_state via
// the stream's zalloc callback. Returns MZ_OK, MZ_STREAM_ERROR,
// MZ_PARAM_ERROR or MZ_MEM_ERROR.
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pState;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  // Reset all caller-visible stream bookkeeping.
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Install the default heap-based callbacks if the caller supplied none.
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pState = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                            sizeof(inflate_state));
  if (!pState) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pState;
  tinfl_init(&pState->m_decomp);
  pState->m_dict_ofs = 0;
  pState->m_dict_avail = 0;
  pState->m_first_call = 1;
  pState->m_has_flushed = 0;
  pState->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pState->m_window_bits = window_bits;
  return MZ_OK;
}
// zlib-compatible inflateInit(): expects zlib-wrapped data with the default
// window size.
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
// zlib-compatible inflate(). Two operating modes: MZ_FINISH on the very
// first call performs a one-shot decompress straight into the caller's
// buffer (which must hold the whole output); otherwise output is produced
// into the internal 32KB circular dictionary and drained into next_out.
// Returns MZ_OK, MZ_STREAM_END, MZ_BUF_ERROR, MZ_DATA_ERROR or
// MZ_STREAM_ERROR.
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  // tinfl has no partial-flush mode; only sync flush and finish are valid.
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  // A previous hard failure is sticky: the stream cannot recover.
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;
  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    // Decompress directly into the caller's buffer, bypassing the dictionary.
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      // Output buffer wasn't big enough after all — fail the stream.
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  // First drain any decompressed bytes left over from a previous call.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  // Streaming loop: decompress into the circular dictionary, then copy out.
  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    // Copy as much of the fresh output as the caller's buffer can take.
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
    // uncompressed data left in the output dictionary -
    // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
    // without supplying more input or by setting flush
    // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large to hold the remaining uncompressed data
      // when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}
// zlib-compatible inflateEnd(): releases the decompressor state through the
// stream's zfree callback. Safe to call when state is already NULL.
int mz_inflateEnd(mz_streamp pStream) {
  if (pStream == NULL) return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// zlib-compatible uncompress(): one-shot decompression of pSource into
// pDest. On entry *pDest_len holds the destination capacity; on success it
// is updated to the decompressed size. Returns MZ_OK or an MZ_* error.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  int ret;
  mz_stream strm;
  memset(&strm, 0, sizeof(strm));
  // Guard against mz_ulong being 64 bits: the stream counters are 32-bit.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  ret = mz_inflateInit(&strm);
  if (ret != MZ_OK) return ret;
  ret = mz_inflate(&strm, MZ_FINISH);
  if (ret != MZ_STREAM_END) {
    mz_inflateEnd(&strm);
    // A buffer error with all input consumed really means truncated data.
    return ((ret == MZ_BUF_ERROR) && (!strm.avail_in)) ? MZ_DATA_ERROR : ret;
  }
  *pDest_len = strm.total_out;
  return mz_inflateEnd(&strm);
}
// Maps an MZ_* status code to a short human-readable description
// (zError()-style). Returns NULL for unrecognized codes.
const char *mz_error(int err) {
  static struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  const mz_uint count = sizeof(s_error_descs) / sizeof(s_error_descs[0]);
  mz_uint idx;
  for (idx = 0; idx < count; ++idx) {
    if (s_error_descs[idx].m_err == err) {
      return s_error_descs[idx].m_pDesc;
    }
  }
  return NULL;  // unknown status code
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
// Deflate length symbols (257..285), indexed by the match-length byte
// recorded in the LZ code buffer (see tdefl_compress_lz_codes).
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
    268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
    272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
    274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
    276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
    280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    285};
// Number of extra bits transmitted after each length symbol; same indexing
// as s_tdefl_len_sym above.
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
// Distance symbols for match distances < 512, indexed by (dist & 511) at
// the use sites in tdefl_compress_lz_codes.
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
// Extra-bit counts paired with s_tdefl_small_dist_sym (distances < 512).
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
// Distance symbols for match distances >= 512, indexed by (dist >> 8) at
// the use sites in tdefl_compress_lz_codes.
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
    24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
// Extra-bit counts paired with s_tdefl_large_dist_sym (distances >= 512).
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
// Symbol/frequency pair used while building Huffman code lengths: m_key
// first holds the symbol's frequency (the radix-sort key) and is later
// overwritten with the symbol's code length by
// tdefl_calculate_minimum_redundancy.
typedef struct {
  mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
// Radix sorts the tdefl_sym_freq array by its 16-bit m_key (two 8-bit
// passes, least-significant byte first).  pSyms0/pSyms1 are ping-pong
// buffers; the return value points at whichever buffer ended up holding the
// sorted result.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 hist[256 * 2];
  mz_uint32 total_passes = 2, pass, pass_shift, i;
  tdefl_sym_freq *pSrc = pSyms0, *pDst = pSyms1;
  // Build histograms for both key bytes in a single sweep.
  MZ_CLEAR_OBJ(hist);
  for (i = 0; i < num_syms; i++) {
    mz_uint key = pSyms0[i].m_key;
    hist[key & 0xFF]++;
    hist[256 + ((key >> 8) & 0xFF)]++;
  }
  // Skip the high-byte pass when every key already fits in the low byte.
  while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
    total_passes--;
  for (pass = 0, pass_shift = 0; pass < total_passes;
       pass++, pass_shift += 8) {
    const mz_uint32 *pHist = &hist[pass << 8];
    mz_uint offsets[256], cur_ofs = 0;
    tdefl_sym_freq *pTmp;
    // Prefix-sum the histogram into starting offsets for each bucket.
    for (i = 0; i < 256; i++) {
      offsets[i] = cur_ofs;
      cur_ofs += pHist[i];
    }
    // Stable scatter into buckets, then swap the ping-pong buffers.
    for (i = 0; i < num_syms; i++)
      pDst[offsets[(pSrc[i].m_key >> pass_shift) & 0xFF]++] = pSrc[i];
    pTmp = pSrc;
    pSrc = pDst;
    pDst = pTmp;
  }
  return pSrc;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// In-place Moffat/Katajainen algorithm: on entry A[0..n-1].m_key holds the
// symbol frequencies sorted in increasing order (see the radix sort above);
// on exit m_key holds each symbol's depth (code length) in a
// minimum-redundancy (Huffman) code.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  // Trivial cases: no symbols, or one symbol that gets a 1-bit code.
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  // Phase 1: build the Huffman tree in place.  A[next].m_key receives the
  // weight of internal node `next`, while consumed entries are overwritten
  // with the index of their parent node.
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    // First child: the cheaper of the next internal node or the next leaf.
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = A[leaf++].m_key;
    // Second child: same choice again.
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  // Phase 2: convert parent links into node depths (tree root = depth 0).
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  // Phase 3: assign leaf depths from the internal-node depth counts.
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    // Count internal nodes at the current depth...
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    // ...the remaining available slots at this depth become leaves.
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size.  Lengths beyond the
// caller-requested limit are folded back down by
// tdefl_huffman_enforce_max_code_size() below.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
// Clamps a code-length histogram (pNum_codes[i] = number of codes of length
// i) so that no code exceeds max_code_size, then rebalances lengths until
// the scaled Kraft sum equals exactly 1 << max_code_size, keeping the code
// set decodable as a canonical prefix code.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1) return;
  // Fold every over-long code down to max_code_size.
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  // Compute the Kraft sum scaled by (1 << max_code_size).
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  // While oversubscribed: shorten one max-length code and lengthen one
  // shorter code, which reduces the scaled sum by exactly 1 per iteration.
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}
// Builds the Huffman code lengths and (bit-reversed) canonical codes for
// table `table_num` of the compressor.  When static_table is true the
// caller has already filled m_huff_code_sizes and only the codes are
// derived; otherwise code lengths are computed from the symbol frequencies
// in m_huff_count and clamped to code_size_limit.
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Histogram the pre-assigned code sizes by length.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Collect only the symbols that actually occur.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    // Sort by frequency, compute optimal code lengths, then clamp them.
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    // Re-emit the (possibly clamped) lengths per symbol.  pSyms is sorted
    // by ascending frequency, so walking j downward gives the shortest
    // codes to the most frequent symbols.
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Canonical code assignment: compute the first code of each length, then
  // emit each symbol's code bit-reversed (the output bit buffer is filled
  // LSB-first).
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}
// Appends the low `l` bits of `b` to the compressor's bit buffer, flushing
// completed bytes to the output buffer (with a bounds check).  Relies on a
// local variable `d` (tdefl_compressor *) being in scope at the expansion
// site.
#define TDEFL_PUT_BITS(b, l)                                \
  do {                                                      \
    mz_uint bits = b;                                       \
    mz_uint len = l;                                        \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                  \
    d->m_bit_buffer |= (bits << d->m_bits_in);              \
    d->m_bits_in += len;                                    \
    while (d->m_bits_in >= 8) {                             \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)          \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);  \
      d->m_bit_buffer >>= 8;                                \
      d->m_bits_in -= 8;                                    \
    }                                                       \
  }                                                         \
  MZ_MACRO_END
// Flushes a pending run of repeated non-zero code sizes: runs shorter than
// 3 are emitted literally, longer runs become symbol 16 ("copy previous
// code size") plus a repeat count, updating the code-length-code histogram
// (m_huff_count[2]) either way.  Expects d, prev_code_size,
// rle_repeat_count, packed_code_sizes and num_packed_code_sizes to be in
// scope at the expansion site.
#define TDEFL_RLE_PREV_CODE_SIZE()                                        \
  {                                                                       \
    if (rle_repeat_count) {                                               \
      if (rle_repeat_count < 3) {                                         \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                 \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);       \
        while (rle_repeat_count--)                                        \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;    \
      } else {                                                            \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 16;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_repeat_count - 3);                             \
      }                                                                   \
      rle_repeat_count = 0;                                               \
    }                                                                     \
  }
// Flushes a pending run of zero code sizes: runs shorter than 3 are emitted
// literally, runs of 3..10 become symbol 17 and runs of 11+ become symbol
// 18, each followed by its repeat count, updating the code-length-code
// histogram either way.  Expects d, rle_z_count, packed_code_sizes and
// num_packed_code_sizes to be in scope at the expansion site.
#define TDEFL_RLE_ZERO_CODE_SIZE()                                          \
  {                                                                         \
    if (rle_z_count) {                                                      \
      if (rle_z_count < 3) {                                                \
        d->m_huff_count[2][0] =                                             \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);               \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                       \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);   \
        packed_code_sizes[num_packed_code_sizes++] = 17;                    \
        packed_code_sizes[num_packed_code_sizes++] =                        \
            (mz_uint8)(rle_z_count - 3);                                    \
      } else {                                                              \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);   \
        packed_code_sizes[num_packed_code_sizes++] = 18;                    \
        packed_code_sizes[num_packed_code_sizes++] =                        \
            (mz_uint8)(rle_z_count - 11);                                   \
      }                                                                     \
      rle_z_count = 0;                                                      \
    }                                                                       \
  }
// Transmission order of the code-length-code lengths in a dynamic block
// header (the fixed order given in RFC 1951, section 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
// Emits the header of a dynamic-Huffman deflate block: optimizes the
// literal/length (table 0) and distance (table 1) tables, run-length-encodes
// their combined code sizes, builds the code-length code (table 2), and
// writes the block type plus the HLIT/HDIST/HCLEN fields and the packed
// code sizes.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) is always transmitted exactly once.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing unused codes (at least 257 literal and 1 distance code
  // are always sent).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  // Concatenate both tables' code sizes for RLE packing.
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  // RLE-encode the code sizes, accumulating code-length-code symbol
  // frequencies into m_huff_count[2] as we go.
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {  // 138 is the longest run symbol 18 encodes.
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {  // Symbol 16's max run length.
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  // Build the code-length code (max 7 bits) and emit the block header.
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  TDEFL_PUT_BITS(2, 2);  // Block type 10 = dynamic Huffman.
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  // How many code-length-code lengths must be sent (in swizzled order)?
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the RLE-packed code sizes; symbols >= 16 carry extra repeat bits
  // (2, 3 or 7 bits for symbols 16, 17, 18 respectively).
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}
// Emits the header of a static (fixed-Huffman) deflate block and loads the
// fixed literal/length and distance code sizes into tables 0 and 1.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  // Fixed literal/length code sizes per RFC 1951: symbols 0-143 use 8 bits,
  // 144-255 use 9, 256-279 use 7, and 280-287 use 8.
  memset(p, 8, 144);
  memset(p + 144, 9, 256 - 144);
  memset(p + 256, 7, 280 - 256);
  memset(p + 280, 8, 288 - 280);
  // All 32 distance codes are 5 bits wide.
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  // Block type 01 = compressed with fixed Huffman codes.
  TDEFL_PUT_BITS(1, 2);
}
// mz_bitmasks[n] is a mask with the low n bits set (0 <= n <= 16).
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
// Fast path (64-bit registers, little-endian, unaligned stores permitted):
// encodes the buffered LZ records through the literal/length (table 0) and
// distance (table 1) Huffman tables, accumulating bits in a local 64-bit
// buffer that is spilled with one unaligned store per loop iteration.
// Returns MZ_FALSE if the output buffer was exhausted.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;
// Unchecked bit append: correctness relies on the per-iteration flush below
// keeping bits_in small enough that nothing shifts out of the mz_uint64.
#define TDEFL_PUT_BITS_FAST(b, l)                \
  {                                              \
    bit_buffer |= (((mz_uint64)(b)) << bits_in); \
    bits_in += (l);                              \
  }
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    // Each flag byte covers 8 records (bit set = match, clear = literal);
    // the 0x100 sentinel marks when a fresh flag byte must be fetched.
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match record: one length byte followed by a 16-bit distance.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's: both the
      // small- and large-distance lookups are done unconditionally, then
      // one result is selected branchlessly.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal record; when the next flag bits permit, up to two more
      // literals are folded in so one spill below covers several symbols.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    // Spill all completed bytes at once with an unaligned 64-bit store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  // Drain the residual bits through the checked TDEFL_PUT_BITS path.
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  // Finish with the end-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable path: encodes the buffered LZ records through the literal/length
// (table 0) and distance (table 1) Huffman tables using the checked
// TDEFL_PUT_BITS macro.  Returns MZ_FALSE if the output buffer was
// exhausted.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    // Each flag byte covers 8 records (bit set = match, clear = literal);
    // the 0x100 sentinel marks when a fresh flag byte must be fetched.
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match record: one length byte followed by a 16-bit distance.
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      // Short and long distances use separate lookup tables.
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      // Literal record: a single byte coded through table 0.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  // Finish with the end-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
// Writes one complete deflate block: emits the appropriate block header
// (static or dynamic Huffman tables) followed by the buffered LZ codes.
// Returns MZ_FALSE if the output buffer overflowed.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (!static_block) {
    tdefl_start_dynamic_block(d);
  } else {
    tdefl_start_static_block(d);
  }
  return tdefl_compress_lz_codes(d);
}
// Finishes the current deflate block: emits a compressed (static/dynamic)
// block or falls back to a stored (raw) block when compression would expand
// the data, then resets per-block state and hands finished bytes to the user
// (callback or output buffer). Returns the count of output bytes still
// pending (> 0), 0 when fully flushed, or a negative TDEFL_STATUS_* value
// on failure.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n, use_raw_block =
             ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
             (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  // Write straight into the caller's output buffer when it has at least
  // TDEFL_OUT_BUF_SIZE bytes of room; otherwise stage in the internal buffer.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Finalize the partially filled flags byte (shift out the unused high bits);
  // if it is completely empty, drop it from the code buffer.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    // Two-byte zlib stream header (CMF/FLG) before the very first block.
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);  // BFINAL bit
  // Snapshot the bit-writer state so the compressed attempt can be undone.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    TDEFL_PUT_BITS(0, 2);  // BTYPE = 00 (stored)
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);  // pad to a byte boundary
    }
    // Stored-block header: LEN then NLEN (one's complement of LEN).
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    // Copy the uncompressed bytes straight out of the dictionary.
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);  // retry with static codes
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        // Trailing adler-32 checksum, big-endian.
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Sync/full flush: append an empty stored block (len 0 / nlen 0xFFFF).
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset per-block state: Huffman frequency counts and the LZ code buffer
  // (byte 0 is reserved as the first flags byte).
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Deliver the produced bytes: callback, or caller buffer (with any excess
  // kept pending in the internal buffer for a later tdefl_flush_output_buffer).
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      // Staged internally: copy what fits into the caller's buffer.
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;  // wrote directly into the caller's buffer
    }
  }
  return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Finds the longest match for the bytes at lookahead_pos in the sliding
// dictionary by walking the hash chain in d->m_next (unaligned 16-bit-load
// variant). On entry *pMatch_len holds the length to beat; on return
// *pMatch_dist / *pMatch_len are updated if something longer (up to
// max_match_len) was found within max_dist.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  // Fewer probes are budgeted once a "good" (>= 32 byte) match is known.
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01 = the 2 bytes spanning the end of the current best match (quick
  // reject); s01 = the first 2 bytes of the string being matched.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
      // Follow the hash chain: stop at chain end or when the distance
      // exceeds max_dist; break out when a candidate's tail bytes match.
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;  // zero distance cannot be a valid match; stop
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    p = s;
    probe_len = 32;
    // Compare 2 bytes at a time, 8 bytes per iteration, up to 32 iterations.
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full comparison without a mismatch: take the maximum length.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      // New best match: record it and refresh the quick-reject word.
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Portable (byte-at-a-time) variant of tdefl_find_match for targets without
// unaligned loads. Same contract as the fast variant: improves
// *pMatch_dist / *pMatch_len when a match longer than the incoming
// *pMatch_len is found within max_dist.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // Quick-reject bytes: a candidate must match the byte just past and the
  // byte at the end of the current best match before a full comparison.
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
      // Walk the hash chain (unrolled 3 probes per pass); stop at chain end
      // or out-of-range distance, break on a promising candidate.
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if ((d->m_dict[probe_pos + match_len] == c0) && \
      (d->m_dict[probe_pos + match_len - 1] == c1)) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;  // zero distance cannot be a valid match; stop
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      // New best: refresh the quick-reject bytes.
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Fast-path compressor used for level-1 greedy parsing (only compiled when
// unaligned little-endian loads are available): single-probe matcher driven
// by a 3-byte "trigram" hash. Consumes d->m_pSrc / m_src_buf_left, appends
// LZ codes, and flushes blocks as the code buffer fills. Returns MZ_FALSE
// only if a block flush failed.
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    // Copy input into the circular dictionary; the first
    // TDEFL_MAX_MATCH_LEN-1 bytes are mirrored past the end so match
    // comparisons never have to wrap.
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;  // wait for more input before parsing
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      // Single probe: accept only if the candidate is inside the current
      // dictionary and its first three bytes match.
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        // Extend the match 2 bytes at a time (8 bytes per iteration).
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        // Reject matches that would cost more to encode than plain literals
        // (minimum-length matches at distances >= 8K).
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          // Record the match: 1 length byte + 2 distance bytes + a 1 flag bit,
          // and bump the Huffman frequency counters.
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        // No usable match: emit a literal (low byte of the trigram).
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;  // reserve the next flags byte
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      // Code buffer nearly full: write the cached state back and flush.
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    // Fewer than 4 bytes left in the lookahead: emit them as literals.
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  // Write the register-cached parser state back into the compressor.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Appends one literal byte to the LZ code buffer, shifts a 0 bit into the
// current flags byte (0 = literal, 1 = match), and bumps the byte's Huffman
// frequency counter.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes += 1;
  *d->m_pLZ_code_buf = lit;
  d->m_pLZ_code_buf += 1;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    // Flags byte full: reserve the next code-buffer byte for flags.
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf;
    d->m_pLZ_code_buf += 1;
  }
  d->m_huff_count[0][lit]++;
}
// Appends one LZ match (length, distance) to the LZ code buffer — 3 payload
// bytes plus a 1 flag bit — and bumps the Huffman frequency counters used
// later to build the dynamic code tables.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;  // stored as distance - 1
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);  // 1 = match
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;  // reserve the next flags byte
  }
  // Count the distance symbol (small table for dist < 512, otherwise the
  // large table indexed by the high byte, masked to its 128 entries) and
  // the length symbol.
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
// Normal-path compressor: maintains hash chains over the sliding dictionary
// and performs greedy or lazy (one-deferred-match) parsing depending on
// d->m_greedy_parsing and the TDEFL_RLE_MATCHES / raw-block flags. Consumes
// d->m_pSrc / m_src_buf_left; returns MZ_FALSE only if a block flush failed.
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        // Mirror the dictionary head past the end so matches never wrap.
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        // Roll the 3-byte hash forward and link this position into its chain.
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      // Cold start: not enough context for a rolling hash yet; insert bytes
      // one at a time until TDEFL_MIN_MATCH_LEN bytes are available.
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      // RLE mode: only consider a distance-1 run of the preceding byte.
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    // Drop matches unlikely to be worth encoding: minimum-length matches at
    // distances >= 8K, self-referential positions, and (in filter mode)
    // short matches.
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      // Lazy parsing: a match was deferred at the previous position.
      if (cur_match_len > d->m_saved_match_len) {
        // Current match is better: emit the deferred byte as a literal.
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          // Long enough to accept immediately without deferring again.
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        // The deferred match wins; the current byte was already covered by
        // its first position, hence len - 1.
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      // Defer the match one byte to see if the next position beats it.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}
// Copies as much of the internally buffered, still-pending output as fits
// into the caller's output buffer, and reports back how much input was
// consumed. Returns TDEFL_STATUS_DONE once the stream is finished and fully
// drained, TDEFL_STATUS_OKAY otherwise.
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size)
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  if (d->m_pOut_buf_size) {
    const size_t room = *d->m_pOut_buf_size - d->m_out_buf_ofs;
    const size_t chunk = MZ_MIN(room, d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, chunk);
    d->m_output_flush_ofs += (mz_uint)chunk;
    d->m_output_flush_remaining -= (mz_uint)chunk;
    d->m_out_buf_ofs += chunk;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  if (d->m_finished && !d->m_output_flush_remaining) return TDEFL_STATUS_DONE;
  return TDEFL_STATUS_OKAY;
}
// Main streaming compression entry point. Consumes up to *pIn_buf_size bytes
// from pIn_buf and produces output either into pOut_buf/*pOut_buf_size or
// through the callback installed at init time — exactly one of the two
// mechanisms must be in use. On return the size parameters are updated to
// the bytes actually consumed/produced. Call repeatedly; pass TDEFL_FINISH
// on the last call and keep calling until TDEFL_STATUS_DONE.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // Parameter sanity: callback mode and buffer mode are mutually exclusive,
  // a previous error is sticky, and once TDEFL_FINISH has been requested all
  // subsequent calls must also pass TDEFL_FINISH.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  // Drain output still pending from a previous call before compressing more.
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // Level-1 greedy parsing with no special modes can take the fast path.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  } else
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  // Maintain the adler-32 of the consumed input when writing a zlib stream
  // (or when explicitly requested).
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  // All input consumed with a flush requested: emit the final/flush block.
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // A full flush also forgets the dictionary/hash state.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
// Push-style convenience wrapper around tdefl_compress(): output goes
// through the callback installed at init time, so no output buffer is
// passed here. The compressor must have been initialized with a non-NULL
// put-buf callback.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  size_t bytes_remaining = in_buf_size;
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &bytes_remaining, NULL, NULL, flush);
}
// Initializes (or re-initializes) a compressor for a new stream.
// pPut_buf_func may be NULL if the caller will instead supply output buffers
// to tdefl_compress(). flags packs the dictionary probe count into the low
// 12 bits plus TDEFL_* option bits. Always returns TDEFL_STATUS_OKAY.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  // Probe budgets derived from the low 12 bits of flags; index 1 is used
  // once a match of >= 32 bytes is already known (cheaper search).
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  // Skipping the hash clear speeds up init, at the cost of parsing decisions
  // that depend on whatever was previously in memory.
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // Byte 0 of the LZ code buffer is reserved as the first flags byte.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;  // adler-32 initial value
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
// Returns the status code recorded by the most recent tdefl_compress() call.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}
// Returns the running adler-32 of the source data consumed so far (1 before
// any input has been processed).
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
// One-shot helper: heap-allocates a temporary compressor, streams the whole
// input buffer through it (delivering output via pPut_buf_func), and returns
// MZ_TRUE only if both init and the full compression succeeded.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool ok = MZ_FALSE;
  if (!pPut_buf_func) return MZ_FALSE;
  if ((buf_len) && (!pBuf)) return MZ_FALSE;  // non-empty input needs a pointer
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  if (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
      TDEFL_STATUS_OKAY)
    ok = (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
          TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return ok;
}
// Growable (or fixed-capacity) byte sink used as the user data of
// tdefl_output_buffer_putter().
typedef struct {
  size_t m_size, m_capacity;  // bytes written so far / bytes allocated
  mz_uint8 *m_pBuf;           // destination storage
  mz_bool m_expandable;       // if false, writes past m_capacity fail
} tdefl_output_buffer;
// tdefl put-buf callback that appends len bytes to a tdefl_output_buffer,
// doubling its capacity (minimum 128 bytes) as needed when the buffer is
// expandable. Returns MZ_FALSE if growth is disallowed or realloc fails.
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t needed = p->m_size + len;
  if (needed > p->m_capacity) {
    size_t grown_cap;
    mz_uint8 *pGrown;
    if (!p->m_expandable) return MZ_FALSE;
    grown_cap = p->m_capacity;
    do {
      grown_cap = MZ_MAX(128U, grown_cap << 1U);
    } while (needed > grown_cap);
    pGrown = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, grown_cap);
    if (!pGrown) return MZ_FALSE;
    p->m_pBuf = pGrown;
    p->m_capacity = grown_cap;
  }
  memcpy(p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = needed;
  return MZ_TRUE;
}
// Compresses pSrc_buf (src_buf_len bytes) into a heap-allocated buffer.
// On success returns the buffer (caller releases it with MZ_FREE/mz_free —
// TODO confirm the project-exposed free function) and stores the compressed
// size in *pOut_len; returns NULL on failure.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  // Fix: this function returns void*, so failure must be NULL, not the
  // integer constant MZ_FALSE the original returned here.
  if (!pOut_len) return NULL;
  *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) {
    // Fix: free any partially grown buffer so a late failure doesn't leak.
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}
// Compresses into a fixed-size, caller-owned buffer. Returns the compressed
// size, or 0 on any failure — including the output buffer being too small,
// since the sink below is marked non-expandable.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer sink;
  MZ_CLEAR_OBJ(sink);
  if (!pOut_buf) return 0;
  sink.m_pBuf = (mz_uint8 *)pOut_buf;
  sink.m_capacity = out_buf_len;
  return tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                      tdefl_output_buffer_putter, &sink, flags)
             ? sink.m_size
             : 0;
}
#ifndef MINIZ_NO_ZLIB_APIS
// Dictionary probe counts for compression levels 0..10 (level 0 produces
// raw blocks — see tdefl_create_comp_flags_from_zip_params below).
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
                                               128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
// Maps zlib-style (level, window_bits, strategy) parameters onto a TDEFL_*
// flags word: probe count from the level table, greedy parsing for levels
// <= 3, a zlib header when window_bits is positive, and strategy-specific
// option bits (level 0 always forces stored blocks, overriding strategy).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  const int probe_index = (level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL;
  mz_uint comp_flags = s_tdefl_num_probes[probe_index];
  if (level <= 3) comp_flags |= TDEFL_GREEDY_PARSING_FLAG;
  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level) {
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  } else if (strategy == MZ_FILTERED) {
    comp_flags |= TDEFL_FILTER_MATCHES;
  } else if (strategy == MZ_HUFFMAN_ONLY) {
    comp_flags &= ~TDEFL_MAX_PROBES_MASK;
  } else if (strategy == MZ_FIXED) {
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  } else if (strategy == MZ_RLE) {
    comp_flags |= TDEFL_RLE_MATCHES;
  }
  return comp_flags;
}
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
// Writes a raw image (w x h pixels, num_chans interleaved 8-bit channels)
// as a complete in-memory PNG file: a fixed 41-byte prefix (PNG signature +
// IHDR chunk + IDAT chunk header), one zlib stream holding the scanlines,
// then the IDAT CRC and an IEND chunk. Returns a heap buffer (released with
// MZ_FREE — TODO confirm the project-exposed free function) and stores the
// total size in *pLen_out; returns NULL on failure.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip) {
  // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
  // defined.
  static const mz_uint s_tdefl_png_num_probes[11] = {
      0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
  tdefl_compressor *pComp =
      (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  tdefl_output_buffer out_buf;
  int i, bpl = w * num_chans, y, z;  // bpl = bytes per scanline
  mz_uint32 c;
  *pLen_out = 0;
  if (!pComp) return NULL;
  MZ_CLEAR_OBJ(out_buf);
  out_buf.m_expandable = MZ_TRUE;
  // 57 = 41-byte header + 16-byte footer around the compressed payload.
  out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
  if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
    MZ_FREE(pComp);
    return NULL;
  }
  // write dummy header (overwritten with the real one below); note the loop
  // also leaves z == 0, which is reused as the per-scanline filter byte.
  for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
  // compress image data
  tdefl_init(
      pComp, tdefl_output_buffer_putter, &out_buf,
      s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
  for (y = 0; y < h; ++y) {
    // Each scanline: one filter-type byte (0 = no filter) then the raw row,
    // bottom-up when flip is set.
    tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
    tdefl_compress_buffer(pComp,
                          (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;  // size of the IDAT payload (zlib stream)
  {
    // PNG color type per channel count: 1 -> gray (0), 2 -> gray+alpha (4),
    // 3 -> RGB (2), 4 -> RGBA (6).
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {
        0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,  // PNG signature
        0x00, 0x00, 0x00, 0x0d,                           // IHDR length (13)
        0x49, 0x48, 0x44, 0x52,                           // "IHDR"
        0,    0,    (mz_uint8)(w >> 8), (mz_uint8)w,      // width, big-endian
        0,    0,    (mz_uint8)(h >> 8), (mz_uint8)h,      // height, big-endian
        8,    chans[num_chans], 0, 0, 0,  // depth, color, comp, filter, ilace
        0,    0,    0,    0,              // IHDR CRC (patched in below)
        (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16),
        (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out,  // IDAT length
        0x49, 0x44, 0x41, 0x54};                          // "IDAT"
    // CRC over the IHDR chunk type + its 13 data bytes, stored big-endian.
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // Patch the IDAT CRC (over the "IDAT" tag + payload) into the first four
  // footer bytes, big-endian.
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
// can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's
// where #defined out)
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
// Fix: removed a stray `#error "No arvhive APIs"` here — it fired precisely
// when the archive APIs below ARE being compiled (MINIZ_NO_ARCHIVE_APIS not
// defined), aborting every normal build; upstream miniz has no such directive.
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
// fopen() shim for MSVC/MinGW-w64 builds: calls the "secure" fopen_s but
// preserves the classic return-a-FILE*-or-NULL convention (fopen_s leaves
// pFile NULL on failure, so the error code can be ignored here).
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
// freopen() shim for MSVC/MinGW-w64 builds: freopen_s returns a nonzero
// error code on failure, in which case NULL is returned just like the
// classic freopen().
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,  // "PK\x05\x06"
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,         // "PK\x01\x02"
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,           // "PK\x03\x04"
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets (byte offsets into the fixed
  // 46-byte header; the filename/extra/comment fields follow it).
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets (30-byte fixed header)
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets (22-byte fixed record)
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
// Growable, fixed-element-size array used for the central directory bytes and
// its index tables; allocation goes through the archive's callbacks.
typedef struct {
  void *m_p;                  // element storage
  size_t m_size, m_capacity;  // element count in use / allocated element slots
  mz_uint m_element_size;     // size of one element, in bytes
} mz_zip_array;
// Internal reader/writer state hung off mz_zip_archive's m_pState pointer.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;                 // raw central-directory records
  mz_zip_array m_central_dir_offsets;         // byte offset of each record
  mz_zip_array m_sorted_central_dir_offsets;  // record indices sorted by name
  MZ_FILE *m_pFile;  // backing file handle (stdio-based archives)
  void *m_pMem;      // backing memory block (in-memory archives)
  size_t m_mem_size;
  size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]
// Release an array's storage through the archive's free callback and reset
// every field to zero.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(*pArray));
}
// Grow pArray's backing store so it can hold at least min_new_capacity
// elements. With 'growing' set, capacity doubles (amortized growth);
// otherwise it is sized exactly. Returns MZ_FALSE on allocation failure.
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pReallocated;
  size_t target = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
  if (growing) {
    // Double from the current capacity (at least 1) until it is big enough.
    target = MZ_MAX(1, pArray->m_capacity);
    while (target < min_new_capacity) target *= 2;
  }
  pReallocated = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                  pArray->m_element_size, target);
  if (!pReallocated) return MZ_FALSE;
  pArray->m_p = pReallocated;
  pArray->m_capacity = target;
  return MZ_TRUE;
}
// Ensure at least new_capacity element slots are allocated; never shrinks.
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  // Only touch the allocation when the request exceeds what we already have.
  if (new_capacity <= pArray->m_capacity) return MZ_TRUE;
  return mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing);
}
// Set the array's logical size, growing the allocation when needed.
// Shrinking never releases memory; it only lowers m_size.
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
                                                  mz_zip_array *pArray,
                                                  size_t new_size,
                                                  mz_uint growing) {
  if ((new_size > pArray->m_capacity) &&
      (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)))
    return MZ_FALSE;
  pArray->m_size = new_size;
  return MZ_TRUE;
}
// Reserve space for n additional elements beyond the current size.
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  const size_t wanted = pArray->m_size + n;
  return mz_zip_array_reserve(pZip, pArray, wanted, MZ_TRUE);
}
// Append n elements (copied from pElements), growing geometrically as needed.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  const size_t old_size = pArray->m_size;
  mz_uint8 *pDst;
  if (!mz_zip_array_resize(pZip, pArray, old_size + n, MZ_TRUE))
    return MZ_FALSE;
  pDst = (mz_uint8 *)pArray->m_p + old_size * pArray->m_element_size;
  memcpy(pDst, pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Convert MS-DOS packed time/date fields into a time_t (interpreted as local
// time via mktime()).
// DOS date: bits 15-9 = year-1980, 8-5 = month (1-12), 4-0 = day.
// DOS time: bits 15-11 = hour, 10-5 = minute, 4-0 = seconds/2.
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm broken_down;
  memset(&broken_down, 0, sizeof(broken_down));
  broken_down.tm_isdst = -1;  // let mktime() decide whether DST applies
  broken_down.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
  broken_down.tm_mon = ((dos_date >> 5) & 15) - 1;
  broken_down.tm_mday = dos_date & 31;
  broken_down.tm_hour = (dos_time >> 11) & 31;
  broken_down.tm_min = (dos_time >> 5) & 63;
  broken_down.tm_sec = (dos_time << 1) & 62;  // DOS has 2-second resolution
  return mktime(&broken_down);
}
// Convert a time_t into MS-DOS packed time/date fields (local time).
// Seconds are stored at 2-second resolution, years relative to 1980.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *pTm = &tm_struct;
  errno_t err = localtime_s(pTm, &time);
  if (err) {
    // Unrepresentable time: report the all-zero (invalid) DOS timestamp.
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *pTm = localtime(&time);
#endif
  *pDOS_time = (mz_uint16)(((pTm->tm_hour) << 11) + ((pTm->tm_min) << 5) +
                           ((pTm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((pTm->tm_year + 1900 - 1980) << 9) +
                           ((pTm->tm_mon + 1) << 5) + pTm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
// Read pFilename's modification time via stat() and convert it to DOS
// time/date fields. Returns MZ_FALSE if the file cannot be stat'ed. When
// MINIZ_NO_TIME is defined, both fields are zeroed and the call succeeds.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Set a file's access and modification timestamps via utime().
// Returns MZ_TRUE on success.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf timestamps;
  timestamps.actime = access_time;
  timestamps.modtime = modified_time;
  // utime() returns 0 on success.
  return utime(pFilename, &timestamps) == 0;
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
// Prepare an archive struct for reading: install default heap callbacks for
// any the caller left NULL, allocate the internal state block, and configure
// the central-directory arrays' element sizes. Returns MZ_FALSE if pZip is
// NULL, already initialized, or the state allocation fails.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  mz_zip_internal_state *pState;
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  pState = (mz_zip_internal_state *)pZip->m_pAlloc(
      pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state));
  if (!pState) return MZ_FALSE;
  memset(pState, 0, sizeof(mz_zip_internal_state));
  // Central dir holds raw bytes; the offset/index tables hold 32-bit entries.
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir, sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  pZip->m_pState = pState;
  return MZ_TRUE;
}
// Case-insensitive lexicographic "less than" between the filenames of the
// central-directory records identified by l_index and r_index.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pLeft = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index));
  const mz_uint8 *pRight = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint left_len = MZ_READ_LE16(pLeft + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint right_len = MZ_READ_LE16(pRight + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  const mz_uint8 *pEnd;
  mz_uint8 cl = 0, cr = 0;
  // Filenames immediately follow the fixed-size central-dir header.
  pLeft += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pRight += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pEnd = pLeft + MZ_MIN(left_len, right_len);
  // Scan until the first differing lowercased byte.
  while (pLeft < pEnd) {
    cl = MZ_TOLOWER(*pLeft);
    cr = MZ_TOLOWER(*pRight);
    if (cl != cr) break;
    pLeft++;
    pRight++;
  }
  // Equal prefix: the shorter name sorts first; otherwise compare the bytes.
  return (pLeft == pEnd) ? (left_len < right_len) : (cl < cr);
}
// Swap two mz_uint32 lvalues in place (used by the heap sort below).
#define MZ_SWAP_UINT32(a, b) \
  do {                       \
    mz_uint32 t = a;         \
    a = b;                   \
    b = t;                   \
  }                          \
  MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
// In-place heap sort of the sorted-offsets index table, ordered by lowercased
// filename (see mz_zip_reader_filename_less). Runs in O(n log n) with no
// extra allocation, unlike qsort() which may allocate.
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
    mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  // Phase 1: heapify - sift down every non-leaf node, last parent first.
  int start = (size - 2) >> 1, end;
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size) break;
      // Pick the larger (by filename order) of the two children.
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }
  // Phase 2: repeatedly move the heap maximum to the end and re-sift root.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end) break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}
// Locate and validate the end-of-central-directory record, then load the
// entire central directory into memory, build the per-record offset table,
// sanity-check every record, and (unless disabled via flags) sort the
// filename index. Returns MZ_FALSE on any validation failure.
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  // 4KB stack scratch buffer, mz_uint32-aligned for MZ_READ_LE32.
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    // Search this window backwards for the EOCD signature.
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up at the start of the file, or once we've scanned further back
    // than the largest possible EOCD record (64KB comment + fixed header).
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Overlap windows by 3 bytes so a signature spanning a boundary is found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;
  // Multi-disk (spanned) archives are not supported.
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
  pZip->m_central_directory_file_ofs = cdir_ofs;
  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;
    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }
    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;
    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // 0xFFFFFFFF sizes signal zip64, which this reader does not support.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
          n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }
  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
  return MZ_TRUE;
}
// Begin reading an archive through the user-supplied m_pRead callback.
// 'size' is the total archive size in bytes. Returns MZ_FALSE on failure,
// in which case any partially built state is torn down.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  // Central directory was missing or corrupt: release the partial state.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
// m_pRead callback for in-memory archives: copies up to n bytes starting at
// file_ofs, clamped to the archive bounds. Returns the byte count copied.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t to_copy = 0;
  if (file_ofs < pZip->m_archive_size)
    to_copy = (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, to_copy);
  return to_copy;
}
// Begin reading an archive that resides entirely in memory at pMem/size.
// The memory is only ever read, and must stay valid for the archive's life.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
  // m_pMem is a non-const slot even though this reader never writes to it.
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#ifndef MINIZ_NO_STDIO
// m_pRead callback for stdio-backed archives: seeks (only when the stream is
// not already positioned there) and reads up to n bytes.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if ((mz_int64)file_ofs < 0) return 0;
  if ((cur_ofs != (mz_int64)file_ofs) &&
      (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Open pFilename for reading as a ZIP archive. The file handle is owned by
// the archive state and closed by mz_zip_reader_end().
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile) return MZ_FALSE;
  // Determine the archive size by seeking to the end of the file.
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;  // ownership transfers to the state
  pZip->m_archive_size = file_size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  // Teardown also closes pFile via the state.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Number of entries in the archive's central directory (0 for a NULL archive).
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  if (!pZip) return 0;
  return pZip->m_total_files;
}
// Return a pointer to file_index's central-directory header record, or NULL
// when the archive, its state, the mode, or the index is invalid.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  {
    const mz_uint32 record_ofs = MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
    return &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8,
                                 record_ofs);
  }
}
// True when the entry's general-purpose bit 0 (traditional PKZIP encryption)
// is set; MZ_FALSE for invalid indices.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
  if (!pHeader) return MZ_FALSE;
  return (MZ_READ_LE16(pHeader + MZ_ZIP_CDH_BIT_FLAG_OFS) & 1);
}
// True if the entry looks like a directory: either its stored name ends with
// '/', or the DOS directory attribute bit is set in the external attributes.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;
  // A trailing '/' in the stored filename marks a directory entry.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if ((filename_len) &&
      (p[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1] == '/'))
    return MZ_TRUE;
  // Fall back to the DOS directory flag (0x10) in the low 16 bits of the
  // external attributes; the creator-OS id in "version made by" is ignored.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  return ((external_attr & 0x10) != 0) ? MZ_TRUE : MZ_FALSE;
}
// Fill *pStat with the unpacked contents of file_index's central-directory
// record: version/flag/method fields, timestamp, CRC-32, sizes, attributes,
// local-header offset, plus truncated, NUL-terminated copies of the filename
// and comment. Returns MZ_FALSE if the archive, index, or pStat is invalid.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat)) return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  // DOS packed time/date fields are converted to a local-time time_t.
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';
  // The comment follows the filename and extra-data fields in the record.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';
  return MZ_TRUE;
}
// Copy the entry's filename into pFilename, truncating to the buffer and
// always NUL-terminating. Returns the copied length plus the terminator
// (the full length + 1 when filename_buf_size is 0), or 0 on error.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size) {
  mz_uint name_len;
  const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
  if (!pHeader) {
    if (filename_buf_size) pFilename[0] = '\0';
    return 0;
  }
  name_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    // The returned size reflects the (possibly truncated) copied length.
    name_len = MZ_MIN(name_len, filename_buf_size - 1);
    memcpy(pFilename, pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, name_len);
    pFilename[name_len] = '\0';
  }
  return name_len + 1;
}
// Compare two length-delimited strings; case-sensitive only when
// MZ_ZIP_FLAG_CASE_SENSITIVE is present in flags.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  mz_uint pos;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return memcmp(pA, pB, len) == 0;
  for (pos = 0; pos < len; ++pos) {
    if (MZ_TOLOWER(pA[pos]) != MZ_TOLOWER(pB[pos])) return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Three-way, case-insensitive comparison between the filename of
// central-directory record l_index and the explicit string pR of length
// r_len. Returns <0, 0, or >0 like strcmp().
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  const mz_uint8 *pEnd;
  mz_uint8 cl = 0, cr = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;  // the name follows the fixed header
  pEnd = pL + MZ_MIN(l_len, r_len);
  while (pL < pEnd) {
    cl = MZ_TOLOWER(*pL);
    cr = MZ_TOLOWER(*pR);
    if (cl != cr) break;
    pL++;
    pR++;
  }
  // Shared prefix exhausted: order by length; otherwise by differing byte.
  return (pL == pEnd) ? (int)(l_len - r_len) : (cl - cr);
}
// Binary search over the case-insensitively sorted filename index.
// Returns the matching file index, or -1 when not found.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int lo = 0, hi = (int)pZip->m_total_files - 1;
  while (lo <= hi) {
    const int mid = (lo + hi) >> 1;
    const int file_index = pIndices[mid];
    const int cmp =
        mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
                                       file_index, pFilename, filename_len);
    if (cmp == 0) return file_index;
    if (cmp < 0)
      lo = mid + 1;
    else
      hi = mid - 1;
  }
  return -1;
}
// Find a file by name (and optionally comment). Uses the sorted filename
// index for the default lookup mode; otherwise scans the whole central
// directory. Returns the file index or -1 if not found / invalid arguments.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: the sorted index only supports the default lookup mode
  // (case-insensitive, full path, no comment filter).
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF) return -1;  // ZIP name/comment lengths are 16-bit
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF) return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len) continue;
    if (comment_len) {
      // The comment follows the filename and extra data in the record.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip any directory components and match the basename only.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}
// Extract file_index into the caller-supplied pBuf/buf_size without any heap
// allocation for the output. With MZ_ZIP_FLAG_COMPRESSED_DATA, the raw
// compressed bytes are returned; otherwise the entry is inflated and its
// CRC-32 verified. pUser_read_buf (optional) provides scratch space for
// streaming reads from non-memory archives. Returns MZ_TRUE on success.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining,
      out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  // mz_uint32-aligned scratch so the local header can be read on the stack.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;
  if ((buf_size) && (!pBuf)) return MZ_FALSE;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size) return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // The compressed data starts after the local header's variable-length
  // filename and extra fields (which may differ from the central dir's).
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }
  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);
  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size) return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  // Streaming inflate loop: refill the input window as needed and write
  // directly into the caller's (non-wrapping) output buffer.
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Only free the read buffer we allocated ourselves.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}
// Locate pFilename, then extract it into the caller's buffer without
// allocating the output (see mz_zip_reader_extract_to_mem_no_alloc).
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  const int file_index =
      mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
                                               flags, pUser_read_buf,
                                               user_read_buf_size);
}
// Extract file_index into pBuf/buf_size using an internally-managed read
// buffer (no user scratch space).
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
                                               flags, NULL, 0);
}
// Locate pFilename and extract it into pBuf/buf_size (no user scratch space).
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}
// Extract file_index into a buffer allocated with the archive's allocator.
// On success returns the buffer (free it with pZip->m_pFree) and, when pSize
// is non-NULL, stores the buffer size there; returns NULL on any failure.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize) *pSize = 0;
  if (!p) return NULL;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
  // Reject allocations that can't be represented in a 32-bit size_t.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize) *pSize = (size_t)alloc_size;
  return pBuf;
}
// Looks up pFilename in the archive and extracts it into a newly allocated
// heap buffer (see mz_zip_reader_extract_to_heap for ownership/pSize
// semantics). Returns NULL if the entry cannot be located or extraction
// fails, with *pSize set to 0 when pSize is non-NULL.
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize) *pSize = 0;
    // Bug fix: this function returns a pointer, so failure must be reported
    // as NULL rather than the mz_bool constant MZ_FALSE (which only happened
    // to have the value 0).
    return NULL;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
// Extracts the archive entry at file_index by streaming it through pCallback
// in chunks, so no single large output allocation is needed. Only stored
// (method 0) and DEFLATE entries are supported; encrypted and patch entries
// are rejected. When MZ_ZIP_FLAG_COMPRESSED_DATA is set, the raw compressed
// stream is delivered to the callback and the CRC check is skipped.
// Returns MZ_TRUE on success (empty files and directories succeed trivially).
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
                           out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  // Staging area for the local directory header, mz_uint32-aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // The file data follows the local header plus its variable-length
  // filename and extra fields.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    // Memory-backed archive: read directly out of the archive image.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    // File/callback-backed archive: allocate a bounded I/O staging buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      // Single callback invocation covering the whole (in-memory) payload.
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // Stream the stored payload chunk by chunk through the read callback.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    // DEFLATE path: inflate through a dictionary-sized ring buffer, flushing
    // each produced window slice to the callback.
    tinfl_decompressor inflator;
    tinfl_init(&inflator);
    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        // Current write position within the ring buffer (power-of-2 mask).
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          // Refill the compressed-input staging buffer.
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
            (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
            comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Guard against streams that inflate past the advertised size.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }
  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // pRead_buf only points into the archive image for memory-backed archives;
  // otherwise it was allocated above and must be freed.
  if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
  return status == TINFL_STATUS_DONE;
}
// Looks up pFilename in the archive and streams its contents through
// pCallback (see mz_zip_reader_extract_to_callback). Returns MZ_FALSE if the
// entry cannot be located or extraction fails.
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  const int idx = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_callback(pZip, idx, pCallback,
                                                       pOpaque, flags);
}
#ifndef MINIZ_NO_STDIO
// mz_file_write_func adapter used by mz_zip_reader_extract_to_file: writes n
// bytes to the MZ_FILE handle carried in pOpaque. The offset is ignored
// because extraction delivers data sequentially.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  MZ_FILE *pDst_file = (MZ_FILE *)pOpaque;
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, pDst_file);
}
// Extracts the archive entry at file_index to the file pDst_filename
// (created/truncated with "wb"). On success, and when time support is
// compiled in, the destination file's timestamps are set to the entry's
// stored time. Returns MZ_FALSE on stat/open/write/close failure.
// NOTE(review): on extraction failure the partially written destination file
// is left on disk - confirm callers expect this.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename, "wb");
  if (!pFile) return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // A close error counts as failure even when extraction itself succeeded.
  if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO
// Tears down a reader-mode archive: frees the central directory arrays,
// closes the backing stdio file (if any), releases the internal state, and
// marks the archive invalid. Returns MZ_FALSE if the archive is not a valid
// initialized reader.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // The guard above already rejects a NULL m_pState; this check is redundant
  // but harmless.
  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    // Detach the state first so the archive is never left pointing at freed
    // memory.
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif // #ifndef MINIZ_NO_STDIO
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Looks up pArchive_filename in the archive and extracts it to the file
// pDst_filename (see mz_zip_reader_extract_to_file). Returns MZ_FALSE if the
// entry cannot be located or extraction fails.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  const int idx =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (idx < 0) return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, idx, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Stores a 16-bit value at p in little-endian byte order, regardless of host
// endianness.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
}
// Stores a 32-bit value at p in little-endian byte order, regardless of host
// endianness.
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  int i;
  for (i = 0; i < 4; ++i) {
    p[i] = (mz_uint8)(v & 0xFF);
    v >>= 8;
  }
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
// Initializes an archive for writing through the caller-installed m_pWrite
// callback. existing_size is the number of bytes already present at the start
// of the output (new data is appended after it). Installs default allocator
// callbacks where the caller left them NULL, allocates the internal state,
// and sets up the central directory arrays. Returns MZ_FALSE on invalid
// arguments, a non-power-of-2 m_file_offset_alignment, or allocation failure.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  // The central directory is a byte array; the offset arrays hold mz_uint32s.
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Write callback for heap-backed archives: copies n bytes into the internal
// memory block at file_ofs, growing the block (doubling its capacity) as
// needed via the archive's realloc callback. Returns the number of bytes
// written, or 0 on failure (n == 0, 32-bit size overflow, or realloc failure).
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  // The block's logical size is the furthest byte ever written.
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
  // The (0, expr) comma form silences MSVC's constant-conditional warning.
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Grow geometrically from at least 64 bytes to amortize reallocations.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size) new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}
// Initializes a writer that accumulates the archive in a growable heap block
// owned by the archive's internal state. size_to_reserve_at_beginning bytes
// are left untouched at the front of the output; initial_allocation_size is
// a pre-allocation hint. Returns MZ_FALSE on init or allocation failure.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  size_t initial_size;
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  // Pre-allocate at least enough to cover the reserved region up front.
  initial_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning);
  if (initial_size != 0) {
    pZip->m_pState->m_pMem =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, initial_size);
    if (NULL == pZip->m_pState->m_pMem) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_size;
  }
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Write callback for stdio-backed archives: seeks to file_ofs (only when the
// current position differs, to avoid redundant seeks during sequential
// writes) and writes n bytes. Returns the number of bytes written, or 0 on a
// negative offset or seek failure.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Initializes a writer that streams the archive to the file pFilename
// (created/truncated with "wb"). If size_to_reserve_at_beginning is nonzero,
// that many zero bytes are written first so the caller can later overwrite
// them (e.g. with a custom header). Returns MZ_FALSE on init, open, or write
// failure; on failure after init, the writer is torn down.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Fill the reserved region with zeros, 4 KiB at a time.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Converts an archive that was opened for reading into one that can be
// appended to. New files are written starting at the old central directory's
// offset (the old central directory is overwritten and must be rewritten when
// the archive is finalized). Supports archives backed by a stdio file
// (reopened "r+b" via pFilename), by heap memory, or by user read/write
// callbacks. Returns MZ_FALSE on failure; if reopening the stdio file fails,
// the reader is closed because its state is no longer usable.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support max
  // size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    // Stdio support compiled out: a file-backed reader cannot be reopened.
    // Fix: cast to void to suppress the unused-parameter warning (the bare
    // "pFilename;" expression statement here triggered -Wunused-value).
    (void)pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    if (!pFilename) return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}
// Convenience wrapper around mz_zip_writer_add_mem_ex: adds an in-memory
// buffer to the archive with no comment, letting the writer compute the CRC
// and sizes itself.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  const void *pNo_comment = NULL;
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size,
                                  pNo_comment, 0, level_and_flags, 0, 0);
}
// Bookkeeping threaded through tdefl's output callback while a file's data is
// being compressed into an archive under construction.
typedef struct {
  mz_zip_archive *m_pZip;            // destination archive
  mz_uint64 m_cur_archive_file_ofs;  // next write offset within the archive
  mz_uint64 m_comp_size;             // running count of compressed bytes written
} mz_zip_writer_add_state;
// tdefl output callback: appends len compressed bytes to the destination
// archive at the current write offset and advances the running offset and
// compressed-size counters. Returns MZ_FALSE on a short write.
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  mz_zip_archive *pZip = pState->m_pZip;
  size_t written = pZip->m_pWrite(pZip->m_pIO_opaque,
                                  pState->m_cur_archive_file_ofs, pBuf, len);
  if ((int)written != len) return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}
// Serializes a ZIP local directory header into pDst (exactly
// MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes), all fields little-endian. Fields not
// written explicitly are left zero by the memset. Always returns MZ_TRUE.
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // Version needed: 2.0 for DEFLATE entries, 0 for stored entries.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}
// Serializes a ZIP central directory file header into pDst (exactly
// MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes), all fields little-endian. Fields
// not written explicitly (version made by, disk number, internal attrs) are
// left zero by the memset. Always returns MZ_TRUE.
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // Version needed: 2.0 for DEFLATE entries, 0 for stored entries.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}
// Appends one central directory record (fixed header + filename + extra +
// comment) to the in-memory central directory, and records its starting
// offset in the parallel offsets array. On any push failure the central
// directory is rolled back to its original size. Returns MZ_FALSE on 32-bit
// size overflow (no zip64 support) or allocation failure.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      // Fix: the argument here had been mangled by an encoding corruption
      // ("&cent" -> the cent-sign character); it must be the address of
      // central_dir_ofs.
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Basic ZIP archive filename validity checks: a valid name cannot start with
// a forward slash, cannot contain a drive-letter separator (':'), and cannot
// use DOS-style backslashes.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  const char *p = pArchive_name;
  if (p[0] == '/') return MZ_FALSE;
  for (; *p; ++p) {
    if ((*p == '\\') || (*p == ':')) return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Returns how many padding bytes must be written so the next local header
// lands on an m_file_offset_alignment boundary (0 when alignment is
// disabled or the current offset is already aligned).
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 ofs_in_block;
  if (!pZip->m_file_offset_alignment) return 0;
  // Alignment is validated as a power of 2 in mz_zip_writer_init, so masking
  // with (alignment - 1) gives the offset within the current aligned block.
  ofs_in_block =
      (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - ofs_in_block) &
         (pZip->m_file_offset_alignment - 1);
}
// Writes n zero bytes to the archive starting at cur_file_ofs, in chunks of
// at most 4 KiB. Returns MZ_FALSE if the underlying write callback fails.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char zeros[4096];
  // Only the portion of the scratch buffer we will actually use needs zeroing.
  memset(zeros, 0, MZ_MIN(sizeof(zeros), n));
  while (n > 0) {
    mz_uint32 chunk = MZ_MIN(sizeof(zeros), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, zeros, chunk) != chunk)
      return MZ_FALSE;
    cur_file_ofs += chunk;
    n -= chunk;
  }
  return MZ_TRUE;
}
// Adds an in-memory buffer to an archive being written. Writes (in order)
// alignment padding, a placeholder local header, the filename, and the file
// data (stored, or DEFLATE-compressed via tdefl), then backpatches the real
// local header and records a central directory entry in memory. When
// MZ_ZIP_FLAG_COMPRESSED_DATA is set, pBuf already holds compressed data and
// the caller supplies uncomp_size/uncomp_crc32. level_and_flags is a
// compression level (0-10) OR'd with MZ_ZIP_FLAG_* bits; a negative value
// selects MZ_DEFAULT_LEVEL. No zip64: all sizes and offsets must fit in 32
// bits. Returns MZ_TRUE on success.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  // Low 4 bits carry the compression level; the rest are flags.
  level = level_and_flags & 0xF;
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // A caller-supplied uncomp_size only makes sense for pre-compressed data.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  {
    // Stamp the entry with the current wall-clock time in DOS format.
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size)) return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // Reserve space for the alignment padding plus a placeholder local header;
  // the real header is written after the data, once sizes/CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // Normal (not pre-compressed) data: compute the CRC/size ourselves.
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      // Tiny payloads can't benefit from DEFLATE; store them.
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Pre-compressed input is still recorded as DEFLATE in the headers.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
  } else if (buf_size) {
    // Compress via tdefl; the put-buf callback streams output straight into
    // the archive and tracks the compressed size.
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    // -15 window bits => raw deflate stream (no zlib header), per ZIP spec.
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Backpatch the real local header over the zeros reserved earlier.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Adds the on-disk file pSrc_filename to the archive under pArchive_name,
// streaming it through a bounded I/O buffer so arbitrarily large files can
// be added without loading them into memory. Layout written: alignment
// padding, placeholder local header, filename, file data (stored or
// DEFLATE), then the backpatched local header and an in-memory central
// directory entry. The entry's DOS timestamp comes from the source file's
// modification time. MZ_ZIP_FLAG_COMPRESSED_DATA is not supported here.
// No zip64: the source file and all offsets must fit in 32 bits.
// Returns MZ_TRUE on success.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  // Low 4 bits carry the compression level; the rest are flags.
  level = level_and_flags & 0xF;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed input makes no sense when reading from a source file.
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  // Stamp the entry with the source file's modification time.
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file) return MZ_FALSE;
  // Determine the source file's size by seeking to its end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Tiny payloads can't benefit from DEFLATE; store them.
  if (uncomp_size <= 3) level = 0;
  // Reserve space for the alignment padding plus a placeholder local header;
  // the real header is written after the data, once sizes/CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    // Bounded staging buffer for streaming the source file.
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Stored: copy the file through in chunks, CRC'ing as we go.
      while (uncomp_remaining) {
        mz_uint n =
            (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
                            n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // DEFLATE: feed chunks into tdefl; the put-buf callback streams the
      // compressed output straight into the archive.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      // -15 window bits => raw deflate stream (no zlib header), per ZIP spec.
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
                                               (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        // Flush only on the final chunk.
        status = tdefl_compress_buffer(
            pComp, pRead_buf, in_buf_size,
            uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Backpatch the real local header over the zeros reserved earlier.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Copies the file at `file_index` from a source (reader) archive into the
// destination (writer) archive without recompressing: the raw local header,
// compressed payload, optional data descriptor and central-directory entry
// are transferred verbatim. Returns MZ_FALSE on any I/O error, a bad local
// header signature, or sizes/offsets that would need zip64.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
// Scratch space for the local header, sized/aligned for 32-bit access.
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
// Destination must be an open writer and the source entry must exist.
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
// Locate and read the source file's local directory header.
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
// Pad the destination so the copied local header lands on the configured
// alignment boundary.
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
// Copy the local header verbatim.
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
// Bytes to copy = filename + extra field (lengths from the local header)
// plus the compressed payload size (from the central directory record).
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
// The buffer must also be big enough for the 4-word data descriptor read
// below, hence the MZ_MAX with sizeof(mz_uint32) * 4.
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
// Stream the still-compressed data across in bounded chunks.
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
// Bit 3 of the general purpose flags means a data descriptor follows the
// payload; copy it too.
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
// Descriptor is 3 words, or 4 when it begins with the optional signature.
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
// Append a copy of the source central-directory record, patching only the
// local-header offset to point at the data we just wrote.
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
// Also copy the variable-length tail: filename + extra field + comment.
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
// Roll back the partially-appended record on failure.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
// Writes the central directory followed by the end-of-central-directory
// record, flushes the output FILE (stdio builds), and moves the writer into
// the "finalized" state. After this only mz_zip_writer_end() or
// mz_zip_writer_finalize_heap_archive() should be called on the archive.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
// Push buffered bytes to disk so the file is complete on return.
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
// Finalizes a heap-backed archive and hands ownership of the in-memory
// buffer to the caller via *pBuf / *pSize (caller frees with the archive's
// allocator). Only valid when the writer targets the heap write callback.
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if (!pZip || !pBuf || !pSize) return MZ_FALSE;
  if (!pZip->m_pState) return MZ_FALSE;
  // Reject archives that were not written to an in-memory heap buffer.
  if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
  if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
  mz_zip_internal_state *pState = pZip->m_pState;
  // Detach the heap block from the writer state; the caller now owns it.
  *pBuf = pState->m_pMem;
  *pSize = pState->m_mem_size;
  pState->m_pMem = NULL;
  pState->m_mem_size = 0;
  pState->m_mem_capacity = 0;
  return MZ_TRUE;
}
// Releases every resource owned by a zip writer (central directory arrays,
// output FILE, heap buffer, internal state struct) and marks the archive
// handle invalid. Valid in both the writing and finalized states.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_bool status = MZ_TRUE;
  const bool usable =
      pZip && pZip->m_pState && pZip->m_pAlloc && pZip->m_pFree &&
      ((pZip->m_zip_mode == MZ_ZIP_MODE_WRITING) ||
       (pZip->m_zip_mode == MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED));
  if (!usable) return MZ_FALSE;
  // Detach the state first so the handle is never left half-torn-down.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif  // #ifndef MINIZ_NO_STDIO
  // Heap-backed archives own their buffer; release it before the state.
  if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
    pState->m_pMem = NULL;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return status;
}
#ifndef MINIZ_NO_STDIO
// One-shot convenience API: adds a memory buffer as a new file inside the
// archive at pZip_filename, creating the archive when it does not exist or
// appending to it otherwise. The archive is finalized and closed before
// returning; a freshly-created archive file is deleted on failure.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
// Validate arguments; the compression level lives in the low 4 bits.
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
// stat() failing means the file doesn't exist (or can't be read): create.
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
// Convert the reader into a writer positioned after the existing data.
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
// Convenience helper: opens the archive at pZip_filename, locates
// pArchive_name and returns its uncompressed contents in a heap buffer
// (caller frees). Returns NULL on any failure; *pSize gets the byte count.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint flags) {
  mz_zip_archive zip_archive;
  void *pData = NULL;
  int file_index;
  if (pSize) *pSize = 0;
  if ((pZip_filename == NULL) || (pArchive_name == NULL)) return NULL;
  MZ_CLEAR_OBJ(zip_archive);
  // Sorting the central directory is pointless for a single lookup.
  if (!mz_zip_reader_init_file(
          &zip_archive, pZip_filename,
          flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
    return NULL;
  file_index =
      mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags);
  if (file_index >= 0) {
    pData =
        mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
  }
  mz_zip_reader_end(&zip_archive);
  return pData;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace miniz
#else
// Reuse the MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
// Stores a heap-allocated copy of `msg` at *err (caller frees with free()).
// Passing err == NULL means the caller does not want error details.
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (err == NULL) {
    return;
  }
#ifdef _WIN32
  *err = _strdup(msg.c_str());
#else
  *err = strdup(msg.c_str());
#endif
}
// Byte length of the EXR magic-number + version block at the start of a file.
static const int kEXRVersionSize = 8;
// Alignment-safe byte-wise copy of a 16-bit value.
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 2; ++i) {
    d[i] = s[i];
  }
}
// Converts a 16-bit value between little-endian file order and host order.
// Compiles to a no-op on little-endian hosts.
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  const unsigned short v = *val;
  *val = static_cast<unsigned short>(((v & 0x00FFU) << 8) | (v >> 8));
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
// Alignment-safe byte-wise copies of 32-bit values. The int / unsigned int /
// float overloads are identical apart from the pointee type.
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) d[i] = s[i];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) d[i] = s[i];
}
static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; ++i) d[i] = s[i];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// Converts a 32-bit value between little-endian file order and host order.
// Compiles to a no-op on little-endian hosts.
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  const unsigned int v = *val;
  *val = ((v & 0x000000FFU) << 24) | ((v & 0x0000FF00U) << 8) |
         ((v & 0x00FF0000U) >> 8) | ((v & 0xFF000000U) >> 24);
#endif
}
#if 0
// Currently unused (compiled out): alignment-safe byte-wise copy of a
// 64-bit value, kept for symmetry with cpy2/cpy4.
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
// Converts a 64-bit value between little-endian file order and host order.
// Compiles to a no-op on little-endian hosts.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  const tinyexr::tinyexr_uint64 copy = (*val);
  unsigned char *out = reinterpret_cast<unsigned char *>(val);
  const unsigned char *in = reinterpret_cast<const unsigned char *>(&copy);
  for (int i = 0; i < 8; ++i) {
    out[i] = in[7 - i];
  }
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
// A 32-bit IEEE-754 float viewed three ways: raw bits (u), float value (f),
// or sign/exponent/mantissa bit-fields (s). Field order depends on host
// endianness, detected via miniz's MINIZ_LITTLE_ENDIAN flag.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// A 16-bit IEEE-754 half-float viewed as raw bits (u) or as
// sign/exponent/mantissa bit-fields (s); layout logic mirrors FP32.
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Converts an IEEE half-precision value to single precision, handling
// denormals, Inf and NaN (branch-light technique from the rygorous gist
// cited above).
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
// Converts a single-precision value to half precision with rounding,
// mapping overflow to signed infinity, flushing deep underflow to zero,
// and turning NaN into a quiet NaN.
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Reads a NUL-terminated string of at most `len` bytes starting at `ptr`.
// On success stores the string (without the terminator) in *s and returns
// the pointer just past the '\0'. If no terminator is found within `len`
// bytes, clears *s and returns NULL (truncated/corrupt input).
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  size_t n = 0;
  while (n < len && ptr[n] != '\0') {
    n++;
  }
  if (n >= len) {
    // Ran off the end of the buffer without seeing a terminator.
    (*s) = std::string();
    return NULL;
  }
  (*s) = std::string(ptr, ptr + n);
  return ptr + n + 1;  // skip '\0'
}
// Parses one EXR attribute from `marker` (at most `size` bytes). Wire
// layout: name\0 type\0 <uint32 LE data length> <data bytes>. On success
// fills name/type/data, stores total bytes consumed in *marker_size, and
// returns true. Returns false on truncated or malformed input.
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
// memcpy because the length field may be unaligned within the file buffer.
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
// Represent the empty string as a single NUL byte.
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
// Appends one serialized EXR attribute to `out`: NUL-terminated name,
// NUL-terminated type, little-endian int length, then `len` raw data bytes.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  const unsigned char *n = reinterpret_cast<const unsigned char *>(name);
  const unsigned char *t = reinterpret_cast<const unsigned char *>(type);
  out->insert(out->end(), n, n + strlen(name) + 1);  // include '\0'
  out->insert(out->end(), t, t + strlen(type) + 1);  // include '\0'
  // The length field is stored little-endian in the file.
  int outLen = len;
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  const unsigned char *lenBytes = reinterpret_cast<unsigned char *>(&outLen);
  out->insert(out->end(), lenBytes, lenBytes + sizeof(int));
  out->insert(out->end(), data, data + len);
}
// Metadata for one image channel, as parsed from / written to the EXR
// "chlist" attribute (see ReadChannelInfo / WriteChannelInfo below).
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type; // storage type id read from the file
int x_sampling; // horizontal sampling factor from the file
int y_sampling; // vertical sampling factor from the file
unsigned char p_linear; // perceptual-linearity flag byte from the file
unsigned char pad[3]; // explicit padding; not serialized
} ChannelInfo;
// Aggregated, decoded contents of an EXR file header. Fields mirror the
// standard EXR header attributes; attributes without a dedicated field are
// kept verbatim in `attributes`.
typedef struct {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4]; // NOTE(review): assumed [xmin, ymin, xmax, ymax] per EXR convention
int line_order;
int display_window[4]; // NOTE(review): assumed [xmin, ymin, xmax, ymax] per EXR convention
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len; // byte length of the header block in the file
int compression_type; // compression scheme id read from the file
// Resets every field to a neutral "empty header" state so the struct can
// be reused across files.
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
} HeaderInfo;
// Decodes the raw bytes of an EXR "chlist" attribute into ChannelInfo
// records. The list is a sequence of entries -- name\0, int pixel_type,
// uchar p_linear, uchar[3] reserved, int x_sampling, int y_sampling --
// terminated by a single 0 byte. Returns false on truncated input.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
// A 0 byte where a name would start terminates the channel list.
if ((*p) == 0) {
break;
}
ChannelInfo info;
// Bytes remaining in the buffer from the current read position.
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
// The fixed-size tail of an entry is 16 bytes; reject if it would run
// past the buffer (>= also leaves room for the list terminator byte).
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
// Fields are little-endian in the file; swap on big-endian hosts.
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
// Serializes channel metadata into the wire format consumed by
// ReadChannelInfo: one (name\0 + 16-byte fixed tail) record per channel,
// followed by a terminating 0 byte.
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
// +1 for the terminating 0 byte; resize() zero-fills, which also zeroes
// the 3 reserved bytes per entry that are skipped below.
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
// Stored little-endian in the file; swap on big-endian hosts.
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
// 1 byte for p_linear + 3 reserved bytes (left zeroed by the resize).
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
// Deflate-compresses one block of image data into `dst` for EXR ZIP/ZIPS
// storage. The input is first split-interleaved (odd/even bytes into two
// halves) and delta-encoded, matching OpenEXR's ImfZipCompressor. When the
// deflated result is not smaller than the input, the raw input is stored
// instead (Issue 40). compressedSize receives the bytes written to dst.
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
// Alternate source bytes into the first and second halves of tmpBuf.
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
// Replace each byte with its delta from the previous byte (biased).
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
// Inverse of CompressZip: inflates `src` and undoes the delta predictor and
// half-split byte reordering. *uncompressed_size is both the expected
// output size (in) and the actual inflated size (out). A src_size equal to
// the expected size means the block was stored raw (Issue 40). Returns
// false when inflation fails.
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
// Predictor.
{
// Undo the biased delta encoding via a running sum.
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
// Re-interleave the two halves back into the original byte order.
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Runs shorter than this are cheaper to store as literals (a run costs 2
// bytes in the encoded stream).
const int MIN_RUN_LENGTH = 3;
// Run and literal counts must fit in a signed byte.
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
//
// Run-length encode `inLength` bytes from `in` into `out`, returning the
// number of bytes written. Encoded stream format: a negative byte -n
// followed by n literal bytes, or a non-negative byte c followed by one
// byte to be repeated c + 1 times.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const int kMinRun = 3;    // MIN_RUN_LENGTH
  const int kMaxRun = 127;  // MAX_RUN_LENGTH
  const char *const end = in + inLength;
  const char *cur = in;
  signed char *dst = out;
  while (cur < end) {
    // Extend a run of identical bytes as far as the format allows.
    const char *probe = cur + 1;
    while (probe < end && *probe == *cur && probe - cur - 1 < kMaxRun) {
      ++probe;
    }
    if (probe - cur >= kMinRun) {
      //
      // Compressible run
      //
      *dst++ = static_cast<char>(probe - cur) - 1;
      *dst++ = *(reinterpret_cast<const signed char *>(cur));
      cur = probe;
    } else {
      //
      // Uncompressable run: extend until a run of >= 3 equal bytes begins.
      //
      while (probe < end &&
             ((probe + 1 >= end || *probe != *(probe + 1)) ||
              (probe + 2 >= end || *(probe + 1) != *(probe + 2))) &&
             probe - cur < kMaxRun) {
        ++probe;
      }
      *dst++ = static_cast<char>(cur - probe);  // negative literal count
      while (cur < probe) {
        *dst++ = *(reinterpret_cast<const signed char *>(cur++));
      }
    }
  }
  return static_cast<int>(dst - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the uncompressed
// data would exceed maxLength or the input stream is malformed/truncated.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;
  while (inLength > 0) {
    if (*in < 0) {
      // Literal run: -count, followed by count raw bytes.
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;
      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Repeated run: count, followed by one byte repeated count + 1 times.
      int count = *in++;
      inLength -= 2;
      // Reject truncated input: without this check the memset below would
      // read the repeat byte one past the end of `in` (see issue #112).
      if (inLength < 0) return 0;
      if (0 > (maxLength -= count + 1)) return 0;
      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;
      in++;
    }
  }
  return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// RLE-compresses one block of image data into `dst`, applying the same
// split-interleave reordering and delta predictor as CompressZip first
// (per OpenEXR's ImfRleCompressor). Falls back to storing the input raw
// when RLE does not shrink it (Issue 40). compressedSize receives the
// bytes written; `dst` must hold (src_size * 3) / 2 bytes for the
// worst-case RLE expansion.
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
// Alternate source bytes into the first and second halves of tmpBuf.
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
// Replace each byte with its delta from the previous byte (biased).
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// outSize will be (srcSiz * 3) / 2 at max.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
// Inverse of CompressRle: run-length decodes `src` and undoes the delta
// predictor and half-split byte reordering. A src_size equal to
// uncompressed_size means the block was stored raw (Issue 40). Returns
// false when the input is implausibly short (issue #112) or the decoded
// length does not match uncompressed_size.
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
// Predictor.
{
// Undo the biased delta encoding via a running sum.
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
// Re-interleave the two halves back into the original byte order.
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel bookkeeping for PIZ (de)compression: start/end bracket this
// channel's samples inside the shared 16-bit working buffer.
struct PIZChannelData {
  unsigned short *start; // first sample of the channel in the temp buffer
  unsigned short *end;   // one past the last sample written so far
  int nx;                // width in pixels
  int ny;                // number of scanlines in this block
  int ys;                // y sampling (OpenEXR concept; not populated by the
                         // visible callers here — TODO confirm before use)
  int size;              // 16-bit words per pixel: 1 for HALF, 2 otherwise
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// 14-bit wavelet encode step: l receives the (floor) average of a and b,
// h their signed difference, both reinterpreted as unsigned 16-bit values.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);
  l = static_cast<unsigned short>(static_cast<short>((sa + sb) >> 1));
  h = static_cast<unsigned short>(static_cast<short>(sa - sb));
}
// 14-bit wavelet decode step: exact inverse of wenc14, recovering the two
// original samples from the stored average l and difference h.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const short sl = static_cast<short>(l);
  const short sh = static_cast<short>(h);
  const int d = sh;
  // Undo the floor division: add back the parity bit lost by (a+b)>>1.
  const int sum = sl + (d & 1) + (d >> 1);
  a = static_cast<unsigned short>(static_cast<short>(sum));
  b = static_cast<unsigned short>(static_cast<short>(sum - d));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the modulo-arithmetic wavelet transform (wenc16/wdec16):
// the offsets re-center values around half the range and MOD_MASK wraps
// every intermediate result back into 16 bits.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
// 16-bit wavelet encode with modulo arithmetic: handles the full 16-bit
// range (unlike wenc14, which requires values below 1 << 14).
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const int ao = (a + A_OFFSET) & MOD_MASK;
  int mean = (ao + b) >> 1;
  int diff = ao - b;
  if (diff < 0) {
    mean = (mean + M_OFFSET) & MOD_MASK;
  }
  l = static_cast<unsigned short>(mean);
  h = static_cast<unsigned short>(diff & MOD_MASK);
}
// 16-bit wavelet decode with modulo arithmetic: inverse of wenc16.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int mean = l;
  const int diff = h;
  const int bb = (mean - (diff >> 1)) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>((diff + bb - A_OFFSET) & MOD_MASK);
}
//
// 2D Wavelet encoding:
//
// In-place 2D Haar wavelet transform over a nx-by-ny grid stored with the
// given x/y strides. Picks the 14-bit basis (wenc14) when the maximum value
// allows it, otherwise the modulo-arithmetic 16-bit basis (wenc16). The
// levels processed here are exactly mirrored by wav2Decode.
static void wav2Encode(
    unsigned short *in, // io: values are transformed in place
    int nx, // i : x size
    int ox, // i : x offset
    int ny, // i : y size
    int oy, // i : y offset
    unsigned short mx) // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1; // == 1 << level
  int p2 = 2; // == 1 << (level+1)
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;  // one-level step in y
    int oy2 = oy * p2; // two-level step in y
    int ox1 = ox * p;  // one-level step in x
    int ox2 = ox * p2; // two-level step in x
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        // 2x2 quad: px (top-left), p01 (top-right),
        // p10 (bottom-left), p11 (bottom-right).
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet encoding
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }
      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
// In-place inverse of wav2Encode: walks the wavelet levels from coarsest to
// finest (encode goes fine-to-coarse, hence the level search below) and
// applies the matching 14-bit or 16-bit decode basis at each level.
static void wav2Decode(
    unsigned short *in, // io: values are transformed in place
    int nx, // i : x size
    int ox, // i : x offset
    int ny, // i : y size
    int oy, // i : y offset
    unsigned short mx) // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level
  //
  while (p <= n) p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;  // one-level step in y
    int oy2 = oy * p2; // two-level step in y
    int ox1 = ox * p;  // one-level step in x
    int ox2 = ox * p2; // two-level step in x
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        // 2x2 quad: px (top-left), p01 (top-right),
        // p10 (bottom-left), p11 (bottom-right).
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
// Huffman coder parameters (derived from OpenEXR's ImfHuf).
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
// One slot of the decoding hash table. Codes of at most HUF_DECBITS bits are
// resolved directly (len = code length, lit = decoded symbol). Longer codes
// share a slot: len stays 0, `p` holds the candidate symbol indices and
// `lit` is then the number of candidates.
struct HufDec { // short code long code
  //-------------------------------
  int len : 8; // code length 0
  int lit : 24; // lit p size
  int *p; // 0 lits
};
// A packed table entry stores the canonical code in bits [63:6] and its
// bit length in the low 6 bits (see hufCanonicalCodeTable).
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
// Append the low `nBits` of `bits` to the bit accumulator `c` (with `lc`
// pending bits), flushing complete bytes to `out` as they become available.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;
  while (lc >= 8) {
    lc -= 8;
    *out++ = static_cast<char>(c >> lc);
  }
}
// Read `nBits` from the bit accumulator, refilling whole bytes from `in`
// until enough bits are buffered. Returns the bits as an unsigned value.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    const unsigned char next = static_cast<unsigned char>(*in++);
    c = (c << 8) | next;
    lc += 8;
  }
  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// In-place: on entry hcode[i] holds the code LENGTH for symbol i; on exit
// it holds the packed (code << 6) | length entry that hufLength()/hufCode()
// unpack. Codes are "canonical", so only lengths need to be transmitted.
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59]; // max code length is 58 bits; n is indexed by length
  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i) n[i] = 0;
  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //
  long long c = 0;
  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }
  //
  // hcode[i] contains the length, l, of the
  // code for symbol i. Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);
    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Heap ordering used by hufBuildEncTable: compares the pointed-to frequency
// counts so that std::make_heap/pop_heap behave as a min-heap (the least
// frequent symbol ends up on top).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *b < *a; }
};
// Compute Huffman code lengths from symbol frequencies (Huffman's algorithm
// run with a pointer heap and linked "descendant" lists instead of an
// explicit tree), then replace `frq` with the packed canonical code table.
static void hufBuildEncTable(
    long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
    int *im, // o: min frq index
    int *iM) // o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded. (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  // to non-zero entries in frq:
  //
  // frq[im] != 0, and frq[i] == 0 for all i < im
  // frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  // entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  // for all array entries.
  //
  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);
  *im = 0;
  while (!frq[*im]) (*im)++;
  int nf = 0;
  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;
    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }
  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly. Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;
  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i. Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  // Make a heap that contains all symbols with a non-zero frequency,
  // with the least frequent symbol on top.
  //
  // Repeat until only one symbol is left on the heap:
  //
  // Take the two least frequent symbols off the top of the heap.
  // Create a new node that has first two nodes as children, and
  // whose frequency is the sum of the frequencies of the first
  // two nodes. Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree. For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly. When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    // (Pointer differences are indices into frq, < HUF_ENCSIZE, so the
    // narrowing to int is safe.)
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;
    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //
    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }
    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) break;
    }
  }
  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs. Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
// Zero-run codes for the packed table format described above: codes 59..62
// encode runs of 2..5 zeroes directly; code 63 is followed by an 8-bit
// count encoding runs of 6..261 zeroes.
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; // 6
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; // 261
// Serialize the encoding table between indices im and iM: only the 6-bit
// code lengths are written (canonical codes are reconstructible from
// lengths alone); runs of zero-length (unused) symbols are compressed with
// the zero-run codes defined above.
static void hufPackEncTable(
    const long long *hcode, // i : encoding table [HUF_ENCSIZE]
    int im, // i : min hcode index
    int iM, // i : max hcode index
    char **pcode) // o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);
    if (l == 0) {
      // Measure the zero run starting at im (capped at LONGEST_LONG_RUN).
      int zerun = 1;
      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }
      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
      // A single zero falls through and is written as a literal length 0.
    }
    outputBits(6, l, c, lc, p);
  }
  // Flush the final partial byte, padded with zero bits.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
  *pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
// Rebuild the encoding table from the packed form produced by
// hufPackEncTable(): read 6-bit lengths (expanding zero-run codes), then
// reconstruct canonical codes from the lengths. Returns false if the packed
// data would read past `ni` bytes or a zero run overflows [im, iM].
static bool hufUnpackEncTable(
    const char **pcode, // io: ptr to packed table (updated)
    int ni, // i : input size (in bytes)
    int im, // i : min hcode index
    int iM, // i : max hcode index
    long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  const char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    if (p - *pcode >= ni) {
      return false;
    }
    long long l = hcode[im] = getBits(6, c, lc, p); // code length
    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }
      // Long run: next 8 bits give the run length minus SHORTEST_LONG_RUN.
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short run: the code itself encodes a run of 2..5 zeroes.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    }
  }
  *pcode = const_cast<char *>(p);
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
// Reset every slot of a freshly allocated decoding table to "empty": zero
// length, zero literal, no long-code candidate list. Done field-by-field
// rather than with memset so the pointer member is a proper null pointer.
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  for (int slot = 0; slot < HUF_DECSIZE; slot++) {
    hdecod[slot].len = 0;
    hdecod[slot].lit = 0;
    hdecod[slot].p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
// Returns false when the encoding table is inconsistent (a code wider than
// its stated length, or colliding short/long codes).
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
                             int im, // i : min index in hcode
                             int iM, // i : max index in hcode
                             HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);
    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      // invalidTableEntry();
      return false;
    }
    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      // The slot is selected by the code's top HUF_DECBITS bits; the symbol
      // index is appended to the slot's candidate array (grown by one).
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        // invalidTableEntry();
        return false;
      }
      pl->lit++;
      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];
        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
        delete[] p;
      } else {
        pl->p = new int[1];
      }
      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //
      // Every table index whose top l bits equal the code maps to this
      // symbol, so fill all 2^(HUF_DECBITS - l) slots.
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          // invalidTableEntry();
          return false;
        }
        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
// Free the long-code candidate arrays allocated by hufBuildDecTable() and
// null the pointers so that calling this twice is harmless.
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
  for (int slot = 0; slot < HUF_DECSIZE; slot++) {
    if (!hdecod[slot].p) continue;
    delete[] hdecod[slot].p;
    hdecod[slot].p = 0;
  }
}
//
// ENCODING
//
// Emit one Huffman code: the packed table entry carries both the code value
// and its bit length (see hufLength/hufCode).
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  const long long codeBits = hufLength(code);
  const long long codeValue = hufCode(code);
  outputBits(codeBits, codeValue, c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of (runCount + 1) instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // Explicit form: the post-decrement runs runCount + 1 times, matching
    // the one-symbol-plus-runCount-repeats expansion done by the decoder.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
// Identical runs of a symbol are collapsed via sendCode (using `rlc` as the
// run-length escape symbol). Returns the exact output size in BITS.
//
static int hufEncode // return: output size (in bits)
    (const long long *hcode, // i : encoding table
     const unsigned short *in, // i : uncompressed input buffer
     const int ni, // i : input buffer size (in bytes)
     int rlc, // i : rl code
     char *out) // o: compressed output buffer
{
  char *outStart = out;
  long long c = 0; // bits not yet written to out
  int lc = 0; // number of valid bits in c (LSB)
  int s = in[0]; // current symbol
  int cs = 0; // repeats of s seen after its first occurrence (max 255)
  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }
    s = in[i];
  }
  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  // Flush the final partial byte (zero-padded); lc bits of it are valid.
  if (lc) *out = (c << (8 - lc)) & 0xff;
  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// Pull one byte from `in` into the bit accumulator `c`, bumping the pending
// bit count `lc` by 8. Kept as a macro so it is guaranteed to be inlined in
// the decoder's innermost loop.
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
// Emit one decoded symbol `po` into `out`, expanding the run-length escape:
// when po == rlc, the next 8 bits of the stream give a repeat count and the
// previously emitted symbol (out[-1]) is repeated that many times.
// `ob`/`oe` bound the output buffer, `in_end` bounds the input. Returns
// false on any out-of-bounds condition.
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      // NOTE(review): `(in + 1) >= in_end` also rejects reading the very
      // last input byte (in == in_end - 1) — conservative on purpose,
      // presumably to stay clear of the overrun in issue 78; confirm
      // before tightening to `in >= in_end`.
      if ((in + 1) >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }
    lc -= 8;
    // Run length: next 8 bits (truncation to unsigned char is intended).
    unsigned char cs = (c >> lc);
    if (out + cs > oe) return false;
    // Bounds check for safety
    // Issue 100.
    if ((out - 1) < ob) return false;
    unsigned short s = out[-1];
    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
// Short codes are looked up directly in hdecod; long codes fall back to a
// linear search over the slot's candidate symbols. Returns false on corrupt
// input or if the output does not come out to exactly `no` values.
//
static bool hufDecode(const long long *hcode, // i : encoding table
                      const HufDec *hdecod, // i : decoding table
                      const char *in, // i : compressed input buffer
                      int ni, // i : input size (in bits)
                      int rlc, // i : run-length code
                      int no, // i : expected output size (in bytes)
                      unsigned short *out) // o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out; // begin
  unsigned short *oe = out + no; // end
  const char *ie = in + (ni + 7) / 8; // input byte size
  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);
    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      // Index by the top HUF_DECBITS buffered bits.
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        // std::cout << "lit = " << pl.lit << std::endl;
        // std::cout << "rlc = " << rlc << std::endl;
        // std::cout << "c = " << c << std::endl;
        // std::cout << "lc = " << lc << std::endl;
        // std::cout << "in = " << in << std::endl;
        // std::cout << "out = " << out << std::endl;
        // std::cout << "oe = " << oe << std::endl;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code
        //
        // Search long code
        //
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);
          while (lc < l && in < ie) // get more bits
            getChar(c, lc, in);
          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }
        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }
  //
  // Get remaining (short) codes
  //
  // Drop the zero padding bits the encoder appended to fill the last byte.
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;
  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }
  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();
  return true;
}
// Build a histogram of the 16-bit symbols: freq[s] = occurrences of s in
// data[0..n). The table is cleared first.
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  for (int k = 0; k < HUF_ENCSIZE; ++k) {
    freq[k] = 0;
  }
  for (int k = 0; k < n; ++k) {
    freq[data[k]] += 1;
  }
}
// Store a 32-bit value into buf in little-endian byte order.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *out = (unsigned char *)buf;
  for (int k = 0; k < 4; ++k) {
    out[k] = (unsigned char)(i & 0xffu);
    i >>= 8;
  }
}
// Decode a little-endian 32-bit unsigned integer from buf.
// Fix: arithmetic is done in `unsigned int` — the original promoted the
// bytes to (signed) int, so `b[3] << 24` shifted into the sign bit, which
// is undefined behavior before C++20 whenever b[3] >= 0x80.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
// Huffman-compress nRaw 16-bit symbols into `compressed`.
// Output layout: 20-byte header (im, iM, tableLength, nBits, reserved),
// then the packed code-length table, then the bitstream.
// Returns the total number of bytes written (0 for empty input).
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;
  std::vector<long long> freq(HUF_ENCSIZE);
  countFrequencies(freq, raw, nRaw);
  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;
  // iM is the pseudo-symbol appended by hufBuildEncTable; it serves as the
  // run-length escape code during encoding.
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0); // room for future extensions
  return dataStart + data_length - compressed;
}
// Huffman-decompress `nCompressed` bytes into raw->size() 16-bit symbols.
// Expects the layout written by hufCompress (20-byte header, packed table,
// bitstream). Returns false on malformed input.
// Fix: the return values of hufUnpackEncTable(), hufBuildDecTable() and
// hufDecode() were silently ignored, so corrupt input was reported as
// success while leaving `raw` partially or entirely garbage. Failures are
// now propagated (and the decoding table is freed on every path).
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    if (raw->size() != 0) return false;
    return false;
  }
  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);
  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
  if (nBits < 0) return false;  // header value would wrap to negative
  const char *ptr = compressed + 20;
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);
    hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    // The bitstream cannot contain more bits than the remaining input.
    if (nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;
    }
    if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
      hufFreeDecTable(&hdec.at(0));
      return false;
    }
    bool ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                        static_cast<int>(raw->size()), raw->data());
    hufFreeDecTable(&hdec.at(0));
    return ok;
  }
}
//
// Functions to compress the range of values in the pixel data
//
// Range compression tables: USHORT_RANGE possible 16-bit values, one bit
// each in the presence bitmap (hence BITMAP_SIZE bytes).
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
// Build a presence bitmap: bit v of the map is set iff value v occurs in
// `data` (value 0 is never stored — it is assumed always present). Also
// reports the indices of the first and last non-zero bytes of the bitmap.
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int k = 0; k < BITMAP_SIZE; ++k) bitmap[k] = 0;
  for (int k = 0; k < nData; ++k) {
    const unsigned short v = data[k];
    bitmap[v >> 3] |= static_cast<unsigned char>(1 << (v & 7));
  }
  // zero is not explicitly stored in the bitmap; we assume that the
  // data always contain zeroes
  bitmap[0] &= ~1;
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int k = 0; k < BITMAP_SIZE; ++k) {
    if (!bitmap[k]) continue;
    if (k < minNonZero) minNonZero = static_cast<unsigned short>(k);
    if (k > maxNonZero) maxNonZero = static_cast<unsigned short>(k);
  }
}
// Dense renumbering: every value present in the bitmap (plus 0, which is
// implicitly present) maps to consecutive indices 0, 1, 2, ...; absent
// values map to 0. Returns the largest index assigned, i.e. the number of
// present values minus one.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int next = 0;
  for (int v = 0; v < USHORT_RANGE; ++v) {
    const bool present = (v == 0) || (((bitmap[v >> 3] >> (v & 7)) & 1) != 0);
    if (present)
      lut[v] = static_cast<unsigned short>(next++);
    else
      lut[v] = 0;
  }
  return next - 1; // maximum value stored in lut[]
}
// Inverse of forwardLutFromBitmap: lut[k] = k-th value present in the
// bitmap (value 0 always counts as present); unused trailing slots are
// zeroed. Returns the largest k with a meaningful entry.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int count = 0;
  for (int v = 0; v < USHORT_RANGE; ++v) {
    const bool present = (v == 0) || (((bitmap[v >> 3] >> (v & 7)) & 1) != 0);
    if (present) lut[count++] = static_cast<unsigned short>(v);
  }
  const int maxIndex = count - 1;
  while (count < USHORT_RANGE) {
    lut[count++] = 0;
  }
  return static_cast<unsigned short>(maxIndex);
}
// Remap every sample in place through the lookup table.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int k = 0; k < nData; ++k) {
    data[k] = lut[data[k]];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
// PIZ-compress `inSize` bytes of interleaved scanline data into outPtr.
// Pipeline: de-interleave per channel -> range-compress values through a
// LUT built from a presence bitmap -> 2D wavelet transform per channel ->
// Huffman-encode. Output layout: minNonZero, maxNonZero (2 bytes each),
// the bitmap slice [minNonZero, maxNonZero], a 4-byte Huffman length, then
// the Huffman data. Falls back to a raw copy if compression grows the data.
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Carve the temp buffer into per-channel regions.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];
    cd.start = tmpBufferEnd;
    cd.end = cd.start;
    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;
    size_t pixelSize = sizeof(int); // UINT and FLOAT
    if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    cd.size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }
  // De-interleave: input is scanline-major with channels interleaved per
  // line; copy each line's channel run into that channel's region.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      // continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);
  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  if (minNonZero <= maxNonZero) {
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }
  //
  // Apply wavelet encoding
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);
  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));
  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
// Decompress PIZ-compressed pixel data (`inPtr`, `inLen` bytes) into
// `outPtr` (`tmpBufSize` bytes of channel-interleaved scanline data) for
// `num_channels` channels of `data_width` x `num_lines` pixels.
// Returns false on corrupt or truncated input. When `inLen == tmpBufSize`
// the payload was stored uncompressed and is copied through verbatim
// (Issue 40).
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSize) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  // The range-compression header is 2+2 bytes: min/max indices of the
  // non-zero bitmap range written by the compressor.
  if (inLen < 4) {
    return false;
  }
  memset(bitmap.data(), 0, BITMAP_SIZE);
  const unsigned char *ptr = inPtr;
  // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  // Use cpy2 instead of dereferencing to avoid unaligned memory access.
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;
  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }
  if (minNonZero <= maxNonZero) {
    const size_t bitmapLen = static_cast<size_t>(maxNonZero - minNonZero) + 1;
    // Bounds-check the bitmap slice against the input buffer; corrupt or
    // truncated input could otherwise trigger an out-of-bounds read.
    if (static_cast<size_t>(ptr - inPtr) + bitmapLen > inLen) {
      return false;
    }
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, bitmapLen);
    ptr += bitmapLen;
  }
  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
  //
  // Huffman decoding
  //
  // 4-byte length header, then the Huffman-coded payload.
  if (static_cast<size_t>(ptr - inPtr) + sizeof(int) > inLen) {
    return false;
  }
  int length;
  // length = *(reinterpret_cast<const int *>(ptr));
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);
  if (length < 0) {
    // Corrupt length field.
    return false;
  }
  if (size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }
  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer);
  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];
    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    // size is measured in 16-bit units (1 for HALF, 2 for UINT/FLOAT).
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Expand the pixel data to their original range
  //
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
  // Interleave: for each scanline, emit each channel's line in channel order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
// Parameters controlling ZFP (de)compression. Which field is meaningful
// depends on `type`: rate for RATE mode, precision for PRECISION mode,
// tolerance for ACCURACY mode. Defaults to rate-based compression at
// 2.0 bits per value.
struct ZFPCompressionParam {
  double rate;
  int precision;
  double tolerance;
  int type;  // TINYEXR_ZFP_COMPRESSIONTYPE_*
  ZFPCompressionParam()
      : rate(2.0),
        precision(0),
        tolerance(0.0),
        type(TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {}
};
// Extract ZFP compression parameters from the EXR header attributes.
// `zfpCompressionType` selects which of rate/precision/tolerance applies;
// the corresponding value attribute is then looked up and stored in `param`.
// Returns false when the type attribute or its matching value attribute is
// missing.
bool FindZFPCompressionParam(ZFPCompressionParam *param,
                             const EXRAttribute *attributes,
                             int num_attributes) {
  bool foundType = false;
  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
        (attributes[i].size == 1)) {
      param->type = static_cast<int>(attributes[i].value[0]);
      foundType = true;
    }
  }
  if (!foundType) {
    return false;
  }
  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Bug fix: the parsed precision must populate `precision`,
        // not `rate`.
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }
  return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size = dst_width * dst_num_lines * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = zfp_stream_compressed_size(zfp);
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// TODO(syoyo): Refactor function arguments.
// TODO(syoyo): Refactor function arguments.
// Decode one block of scanline/tile pixel data into per-channel output
// images.
//
// out_images            : per-channel destination buffers (type per
//                         requested_pixel_types[c]).
// requested_pixel_types : desired output pixel type per channel
//                         (TINYEXR_PIXELTYPE_*); HALF input may be widened
//                         to FLOAT.
// data_ptr/data_len     : compressed (or raw) pixel payload.
// compression_type      : TINYEXR_COMPRESSIONTYPE_* of the payload.
// line_order            : 0 = increasing Y, else decreasing Y (scanlines
//                         written bottom-up into the output).
// width/num_lines       : pixel extent of this block; x_stride is the
//                         output row pitch (>= width for edge tiles).
// y/line_no             : starting scanline in the output image.
// pixel_data_size       : bytes per pixel summed over all channels.
// channel_offset_list   : byte offset of each channel within one pixel.
//
// Returns false on decompression failure, insufficient payload, or an
// unsupported pixel type.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(
        static_cast<size_t>(width * num_lines) * pixel_data_size));
    size_t tmpBufLen = outBuf.size();
    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);
    if (!ret) {
      return false;
    }
    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            FP16 hf;
            // hf.u = line_ptr[u];
            // use `cpy` to avoid unaligned memory access when compiler's
            // optimization is on.
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // Bug fix: index `outBuf` with `width` (the density DecompressPiz
          // wrote at), not `x_stride` — consistent with the HALF/UINT
          // branches above; `x_stride > width` on right-edge tiles would
          // mis-index the buffer.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    assert(0 && "PIZ is not enabled in this build");
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }
    // For ZIP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }
    if (!tinyexr::DecompressRle(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }
    // For RLE_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
                                 num_attributes)) {
      assert(0);
      return false;
    }
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = outBuf.size();
    assert(dstLen > 0);
    tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
                           num_lines, num_channels, data_ptr,
                           static_cast<unsigned long>(data_len),
                           zfp_compression_param);
    // For ZFP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // Uncompressed: read channel lines straight out of `data_ptr`.
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));
          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }
            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);
              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }
            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }
            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;
              // address may not be aliged. use byte-wise copy for safety.#76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);
              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
              tinyexr::FP32 f32 = half_to_float(hf);
              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));
          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }
          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }
          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));
          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }
          for (int u = 0; u < width; u++) {
            if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
                (data_ptr + data_len)) {
              // Corrupted data?
              return false;
            }
            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            outLine[u] = val;
          }
        }
      }
    }
  }
  return true;
}
// Decode one tile's pixel data. Computes the tile's actual pixel extent
// (edge tiles may be clipped by the image border), writes it to
// `*width`/`*height`, and forwards to DecodePixelData with the nominal
// tile size as the row stride.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  const int tile_x0 = tile_offset_x * tile_size_x;
  const int tile_y0 = tile_offset_y * tile_size_y;
  assert(tile_x0 < data_width);
  assert(tile_y0 < data_height);
  // Clip the nominal tile extent against the image border.
  (*width) = (tile_x0 + tile_size_x >= data_width) ? (data_width - tile_x0)
                                                   : tile_size_x;
  (*height) = (tile_y0 + tile_size_y >= data_height) ? (data_height - tile_y0)
                                                     : tile_size_y;
  // Image size = tile size.
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// ???
return false;
}
}
return true;
}
// Allocate one image buffer per channel (data_width * data_height texels
// each). HALF channels are sized according to the requested output type
// (HALF or FLOAT); FLOAT and UINT channels use their native size.
// The returned pointer array and each entry are malloc()ed; entries for
// unsupported pixel/requested types are set to NULL (previously they were
// left uninitialized on the release-build assert(0) paths, which is UB
// when later read or freed).
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        images[c] = NULL;  // never leave the slot uninitialized
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      images[c] = NULL;  // never leave the slot uninitialized
      assert(0);
    }
  }
  return images;
}
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
}
} else {
// Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
//
// Copies all scalar header fields, duplicates the channel table into three
// freshly malloc()ed arrays (`channels`, `pixel_types`,
// `requested_pixel_types`) and shallow-copies custom attributes (the
// attribute `value` pointers move to `exr_header`, which now owns them).
//
// Fix: the custom-attribute copy loop previously iterated
// `info.attributes.size()` times even though the destination array is sized
// by `num_custom_attributes`, which is clamped to
// TINYEXR_MAX_CUSTOM_ATTRIBUTES — a heap overflow if the source ever holds
// more entries than the cap. The loop now uses the clamped count.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
    sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
  strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
  strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
  // manually add '\0' for safety (strncpy does not terminate on truncation).
  exr_header->channels[c].name[255] = '\0';
  exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
  exr_header->channels[c].p_linear = info.channels[c].p_linear;
  exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
  exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
    malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
  exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`; callers may override these
// before decoding to request on-the-fly pixel type conversion.
exr_header->requested_pixel_types = static_cast<int *>(
    malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
  exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
  // TODO(syoyo): Report warning when # of attributes exceeds
  // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
  if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
    exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
  }
  exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
      sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
  // Iterate over the clamped count, not info.attributes.size(), so we can
  // never write past the array allocated just above.
  for (size_t i = 0; i < size_t(exr_header->num_custom_attributes); i++) {
    memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
           256);
    memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
           256);
    exr_header->custom_attributes[i].size = info.attributes[i].size;
    // Just copy pointer; ownership transfers to exr_header.
    exr_header->custom_attributes[i].value = info.attributes[i].value;
  }
} else {
  exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
// Decode every pixel chunk (scanline block or tile) of one EXR part.
//
// `offsets` holds the absolute byte offset of each chunk relative to `head`,
// which points at the start of the whole in-memory EXR file of `size` bytes.
// Decoded planar channel data is written into `exr_image` — `images` for
// scanline files, `tiles` for tiled files — converted to
// `exr_header->requested_pixel_types`.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; on failure a message is
// appended to `*err` when `err` is non-NULL.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
// Scanlines per compressed block; fixed by the compression scheme
// (NONE/RLE/ZIPS = 1, ZIP/ZFP = 16, PIZ = 32).
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
// data window is inclusive on both ends, hence the +1.
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
std::stringstream ss;
ss << "Invalid data width or data height: " << data_width << ", "
<< data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
// Per-channel byte offset of each channel within one decoded pixel, plus
// the total bytes per pixel across all channels.
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
int err_code = TINYEXR_SUCCESS;
// When C++11 threads are available, tiles are decoded by a worker pool
// pulling indices from an atomic counter; otherwise a plain serial loop.
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<size_t> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
size_t tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
// Tile chunk layout in the file:
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
// TODO(LTE): atomic
if (err) {
(*err) += "Insufficient data size.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
break;
}
size_t data_size =
size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
// tile_coordinates = { tile_x, tile_y, level_x, level_y }
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD } — only mip/rip level 0 is supported.
if (tile_coordinates[2] != 0) {
err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
break;
}
if (tile_coordinates[3] != 0) {
err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
break;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
// TODO(LTE): atomic
if (err) {
(*err) += "Insufficient data length.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
break;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// TODO(LTE): atomic
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto &t : workers) {
t.join();
}
#else
}
#endif
if (err_code != TINYEXR_SUCCESS) {
return err_code;
}
exr_image->num_tiles = static_cast<int>(num_tiles);
} else { // scanline format
// Don't allow too large image(256GB * pixel_data_size or more). Workaround
// for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
// Scanline blocks are distributed to a C++11 worker pool when available,
// otherwise to an (optional) OpenMP parallel-for or a serial loop.
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// Scanline chunk layout in the file:
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Too large value. Assume this is invalid
// 2**20 = 1048576 = heuristic value.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
} else {
// line_no may be negative.
// Clamp the block's last line to the bottom of the data window.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window[1]);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window[1];
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
if (invalid_data) {
if (err) {
// NOTE(review): `ss` below is unused — candidate for removal.
std::stringstream ss;
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`, since the decoded
// data now has the requested representation. (`pixel_types` points at
// mutable storage even though `exr_header` itself is const.)
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
// Rebuild a (missing or corrupt) scan line offset table by walking the
// chunk stream itself, starting at `marker` within the `size`-byte EXR
// image beginning at `head`.
//
// Each chunk is laid out as: 4 bytes scanline `y`, 4 bytes `data_len`,
// then `data_len` bytes of payload; we record each chunk's offset and skip
// ahead by its length. Returns false when the walk would run outside the
// file, true when all `n` offsets were reconstructed.
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
// Swap to host byte order *before* range-checking: previously the check
// ran on the raw little-endian bytes, which validates a garbage value on
// big-endian hosts.
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len >= size) {
return false;
}
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
// Decode a complete EXR part: validate the data window, read (or
// reconstruct) the chunk offset table starting at `marker`, then decode all
// chunks via DecodeChunk() into `exr_image`.
//
// `head` points at the start of the whole `size`-byte EXR memory image.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; on failure an error
// string is stored through `err`.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// Scanlines per compressed block (see DecodeChunk for the same mapping).
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
// Data window is inclusive; guard the +1 against int overflow (Issue 63).
int data_width = exr_header->data_window[2] - exr_header->data_window[0];
if (data_width >= std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_width++;
int data_height = exr_header->data_window[3] - exr_header->data_window[1];
if (data_height >= std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_height++;
if ((data_width < 0) || (data_height < 0)) {
tinyexr::SetErrorMessage("data width or data height is negative.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if (data_width > threshold) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > threshold) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Validate tile sizes before using them as divisors below: a crafted
// `tiles` attribute with size 0 (or a missing one, leaving -1) would
// otherwise cause a division by zero / bogus tile counts.
if (exr_header->tiled) {
if ((exr_header->tile_size_x <= 0) || (exr_header->tile_size_y <= 0)) {
tinyexr::SetErrorMessage("Invalid tile size.", err);
return TINYEXR_ERROR_INVALID_HEADER;
}
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
// Round up: partial tiles at the right/bottom edge still need a chunk.
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
// Scanline file: one chunk per `num_scanline_blocks` lines, rounded up.
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
// Release any partially-decoded image data on failure.
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
// Enumerate the unique layer prefixes present in the header's channels.
//
// A layer prefix is everything before the final '.' in a channel name
// (e.g. "diffuse" for "diffuse.R"). Channels whose name has no interior
// '.' contribute nothing. `layer_names` is cleared first and filled in
// order of first appearance, without duplicates.
static void GetLayers(const EXRHeader& exr_header, std::vector<std::string>& layer_names) {
layer_names.clear();
for (int idx = 0; idx < exr_header.num_channels; idx++) {
std::string name(exr_header.channels[idx].name);
const size_t dot = name.find_last_of('.');
// The '.' must be neither the first nor the last character, so both the
// layer part and the channel part are non-empty.
if (dot == std::string::npos || dot == 0 || dot + 1 >= name.size()) {
continue;
}
name.erase(dot);
const bool already_known =
std::find(layer_names.begin(), layer_names.end(), name) !=
layer_names.end();
if (!already_known) {
layer_names.push_back(name);
}
}
}
// Pairs a channel's position in the EXR header with its display name
// (the layer prefix having been stripped by ChannelsInLayer()).
struct LayerChannel {
  explicit LayerChannel(size_t channel_index, std::string channel_name)
      : index(channel_index), name(channel_name) {}
  size_t index;      // position in EXRHeader::channels
  std::string name;  // channel name without the layer prefix
};
// Collect the channels belonging to `layer_name` into `channels`.
//
// With an empty `layer_name`, every channel is returned with any layer
// prefix stripped (text after the last '.'). With a non-empty name, only
// channels containing "<layer_name>." are kept; when that prefix is at the
// start of the channel name it is removed, otherwise the full name is kept.
//
// Changes: `layer_name` is now taken by const reference (it was copied per
// call), and the "<layer>." search string is built once outside the loop
// instead of being re-concatenated for every channel.
static void ChannelsInLayer(const EXRHeader& exr_header, const std::string &layer_name, std::vector<LayerChannel>& channels) {
channels.clear();
// Loop-invariant: the prefix we search for in each channel name.
const std::string prefix = layer_name + '.';
for (int c = 0; c < exr_header.num_channels; c++) {
std::string ch_name(exr_header.channels[c].name);
if (layer_name.empty()) {
const size_t pos = ch_name.find_last_of('.');
if (pos != std::string::npos && pos < ch_name.size()) {
ch_name = ch_name.substr(pos + 1);
}
} else {
const size_t pos = ch_name.find(prefix);
if (pos == std::string::npos)
continue;
if (pos == 0) {
ch_name = ch_name.substr(layer_name.size() + 1);
}
}
LayerChannel ch(size_t(c), ch_name);
channels.push_back(ch);
}
}
} // namespace tinyexr
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
// Convenience loader: decode `filename` into an RGBA float buffer.
// Thin wrapper over LoadEXRWithLayer() with no layer selected, so the
// default (unprefixed) channels are used.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
const char *layername = NULL; // no layer filtering
return LoadEXRWithLayer(out_rgba, width, height, filename, layername, err);
}
// Load the EXR file `filename` and convert the channels of layer
// `layername` (NULL = the unprefixed/default layer) into a single
// interleaved RGBA float buffer.
//
// On success `*out_rgba` receives a malloc()ed width*height*4 float array
// (caller frees), `*width`/`*height` the image dimensions, and
// TINYEXR_SUCCESS is returned. On failure a TINYEXR_ERROR_* code is
// returned and a message is stored through `err`. HALF channels are
// decoded as FLOAT. A single-channel layer is replicated into all four
// RGBA components; otherwise R, G and B must exist and alpha defaults to
// 1.0 when no A channel is present.
int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
// Phase 1: version + header parsing.
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
// Phase 2: decode the full image.
// TODO: Probably limit loading to layers (channels) selected by layer index
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Phase 3: locate the RGBA channel indices within the requested layer.
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
std::vector<std::string> layer_names;
tinyexr::GetLayers(exr_header, layer_names);
std::vector<tinyexr::LayerChannel> channels;
tinyexr::ChannelsInLayer(exr_header, layername == NULL ? "" : std::string(layername), channels);
if (channels.size() < 1) {
tinyexr::SetErrorMessage("Layer Not Found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_LAYER_NOT_FOUND;
}
// Only the first four channels are considered for RGBA mapping.
size_t ch_count = channels.size() < 4 ? channels.size() : 4;
for (size_t c = 0; c < ch_count; c++) {
const tinyexr::LayerChannel &ch = channels[c];
if (ch.name == "R") {
idxR = int(ch.index);
}
else if (ch.name == "G") {
idxG = int(ch.index);
}
else if (ch.name == "B") {
idxB = int(ch.index);
}
else if (ch.name == "A") {
idxA = int(ch.index);
}
}
// Phase 4: interleave planar channel data into the RGBA output buffer.
if (channels.size() == 1) {
int chIdx = int(channels.front().index);
// Grayscale channel only: replicate it into R, G, B and A.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
// Copy each tile into its destination rectangle, clipping partial
// tiles at the right/bottom image border.
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A): R, G and B are mandatory, A is optional.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
// No alpha channel: treat the image as fully opaque.
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
// No alpha channel: treat the image as fully opaque.
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
// Parses the EXR header that follows the version block in `memory` and
// fills `exr_header`.
//
// exr_header : destination header (must be non-NULL).
// version    : previously parsed EXR version info; its `tiled` flag is
//              copied into the header on success.
// memory/size: whole EXR byte stream (magic + version + header + ...).
// err        : optional; receives a malloc()ed message on failure.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);
    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Header attributes start right after the 8-byte magic+version block.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;
  tinyexr::HeaderInfo info;
  info.clear();
  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
    // Fix: do not convert a partially parsed HeaderInfo into the output
    // header; bail out immediately on parse failure.
    return ret;
  }
  ConvertHeader(exr_header, info);
  // Transfer the `tiled` flag from the version block.
  exr_header->tiled = version->tiled;
  return ret;
}
// Loads an EXR image from an in-memory byte stream and converts it to a
// packed RGBA float buffer.
//
// out_rgba   : receives a malloc()ed width*height*4 float buffer; the
//              caller owns it and must free() it.
// width/height: receive the image dimensions.
// memory/size: complete EXR file contents.
// err        : optional; receives a malloc()ed message on failure.
//
// A single-channel image is replicated into all four RGBA slots; for
// multi-channel images the "R"/"G"/"B" channels are required and "A"
// defaults to 1.0 when absent.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    std::stringstream ss;
    ss << "Failed to parse EXR version. code(" << ret << ")";
    tinyexr::SetErrorMessage(ss.str(), err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // Fix: release any header allocations made before the failure.
    FreeEXRHeader(&exr_header);
    return ret;
  }

  // Read HALF channels as FLOAT so the output buffer is uniform.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // Fix: free the header (and any partial image data) on failure.
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return ret;
  }

  // Locate the RGBA channels by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only: broadcast it into R, G, B and A.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            // Skip tile texels that fall outside the image (edge tiles).
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int idx = ii + jj * exr_image.width;
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            const float val = reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 0] = val;
            (*out_rgba)[4 * idx + 1] = val;
            (*out_rgba)[4 * idx + 2] = val;
            (*out_rgba)[4 * idx + 3] = val;
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // TODO(syoyo): Support non RGBA image.
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      // Fix: free header/image instead of leaking them (was `@todo`).
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            // Skip tile texels that fall outside the image (edge tiles).
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int idx = ii + jj * exr_image.width;
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and decodes it via
// LoadEXRImageFromMemory().
//
// exr_image : destination image (must be non-NULL).
// exr_header: previously parsed header describing the file.
// err       : optional; receives a malloc()ed message on failure.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  size_t filesize;
  // Compute size by seeking to the end.
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (filesize < 16) {
    // Fix: close the handle before the early return (was leaked here).
    fclose(fp);
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }
  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decodes an EXR image from an in-memory byte stream that has already had
// its header parsed into `exr_header`. Returns TINYEXR_SUCCESS or a
// TINYEXR_ERROR_* code; `err` (optional) receives a message on failure.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  const bool bad_args = (exr_image == NULL) || (memory == NULL) ||
                        (size < tinyexr::kEXRVersionSize);
  if (bad_args) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  const unsigned char *head = memory;
  // Chunk data begins after magic number (4) + version (4) + header bytes.
  const unsigned char *chunk_start = memory + exr_header->header_len + 8;
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, chunk_start,
                                 size, err);
}
// Serializes a scanline EXR image into a malloc()ed memory buffer.
//
// exr_image : source image (pixel data laid out per channel).
// exr_header: header describing channels, pixel types and compression.
// memory_out: receives the malloc()ed file image; caller must free() it.
// err       : optional; receives a malloc()ed message on failure.
//
// Returns the number of bytes written, or 0 on error.
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
                            const EXRHeader *exr_header,
                            unsigned char **memory_out, const char **err) {
  // Fix: check `exr_header` for NULL before dereferencing its
  // compression type.
  if (exr_image == NULL || memory_out == NULL || exr_header == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
    return 0;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return 0;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return 0;
  }
#endif

#if TINYEXR_USE_ZFP
  for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
    if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
      tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                               err);
      return 0;
    }
  }
#endif

  std::vector<unsigned char> memory;

  // Magic number.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    memory.insert(memory.end(), header, header + 4);
  }

  // Version, scanline.
  {
    char marker[] = {2, 0, 0, 0};
    /* @todo
    if (exr_header->tiled) {
      marker[1] |= 0x2;
    }
    if (exr_header->long_name) {
      marker[1] |= 0x4;
    }
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    if (exr_header->multipart) {
      marker[1] |= 0x10;
    }
    */
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Scanlines per compressed block depend on the compression scheme.
  int num_scanlines = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanlines = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanlines = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanlines = 16;
  }

  // Write attributes.
  std::vector<tinyexr::ChannelInfo> channels;
  {
    std::vector<unsigned char> data;
    for (int c = 0; c < exr_header->num_channels; c++) {
      tinyexr::ChannelInfo info;
      info.p_linear = 0;
      info.pixel_type = exr_header->requested_pixel_types[c];
      info.x_sampling = 1;
      info.y_sampling = 1;
      info.name = std::string(exr_header->channels[c].name);
      channels.push_back(info);
    }
    tinyexr::WriteChannelInfo(data, channels);
    tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                    static_cast<int>(data.size()));
  }

  {
    int comp = exr_header->compression_type;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
    tinyexr::WriteAttributeToMemory(
        &memory, "compression", "compression",
        reinterpret_cast<const unsigned char *>(&comp), 1);
  }

  {
    // dataWindow == displayWindow: the whole image.
    int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
    tinyexr::WriteAttributeToMemory(
        &memory, "dataWindow", "box2i",
        reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
    tinyexr::WriteAttributeToMemory(
        &memory, "displayWindow", "box2i",
        reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
  }

  {
    unsigned char line_order = 0;  // @fixme { read line_order from EXRHeader }
    tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                                    &line_order, 1);
  }

  {
    float aspectRatio = 1.0f;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
    tinyexr::WriteAttributeToMemory(
        &memory, "pixelAspectRatio", "float",
        reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
  }

  {
    float center[2] = {0.0f, 0.0f};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1]));
    tinyexr::WriteAttributeToMemory(
        &memory, "screenWindowCenter", "v2f",
        reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
  }

  {
    float w = static_cast<float>(exr_image->width);
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
    tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                                    reinterpret_cast<const unsigned char *>(&w),
                                    sizeof(float));
  }

  // Custom attributes
  if (exr_header->num_custom_attributes > 0) {
    for (int i = 0; i < exr_header->num_custom_attributes; i++) {
      tinyexr::WriteAttributeToMemory(
          &memory, exr_header->custom_attributes[i].name,
          exr_header->custom_attributes[i].type,
          reinterpret_cast<const unsigned char *>(
              exr_header->custom_attributes[i].value),
          exr_header->custom_attributes[i].size);
    }
  }

  {  // end of header
    unsigned char e = 0;
    memory.push_back(e);
  }

  int num_blocks = exr_image->height / num_scanlines;
  if (num_blocks * num_scanlines < exr_image->height) {
    num_blocks++;
  }
  std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));

  size_t headerSize = memory.size();
  tinyexr::tinyexr_uint64 offset =
      headerSize +
      static_cast<size_t>(num_blocks) *
          sizeof(
              tinyexr::tinyexr_int64);  // sizeof(header) + sizeof(offsetTable)

  std::vector<std::vector<unsigned char> > data_list(
      static_cast<size_t>(num_blocks));
  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));

  // Byte size of one output pixel across all channels, and the per-channel
  // offsets inside a scanline.
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    channel_offset_list[c] = channel_offset;
    if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
      pixel_data_size += sizeof(unsigned short);
      channel_offset += sizeof(unsigned short);
    } else if (exr_header->requested_pixel_types[c] ==
               TINYEXR_PIXELTYPE_FLOAT) {
      pixel_data_size += sizeof(float);
      channel_offset += sizeof(float);
    } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
      pixel_data_size += sizeof(unsigned int);
      channel_offset += sizeof(unsigned int);
    } else {
      assert(0);
    }
  }

#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;

  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes);

    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
  }
#endif

  // TODO(LTE): C++11 thread
  // Use signed int since some OpenMP compiler doesn't allow unsigned type for
  // `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_blocks; i++) {
    size_t ii = static_cast<size_t>(i);
    int start_y = num_scanlines * i;
    int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
    int h = endY - start_y;

    // Scratch buffer for this block, in the on-disk scanline layout
    // (per scanline: channel 0 row, channel 1 row, ...).
    std::vector<unsigned char> buf(
        static_cast<size_t>(exr_image->width * h * pixel_data_size));

    for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
      if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            float *line_ptr = reinterpret_cast<float *>(&buf.at(
                static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                channel_offset_list[c] *
                    static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              tinyexr::FP16 h16;
              h16.u = reinterpret_cast<unsigned short **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::FP32 f32 = half_to_float(h16);

              tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));

              // Use cpy4 to avoid unaligned write.
              tinyexr::cpy4(line_ptr + x, &(f32.f));
            }
          }
        } else if (exr_header->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_HALF) {
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
                &buf.at(static_cast<size_t>(pixel_data_size * y *
                                            exr_image->width) +
                        channel_offset_list[c] *
                            static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              unsigned short val = reinterpret_cast<unsigned short **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::swap2(&val);

              tinyexr::cpy2(line_ptr + x, &val);
            }
          }
        } else {
          assert(0);
        }
      } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
                &buf.at(static_cast<size_t>(pixel_data_size * y *
                                            exr_image->width) +
                        channel_offset_list[c] *
                            static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              tinyexr::FP32 f32;
              f32.f = reinterpret_cast<float **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::FP16 h16;
              h16 = float_to_half_full(f32);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));

              tinyexr::cpy2(line_ptr + x, &(h16.u));
            }
          }
        } else if (exr_header->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_FLOAT) {
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            float *line_ptr = reinterpret_cast<float *>(&buf.at(
                static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                channel_offset_list[c] *
                    static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              float val = reinterpret_cast<float **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

              tinyexr::cpy4(line_ptr + x, &val);
            }
          }
        } else {
          assert(0);
        }
      } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
        for (int y = 0; y < h; y++) {
          // Assume increasing Y
          unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * exr_image->width) +
              channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
          for (int x = 0; x < exr_image->width; x++) {
            unsigned int val = reinterpret_cast<unsigned int **>(
                exr_image->images)[c][(y + start_y) * exr_image->width + x];

            tinyexr::swap4(&val);

            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      }
    }

    if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(uncompressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(buf.size());
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), buf.begin(),
                           buf.begin() + data_len);
    } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
               (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
      std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
          static_cast<unsigned long>(buf.size())));
#else
      std::vector<unsigned char> block(
          compressBound(static_cast<uLong>(buf.size())));
#endif
      tinyexr::tinyexr_uint64 outSize = block.size();

      tinyexr::CompressZip(&block.at(0), outSize,
                           reinterpret_cast<const unsigned char *>(&buf.at(0)),
                           static_cast<unsigned long>(buf.size()));

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);
    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
      // (buf.size() * 3) / 2 would be enough.
      std::vector<unsigned char> block((buf.size() * 3) / 2);

      tinyexr::tinyexr_uint64 outSize = block.size();

      tinyexr::CompressRle(&block.at(0), outSize,
                           reinterpret_cast<const unsigned char *>(&buf.at(0)),
                           static_cast<unsigned long>(buf.size()));

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);
    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
      unsigned int bufLen =
          8192 + static_cast<unsigned int>(
                     2 * static_cast<unsigned int>(
                             buf.size()));  // @fixme { compute good bound. }
      std::vector<unsigned char> block(bufLen);
      unsigned int outSize = static_cast<unsigned int>(block.size());

      CompressPiz(&block.at(0), &outSize,
                  reinterpret_cast<const unsigned char *>(&buf.at(0)),
                  buf.size(), channels, exr_image->width, h);

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = outSize;
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);
#else
      assert(0);
#endif
    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
      std::vector<unsigned char> block;
      unsigned int outSize;

      tinyexr::CompressZfp(
          &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
          exr_image->width, h, exr_header->num_channels, zfp_compression_param);

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = outSize;
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);
#else
      assert(0);
#endif
    } else {
      assert(0);
    }
  }  // omp parallel

  // Fill the chunk offset table (little-endian on disk).
  for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
    offsets[i] = offset;
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
    offset += data_list[i].size();
  }

  size_t totalSize = static_cast<size_t>(offset);
  {
    memory.insert(
        memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
        reinterpret_cast<unsigned char *>(&offsets.at(0)) +
            sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
  }

  if (memory.size() == 0) {
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }

  (*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
  // Fix: check the allocation before writing into it.
  if ((*memory_out) == NULL) {
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }
  memcpy((*memory_out), &memory.at(0), memory.size());
  unsigned char *memory_ptr = *memory_out + memory.size();

  for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
    memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
    memory_ptr += data_list[i].size();
  }

  return totalSize;  // OK
}
// Serializes `exr_image` with SaveEXRImageToMemory() and writes the
// result to `filename`.
//
// err : optional; receives a malloc()ed message on failure.
//
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  // Fix: check `exr_header` for NULL before dereferencing its
  // compression type.
  if (exr_image == NULL || filename == NULL || exr_header == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "wb");
#else
  FILE *fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    // Fix: close the handle before the early return (was leaked here).
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }

  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);

  fclose(fp);

  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if ((0 != errcode) || (!fp)) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// must be [2, 0, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
}
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
return false;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return false;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
// Resets an EXRImage to a safe, empty state (zero dimensions, no channels,
// NULL image/tile pointers). Passing NULL is a no-op.
void InitEXRImage(EXRImage *exr_image) {
  if (!exr_image) return;

  exr_image->num_channels = 0;
  exr_image->width = 0;
  exr_image->height = 0;

  exr_image->images = NULL;
  exr_image->num_tiles = 0;
  exr_image->tiles = NULL;
}
// Releases an error-message string allocated by this library
// (e.g. via SetErrorMessage). NULL is accepted and ignored.
void FreeEXRErrorMessage(const char *msg) {
  if (msg == NULL) {
    return;
  }
  // The message was heap-allocated as mutable memory; cast away the
  // const added for the public API before handing it back to free().
  free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
// Zero-initializes an EXRHeader so all pointers start as NULL and all
// counters as 0. Passing NULL is a no-op.
void InitEXRHeader(EXRHeader *exr_header) {
  if (!exr_header) return;
  memset(exr_header, 0, sizeof(EXRHeader));
}
// Releases all heap allocations owned by an EXRHeader (channel table,
// pixel-type arrays, custom attributes). The header struct itself is not
// freed. Returns TINYEXR_ERROR_INVALID_ARGUMENT for a NULL header,
// TINYEXR_SUCCESS otherwise. Note: pointers are not reset to NULL.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (!exr_header) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // free(NULL) is a well-defined no-op, so per-pointer NULL checks
  // are implicit here.
  free(exr_header->channels);
  free(exr_header->pixel_types);
  free(exr_header->requested_pixel_types);

  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    free(exr_header->custom_attributes[i].value);
  }
  free(exr_header->custom_attributes);

  return TINYEXR_SUCCESS;
}
// Releases all pixel buffers owned by an EXRImage: the per-channel scanline
// images and, for tiled images, every tile's per-channel buffers plus the
// tile array. The EXRImage struct itself is not freed. Returns
// TINYEXR_ERROR_INVALID_ARGUMENT for NULL input, TINYEXR_SUCCESS otherwise.
int FreeEXRImage(EXRImage *exr_image) {
  if (!exr_image) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Scanline storage: one buffer per channel. free(NULL) is a no-op,
  // so missing channels need no explicit check.
  if (exr_image->images) {
    for (int c = 0; c < exr_image->num_channels; c++) {
      free(exr_image->images[c]);
    }
    free(exr_image->images);
  }

  // Tiled storage: each tile owns its own per-channel buffers.
  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      if (exr_image->tiles[tid].images) {
        for (int c = 0; c < exr_image->num_channels; c++) {
          free(exr_image->tiles[tid].images[c]);
        }
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
  }

  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and parses its EXR header into
// `exr_header`. `exr_version` must come from a prior ParseEXRVersionFrom*()
// call. On failure a message is stored in *err (free with
// FreeEXRErrorMessage) and a TINYEXR_ERROR_* code is returned.
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute size.
  fseek(fp, 0, SEEK_END);
  const long file_len = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  // BUG FIX: an empty file (or an ftell() failure returning -1) previously
  // produced a zero-sized/huge vector; `&buf[0]` on an empty vector is UB
  // and `buf.at(0)` would throw. Bail out explicitly instead.
  if (file_len <= 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File is empty or unreadable: " +
                                 std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(file_len);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
// Parses all part headers of a multipart EXR file held in `memory`.
// Allocates *exr_headers (array of malloc'd EXRHeader pointers; caller frees
// each with FreeEXRHeader + free, and the array with free) and stores the
// count in *num_headers. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code,
// with a message in *err on failure.
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  std::vector<tinyexr::HeaderInfo> infos;

  // Headers are stored back-to-back; an empty header ('\0') terminates
  // the list.
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();

    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }

    if (empty_header) {
      marker += 1;  // skip '\0'
      break;
    }

    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    infos.push_back(info);

    // Guard against a corrupt header_len walking past the buffer end.
    if (info.header_len > marker_size) {
      tinyexr::SetErrorMessage("Invalid header length", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    // move to next header.
    // BUG FIX: the original decremented `size` here while continuing to pass
    // the never-updated `marker_size` to ParseEXRHeader, so later iterations
    // could read past the end of the buffer.
    marker += info.header_len;
    marker_size -= info.header_len;
  }

  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));

    ConvertHeader(exr_header, infos[i]);

    // transfer `tiled` from version.
    exr_header->tiled = exr_version->tiled;

    (*exr_headers)[i] = exr_header;
  }

  (*num_headers) = static_cast<int>(infos.size());

  return TINYEXR_SUCCESS;
}
// File-based wrapper around ParseEXRMultipartHeaderFromMemory: reads the
// whole file into a buffer and delegates. See that function for ownership
// of the returned headers.
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute size.
  fseek(fp, 0, SEEK_END);
  const long file_len = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  // BUG FIX: an empty file (or ftell() failure) previously reached
  // `&buf[0]` on an empty vector (UB) and `buf.at(0)` (throws).
  if (file_len <= 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File is empty or unreadable: " +
                                 std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(file_len);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
// Parses the 8-byte EXR preamble (4-byte magic number + 4-byte version word)
// from `memory` into `version`. Returns TINYEXR_SUCCESS, or an error code
// for NULL arguments, short input, bad magic, or an unsupported version.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Magic-number check: 0x76 0x2f 0x31 0x01 ("v/1\x01").
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse version header.
  {
    // must be 2
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }

    // BUG FIX: removed a dead `if (version == NULL)` re-check here; version
    // is already validated at the top of the function, and the old branch
    // would have returned SUCCESS with an unfilled struct anyway.
    version->version = 2;

    // Flag bits of the 32-bit version word (little-endian byte 1).
    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {       // 11th bit
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}
// Reads the first kEXRVersionSize bytes of `filename` and parses the EXR
// version preamble via ParseEXRVersionFromMemory. Returns TINYEXR_SUCCESS
// or a TINYEXR_ERROR_* code.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    fclose(fp);  // BUG FIX: the file handle was leaked on this early return.
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
// Decodes all parts of a multipart EXR file held in `memory` into the
// caller-provided `exr_images` array (one EXRImage per part; the array must
// hold `num_parts` entries). `exr_headers` must come from a prior
// ParseEXRMultipartHeaderFrom*() call so that header_len/chunk_count are set.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code with a message in *err.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  // header_len == 0 means the header struct was never filled in by a
  // ParseEXRMultipartHeader* call.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  // Chunk offset tables start right after magic number, version word and
  // all part headers.
  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+     : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table.
  // Each part has `chunk_count` 8-byte little-endian absolute file offsets.
  std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> offset_table(
        static_cast<size_t>(exr_headers[i]->chunk_count));

    for (size_t c = 0; c < offset_table.size(); c++) {
      tinyexr::tinyexr_uint64 offset;
      memcpy(&offset, marker, 8);
      tinyexr::swap8(&offset);

      // Reject offsets pointing outside the buffer.
      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      offset_table[c] = offset + 4;  // +4 to skip 'part number'
      marker += 8;
    }

    chunk_offset_table_list.push_back(offset_table);
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> &offset_table =
        chunk_offset_table_list[i];

    // First check 'part number' is identical to 'i'
    for (size_t c = 0; c < offset_table.size(); c++) {
      const unsigned char *part_number_addr =
          memory + offset_table[c] - 4;  // -4 to move to 'part number' field.
      unsigned int part_no;
      memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
      tinyexr::swap4(&part_no);

      if (part_no != i) {
        tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }

    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
                                   memory, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}
// File-based wrapper around LoadEXRMultipartImageFromMemory: reads the whole
// file into a buffer and delegates. See that function for argument and
// ownership semantics.
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  // BUG FIX: `filename` was not NULL-checked, unlike every sibling
  // *FromFile() function, so a NULL name reached fopen().
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute size.
  fseek(fp, 0, SEEK_END);
  const long file_len = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  // Empty file (or ftell failure) cannot be a valid EXR and would make
  // `&buf[0]` below undefined behavior.
  if (file_len <= 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File is empty or unreadable: " +
                                 std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(file_len);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    // BUG FIX: a short read was previously only asserted and then ignored
    // ((void)ret), silently passing truncated data on. Report it instead,
    // consistent with the other *FromFile() functions.
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// Convenience writer: saves an interleaved float image (1, 3 or 4
// components) to `outfilename` as EXR. Channels are written in (A)BGR order
// as most viewers expect. `save_as_fp16 > 0` stores half-float pixels,
// otherwise full float. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code
// with a message in *err.
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;

    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRHeader header;
  InitEXRHeader(&header);

  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(static_cast<size_t>(width * height));
    memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
  } else {
    images[0].resize(static_cast<size_t>(width * height));
    images[1].resize(static_cast<size_t>(width * height));
    images[2].resize(static_cast<size_t>(width * height));
    images[3].resize(static_cast<size_t>(width * height));

    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
    for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  // Planes ordered to match the (A)BGR channel-name order set below.
  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image

    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }

  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);

  // BUG FIX: the original returned early when SaveEXRImageToFile failed,
  // leaking the three header allocations above. Free them on every path.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
// ===== end of embedded tinyexr content =====
// mg.c
//-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB MG code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
// program mg
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include "print_results.h"
#include "../my_include/my_include.h"
static void setup(int *n1, int *n2, int *n3);
static void mg3P(double u[], double v[], double r[],
double a[4], double c[4], int n1, int n2, int n3);
static void psinv(void *or, void *ou, int n1, int n2, int n3,
double c[4], int k);
static void resid(void *ou, void *ov, void *or, int n1, int n2, int n3,
double a[4], int k);
static void rprj3(void *or, int m1k, int m2k, int m3k,
void *os, int m1j, int m2j, int m3j, int k);
static void interp(void *oz, int mm1, int mm2, int mm3,
void *ou, int n1, int n2, int n3, int k);
static void norm2u3(void *or, int n1, int n2, int n3,
double *rnm2, double *rnmu,
int nx, int ny, int nz);
static void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk);
static void comm3(void *ou, int n1, int n2, int n3, int kk);
static void zran3(void *oz, int n1, int n2, int n3, int nx1, int ny1, int k);
static void showall(void *oz, int n1, int n2, int n3);
static double power(double a, int n);
static void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
int m, int ind);
static void zero3(void *oz, int n1, int n2, int n3);
//-------------------------------------------------------------------------c
// These arrays are in common because they are quite large
// and probably shouldn't be allocated on the stack. They
// are always passed as subroutine args.
//-------------------------------------------------------------------------c
/* commcon /noautom/ */
static double u[NR];
static double v[NR];
static double r[NR];
/* common /grid/ */
static int is1, is2, is3, ie1, ie2, ie3;
/* common /rans_save/ starts */
double starts[NM];
//---------------------------------------------------------------------
// Driver for the MG benchmark: reads the problem size (from mg.input or
// compiled defaults), classifies the run (S/W/A/B/C/D/E/U), performs one
// warm-up V-cycle, then times `nit` V-cycle + residual iterations and
// verifies the final L2 residual norm against the reference value.
//---------------------------------------------------------------------
int main()
{
  //-------------------------------------------------------------------------c
  // k is the current level. It is passed down through subroutine args
  // and is NOT global. it is the current iteration
  //-------------------------------------------------------------------------c
  int k, it;
  double t, tinit, mflops;
  double a[4], c[4];
  double rnm2, rnmu, old2, oldu, epsilon;
  int n1, n2, n3, nit;
  double nn, verify_value, err;
  logical verified;
  int i;
  char *t_names[T_last];
  double tmax;

  for (i = T_init; i < T_last; i++) {
    timer_clear(i);
  }
  timer_start(T_init);

  //---------------------------------------------------------------------
  // Read in and broadcast input data
  //---------------------------------------------------------------------
  // Section timing is enabled only when a 'timer.flag' file exists.
  FILE *fp;
  if ((fp = fopen("timer.flag", "r")) != NULL) {
    timeron = true;
    t_names[T_init] = "init";
    t_names[T_bench] = "benchmk";
    t_names[T_mg3P] = "mg3P";
    t_names[T_psinv] = "psinv";
    t_names[T_resid] = "resid";
    t_names[T_rprj3] = "rprj3";
    t_names[T_interp] = "interp";
    t_names[T_norm2] = "norm2";
    t_names[T_comm3] = "comm3";
    fclose(fp);
  } else {
    timeron = false;
  }

  printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - MG Benchmark\n\n");

  if ((fp = fopen("mg.input", "r")) != NULL) {
    int result;
    printf(" Reading from input file mg.input\n");
    // BUG FIX: this argument had been corrupted to a bare `<` (an HTML
    // entity-mangled `&lt`), which does not compile; restore the address
    // of the global level count `lt`.
    result = fscanf(fp, "%d\n", &lt);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%d", &nit);
    while (fgetc(fp) != '\n');
    for (i = 0; i <= 7; i++) {
      result = fscanf(fp, "%d", &debug_vec[i]);
    }
    fclose(fp);
  } else {
    printf(" No input file. Using compiled defaults \n");
    lt = LT_DEFAULT;
    nit = NIT_DEFAULT;
    nx[lt] = NX_DEFAULT;
    ny[lt] = NY_DEFAULT;
    nz[lt] = NZ_DEFAULT;
    for (i = 0; i <= 7; i++) {
      debug_vec[i] = DEBUG_DEFAULT;
    }
  }

  // Map (grid size, iteration count) to the NPB problem class;
  // anything unrecognized is class 'U' (unknown, not verified).
  if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) {
    Class = 'U';
  } else if ( nx[lt] == 32 && nit == 4 ) {
    Class = 'S';
  } else if ( nx[lt] == 128 && nit == 4 ) {
    Class = 'W';
  } else if ( nx[lt] == 256 && nit == 4 ) {
    Class = 'A';
  } else if ( nx[lt] == 256 && nit == 20 ) {
    Class = 'B';
  } else if ( nx[lt] == 512 && nit == 20 ) {
    Class = 'C';
  } else if ( nx[lt] == 1024 && nit == 50 ) {
    Class = 'D';
  } else if ( nx[lt] == 2048 && nit == 50 ) {
    Class = 'E';
  } else {
    Class = 'U';
  }

  //---------------------------------------------------------------------
  // Use these for debug info:
  //---------------------------------------------------------------------
  // debug_vec(0) = 1 !=> report all norms
  // debug_vec(1) = 1 !=> some setup information
  // debug_vec(1) = 2 !=> more setup information
  // debug_vec(2) = k => at level k or below, show result of resid
  // debug_vec(3) = k => at level k or below, show result of psinv
  // debug_vec(4) = k => at level k or below, show result of rprj
  // debug_vec(5) = k => at level k or below, show result of interp
  // debug_vec(6) = 1 => (unused)
  // debug_vec(7) = 1 => (unused)
  //---------------------------------------------------------------------
  // Stencil coefficients for the residual operator A.
  a[0] = -8.0/3.0;
  a[1] = 0.0;
  a[2] = 1.0/6.0;
  a[3] = 1.0/12.0;

  if (Class == 'A' || Class == 'S' || Class =='W') {
    //---------------------------------------------------------------------
    // Coefficients for the S(a) smoother
    //---------------------------------------------------------------------
    c[0] = -3.0/8.0;
    c[1] = +1.0/32.0;
    c[2] = -1.0/64.0;
    c[3] = 0.0;
  } else {
    //---------------------------------------------------------------------
    // Coefficients for the S(b) smoother
    //---------------------------------------------------------------------
    c[0] = -3.0/17.0;
    c[1] = +1.0/33.0;
    c[2] = -1.0/61.0;
    c[3] = 0.0;
  }
  lb = 1;
  k = lt;

  setup(&n1, &n2, &n3);
  zero3(u, n1, n2, n3);
  zran3(v, n1, n2, n3, nx[lt], ny[lt], k);

  norm2u3(v, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

  // printf("\n");
  // printf(" norms of random v are\n");
  // printf("%4d%19.2f%19.2e\n", 0, rnm2, rnmu);
  // printf(" about to evaluate resid, k=%d\n", k);

  printf(" Size: %4dx%4dx%4d (class %c)\n", nx[lt], ny[lt], nz[lt], Class);
  printf(" Iterations: %5d\n", nit);
  printf(" Number of available threads: %5d\n", omp_get_max_threads());
  printf("\n");

  resid(u, v, r, n1, n2, n3, a, k);
  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  old2 = rnm2;
  oldu = rnmu;

  //---------------------------------------------------------------------
  // One iteration for startup
  //---------------------------------------------------------------------
  mg3P(u, v, r, a, c, n1, n2, n3);
  resid(u, v, r, n1, n2, n3, a, k);

  // Re-initialize so the timed section starts from the same state as the
  // untimed warm-up did.
  setup(&n1, &n2, &n3);
  zero3(u, n1, n2, n3);
  zran3(v, n1, n2, n3, nx[lt], ny[lt], k);

  timer_stop(T_init);
  tinit = timer_read(T_init);
  printf(" Initialization time: %15.3f seconds\n\n", tinit);

  for (i = T_bench; i < T_last; i++) {
    timer_clear(i);
  }
  timer_start(T_bench);

  if (timeron) timer_start(T_resid2);
  resid(u, v, r, n1, n2, n3, a, k);
  if (timeron) timer_stop(T_resid2);
  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  old2 = rnm2;
  oldu = rnmu;

  // start_crash()/end_crash(): presumably fault-injection hooks from
  // my_include.h bracketing the timed loop -- confirm against that header.
  start_crash();
  for (it = 1; it <= nit; it++) {
    if ((it == 1) || (it == nit) || ((it % 5) == 0)) {
      printf(" iter %3d\n", it);
    }
    if (timeron) timer_start(T_mg3P);
    mg3P(u, v, r, a, c, n1, n2, n3);
    if (timeron) timer_stop(T_mg3P);
    if (timeron) timer_start(T_resid2);
    resid(u, v, r, n1, n2, n3, a, k);
    if (timeron) timer_stop(T_resid2);
  }
  end_crash();

  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

  timer_stop(T_bench);
  t = timer_read(T_bench);

  verified = false;
  verify_value = 0.0;

  printf("\n Benchmark completed\n");

  // Reference L2 residual norms per problem class (relative tolerance 1e-8).
  epsilon = 1.0e-8;
  if (Class != 'U') {
    if (Class == 'S') {
      verify_value = 0.5307707005734e-04;
    } else if (Class == 'W') {
      verify_value = 0.6467329375339e-05;
    } else if (Class == 'A') {
      verify_value = 0.2433365309069e-05;
    } else if (Class == 'B') {
      verify_value = 0.1800564401355e-05;
    } else if (Class == 'C') {
      verify_value = 0.5706732285740e-06;
    } else if (Class == 'D') {
      verify_value = 0.1583275060440e-09;
    } else if (Class == 'E') {
      verify_value = 0.5630442584711e-10;
    }

    err = fabs( rnm2 - verify_value ) / verify_value;
    if (err <= epsilon) {
      verified = true;
      printf(" VERIFICATION SUCCESSFUL\n");
      printf(" L2 Norm is %20.13E\n", rnm2);
      printf(" Error is %20.13E\n", err);
    } else {
      verified = false;
      printf(" VERIFICATION FAILED\n");
      printf(" L2 Norm is %20.13E\n", rnm2);
      printf(" The correct L2 Norm is %20.13E\n", verify_value);
    }
  } else {
    verified = false;
    printf(" Problem size unknown\n");
    printf(" NO VERIFICATION PERFORMED\n");
    printf(" L2 Norm is %20.13E\n", rnm2);
  }

  nn = 1.0 * nx[lt] * ny[lt] * nz[lt];
  if (t != 0.0) {
    mflops = 58.0 * nit * nn * 1.0e-6 / t;
  } else {
    mflops = 0.0;
  }

  print_results("MG", Class, nx[lt], ny[lt], nz[lt],
                nit, t,
                mflops, " floating point",
                verified, NPBVERSION, COMPILETIME,
                CS1, CS2, CS3, CS4, CS5, CS6, CS7);

  //---------------------------------------------------------------------
  // More timers
  //---------------------------------------------------------------------
  if (timeron) {
    tmax = timer_read(T_bench);
    if (tmax == 0.0) tmax = 1.0;

    printf(" SECTION Time (secs)\n");
    for (i = T_bench; i < T_last; i++) {
      t = timer_read(i);
      if (i == T_resid2) {
        // T_resid2 accumulates the driver-level resid calls; subtracting it
        // from T_resid leaves the time spent in resid inside mg3P.
        t = timer_read(T_resid) - t;
        printf(" --> %8s:%9.3f (%6.2f%%)\n", "mg-resid", t, t*100./tmax);
      } else {
        printf(" %-8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100./tmax);
      }
    }
  }

  return 0;
}
static void setup(int *n1, int *n2, int *n3)
{
int k, j;
int ax, mi[MAXLEVEL+1][3];
int ng[MAXLEVEL+1][3];
ng[lt][0] = nx[lt];
ng[lt][1] = ny[lt];
ng[lt][2] = nz[lt];
for (k = lt-1; k >= 1; k--) {
for (ax = 0; ax < 3; ax++) {
ng[k][ax] = ng[k+1][ax]/2;
}
}
for (k = lt; k >= 1; k--) {
nx[k] = ng[k][0];
ny[k] = ng[k][1];
nz[k] = ng[k][2];
}
for (k = lt; k >= 1; k--) {
for (ax = 0; ax < 3; ax++) {
mi[k][ax] = 2 + ng[k][ax];
}
m1[k] = mi[k][0];
m2[k] = mi[k][1];
m3[k] = mi[k][2];
}
k = lt;
is1 = 2 + ng[k][0] - ng[lt][0];
ie1 = 1 + ng[k][0];
*n1 = 3 + ie1 - is1;
is2 = 2 + ng[k][1] - ng[lt][1];
ie2 = 1 + ng[k][1];
*n2 = 3 + ie2 - is2;
is3 = 2 + ng[k][2] - ng[lt][2];
ie3 = 1 + ng[k][2];
*n3 = 3 + ie3 - is3;
ir[lt] = 0;
for (j = lt-1; j >= 1; j--) {
ir[j] = ir[j+1]+ONE*m1[j+1]*m2[j+1]*m3[j+1];
}
if (debug_vec[1] >= 1) {
printf(" in setup, \n");
printf(" k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
k,lt,ng[k][0],ng[k][1],ng[k][2],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3);
}
}
//---------------------------------------------------------------------
// multigrid V-cycle routine
//---------------------------------------------------------------------
// One multigrid V-cycle: restrict the residual down to the coarsest level,
// solve (approximately) there, then prolongate/correct/smooth back up.
// u, v, r are the flat storage arrays holding all levels (offsets in ir[]);
// a/c are the residual and smoother stencil coefficients; n1,n2,n3 are the
// padded extents of the finest level.
static void mg3P(double u[], double v[], double r[],
                 double a[4], double c[4], int n1, int n2, int n3)
{
    int j, k;

    //---------------------------------------------------------------------
    // down cycle.
    // restrict the residual from the fine grid to the coarse
    //---------------------------------------------------------------------
    for (k = lt; k >= lb+1; k--) {
        j = k - 1;
        rprj3(&r[ir[k]], m1[k], m2[k], m3[k],
              &r[ir[j]], m1[j], m2[j], m3[j], k);
    }

    k = lb;
    //---------------------------------------------------------------------
    // compute an approximate solution on the coarsest grid
    //---------------------------------------------------------------------
    zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
    psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);

    // up cycle: for each intermediate level, prolongate the correction,
    // recompute the residual, and smooth
    for (k = lb+1; k <= lt-1; k++) {
        j = k - 1;
        //---------------------------------------------------------------------
        // prolongate from level k-1 to k
        //---------------------------------------------------------------------
        zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
        interp(&u[ir[j]], m1[j], m2[j], m3[j], &u[ir[k]], m1[k], m2[k], m3[k], k);
        //---------------------------------------------------------------------
        // compute residual for level k
        //---------------------------------------------------------------------
        resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k);
        //---------------------------------------------------------------------
        // apply smoother
        //---------------------------------------------------------------------
        psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);
    }

    // Final step on the finest level, which lives directly in u/v/r.
    j = lt - 1;
    k = lt;
    interp(&u[ir[j]], m1[j], m2[j], m3[j], u, n1, n2, n3, k);
    resid(u, v, r, n1, n2, n3, a, k);
    psinv(r, u, n1, n2, n3, c, k);
}
//---------------------------------------------------------------------
// psinv applies an approximate inverse as smoother: u = u + Cr
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Presuming coefficient c(3) is zero (the NPB assumes this,
// but it is thus not a general case), 2A + 1M may be eliminated,
// resulting in 13A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void psinv(void *or, void *ou, int n1, int n2, int n3,
                  double c[4], int k)
{
    // View the flat buffers as n3 x n2 x n1 grids (C99 pointer-to-VLA cast).
    double (*r)[n2][n1] = (double (*)[n2][n1])or;
    double (*u)[n2][n1] = (double (*)[n2][n1])ou;
    int i3, i2, i1;
    // Per-thread scratch rows; M (a global constant) bounds the row length n1.
    double r1[M], r2[M];

    if (timeron) timer_start(T_psinv);
    // Each i3 plane is updated independently, so the outer loop parallelizes;
    // r1/r2 are private scratch per thread.
    #pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2)
    for (i3 = 1; i3 < n3-1; i3++) {
        for (i2 = 1; i2 < n2-1; i2++) {
            // Precompute, for the whole row, the sum of the four neighbors in
            // the i2/i3 directions (r1) and of the four i2/i3 diagonal
            // neighbors (r2), so the update loop below vectorizes.
            for (i1 = 0; i1 < n1; i1++) {
                r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]
                       + r[i3-1][i2][i1] + r[i3+1][i2][i1];
                r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
                       + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
            }
            for (i1 = 1; i1 < n1-1; i1++) {
                u[i3][i2][i1] = u[i3][i2][i1]
                              + c[0] * r[i3][i2][i1]
                              + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
                                       + r1[i1] )
                              + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
                //--------------------------------------------------------------------
                // Assume c[3] = 0 (Enable line below if c[3] not= 0)
                //--------------------------------------------------------------------
                // + c[3] * ( r2[i1-1] + r2[i1+1] )
                //--------------------------------------------------------------------
            }
        }
    }
    if (timeron) timer_stop(T_psinv);

    //---------------------------------------------------------------------
    // exchange boundary points
    //---------------------------------------------------------------------
    comm3(u, n1, n2, n3, k);

    if (debug_vec[0] >= 1) {
        rep_nrm(u, n1, n2, n3, "   psinv", k);
    }

    if (debug_vec[3] >= k) {
        showall(u, n1, n2, n3);
    }
}
//---------------------------------------------------------------------
// resid computes the residual: r = v - Au
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition (or Subtraction) and
// Multiplication, respectively.
// Presuming coefficient a(1) is zero (the NPB assumes this,
// but it is thus not a general case), 3A + 1M may be eliminated,
// resulting in 12A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void resid(void *ou, void *ov, void *or, int n1, int n2, int n3,
                  double a[4], int k)
{
    // View the flat buffers as n3 x n2 x n1 grids (C99 pointer-to-VLA cast).
    // Note: callers may alias ov and or (mg3P passes r for both), which is
    // safe because each output cell is written once from already-read inputs.
    double (*u)[n2][n1] = (double (*)[n2][n1])ou;
    double (*v)[n2][n1] = (double (*)[n2][n1])ov;
    double (*r)[n2][n1] = (double (*)[n2][n1])or;
    int i3, i2, i1;
    // Per-thread scratch rows; M (a global constant) bounds the row length n1.
    double u1[M], u2[M];

    if (timeron) timer_start(T_resid);
    #pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)
    for (i3 = 1; i3 < n3-1; i3++) {
        for (i2 = 1; i2 < n2-1; i2++) {
            // Precompute, for the whole row, the sum of the four neighbors in
            // the i2/i3 directions (u1) and of the four i2/i3 diagonal
            // neighbors (u2), so the update loop below vectorizes.
            for (i1 = 0; i1 < n1; i1++) {
                u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
                       + u[i3-1][i2][i1] + u[i3+1][i2][i1];
                u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
                       + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
            }
            for (i1 = 1; i1 < n1-1; i1++) {
                r[i3][i2][i1] = v[i3][i2][i1]
                              - a[0] * u[i3][i2][i1]
                //-------------------------------------------------------------------
                //  Assume a[1] = 0      (Enable 2 lines below if a[1] not= 0)
                //-------------------------------------------------------------------
                //            - a[1] * ( u[i3][i2][i1-1] + u[i3][i2][i1+1]
                //                     + u1[i1] )
                //-------------------------------------------------------------------
                              - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
                              - a[3] * ( u2[i1-1] + u2[i1+1] );
            }
        }
    }
    if (timeron) timer_stop(T_resid);

    //---------------------------------------------------------------------
    // exchange boundary data
    //---------------------------------------------------------------------
    comm3(r, n1, n2, n3, k);

    if (debug_vec[0] >= 1) {
        rep_nrm(r, n1, n2, n3, "   resid", k);
    }

    if (debug_vec[2] >= k) {
        showall(r, n1, n2, n3);
    }
}
//---------------------------------------------------------------------
// rprj3 projects onto the next coarser grid,
// using a trilinear Finite Element projection: s = r' = P r
//
// This implementation costs 20A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void rprj3(void *or, int m1k, int m2k, int m3k,
                  void *os, int m1j, int m2j, int m3j, int k)
{
    // Fine grid r (m3k x m2k x m1k) is projected onto coarse grid s
    // (m3j x m2j x m1j) with weights 1/2, 1/4, 1/8, 1/16 by distance
    // from the fine-grid center point.
    double (*r)[m2k][m1k] = (double (*)[m2k][m1k])or;
    double (*s)[m2j][m1j] = (double (*)[m2j][m1j])os;
    int j3, j2, j1, i3, i2, i1, d1, d2, d3, j;
    // Per-thread scratch rows; M (a global constant) bounds the row length.
    double x1[M], y1[M], x2, y2;

    if (timeron) timer_start(T_rprj3);
    // d* = 2 selects a shifted fine-grid origin when a fine dimension has
    // degenerated to 3 (a single interior point); otherwise d* = 1.
    if (m1k == 3) {
        d1 = 2;
    } else {
        d1 = 1;
    }

    if (m2k == 3) {
        d2 = 2;
    } else {
        d2 = 1;
    }

    if (m3k == 3) {
        d3 = 2;
    } else {
        d3 = 1;
    }
    // Coarse-grid points are independent, so the outermost loop parallelizes.
    #pragma omp parallel for default(shared) \
            private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)
    for (j3 = 1; j3 < m3j-1; j3++) {
        i3 = 2*j3-d3;
        for (j2 = 1; j2 < m2j-1; j2++) {
            i2 = 2*j2-d2;
            // First pass over the row: partial sums of edge (x1) and corner
            // (y1) fine-grid neighbors sharing the same i1.
            for (j1 = 1; j1 < m1j; j1++) {
                i1 = 2*j1-d1;
                x1[i1] = r[i3+1][i2  ][i1] + r[i3+1][i2+2][i1]
                       + r[i3  ][i2+1][i1] + r[i3+2][i2+1][i1];
                y1[i1] = r[i3  ][i2  ][i1] + r[i3+2][i2  ][i1]
                       + r[i3  ][i2+2][i1] + r[i3+2][i2+2][i1];
            }
            for (j1 = 1; j1 < m1j-1; j1++) {
                i1 = 2*j1-d1;
                y2 = r[i3  ][i2  ][i1+1] + r[i3+2][i2  ][i1+1]
                   + r[i3  ][i2+2][i1+1] + r[i3+2][i2+2][i1+1];
                x2 = r[i3+1][i2  ][i1+1] + r[i3+1][i2+2][i1+1]
                   + r[i3  ][i2+1][i1+1] + r[i3+2][i2+1][i1+1];
                s[j3][j2][j1] =
                       0.5 * r[i3+1][i2+1][i1+1]
                    + 0.25 * (r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)
                   + 0.125 * (x1[i1] + x1[i1+2] + y2)
                  + 0.0625 * (y1[i1] + y1[i1+2]);
            }
        }
    }
    if (timeron) timer_stop(T_rprj3);

    // Refresh the coarse level's periodic boundary layers.
    j = k-1;
    comm3(s, m1j, m2j, m3j, j);

    if (debug_vec[0] >= 1) {
        rep_nrm(s, m1j, m2j, m3j, "   rprj3", k-1);
    }

    if (debug_vec[4] >= k) {
        showall(s, m1j, m2j, m3j);
    }
}
//---------------------------------------------------------------------
// interp adds the trilinear interpolation of the correction
// from the coarser grid to the current approximation: u = u + Qu'
//
// Observe that this implementation costs 16A + 4M, where
// A and M denote the costs of Addition and Multiplication.
// Note that this vectorizes, and is also fine for cache
// based machines. Vector machines may get slightly better
// performance however, with 8 separate "do i1" loops, rather than 4.
//---------------------------------------------------------------------
static void interp(void *oz, int mm1, int mm2, int mm3,
                   void *ou, int n1, int n2, int n3, int k)
{
    // Coarse grid z (mm3 x mm2 x mm1) is interpolated onto fine grid u
    // (n3 x n2 x n1). Fine points coinciding with coarse points get the
    // coarse value; in-between points get averages of 2, 4 or 8 coarse
    // neighbors (weights 0.5 / 0.25 / 0.125).
    double (*z)[mm2][mm1] = (double (*)[mm2][mm1])oz;
    double (*u)[n2][n1] = (double (*)[n2][n1])ou;
    int i3, i2, i1, d1, d2, d3, t1, t2, t3;

    // note that m = 1037 in globals.h but for this only need to be
    // 535 to handle up to 1024^3
    //  integer m
    //  parameter( m=535 )
    double z1[M], z2[M], z3[M];

    if (timeron) timer_start(T_interp);
    // Common case: no dimension has degenerated to 3.
    if (n1 != 3 && n2 != 3 && n3 != 3) {
        #pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)
        for (i3 = 0; i3 < mm3-1; i3++) {
            for (i2 = 0; i2 < mm2-1; i2++) {
                // Precompute row sums of coarse neighbors in the i2 (z1),
                // i3 (z2), and combined i2+i3 (z3) directions.
                for (i1 = 0; i1 < mm1; i1++) {
                    z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
                    z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
                    z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
                }
                // Four separate i1 loops update the four fine-grid rows
                // derived from this coarse (i3,i2) row.
                for (i1 = 0; i1 < mm1-1; i1++) {
                    u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]
                        + z[i3][i2][i1];
                    u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]
                        + 0.5 * (z[i3][i2][i1+1] + z[i3][i2][i1]);
                }
                for (i1 = 0; i1 < mm1-1; i1++) {
                    u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]
                        + 0.5 * z1[i1];
                    u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]
                        + 0.25 * (z1[i1] + z1[i1+1]);
                }
                for (i1 = 0; i1 < mm1-1; i1++) {
                    u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]
                        + 0.5 * z2[i1];
                    u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]
                        + 0.25 * (z2[i1] + z2[i1+1]);
                }
                for (i1 = 0; i1 < mm1-1; i1++) {
                    u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]
                        + 0.25 * z3[i1];
                    u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]
                        + 0.125 * (z3[i1] + z3[i1+1]);
                }
            }
        }
    } else {
        // Degenerate case: some dimension of the fine grid is 3.
        // d* = 2 / t* = 1 mark the dimensions that degenerated; they shift
        // the fine-grid index mapping below.
        if (n1 == 3) {
            d1 = 2;
            t1 = 1;
        } else {
            d1 = 1;
            t1 = 0;
        }

        if (n2 == 3) {
            d2 = 2;
            t2 = 1;
        } else {
            d2 = 1;
            t2 = 0;
        }

        if (n3 == 3) {
            d3 = 2;
            t3 = 1;
        } else {
            d3 = 1;
            t3 = 0;
        }
        #pragma omp parallel default(shared) private(i1,i2,i3)
        {
            // First worksharing loop: fine planes aligned with coarse planes
            // (index offset d3).
            #pragma omp for
            for (i3 = d3; i3 <= mm3-1; i3++) {
                for (i2 = d2; i2 <= mm2-1; i2++) {
                    for (i1 = d1; i1 <= mm1-1; i1++) {
                        u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =
                            u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]
                            + z[i3-1][i2-1][i1-1];
                    }
                    for (i1 = 1; i1 <= mm1-1; i1++) {
                        u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =
                            u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]
                            + 0.5 * (z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
                    }
                }
                for (i2 = 1; i2 <= mm2-1; i2++) {
                    for (i1 = d1; i1 <= mm1-1; i1++) {
                        u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =
                            u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]
                            + 0.5 * (z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
                    }
                    for (i1 = 1; i1 <= mm1-1; i1++) {
                        u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =
                            u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]
                            + 0.25 * (z[i3-1][i2][i1] + z[i3-1][i2-1][i1]
                                    + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
                    }
                }
            }
            // Second worksharing loop: fine planes between coarse planes
            // (index offset t3). nowait is safe: the parallel region ends
            // right after, which is itself a barrier.
            #pragma omp for nowait
            for (i3 = 1; i3 <= mm3-1; i3++) {
                for (i2 = d2; i2 <= mm2-1; i2++) {
                    for (i1 = d1; i1 <= mm1-1; i1++) {
                        u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =
                            u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]
                            + 0.5 * (z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1-1]);
                    }
                    for (i1 = 1; i1 <= mm1-1; i1++) {
                        u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =
                            u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]
                            + 0.25 * (z[i3  ][i2-1][i1] + z[i3  ][i2-1][i1-1]
                                    + z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
                    }
                }
                for (i2 = 1; i2 <= mm2-1; i2++) {
                    for (i1 = d1; i1 <= mm1-1; i1++) {
                        u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =
                            u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]
                            + 0.25 * (z[i3  ][i2][i1-1] + z[i3  ][i2-1][i1-1]
                                    + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
                    }
                    for (i1 = 1; i1 <= mm1-1; i1++) {
                        u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =
                            u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]
                            + 0.125 * (z[i3  ][i2][i1  ] + z[i3  ][i2-1][i1  ]
                                     + z[i3  ][i2][i1-1] + z[i3  ][i2-1][i1-1]
                                     + z[i3-1][i2][i1  ] + z[i3-1][i2-1][i1  ]
                                     + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
                    }
                }
            }
        } // end parallel
    }
    if (timeron) timer_stop(T_interp);

    if (debug_vec[0] >= 1) {
        rep_nrm(z, mm1, mm2, mm3, "z: inter", k-1);
        rep_nrm(u, n1, n2, n3, "u: inter", k);
    }

    if (debug_vec[5] >= k) {
        showall(z, mm1, mm2, mm3);
        showall(u, n1, n2, n3);
    }
}
//---------------------------------------------------------------------
// norm2u3 evaluates approximations to the L2 norm and the
// uniform (or L-infinity or Chebyshev) norm, under the
// assumption that the boundaries are periodic or zero. Add the
// boundaries in with half weight (quarter weight on the edges
// and eighth weight at the corners) for inhomogeneous boundaries.
//---------------------------------------------------------------------
// Computes *rnm2 = sqrt( sum(r_i^2) / (nx*ny*nz) ) and *rnmu = max |r_i|
// over the interior of the n3 x n2 x n1 grid at `or`.
//
// Fix: the original pre-checked `if (my_rnmu > max_rnmu)` OUTSIDE the
// critical section before entering it. That unsynchronized read of the
// shared non-atomic double max_rnmu is a data race (undefined behavior
// under both the C memory model and the OpenMP spec). The compare-and-
// update now happens entirely inside the critical section; the result is
// unchanged and the critical region is entered only once per thread, so
// the cost is negligible.
static void norm2u3(void *or, int n1, int n2, int n3,
                    double *rnm2, double *rnmu,
                    int nx, int ny, int nz)
{
    double (*r)[n2][n1] = (double (*)[n2][n1])or;
    double s, a;
    int i3, i2, i1;
    double dn, max_rnmu;

    if (timeron) timer_start(T_norm2);
    dn = 1.0*nx*ny*nz;

    s = 0.0;
    max_rnmu = 0.0;
    // The squared sum uses an OpenMP reduction; the maximum is accumulated
    // per-thread and merged under a critical section below.
    #pragma omp parallel default(shared) private(i1,i2,i3,a) reduction(+:s)
    {
        double my_rnmu = 0.0;

        #pragma omp for nowait
        for (i3 = 1; i3 < n3-1; i3++) {
            for (i2 = 1; i2 < n2-1; i2++) {
                for (i1 = 1; i1 < n1-1; i1++) {
                    s = s + pow(r[i3][i2][i1], 2.0);
                    a = fabs(r[i3][i2][i1]);
                    my_rnmu = (a > my_rnmu) ? a : my_rnmu;
                }
            }
        }

        // Merge this thread's maximum into the shared one. Both the read
        // and the write of max_rnmu must be inside the critical section.
        #pragma omp critical
        {
            if (my_rnmu > max_rnmu) max_rnmu = my_rnmu;
        }
    } // end parallel
    *rnmu = max_rnmu;

    *rnm2 = sqrt(s / dn);
    if (timeron) timer_stop(T_norm2);
}
//---------------------------------------------------------------------
// report on norm
//---------------------------------------------------------------------
// Computes and prints the L2 and uniform norms of the grid u at level kk,
// labeled with `title`. Debug helper; called when debug_vec[0] >= 1.
static void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk)
{
    double rnm2, rnmu;

    norm2u3(u, n1, n2, n3, &rnm2, &rnmu, nx[kk], ny[kk], nz[kk]);
    printf(" Level%2d in %8s: norms =%21.14E%21.14E\n", kk, title, rnm2, rnmu);
}
//---------------------------------------------------------------------
// comm3 organizes the communication on all borders
//---------------------------------------------------------------------
static void comm3(void *ou, int n1, int n2, int n3, int kk)
{
    // Applies periodic boundary conditions: each face layer of the
    // n3 x n2 x n1 grid receives a copy of the opposite interior layer.
    // (kk is unused in this function -- presumably kept for interface
    //  parity with a distributed version; TODO confirm.)
    double (*u)[n2][n1] = (double (*)[n2][n1])ou;
    int i1, i2, i3;

    if (timeron) timer_start(T_comm3);
    #pragma omp parallel default(shared) private(i1,i2,i3)
    {
        #pragma omp for
        for (i3 = 1; i3 < n3-1; i3++) {
            // wrap the i1 faces of this plane
            for (i2 = 1; i2 < n2-1; i2++) {
                u[i3][i2][   0] = u[i3][i2][n1-2];
                u[i3][i2][n1-1] = u[i3][i2][   1];
            }
            // The i2 faces were originally a second i3 loop (see the
            // commented-out lines); the two were fused under one "omp for".
            // }
            // for (i3 = 1; i3 < n3-1; i3++) {
            for (i1 = 0; i1 < n1; i1++) {
                u[i3][   0][i1] = u[i3][n2-2][i1];
                u[i3][n2-1][i1] = u[i3][   1][i1];
            }
        }
        // Wrap the i3 faces. This reads planes 1 and n3-2, which the loop
        // above writes; the implicit barrier at the end of that (non-nowait)
        // worksharing loop makes this safe. "nowait" here is fine because
        // the parallel region ends immediately after, which is a barrier.
        #pragma omp for nowait
        for (i2 = 0; i2 < n2; i2++) {
            for (i1 = 0; i1 < n1; i1++) {
                u[   0][i2][i1] = u[n3-2][i2][i1];
                u[n3-1][i2][i1] = u[   1][i2][i1];
            }
        }
    } // end parallel
    if (timeron) timer_stop(T_comm3);
}
//---------------------------------------------------------------------
// zran3 loads +1 at ten randomly chosen points,
// loads -1 at a different ten random points,
// and zero elsewhere.
//---------------------------------------------------------------------
static void zran3(void *oz, int n1, int n2, int n3, int nx1, int ny1, int k)
{
    double (*z)[n2][n1] = (double (*)[n2][n1])oz;

    int i0, mm0, mm1;

    int i1, i2, i3, d1, e1, e2, e3;
    double xx, x0, x1, a1, a2, ai;

    const int mm = 10;              // number of +1 and -1 charges to place
    const double a = pow(5.0, 13.0);    // NPB pseudo-random generator multiplier
    const double x = 314159265.0;       // NPB pseudo-random generator seed
    double ten[mm][2], best0, best1;
    int i, j1[mm][2], j2[mm][2], j3[mm][2];
    int jg[4][mm][2];

    double rdummy;                  // randlc return value, intentionally unused

    int myid, num_threads;

    // Jump the random stream ahead: a1/a2 advance one row / one plane,
    // ai advances to this grid's starting offset.
    a1 = power(a, nx1);
    a2 = power(a, nx1*ny1);

    zero3(z, n1, n2, n3);

    i = is1-2+nx1*(is2-2+ny1*(is3-2));

    ai = power(a, i);
    d1 = ie1 - is1 + 1;
    e1 = ie1 - is1 + 2;
    e2 = ie2 - is2 + 2;
    e3 = ie3 - is3 + 2;
    x0 = x;
    rdummy = randlc(&x0, ai);

    //---------------------------------------------------------------------
    // save the starting seeds for the following loop
    //---------------------------------------------------------------------
    for (i3 = 1; i3 < e3; i3++) {
        starts[i3] = x0;
        rdummy = randlc(&x0, a2);
    }

    //---------------------------------------------------------------------
    // fill array
    //---------------------------------------------------------------------
    // Each plane i3 starts from its pre-saved seed, so planes can be
    // generated independently and in parallel.
    #pragma omp parallel for default(shared) private(i2,i3,x1,xx,rdummy) \
            shared(e2,e3,d1,a1)
    for (i3 = 1; i3 < e3; i3++) {
        x1 = starts[i3];
        for (i2 = 1; i2 < e2; i2++) {
            xx = x1;
            vranlc(d1, &xx, a, &(z[i3][i2][1]));
            rdummy = randlc(&x1, a1);
        }
    }

    //---------------------------------------------------------------------
    // comm3(z,n1,n2,n3);
    // showall(z,n1,n2,n3);
    //---------------------------------------------------------------------

    //---------------------------------------------------------------------
    // each thread looks for twenty candidates
    //---------------------------------------------------------------------
    // Every thread keeps its own top-ten largest (ten[][1]) and smallest
    // (ten[][0]) values, with their coordinates in j1/j2/j3.
    #pragma omp parallel default(shared) private(i,i0,i1,i2,i3,j1,j2,j3,ten, \
                                                 myid,num_threads) shared(best0,best1,n1,n2,n3)
    {
        for (i = 0; i < mm; i++) {
            ten[i][1] = 0.0;
            j1[i][1] = 0;
            j2[i][1] = 0;
            j3[i][1] = 0;
            ten[i][0] = 1.0;
            j1[i][0] = 0;
            j2[i][0] = 0;
            j3[i][0] = 0;
        }

        #pragma omp for
        for (i3 = 1; i3 < n3-1; i3++) {
            double (*zi3)[n1] = z[i3];  // cache the plane pointer
            for (i2 = 1; i2 < n2-1; i2++) {
                for (i1 = 1; i1 < n1-1; i1++) {
                    // candidate for the top-ten largest values
                    if (zi3[i2][i1] > ten[0][1]) {
                        ten[0][1] = zi3[i2][i1];
                        j1[0][1] = i1;
                        j2[0][1] = i2;
                        j3[0][1] = i3;
                        bubble(ten, j1, j2, j3, mm, 1);
                    }
                    // candidate for the top-ten smallest values
                    if (zi3[i2][i1] < ten[0][0]) {
                        ten[0][0] = zi3[i2][i1];
                        j1[0][0] = i1;
                        j2[0][0] = i2;
                        j3[0][0] = i3;
                        bubble(ten, j1, j2, j3, mm, 0);
                    }
                }
            }
        }

        //---------------------------------------------------------------------
        // Now which of these are globally best?
        //---------------------------------------------------------------------
        // For each of the mm global slots, every thread offers its current
        // best candidate; the ordered loop picks the winner deterministically.
        i1 = mm - 1;
        i0 = mm - 1;
        myid = 0;
        myid = omp_get_thread_num();
        num_threads = omp_get_num_threads();
        for (i = mm - 1; i >= 0; i--) {
            // ... ORDERED access is required here for sequential consistency
            // ... in case that two values are identical.
            // ... Since an "ORDERED" section is only defined in OpenMP 2,
            // ... we use a dummy loop to emulate ordered access in OpenMP 1.x.
            #pragma omp master
            {
                best1 = 0.0;
                best0 = 1.0;
            }
            #pragma omp for ordered schedule(static)
            for (i2 = 1; i2 <= num_threads; i2++) {
                #pragma omp ordered
                {
                    if (ten[i1][1] > best1) {
                        best1 = ten[i1][1];
                        jg[0][i][1] = myid;
                    }
                    if (ten[i0][0] < best0) {
                        best0 = ten[i0][0];
                        jg[0][i][0] = myid;
                    }
                }
            }
            // The winning thread publishes its candidate's coordinates and
            // moves on to its next-best candidate.
            if (myid == jg[0][i][1]) {
                jg[1][i][1] = j1[i1][1];
                jg[2][i][1] = j2[i1][1];
                jg[3][i][1] = j3[i1][1];
                i1 = i1-1;
            }
            if (myid == jg[0][i][0]) {
                jg[1][i][0] = j1[i0][0];
                jg[2][i][0] = j2[i0][0];
                jg[3][i][0] = j3[i0][0];
                i0 = i0-1;
            }
        }
    } // end parallel
    // mm1 = i1+1;
    // mm0 = i0+1;
    mm1 = 0;
    mm0 = 0;

    /*
    int cnt = 0;
    printf("  \n");
    printf("  negative charges at\n");
    for (i = 0; i < mm; i++) {
      printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]);
      if (++cnt % 5 == 0) printf("\n");
    }

    cnt = 0;
    printf("  positive charges at\n");
    for (i = 0; i < mm; i++) {
      printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]);
      if (++cnt % 5 == 0) printf("\n");
    }

    cnt = 0;
    printf("  small random numbers were\n");
    for (i = mm-1; i >= 0; i--) {
      printf(" %15.8E", ten[i][0]);
      if (++cnt % 5 == 0) printf("\n");
    }

    cnt = 0;
    printf("  and they were found on processor number\n");
    for (i = mm-1; i >= 0; i--) {
      printf(" %4d", jg[0][i][0]);
      if (++cnt % 10 == 0) printf("\n");
    }

    cnt = 0;
    printf("  large random numbers were\n");
    for (i = mm-1; i >= 0; i--) {
      printf(" %15.8E", ten[i][1]);
      if (++cnt % 5 == 0) printf("\n");
    }

    cnt = 0;
    printf("  and they were found on processor number\n");
    for (i = mm-1; i >= 0; i--) {
      printf(" %4d", jg[0][i][1]);
      if (++cnt % 10 == 0) printf("\n");
    }
    */

    // Reset the grid, then place -1 at the ten globally smallest positions
    // and +1 at the ten globally largest.
    #pragma omp parallel for default(shared) private(i1,i2,i3)
    for (i3 = 0; i3 < n3; i3++) {
        for (i2 = 0; i2 < n2; i2++) {
            for (i1 = 0; i1 < n1; i1++) {
                z[i3][i2][i1] = 0.0;
            }
        }
    }
    for (i = mm-1; i >= mm0; i--) {
        z[jg[3][i][0]][jg[2][i][0]][jg[1][i][0]] = -1.0;
    }
    for (i = mm-1; i >= mm1; i--) {
        z[jg[3][i][1]][jg[2][i][1]][jg[1][i][1]] = +1.0;
    }
    comm3(z, n1, n2, n3, k);

    //---------------------------------------------------------------------
    // showall(z,n1,n2,n3);
    //---------------------------------------------------------------------
}
// Debug dump: prints a window of the n3 x n2 x n1 grid at oz, clamped to at
// most 18 x 14 x 18 entries, one i1-row per output line, planes separated by
// a dashed line.
static void showall(void *oz, int n1, int n2, int n3)
{
    double (*z)[n2][n1] = (double (*)[n2][n1])oz;
    int i1, i2, i3;

    // Clamp the printed window so huge grids stay readable.
    const int lim1 = (n1 < 18) ? n1 : 18;
    const int lim2 = (n2 < 14) ? n2 : 14;
    const int lim3 = (n3 < 18) ? n3 : 18;

    printf("   \n");
    for (i3 = 0; i3 < lim3; i3++) {
        for (i1 = 0; i1 < lim1; i1++) {
            for (i2 = 0; i2 < lim2; i2++) {
                printf("%6.3f", z[i3][i2][i1]);
            }
            printf("\n");
        }
        printf("  - - - - - - - \n");
    }
    printf("   \n");
}
//---------------------------------------------------------------------
// power raises an integer, disguised as a double
// precision real, to an integer power
//---------------------------------------------------------------------
static double power(double a, int n)
{
    // Raises a to the n-th power in the NPB random-stream sense, using
    // binary exponentiation driven by randlc (which multiplies its first
    // argument by the second, modulo 2^46).
    double base = a;
    double result = 1.0;
    int remaining = n;

    while (remaining != 0) {
        // fold the current power of `base` in when the bit is set
        if (remaining % 2 == 1) (void)randlc(&result, base);
        // square `base` for the next bit
        (void)randlc(&base, base);
        remaining = remaining/2;
    }
    return result;
}
//---------------------------------------------------------------------
// bubble does a bubble sort in direction dir
//---------------------------------------------------------------------
static void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
                   int m, int ind)
{
    // One bubble pass that sinks the (possibly out-of-order) element at
    // index 0 into the otherwise-sorted column `ind` of ten:
    //   ind == 1: column kept ascending (largest value ends up last);
    //   ind == 0: column kept descending (smallest value ends up last).
    // j1/j2/j3 carry the grid coordinates paired with each value and are
    // permuted in lockstep. The pass stops at the first pair already in
    // order, since everything beyond it is sorted.
    //
    // Fix (duplication): the original repeated the entire four-array swap
    // body in both branches of "if (ind == 1)"; the only difference was the
    // direction of the comparison, so the two copies are merged here.
    double temp;
    int i, j_temp, misplaced;

    for (i = 0; i < m-1; i++) {
        // out of order under the ordering selected by ind?
        if (ind == 1) {
            misplaced = (ten[i][ind] > ten[i+1][ind]);
        } else {
            misplaced = (ten[i][ind] < ten[i+1][ind]);
        }
        if (!misplaced) {
            return;
        }

        temp = ten[i+1][ind];
        ten[i+1][ind] = ten[i][ind];
        ten[i][ind] = temp;

        j_temp = j1[i+1][ind];
        j1[i+1][ind] = j1[i][ind];
        j1[i][ind] = j_temp;

        j_temp = j2[i+1][ind];
        j2[i+1][ind] = j2[i][ind];
        j2[i][ind] = j_temp;

        j_temp = j3[i+1][ind];
        j3[i+1][ind] = j3[i][ind];
        j3[i][ind] = j_temp;
    }
}
// Fills the n3 x n2 x n1 grid at oz with zeros.
// The grid is stored contiguously, so a single flat loop over all
// n1*n2*n3 elements writes exactly the same memory as the original
// triple-nested loop, and parallelizes over one large iteration space.
static void zero3(void *oz, int n1, int n2, int n3)
{
    double *z = (double *)oz;
    const long nelem = (long)n1 * n2 * n3;
    long p;

    #pragma omp parallel for default(shared) private(p)
    for (p = 0; p < nelem; p++) {
        z[p] = 0.0;
    }
}
|
ej3mejora.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>
#define TAM 4000
#define IT 10
/* Initializes all TAM elements of M to 5.0f, in parallel. */
void rellenarArray(float *M){
    #pragma omp parallel for schedule(guided)
    for(int idx = 0; idx < TAM; ++idx){
        M[idx] = 5.0f;
    }
}
/*
 * Times the dot product of two TAM-element vectors with 2, 4, 6 and 8
 * OpenMP threads, averaged over IT iterations (the first and last
 * iteration are discarded as warm-up/cool-down).
 *
 * Fixes over the previous version:
 *  - Removed the "#pragma omp paralel for ..." above the timing loop: the
 *    directive was misspelled, so the compiler silently ignored it, and its
 *    reduction clause ("reduction(+:a,+:b,...)") was syntactically invalid
 *    anyway. Had it taken effect, the shared start/total/numthreads
 *    variables would have raced and corrupted the measurements — the outer
 *    timing loop must be serial. omp_set_nested(1), which only existed to
 *    support that nested parallelism, is dropped with it.
 *  - malloc results are checked, and the buffers are freed before exit.
 *  - The four copy-pasted timed sections are folded into one helper.
 */

/* Times one parallel dot product of a and b using nthreads threads;
   stores the result in *out and returns the elapsed wall-clock seconds. */
static double dot_timed(const float *a, const float *b, int nthreads, float *out)
{
    float total = 0.0f;
    double start = omp_get_wtime();
    #pragma omp parallel for schedule(guided) reduction(+:total) num_threads(nthreads)
    for (int i = 0; i < TAM; ++i) {
        total += a[i] * b[i];
    }
    *out = total;
    return omp_get_wtime() - start;
}

int main() {
    const int hilos[4] = {2, 4, 6, 8};
    double tiempos[4] = {0.0, 0.0, 0.0, 0.0};
    float total = 0.0f;

    float *a = (float *)malloc(sizeof(float) * TAM);
    float *b = (float *)malloc(sizeof(float) * TAM);
    if (a == NULL || b == NULL) {
        fprintf(stderr, "Error: sin memoria\n");
        free(a);
        free(b);
        return 1;
    }
    rellenarArray(a);
    rellenarArray(b);
    printf("\nTamanyo de los vectores: %i\n", TAM);

    for (int j = 0; j < IT; ++j) {
        for (int nt = 0; nt < 4; ++nt) {
            double t = dot_timed(a, b, hilos[nt], &total);
            /* discard the first and last iteration */
            if (j != 0 && j != IT-1)
                tiempos[nt] += t;
        }
    }

    printf("\nTiempo medio con 2 hilos: %lfs", tiempos[0]/(IT-2));
    printf("\nTiempo medio con 4 hilos: %lfs", tiempos[1]/(IT-2));
    printf("\nTiempo medio con 6 hilos: %lfs", tiempos[2]/(IT-2));
    printf("\nTiempo medio con 8 hilos: %lfs", tiempos[3]/(IT-2));
    printf("\nResultado final, valor: %f\n", total);

    free(a);
    free(b);
    return 0;
}
|
GB_unop__cos_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__cos_fp32_fp32
// op(A') function: GB_unop_tran__cos_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = cosf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cosf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = cosf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COS || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies z = cosf(x) elementwise: Cx [p] = cosf (Ax [p]) for all anz
// entries, using up to nthreads OpenMP threads. Returns GrB_NO_VALUE when
// the operator/type pair is compiled out via GB_DISABLE.
// (Auto-generated file: code changes belong in the generator template.)
GrB_Info GB_unop_apply__cos_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast reduces to a parallel memcpy
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = cosf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip positions not present in A
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = cosf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cosf (A'): transpose A and apply the operator. The actual loop lives
// in the shared template GB_unop_transpose.c, which is specialized by the
// GB_* macros defined at the top of this file.
// (Auto-generated file: code changes belong in the generator template.)
GrB_Info GB_unop_tran__cos_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_fc32_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc32_fc64
// op(A') function: GB_unop_tran__identity_fc32_fc64
// C type: GxB_FC32_t
// A type: GxB_FC64_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the identity operator with a double-complex to single-complex
// downcast: Cx [p] = GxB_CMPLXF ((float) creal (Ax [p]),
// (float) cimag (Ax [p])) for all anz entries, using up to nthreads OpenMP
// threads. Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
// (Auto-generated file: code changes belong in the generator template.)
GrB_Info GB_unop_apply__identity_fc32_fc64
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        // downcast each part separately: FC64 -> FC32
        GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (FC32) A': transpose A and downcast each entry. The actual loop lives
// in the shared template GB_unop_transpose.c (phase 2), specialized by the
// GB_* macros defined at the top of this file.
// (Auto-generated file: code changes belong in the generator template.)
GrB_Info GB_unop_tran__identity_fc32_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
csr.c | /*!
* \file
*
* \brief Various routines with dealing with CSR matrices
*
* \author George Karypis
* \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
*/
#include <GKlib.h>
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
\returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
/*************************************************************************/
/*! Allocates a fresh gk_csr_t and initializes all of its fields.
    \returns the newly allocated, initialized matrix. */
/*************************************************************************/
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *newmat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");
  gk_csr_Init(newmat);
  return newmat;
}
/*************************************************************************/
/*! Initializes the matrix
\param mat is the matrix to be initialized.
*/
/*************************************************************************/
/*************************************************************************/
/*! Resets a matrix: zeroes every field, then marks both dimensions as
    unset (-1).
    \param mat is the matrix to be initialized. */
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  memset(mat, 0, sizeof(*mat));
  mat->nrows = -1;
  mat->ncols = -1;
}
/*************************************************************************/
/*! Frees all the memory allocated for matrix.
\param mat is the matrix to be freed.
*/
/*************************************************************************/
/*************************************************************************/
/*! Frees a matrix and all of its contents; *mat is set to NULL by
    gk_free. A NULL *mat is a no-op.
    \param mat is a pointer to the matrix pointer to be freed. */
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat != NULL) {
    gk_csr_FreeContents(*mat);
    gk_free((void **)mat, LTERM);
  }
}
/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
sets them to NULL.
\param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
/* Frees every dynamically allocated field of the matrix and sets the
   corresponding pointers to NULL (gk_free() NULLs each passed address).
   The matrix object itself is not freed.
   Fix: the first argument was cast to (void *) while gk_free() takes a
   list of (void **) addresses; use (void **) like every other call site. */
void gk_csr_FreeContents(gk_csr_t *mat)
{
  gk_free((void **)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}
/*************************************************************************/
/*! Returns a copy of a matrix.
\param mat is the matrix to be duplicated.
\returns the newly created copy of the matrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows  = mat->nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure; each field is duplicated only if present.
     NOTE(review): only rowptr/rowids/rnorms/rowind/rowval and their column
     counterparts are copied -- rsums, rsizes, rvols, rwgts, etc. are NOT
     duplicated by this routine. */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  /* rowptr[nrows] is the total number of nonzeros in the row view */
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
\param mat is the original matrix.
\param rstart is the starting row.
\param nrows is the number of rows from rstart to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  /* the requested window must lie entirely within the matrix */
  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows  = nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure.
     NOTE(review): the rebasing loop below dereferences nmat->rowptr
     unconditionally, so this assumes mat->rowptr is non-NULL -- confirm. */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
  /* rebase the copied rowptr so the submatrix's nonzeros start at offset 0;
     iterate downward so rowptr[0] is subtracted from itself last */
  for (i=nrows; i>=0; i--)
    nmat->rowptr[i] -= nmat->rowptr[0];
  ASSERT(nmat->rowptr[0] == 0);

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));

  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);

  /* copy only the nonzeros that belong to rows [rstart, rstart+nrows) */
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowind+mat->rowptr[rstart],
                            gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowval+mat->rowptr[rstart],
                            gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowval"));

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
\param mat is the original matrix.
\param nrows is the number of rows to extract.
\param rind is the set of row numbers to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
  ssize_t i, ii, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* first pass: count the nonzeros of the selected rows */
  for (nnz=0, i=0; i<nrows; i++)
    nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];

  /* fix: the allocation-failure messages previously named
     "gk_csr_ExtractPartition"; they now identify this routine */
  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractRows: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractRows: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractRows: rowval");

  /* second pass: copy each selected row's indices/values, in the order
     given by rind[], building the new rowptr as we go */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
    i = rind[ii];
    gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
    gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
    nnz += mat->rowptr[i+1]-mat->rowptr[i];
    nmat->rowptr[++j] = nnz;
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
\param mat is the original matrix.
\param part is the partitioning vector of the rows.
\param pid is the partition ID that will be extracted.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = 0;
  nmat->ncols = mat->ncols;

  /* first pass: count the rows and nonzeros belonging to partition pid */
  for (nnz=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      nmat->nrows++;
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
    }
  }

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  /* second pass: copy the selected rows in their original relative order */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
      gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
      nmat->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}
/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
color array.
\param mat is the original matrix.
\param color is an array of size equal to the number of non-zeros
in the matrix (row-wise structure). The matrix is split into
as many parts as the number of colors. For meaningful results,
the colors should be numbered consecutively starting from 0.
\returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* number of parts = largest color id over all nonzeros, plus one */
  ncolors = gk_imax(rowptr[nrows], color)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  /* pass 1: count, per color, the nonzeros that fall in each row */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  /* convert the per-row counts into proper CSR rowptr arrays */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  /* pass 2: scatter each nonzero into its color's matrix; the rowptr
     entries double as insertion cursors and are advanced as we go */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }
  /* undo the cursor advancement so each rowptr is a valid CSR pointer again */
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}
/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it in the matrix's
forward structure.
\param filename is the file that stores the data.
\param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
specifying the type of the input format.
The GK_CSR_FMT_CSR does not contain a header
line, whereas the GK_CSR_FMT_BINROW is a binary format written
by gk_csr_Write() using the same format specifier.
\param readvals is either 1 or 0, indicating if the CSR file contains
values or it does not. It only applies when GK_CSR_FMT_CSR is
used.
\param numbering is either 1 or 0, indicating if the numbering of the
indices start from 1 or 0, respectively. If they start from 1,
they are automatically decremented during input so that they
will start from 0. It only applies when GK_CSR_FMT_CSR is
used.
\returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
  ssize_t i, k, l;
  size_t nfields, nrows, ncols, nnz, fmt, ncon = 0;
  size_t lnlen;
  ssize_t *rowptr;
  int *rowind, ival;
  float *rowval=NULL, fval;
  int readsizes, readwgts;
  char *line=NULL, *head, *tail, fmtstr[256];
  FILE *fpin;
  gk_csr_t *mat=NULL;

  if (!gk_fexists(filename))
    gk_errexit(SIGERR, "File %s does not exist!\n", filename);

  /* binary row-based format: dimensions, rowptr, rowind, and (optionally)
     rowval are read back verbatim, then we return early */
  if (format == GK_CSR_FMT_BINROW) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
    if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
      gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
    mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
    if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
      gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
    if (readvals == 1) {
      mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
      if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
        gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
    }
    gk_fclose(fpin);

    return mat;
  }

  /* binary column-based format: same layout as above but for the col view */
  if (format == GK_CSR_FMT_BINCOL) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
    if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
      gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
    mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
    if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
      gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
    if (readvals) {
      mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
      if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
        gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
    }
    gk_fclose(fpin);

    return mat;
  }

  /* CLUTO text format: a "nrows ncols nnz" header ('%' lines are comments);
     values are always present and indices are 1-based */
  if (format == GK_CSR_FMT_CLUTO) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
      gk_errexit(SIGERR, "Header line must contain 3 integers.\n");

    readsizes = 0;
    readwgts  = 0;
    readvals  = 1;
    numbering = 1;
  }
  /* METIS graph format: "nvtxs nedges [fmt [ncon]]" header; square matrix,
     each edge stored twice, 1-based numbering */
  else if (format == GK_CSR_FMT_METIS) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    fmt = ncon = 0;
    nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
    if (nfields < 2)
      gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");

    ncols = nrows;
    nnz *= 2;    /* each undirected edge contributes two nonzeros */

    if (fmt > 111)
      gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);

    /* fmt is a 3-digit flag string: <sizes><wgts><vals> */
    sprintf(fmtstr, "%03zu", fmt%1000);
    readsizes = (fmtstr[0] == '1');
    readwgts  = (fmtstr[1] == '1');
    readvals  = (fmtstr[2] == '1');
    numbering = 1;
    ncon      = (ncon == 0 ? 1 : ncon);
  }
  /* plain CSR text format: no header; dimensions/nnz are inferred from
     a pre-scan of the file */
  else {
    readsizes = 0;
    readwgts  = 0;

    gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

    /* with values present, tokens come in (index, value) pairs */
    if (readvals == 1 && nnz%2 == 1)
      gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
    if (readvals == 1)
      nnz = nnz/2;
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
  }

  mat = gk_csr_Create();

  mat->nrows = nrows;

  rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
  rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
  /* readvals==2 means "skip values"; otherwise default every value to 1.0 */
  if (readvals != 2)
    rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");

  if (readsizes)
    mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");

  if (readwgts)
    mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");

  /*----------------------------------------------------------------------
   * Read the sparse matrix file
   *---------------------------------------------------------------------*/
  /* turn the 1-based/0-based flag into an offset added to every index */
  numbering = (numbering ? - 1 : 0);
  for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* Read vertex sizes */
    if (readsizes) {
#ifdef __MSC__
      mat->rsizes[i] = (float)strtod(head, &tail);
#else
      mat->rsizes[i] = strtof(head, &tail);
#endif
      if (tail == head)
        gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
      if (mat->rsizes[i] < 0)
        errexit("The size for vertex %zd must be >= 0\n", i+1);
      head = tail;
    }

    /* Read vertex weights */
    if (readwgts) {
      for (l=0; l<ncon; l++) {
#ifdef __MSC__
        mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
        mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
        if (tail == head)
          errexit("The line for vertex %zd does not have enough weights "
                  "for the %d constraints.\n", i+1, ncon);
        if (mat->rwgts[i*ncon+l] < 0)
          errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        head = tail;
      }
    }

    /* Read the rest of the row: (column [, value]) tokens until the line ends */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;
      head = tail;

      if ((rowind[k] = ival + numbering) < 0)
        gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);

      /* track the largest column index seen, to infer ncols later */
      ncols = gk_max(rowind[k], ncols);

      if (readvals == 1) {
#ifdef __MSC__
        fval = (float)strtod(head, &tail);
#else
        fval = strtof(head, &tail);
#endif
        if (tail == head)
          gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
        head = tail;

        rowval[k] = fval;
      }
      k++;
    }
    rowptr[i+1] = k;
  }

  if (format == GK_CSR_FMT_METIS) {
    ASSERT(ncols+1 == mat->nrows);
    mat->ncols = mat->nrows;
  }
  else {
    mat->ncols = ncols+1;
  }

  if (k != nnz)
    gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
                       "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);

  gk_fclose(fpin);

  gk_free((void **)&line, LTERM);

  return mat;
}
/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
\param mat is the matrix to be written,
\param filename is the name of the output file.
\param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL.
\param writevals is either 1 or 0 indicating if the values will be
written or not. This is only applicable when GK_CSR_FMT_CSR
is used.
\param numbering is either 1 or 0 indicating if the internal 0-based
numbering will be shifted by one or not during output. This
is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t i, j;
  FILE *fpout;

  /* binary row-based format: dump dimensions, rowptr, rowind, and
     (optionally) rowval verbatim; a filename is mandatory here */
  if (format == GK_CSR_FMT_BINROW) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
    fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
    if (writevals)
      fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);

    gk_fclose(fpout);
    return;
  }

  /* binary column-based format: same layout using the column view */
  if (format == GK_CSR_FMT_BINCOL) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
    fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
    if (writevals)
      fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);

    gk_fclose(fpout);
    return;
  }

  /* text formats: write to the given file, or stdout when filename is NULL */
  if (filename)
    fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
  else
    fpout = stdout;

  /* CLUTO format always has a header, values, and 1-based numbering */
  if (format == GK_CSR_FMT_CLUTO) {
    fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
    writevals = 1;
    numbering = 1;
  }

  /* one line per row: "col [val] col [val] ..." */
  for (i=0; i<mat->nrows; i++) {
    for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
      fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
      if (writevals)
        fprintf(fpout, " %f", mat->rowval[j]);
    }
    fprintf(fpout, "\n");
  }
  if (filename)
    gk_fclose(fpout);
}
/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning takes place
    by analyzing the row structure of the matrix. The pruning takes place
by removing rows/columns but it does not affect the numbering of the
remaining rows/columns.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param minf is the minimum number of rows (columns) that a column (row) must
be present in order to be kept,
\param maxf is the maximum number of rows (columns) that a column (row) must
be present at in order to be kept.
\returns the pruned matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* allocate at the original nnz; the pruned matrix can only be smaller */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* count the number of rows each column appears in */
      collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      /* turn counts into keep (1) / drop (0) flags */
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      /* copy only the entries whose column survived; column ids are kept
         unchanged (no renumbering) */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep only rows whose length is within [minf, maxf]; dropped rows
         become empty but keep their row number */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight entries whose
sum accounts for a certain fraction of the overall weight of the
row/column.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
       of the matrix will be pruned,
\param norm indicates the norm that will be used to aggregate the weights
and possible values are 1 or 2,
\param fraction is the fraction of the overall norm that will be retained
by the kept entries.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
  ssize_t i, j, nnz;
  int nrows, ncols, ncand, maxlen=0;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval, rsum, tsum;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* allocate at the original nnz; filtering can only shrink the matrix */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      /* nrowptr starts as a copy of rowptr and is used as a per-row
         insertion cursor while entries are scattered back row-wise */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++)
        maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++) {
          /* gather column i's entries and its total (1- or 2-) norm mass */
          for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
            cand[ncand].val = colind[j];
            cand[ncand].key = colval[j];
            tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* keep the heaviest entries until `fraction` of the mass is covered,
             scattering each kept entry back into its row.
             NOTE(review): different columns can update nrowptr[] cursors of
             the same row concurrently here -- this looks like a data race
             under OpenMP; confirm against upstream GKlib. */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[nrowptr[cand[j].val]] = i;
            nrowval[nrowptr[cand[j].val]] = cand[j].key;
            nrowptr[cand[j].val]++;
          }
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      for (i=0; i<nrows; i++)
        maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<nrows; i++) {
          /* gather row i's entries and its total norm mass */
          for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
            cand[ncand].val = rowind[j];
            cand[ncand].key = rowval[j];
            tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* keep the heaviest prefix; rows are independent so this loop
             writes only into row i's own slots */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[rowptr[i]+j] = cand[j].val;
            nrowval[rowptr[i]+j] = cand[j].key;
          }
          nrowptr[i+1] = rowptr[i]+j;
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact nrowind/nrowval */
      nrowptr[0] = nnz = 0;
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i+1] = nnz;
      }

      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight top-K entries
along each row/column and those entries whose weight is greater than
a specified value.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
       of the matrix will be pruned,
\param topk is the number of the highest weight entries to keep.
\param keepval is the weight of a term above which will be kept. This
is used to select additional terms past the first topk.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* allocate at the original nnz; filtering can only shrink the matrix.
     fix: the allocation-failure messages previously said "gk_csr_LowFilter"
     (copy/paste残); they now identify this routine */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_TopKPlusFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_TopKPlusFilter: cand");

      /* nrowptr starts as a copy of rowptr and serves as a per-row
         insertion cursor while kept entries are scattered back row-wise */
      gk_zcopy(nrows+1, rowptr, nrowptr);
      for (i=0; i<ncols; i++) {
        /* gather and sort column i's entries by decreasing value */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* always keep the top-k entries ... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ... plus any further entries whose weight is >= keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_TopKPlusFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* gather and sort row i's entries by decreasing value */
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* keep the top-k entries, then any further ones >= keepval */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);

      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the terms whose contribution to
the total length of the document is greater than a user-supplied multiple
over the average.
This routine assumes that the vectors are normalized to be unit length.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
       of the matrix will be pruned,
\param zscore is the multiplicative factor over the average contribution
to the length of the document.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t i, j, nnz;
  int nrows;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  float *rowval, *nrowval, avgwgt;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* allocate at the original nnz; filtering can only shrink the matrix */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      gk_errexit(SIGERR, "This has not been implemented yet.\n");
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* threshold = zscore * (average per-entry contribution for the row).
           NOTE(review): avgwgt divides by the row length, so an empty row
           would divide by zero -- presumably rows are non-empty; confirm. */
        avgwgt = zscore/(rowptr[i+1]-rowptr[i]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] > avgwgt) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
As a result of the compaction, the column numbers are renumbered.
The compaction operation is done in place and only affects the row-based
representation of the matrix.
The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t j;
  int ncompacted;
  int *remap;
  gk_ikv_t *counts;
  ssize_t *rowptr = mat->rowptr;
  int *rowind     = mat->rowind;
  int nrows       = mat->nrows;
  int ncols       = mat->ncols;

  remap  = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");
  counts = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");

  /* tally how many nonzeros each column has, remembering its original id */
  for (j=0; j<ncols; j++) {
    counts[j].key = 0;
    counts[j].val = j;
  }
  for (j=0; j<rowptr[nrows]; j++)
    counts[rowind[j]].key++;

  /* order columns by decreasing frequency */
  gk_ikvsortd(ncols, counts);

  /* assign new ids to the non-empty columns only; once an empty column is
     reached, all remaining ones are empty as well */
  ncompacted = 0;
  for (j=0; j<ncols; j++) {
    if (counts[j].key <= 0)
      break;
    remap[counts[j].val] = ncompacted++;
  }

  /* renumber the column indices in place */
  for (j=0; j<rowptr[nrows]; j++)
    rowind[j] = remap[rowind[j]];

  mat->ncols = ncompacted;

  gk_free((void **)&remap, &counts, LTERM);
}
/*************************************************************************/
/*! Sorts the indices in increasing order
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
indices to sort.
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  /* select the row- or column-based view to sort */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");

      n   = mat->nrows;
      ptr = mat->rowptr;
      ind = mat->rowind;
      val = mat->rowval;
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");

      n   = mat->ncols;
      ptr = mat->colptr;
      ind = mat->colind;
      val = mat->colval;
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    /* nn = longest row/column; computed once and shared by all threads */
    #pragma omp single
    for (i=0; i<n; i++)
      nn = gk_max(nn, ptr[i+1]-ptr[i]);

    cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
    tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");

    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      /* stage the entries; k becomes 1 if any inversion is detected,
         so already-sorted rows/columns are left untouched */
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1; /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        tval[j-ptr[i]]     = val[j];
      }
      if (k) {
        /* sort by index and permute the values accordingly */
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          val[j] = tval[cand[j-ptr[i]].val];
        }
      }
    }

    gk_free((void **)&cand, &tval, LTERM);
  }
}
/*************************************************************************/
/*! Creates a row/column index from the column/row data.
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
           will be created.
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
  /* 'f' stands for forward, 'r' stands for reverse */
  ssize_t i, j, k, nf, nr;
  ssize_t *fptr, *rptr;
  int *find, *rind;
  float *fval, *rval;

  /* Pick the existing (forward) view as the source and (re)allocate the
     requested (reverse) view as the destination. */
  switch (what) {
    case GK_CSR_COL:
      nf   = mat->nrows;
      fptr = mat->rowptr;
      find = mat->rowind;
      fval = mat->rowval;

      if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
      if (mat->colind) gk_free((void **)&mat->colind, LTERM);
      if (mat->colval) gk_free((void **)&mat->colval, LTERM);

      nr   = mat->ncols;
      rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;

    case GK_CSR_ROW:
      nf   = mat->ncols;
      fptr = mat->colptr;
      find = mat->colind;
      fval = mat->colval;

      if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
      if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
      if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);

      nr   = mat->nrows;
      rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  /* count the entries of each reverse row/column ... */
  for (i=0; i<nf; i++) {
    for (j=fptr[i]; j<fptr[i+1]; j++)
      rptr[find[j]]++;
  }
  /* ... and turn the counts into a prefix-sum pointer array */
  MAKECSR(i, nr, rptr);

  /* NOTE(review): the threshold below (avg > 6 entries per reverse
     row/column) selects between a two-pass fill (indices, then values)
     and a single combined pass; presumably a cache/locality heuristic —
     confirm against upstream GKlib before tuning. */
  if (rptr[nr] > 6*nr) {
    for (i=0; i<nf; i++) {
      for (j=fptr[i]; j<fptr[i+1]; j++)
        rind[rptr[find[j]]++] = i;
    }
    /* the fill pass advanced rptr by one slot per entry; shift it back */
    SHIFTCSR(i, nr, rptr);

    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rval[rptr[find[j]]++] = fval[j];
      }
      SHIFTCSR(i, nr, rptr);
    }
  }
  else {
    if (fval) {
      /* combined pass: fill index and value together */
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) {
          k = find[j];
          rind[rptr[k]]   = i;
          rval[rptr[k]++] = fval[j];
        }
      }
    }
    else {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rind[rptr[find[j]]++] = i;
      }
    }
    SHIFTCSR(i, nr, rptr);
  }
}
/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit
    length.
    \param mat the matrix itself,
    \param what indicates what will be normalized and is obtained by
           specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
    \param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
  ssize_t i, j;
  int n;
  ssize_t *ptr;
  float *val, sum;

  /* row normalization (only if row values are present) */
  if (what&GK_CSR_ROW && mat->rowval) {
    n   = mat->nrows;
    ptr = mat->rowptr;
    val = mat->rowval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        /* accumulate the selected norm of row i */
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j]; /* assume val[j] > 0 */
        }
        /* scale the row; rows with a non-positive norm are left as-is */
        if (sum > 0) {
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }

  /* column normalization (only if column values are present) */
  if (what&GK_CSR_COL && mat->colval) {
    n   = mat->ncols;
    ptr = mat->colptr;
    val = mat->colval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++)
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j];
        if (sum > 0) {
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }
}
/*************************************************************************/
/*! Applies different row scaling methods.
    \param mat the matrix itself,
    \param type indicates the type of row scaling. Possible values are:
           GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          /* maxtf = largest |value| in row i.
             NOTE(review): for an empty row this reads rowval[rowptr[i]],
             i.e. the first entry of the next row — presumably rows are
             non-empty here; confirm. */
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .5 + .5*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .1 + .9*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            /* sign-preserving transform; zeros stay zero */
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
          }
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            /* x^.25 computed as sqrt(sqrt(x)) */
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
          }
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
          }
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
          }
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
          }
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        /* log(x)/log(2) == log2(x); factor hoisted out of the loop */
        double logscale = 1.0/log(2.0);

        /* flat loop over all nonzeros (no per-row structure is needed) */
        #pragma omp for schedule(static,32)
        for (i=0; i<rowptr[nrows]; i++) {
          if (rowval[i] != 0.0)
            rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
        }

#ifdef XXX
        /* disabled per-row variant of the same transform */
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
              //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
          }
        }
#endif
      }
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* collen[c] = number of rows in which column c appears (document freq.) */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      nnzcols = 0;
      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static) reduction(+:nnzcols)
        for (i=0; i<ncols; i++)
          nnzcols += (collen[i] > 0 ? 1 : 0);

        /* NOTE(review): the two statements below are inside the parallel
           region but not in any worksharing construct, so EVERY thread
           executes them — bgfreq is written by all threads (same value,
           since nnzcols is final after the implicit barrier above) and the
           printf fires once per thread.  Consider moving them under
           '#pragma omp single'; confirm intent upstream. */
        bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
        printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}
/*************************************************************************/
/*! Computes the sums of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  int n;
  ssize_t *ptr;
  float *val, *sums;

  /* select the view to sum over and (re)allocate the result array */
  if (what == GK_CSR_ROW) {
    n   = mat->nrows;
    ptr = mat->rowptr;
    val = mat->rowval;
    if (mat->rsums)
      gk_free((void **)&mat->rsums, LTERM);
    sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
  }
  else if (what == GK_CSR_COL) {
    n   = mat->ncols;
    ptr = mat->colptr;
    val = mat->colval;
    if (mat->csums)
      gk_free((void **)&mat->csums, LTERM);
    sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
  }
  else {
    gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
    return;
  }

  /* each row/column sum is independent, so the loop parallelizes trivially */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (ssize_t i=0; i<n; i++)
    sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}
/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           squared norms to compute.  The result is stored in mat->rnorms
           (resp. mat->cnorms), replacing any previously computed values.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);

      /* BUGFIX: the allocation messages used to say "gk_csr_ComputeSums",
         a copy-paste left-over that mis-attributed allocation failures. */
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;

    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;

    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  /* norms[i] = dot(row_i, row_i); each entry is independent */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
    \param mat the matrix itself. The routine assumes that the indices
           are sorted in increasing order.
    \param i1 is the first row/column,
    \param i2 is the second row/column,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
           objects between the similarity will be computed,
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* locate the two sparse vectors that will be compared */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* from here on, i1/i2 are reused as cursors into the two vectors
     (the pointers above were already offset by the original row ids) */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      /* merge the two sorted index lists; sim accumulates the dot product,
         stat1/stat2 the squared norms of the two vectors.
         BUGFIX: this loop previously ran while (i1<nind1 && i2<nind2),
         which made the two exhaustion branches below dead code and silently
         dropped the tail of the longer vector from stat1/stat2, yielding
         too-large similarities.  It now runs until BOTH are exhausted. */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else /* extended Jaccard: dot/(|v1|^2+|v2|^2-dot) */
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
    case GK_CSR_AMIN:
      /* the merge is identical for both: sim accumulates sum(min(v1,v2)),
         stat1/stat2 the element sums; only the final normalization differs
         (the two cases were previously duplicated verbatim).
         BUGFIX: same '&&' -> '||' loop-condition fix as above, so the tails
         of both vectors are included in stat1/stat2. */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_MIN)
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      else /* asymmetric MIN: normalize by the first vector's sum only */
        sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
    similarity.
    \param mat the matrix itself
    \param nqterms is the number of columns in the query
    \param qind is the list of query columns
    \param qval is the list of corresponding query weights
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \param nsim is the maximum number of requested most similar rows.
           If -1 is provided, then everything is returned unsorted.
    \param minsim is the minimum similarity of the requested most
           similar rows
    \param hits is the result set. This array should be at least
           of length nsim.
    \param i_marker is an array of size equal to the number of rows
           whose values are initialized to -1. If NULL is provided
           then this array is allocated and freed internally.
    \param i_cand is an array of size equal to the number of rows.
           If NULL is provided then this array is allocated and freed
           internally.
    \returns the number of identified most similar rows, which can be
             smaller than the requested number of nnbrs in those cases
             in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
        float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
        int *i_marker, gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  /* an empty query matches nothing */
  if (nqterms == 0)
    return 0;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  colptr = mat->colptr;  /* the inverted (column) index drives the search */
  colind = mat->colind;
  colval = mat->colval;

  /* marker[row] = position of 'row' in cand[] (-1 if not yet a candidate);
     caller-supplied work arrays are reused, otherwise allocated here */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_COS:
      /* accumulate the dot product of each candidate row with the query.
         NOTE(review): the scores are never divided by the row norms, so
         this is true cosine only if the rows are unit-length — confirm
         that callers normalize the matrix (gk_csr_Normalize) first. */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      /* same dot-product accumulation as COS ... */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      /* ... then extended Jaccard: dot/(|r|^2+|q|^2-dot); reads mat->rnorms,
         which must have been computed beforehand (gk_csr_ComputeSquaredNorms) */
      rnorms = mat->rnorms;
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      /* accumulate sum(min(row, query)) per candidate row */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* normalize by sum(row)+sum(query)-overlap; reads mat->rsums, which
         must have been computed beforehand (gk_csr_ComputeSums) */
      rsums = mat->rsums;
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* normalize by the query's sum only */
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* reset the markers (so the work arrays can be reused by the caller)
     and prune the hits that are below minsim */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;  /* return everything, unsorted */
  }
  else {
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);  /* partial select of the top nsim */
    gk_fkvsortd(nsim, cand);            /* then sort them by decreasing score */
  }

  gk_fkvcopy(nsim, cand, hits);

  /* free only what was allocated here */
  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
|
barrier.c | // PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output
#include <stdio.h>
#include <unistd.h>
#include <omp.h>
int main()
{
#pragma omp parallel
    {
        const int thread = omp_get_thread_num();

        /* Stagger the threads so they arrive at the first barrier in
           reverse id order (thread 3 first, thread 0 last). */
        switch (thread)
        {
            case 0: sleep(3); break;
            case 1: sleep(2); break;
            case 2: sleep(1); break;
            case 3: sleep(0); break;
        }

        // ordering: 3,2,1,0
        printf("Thread %i: before Barrier\n", thread);
#pragma omp barrier

        /* Round-robin: on each pass exactly one thread prints, and the
           barrier keeps the others waiting, forcing the order 0,1,2,3. */
        for (int turn = 0; turn < omp_get_num_threads(); turn++)
        {
            if (turn == thread)
            {
                printf("Thread %i: after Barrier\n", thread);
            }
#pragma omp barrier
        }
    }
}
|
sor_jacobi.c | #include <stdio.h>
#include <math.h>
#include <string.h>
#include <omp.h>
//#define M 500
double compute_error(double solution[][M + 2], double u[][M + 2], const int m);
int sor(double unew[][M + 2], double uold[][M + 2], double solution[][M + 2], const double tol, const int m, const double h2, const double omega);
int main(void)
{
    /*
      Solution of Laplace's Equation.
      ==============================
      ! ***
      ! *** Uxx + Uyy = 0
      ! *** 0 <= x <= pi, 0 <= y <= pi
      ! *** U(x,pi) = sin(x), U(x,0) = U(0,y) = U(pi,y) = 0
      ! ***
      ! *** then U(x,y) = (sinh(y)*sin(x)) / sinh(pi)
      ! ***
      ! *** Should converge with
      ! *** tol = 0.001 and M = 20 in 42 iterations.
      ! *** and with tol = 0.001 and M = 100 in 198 iterations.
    */
    const int m = M;
    printf("%d\n", m);

    double unew[M + 2][M + 2] = {{ 0 }};
    double solution[M + 2][M + 2] = {{ 0 }};
    double uold[M + 2][M + 2] = {{ 0 }};
    int i, j;

    const double begin = omp_get_wtime();
    const double pi = 4.0 * atan(1.0);
    const double h = pi / (m + 1);   /* grid spacing */

    /* BUGFIX: this used to be (1 / (m + 1)) * (1 / (m + 1)), which is
       integer division and always evaluated to 0 for m >= 1.  Note that
       h2 is currently unused by sor(), so the program's output does not
       change — but the stored value is now the one the formula intends. */
    const double h2 = (1.0 / (m + 1)) * (1.0 / (m + 1));

    /* top boundary: U(x,pi) = sin(x) */
    for(i = 0; i < m + 2; ++i)
    {
        uold[i][M + 1] = sin(i * h);
    }

    /* initial guess: linear ramp from the bottom (0) to the top boundary */
    for(i = 0; i < m + 2; ++i)
    {
        for(j = 0; j < m + 1; ++j)
        {
            uold[i][j] = j * h * uold[i][M + 1];
        }
    }

    /* analytic solution, used for the max-norm error check */
    for(i = 0; i < m + 2; ++i)
    {
        for(j = 0; j < m + 2; ++j)
        {
            solution[i][j] = sinh(j * h) * sin(i * h) / sinh(pi);
        }
    }

    /* optimal SOR relaxation factor for this grid (currently unused by sor) */
    const double omega = 2.0 / ( 1.0 + sin(pi / (m + 1)) );
    const double tol = 0.001;

    const int iters = sor(unew, uold, solution, tol, m, h2, omega);
    const double end = omp_get_wtime();

    printf(" \n");
    printf(" Omega = %f\n", omega);
    printf(" It took %d iterations.\n", iters);
    printf("Total time = %f\n\n\n", end - begin);

    return 0;
}
/* Returns the max-norm (L-infinity) difference between 'solution' and 'u'
   over the interior m x m grid points (boundaries excluded). */
double compute_error(double solution[][M + 2], double u[][M + 2], const int m)
{
    double max_diff = 0.0;

    for(int row = 1; row <= m; ++row)
    {
        for(int col = 1; col <= m; ++col)
        {
            const double diff = fabs(solution[row][col] - u[row][col]);
            if(diff > max_diff)
            {
                max_diff = diff;
            }
        }
    }

    return max_diff;
}
/* Iterates Jacobi sweeps (two per pass, ping-ponging between uold and unew)
   until the max-norm error against 'solution' drops to 'tol'.  Returns the
   number of iterations performed.
   NOTE(review): despite the name, the update below is a plain Jacobi
   average — 'omega' and 'h2' are accepted for interface compatibility but
   not used; confirm whether a true SOR update was intended. */
int sor(double unew[][M + 2], double uold[][M + 2], double solution[][M + 2], const double tol, const int m, const double h2, const double omega)
{
    (void)h2;
    (void)omega;

    /* copy the fixed boundary values into unew once up front */
    #pragma omp parallel for
    for(int i = 0; i < m + 2; ++i)
    {
        unew[i][m + 1] = uold[i][m + 1];
        unew[m + 1][i] = uold[m + 1][i];
        unew[i][0] = uold[i][0];
        unew[0][i] = uold[0][i];
    }

    int iters = 0;
    double error = compute_error(solution, uold, m);

    // Single parallel region
    #pragma omp parallel
    {
        // Do SOR until 'tol' is satisfied -- loop accounts for two iterations (pseudo-unrolling)
        while(error > tol)
        {
            /* First iteration.
               BUGFIX: the loop variables are now declared inside the loops;
               previously 'j' was a shared function-scope variable, so every
               thread raced on it inside the worksharing loops. */
            #pragma omp for schedule(static)
            for(int i = 1; i < m + 1; ++i)
            {
                for(int j = 1; j < m + 1; ++j)
                {
                    unew[i][j] = (uold[i+1][j] + uold[i-1][j] +
                                  uold[i][j+1] + uold[i][j-1]) * 0.25;
                }
            }
            /* implicit barrier at the end of the omp for */

            // Second iteration (avoid copy by "swapping" buffers)
            #pragma omp for schedule(static)
            for(int i = 1; i < m + 1; ++i)
            {
                for(int j = 1; j < m + 1; ++j)
                {
                    uold[i][j] = (unew[i+1][j] + unew[i-1][j] +
                                  unew[i][j+1] + unew[i][j-1]) * 0.25;
                }
            }
            /* implicit barrier at the end of the omp for */

            /* BUGFIX: this block used '#pragma omp master', which has no
               barrier — the other threads could re-test 'error' before (or
               while) it was updated, a data race that can make threads
               disagree on termination and deadlock at the worksharing
               barriers.  'single' carries an implicit barrier, so every
               thread observes the updated 'error' before the next test. */
            #pragma omp single
            {
                iters = iters + 2;
                if(iters % 20 == 0)
                {
                    error = compute_error(solution, uold, m);
                }
            }
        }
    }

    return iters;
}
|
conv_fix.c |
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "lib/stb_image.h"
#include "lib/stb_image_write.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>      /* clock_gettime, struct timespec */
#include <sys/time.h>
#include <omp.h>
/* Applies a 3x3 edge-detection kernel to a grayscale image in parallel and
   writes both the gray input and the filtered result as PNGs. */
int main(){
    // Read the image file as a single (grayscale) channel.
    int width, height, channels, dimensions;
    unsigned char* data = stbi_load("./img/lena2048.jpg", &width, &height, &channels, 1);
    if(data == NULL){
        // BUGFIX: previously a failed load was passed straight to
        // stbi_write_png and the convolution, dereferencing NULL.
        fprintf(stderr, "Could not load ./img/lena2048.jpg\n");
        return 1;
    }

    // Write the gray version for comparison.
    stbi_write_png("./img/gray-lena.png", width, height, 1, data, width);
    dimensions = width * height;

    // The buffer to create the output image from (zero-initialized).
    unsigned char* output_data = calloc(dimensions, sizeof *output_data);

    // 3x3 Laplacian-style edge-detection kernel, row-major.
    int kernel[9] = {-1, -1, -1, -1, 8, -1, -1, -1, -1};

    struct timespec start, end;

    // Start the timer.
    clock_gettime(CLOCK_MONOTONIC_RAW, &start);

    // Each pixel is independent, so the loop parallelizes trivially.
    // Neighbor indices are declared inside the loop body, so no
    // private() clause is needed.
    #pragma omp parallel for num_threads(4) shared(kernel, data, output_data)
    for(int i = 0; i < dimensions; i++){
        // Accumulator for the weighted sum at pixel i.
        int pixel = 0;

        // Center value.
        pixel += kernel[4] * data[i];

        // Center-left (skipped when i is in the first column; note that
        // for i == 0, centerLeft is -1 and -1 % width is -1 in C99).
        int centerLeft = i - 1;
        if(centerLeft % width != width - 1 && centerLeft % width >= 0){
            pixel += kernel[3] * data[centerLeft];
        }

        // Center-right (skipped when i is in the last column).
        int centerRight = i + 1;
        if(centerRight % width != 0){
            pixel += kernel[5] * data[centerRight];
        }

        // Top (skipped in the first row).
        int top = i - width;
        if(top >= 0){
            pixel += kernel[1] * data[top];
        }

        // Top-left (skipped in the first row and the first column).
        int topLeft = top - 1;
        if(topLeft >= 0 && topLeft % width != width - 1){
            pixel += kernel[0] * data[topLeft];
        }

        // Top-right (skipped in the first row and the last column).
        int topRight = top + 1;
        if(topRight >= 0 && topRight % width != 0){
            pixel += kernel[2] * data[topRight];
        }

        // Bottom (skipped in the last row).
        int bottom = i + width;
        if(bottom < dimensions){
            pixel += kernel[7] * data[bottom];
        }

        // Bottom-left (skipped in the last row and the first column).
        int bottomLeft = bottom - 1;
        if(bottomLeft < dimensions && bottomLeft % width != width - 1){
            pixel += kernel[6] * data[bottomLeft];
        }

        // Bottom-right (skipped in the last row and the last column).
        int bottomRight = bottom + 1;
        if(bottomRight < dimensions && bottomRight % width != 0){
            pixel += kernel[8] * data[bottomRight];
        }

        // Clamp to the valid 8-bit range.
        if(pixel > 255)
            pixel = 255;
        else if(pixel < 0)
            pixel = 0;

        output_data[i] = (unsigned char) pixel;
    }

    // Stop the timer.
    clock_gettime(CLOCK_MONOTONIC_RAW, &end);

    // BUGFIX: delta_us was a uint64_t printed with %llu (undefined behavior
    // on LP64 targets where uint64_t is unsigned long); unsigned long long
    // matches the %llu conversion on every platform.  The uninitialized
    // 'avg' accumulator (read-before-write, also UB) was dead code and has
    // been removed.
    unsigned long long delta_us =
        (unsigned long long)((end.tv_sec - start.tv_sec) * 1000000LL +
                             (end.tv_nsec - start.tv_nsec) / 1000);
    printf("It took %llu microseconds.\n", delta_us);

    // Write the filtered image to file and release the buffers.
    stbi_write_png("./img/lena-border.png", width, height, 1, output_data, width);
    stbi_image_free(data);
    free(output_data);

    return 0;
}
GB_unaryop__minv_uint64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_int32
// op(A') function: GB_tran__minv_uint64_int32
// C type: uint64_t
// A type: int32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_UNSIGNED ((uint64_t) Ax [p], 64) for all p < anz.
// (Auto-generated; the cast and operator are fixed by the GB_* macros above.)
GrB_Info GB_unop__minv_uint64_int32
(
    uint64_t *restrict Cx,      // output array, written for p = 0..anz-1
    const int32_t *restrict Ax, // input array, read for p = 0..anz-1
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent, so a static schedule balances the work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int32 -> uint64, and apply the
// MINV operator.  (Auto-generated; the actual loop lives in the shared
// template GB_unaryop_transpose.c, specialized by the GB_* macros above.)
GrB_Info GB_tran__minv_uint64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
move_particle_utility.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pablo Becker
//
#if !defined(KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED)
#define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
///
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
///
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "convection_diffusion_application.h"
#include "convection_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
// Utility that convects a scalar unknown with Lagrangian particles moved
// along the velocity streamlines of a fixed Eulerian mesh (PFEM-2 style).
// TDim is the spatial dimension (2 or 3).
template< unsigned int TDim>
class MoveParticleUtilityScalarTransport
{
public:
// Spatial-search configuration for TDim: supplies the point, container,
// iterator and search-result types used by the dynamic-bins structure.
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
//typedef PointType::CoordinatesArrayType CoordinatesArrayType;
typedef typename Configure::ContainerType ContainerType;
//typedef Configure::PointerType PointerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
//typedef Configure::ResultPointerType ResultPointerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
// Vector of raw particle pointers; the pointed-to particles live in the
// single global storage mparticles_vector (no ownership here).
typedef PointerVector< Convection_Particle, Convection_Particle*, std::vector<Convection_Particle*> > ParticlePointerVector;
//typedef Configure::ContactPairType ContactPairType;
//typedef Configure::ContainerContactType ContainerContactType;
//typedef Configure::IteratorContactType IteratorContactType;
//typedef Configure::PointerContactType PointerContactType;
//typedef Configure::PointerTypeIterator PointerTypeIterator;
KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityScalarTransport);
//template<unsigned int TDim>
// Constructor.
// @param model_part                  model part holding the mesh, nodes and ProcessInfo
// @param maximum_number_of_particles per-element capacity of each half of the
//                                    double-sized particle-pointer buffers
//
// Reads the unknown/projection/velocity/mesh-velocity variables from the
// CONVECTION_DIFFUSION_SETTINGS stored in the ProcessInfo, renumbers the
// elements consecutively (so element Id-1 indexes the internal arrays),
// computes a nodal and an elemental characteristic size, allocates the global
// particle storage, and seeds the initial particles at elemental Gauss-point
// positions, initializing each particle scalar by shape-function interpolation.
MoveParticleUtilityScalarTransport(ModelPart& model_part, int maximum_number_of_particles)
: mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles) ,
mUnknownVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetUnknownVariable()) ,
mProjectionVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetProjectionVariable()) ,
mVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetVelocityVariable()) ,
mMeshVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetMeshVelocityVariable())
{
std::cout << "initializing moveparticle utility for scalar transport" << std::endl;
Check();
//storing water and air density and their inverses, just in case it is needed for the streamline integration
//loop in elements to change their ID to their position in the array. Easier to get information later.
//DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
ielem->SetId(ii+1);
}
mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
int node_id=0;
// we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
array_1d<double,3> position_node;
double distance=0.0;
position_node = pnode->Coordinates();
GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
//we loop all the nodes to check all the edges
const double number_of_neighbours = double(rneigh.size());
for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
{
array_1d<double,3> position_difference;
position_difference = inode->Coordinates() - position_node;
double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2));
//if (current_distance>distance)
// distance=current_distance;
distance += current_distance / number_of_neighbours;
}
// MEAN_SIZE actually stores the MEAN edge length around the node
// (the largest-edge variant is the commented-out code above).
pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance;
// NOTE(review): node_id is shared and written here by all threads without
// synchronization, so mlast_node_id below receives an arbitrary node's id —
// confirm whether the intent was the id of the last (largest-id) node.
node_id=pnode->GetId();
}
}
mlast_node_id=node_id;
//we also calculate the element mean size in the same way, for the courant number
//also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
double mElemSize;
array_1d<double,3> Edge(3,0.0);
// MEAN_SIZE of the element = length of its SHORTEST edge (all node pairs checked).
Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
mElemSize = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
mElemSize += Edge[d]*Edge[d];
for (unsigned int i = 2; i < (TDim+1); i++)
for(unsigned int j = 0; j < i; j++)
{
Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
double Length = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
Length += Edge[d]*Edge[d];
if (Length < mElemSize) mElemSize = Length;
}
mElemSize = sqrt(mElemSize);
ielem->GetValue(MEAN_SIZE) = mElemSize;
}
}
//matrix containing the position of the 4/15/45 particles that we will seed at the beggining
BoundedMatrix<double, 5*(1+TDim), 3 > pos;
BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
int particle_id=0;
mnelems = mr_model_part.Elements().size();
std::cout << "about to resize vectors" << std::endl;
//setting the right size to the vector containing the particles assigned to each element
//particles vector. this vector contains ALL the particles in the simulation.
mparticles_vector.resize(mnelems*mmaximum_number_of_particles);
//and this vector contains the current number of particles that are in each element (currently zero)
mnumber_of_particles_in_elems.resize(mnelems);
mnumber_of_particles_in_elems=ZeroVector(mnelems);
//when moving the particles, an auxiliary vector is necessary (to store the previous number)
mnumber_of_particles_in_elems_aux.resize(mnelems);
//each element will have a list of pointers to all the particles that are inside.
//this vector contains the pointers to the vector of (particle) pointers of each element.
mvector_of_particle_pointers_vectors.resize(mnelems);
//int artz;
//std::cin >> artz;
int i_int=0; //careful! it's not the id, but the position inside the array!
std::cout << "about to create particles" << std::endl;
//now we seed: LOOP IN ELEMENTS
//using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
moffset=0;
//Convection_Particle& firstparticle =mparticles_vector[0];
for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2, &firstparticle );
//ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//now we link the mpointers_to_particle_pointers_vectors to the corresponding element
//mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
//now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
//for(int j=0; j<(mmaximum_number_of_particles*2); j++)
// particle_pointers.push_back(&firstparticle);
mvector_of_particle_pointers_vectors[ii] = ParticlePointerVector( mmaximum_number_of_particles*2 );
ParticlePointerVector& particle_pointers = mvector_of_particle_pointers_vectors[ii];
//int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles = mnumber_of_particles_in_elems[ii];
number_of_particles=0;
Geometry< Node<3> >& geom = ielem->GetGeometry();
//unsigned int elem_id = ielem->Id();
//mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED
ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
//now we seed the particles in the current element
for (unsigned int j = 0; j < pos.size1(); j++)
{
++particle_id;
Convection_Particle& pparticle = mparticles_vector[particle_id-1];
pparticle.X()=pos(j,0);
pparticle.Y()=pos(j,1);
pparticle.Z()=pos(j,2);
pparticle.GetEraseFlag()=false;
// the particle scalar is interpolated from the nodal unknown with the
// shape functions evaluated at the seeding position
float & scalar1= pparticle.GetScalar1();
scalar1=0.0;
for (unsigned int k = 0; k < (TDim+1); k++)
{
scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mUnknownVar);
}
particle_pointers(j) = &pparticle;
number_of_particles++ ;
}
++i_int;
}
m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
KRATOS_WATCH(m_nparticles);
//KRATOS_WATCH(mlast_elem_id);
mparticle_printing_tool_initialized=false;
//std::cin >> artz;
}
// Destructor: all storage members (particle vectors, bins pointer) release
// themselves; nothing to free manually.
virtual ~MoveParticleUtilityScalarTransport()
{}
void MountBin()
{
KRATOS_TRY
//copy the elements to a new container, as the list will
//be shuffled duringthe construction of the tree
ContainerType& rElements = mr_model_part.ElementsArray();
IteratorType it_begin = rElements.begin();
IteratorType it_end = rElements.end();
//const int number_of_elem = rElements.size();
typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
paux.swap(mpBinsObjectDynamic);
//BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
std::cout << "finished mounting Bins" << std::endl;
KRATOS_CATCH("")
}
void CalculateVelOverElemSize()
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(mVelocityVar);
vector_mean_velocity *= nodal_weight;
const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
ielem->GetValue(MEAN_VEL_OVER_ELEM_SIZE) = mean_velocity / ( ielem->GetValue(MEAN_SIZE) );
}
}
KRATOS_CATCH("")
}
//name self explained
void ResetBoundaryConditions()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(mUnknownVar))
{
inode->FastGetSolutionStepValue(mUnknownVar)=inode->GetSolutionStepValue(mUnknownVar,1);
}
}
}
KRATOS_CATCH("")
}
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mUnknownVar) - inode->FastGetSolutionStepValue(mProjectionVar) ;
}
}
KRATOS_CATCH("")
}
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
//to move all the particles across the streamlines. heavy task!
// Convects every particle along the velocity field (substepped streamline
// integration, done inside MoveParticle) and re-buckets each particle into
// the element where it lands. The per-element pointer arrays are double
// sized: particles are read from the half selected by 'offset' (previous
// step) and written into the other half selected by 'post_offset'.
void MoveParticles()
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
//KRATOS_WATCH(offset)
bool even_timestep;
if (offset!=0) even_timestep=false;
else even_timestep=true;
const int post_offset = mmaximum_number_of_particles*int(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
//KRATOS_WATCH(post_offset)
double delta_t = CurrentProcessInfo[DELTA_TIME];
array_1d<double,TDim+1> N;
// maximum number of candidate elements returned by one bins search
const unsigned int max_results = 10000;
//double integration_distance= 2.0;
// substepping parameters for the streamline integration (members)
max_nsubsteps = 10;
max_substep_dt=delta_t/double(max_nsubsteps);
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
// save the previous counters and reset the current ones; the moved
// particles will be re-counted into mnumber_of_particles_in_elems below
int & number_of_particles = mnumber_of_particles_in_elems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
mnumber_of_particles_in_elems_aux[ii]=number_of_particles;
mnumber_of_particles_in_elems[ii]=0;
//we reset the local vectors for a faster access;
}
}
std::cout << "convecting particles" << std::endl;
//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
// NOTE(review): this barrier sits outside any parallel region — confirm it
// is intentional (an orphaned barrier is a no-op on the master thread).
#pragma omp barrier
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
// thread-local scratch: search results buffer and the chain of elements
// crossed by the previous particle (reused as a first guess for the next)
ResultContainerType results(max_results);
GlobalPointersVector< Element > elements_in_trajectory;
elements_in_trajectory.resize(20);
for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
{
//for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
//{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
const int old_element_id = old_element->Id();
ParticlePointerVector& old_element_particle_pointers = mvector_of_particle_pointers_vectors(old_element_id-1);
if ( (results.size()) !=max_results)
results.resize(max_results);
unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)
for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
{
Convection_Particle & pparticle = old_element_particle_pointers[offset+ii];
Element::Pointer pcurrent_element( *old_element.base() );
ResultIteratorType result_begin = results.begin();
bool & erase_flag=pparticle.GetEraseFlag();
if (erase_flag==false){
MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //removed N from the arguments, I don't need it since the particle ALWAYS starts at a node and I don't care where it ends
// pcurrent_element now points at the element where the particle landed
const int current_element_id = pcurrent_element->Id();
int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
//int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);
if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
{
{
ParticlePointerVector& current_element_particle_pointers = mvector_of_particle_pointers_vectors(current_element_id-1);
// several threads may drop particles into the same target element,
// so the counter check + write must be atomic as a pair
#pragma omp critical
{
if (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
{
current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
number_of_particles_in_current_elem++ ;
if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
KRATOS_WATCH("MAL");
}
else
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
else
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
}
/*
//now we pass info from the local vector to the elements:
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
old_element->GetValue(NUMBER_OF_BED_PARTICLES) = mnumber_of_particles_in_elems(ii);
//old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
}
}
*/
//after having changed everything we change the status of the modd_timestep flag:
moffset = post_offset;; //
KRATOS_CATCH("")
}
// Explicit Lagrangian -> Eulerian projection: each particle adds a weighted
// contribution of its scalar to the nodes of the element that contains it
// (weight = shape function squared). Node values are then normalized by the
// accumulated weights (stored in YP); nodes that received no contribution
// fall back to the previous time-step value of the unknown.
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//const double delta_t =CurrentProcessInfo[DELTA_TIME];
// threshold below which a particle weight is clamped to ~0 (currently 0.0,
// so the clamp only triggers for negative/zero weights)
const double threshold= 0.0/(double(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
//we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
//though we could've use a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
// reset the projection variable and the weight accumulator on every node
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
// flattened (x,y,z) coordinates of the element nodes, for fast access
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
}
///KRATOS_WATCH(ielem->Id())
///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
// NOTE(review): the "-1 if water, +1 if air" remark appears inherited
// from a multiphase version; here this is simply the convected scalar.
const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_WATCH(N);
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
//double sq_dist = 0;
//these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
// quadratic shape-function weighting favours particles close to the node
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);}
else
{
nodes_addedweights[j]+= weight;
//nodes_addedtemp[j] += weight * particle_temp;
nodes_added_scalar1[j] += weight*particle_scalar1;
}//
}
}
}
// scatter the elemental accumulations to the shared nodes; the node lock
// protects against concurrent writes from neighbouring elements
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
geom[i].UnSetLock();
}
}
}
// normalize: projected value = weighted sum / total weight (YP)
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
//inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
double & height = inode->FastGetSolutionStepValue(mProjectionVar);
height /=sum_weights; //resetting the density
}
else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); //resetting the temperature
}
}
}
KRATOS_CATCH("")
}
// Semi-implicit Lagrangian -> Eulerian projection: per element, a small
// (TDim+1)x(TDim+1) least-squares mass matrix built from the particle shape
// functions is inverted to recover nodal values, blended with a small lumped
// contribution to damp over/undershoots. As in the explicit version, node
// values are finally normalized by the accumulated weights (YP) and empty
// nodes fall back to the previous time-step unknown.
void TransferLagrangianToEulerianImp() //semi implicit
{
KRATOS_TRY
// ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
std::cout << "projecting info to mesh (semi implicit)" << std::endl;
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
//we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
//though we could've use a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
// reset the projection variable and the weight accumulator on every node
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
//creating a matrix for each of the problems.
BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
array_1d<double,(TDim+1)> rhs_scalar1;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
nodes_added_scalar1 = ZeroVector((TDim+1)); //resetting vectors
nodes_addedweights = ZeroVector((TDim+1)); //resetting vectors
mass_matrix = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
//mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
//mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
//mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
rhs_scalar1 = ZeroVector((TDim+1)); //resetting vectors
Geometry<Node<3> >& geom = ielem->GetGeometry();
const double elem_volume = geom.Area();
for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
}
///KRATOS_WATCH(ielem->Id())
///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
// NOTE(review): the "-1 if water, +1 if air" remark appears inherited
// from a multiphase version; here this is simply the convected scalar.
const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_WATCH(N);
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
double weight=N(j);
for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
mass_matrix(j,k) += weight*N(k);
rhs_scalar1[j] += weight * double(particle_scalar1);
//adding also a part with the lumped mass matrix to reduce overshoots and undershoots
if(true)
{
double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contrubtion
nodes_addedweights[j]+= this_particle_weight;
nodes_added_scalar1[j] += this_particle_weight*particle_scalar1;
}
}
}
}
//now we invert the matrix
// (TDim==2 -> 3x3 specialized inverse, TDim==3 -> generic 4x4 inverse)
BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
if(TDim==3)
InvertMatrix( mass_matrix, inverse_mass_matrix);
else
InvertMatrix3x3( mass_matrix, inverse_mass_matrix);
//and now compute the elemental contribution to the gobal system:
if(number_of_particles_in_elem>(TDim*3)) //otherwise it's impossible to define a correctly the gradients, therefore the results inside the element are useless.
{
for (int i=0 ; i!=(TDim+1); i++)
{
for (int j=0 ; j!=(TDim+1); j++)
{
nodes_added_scalar1[i] += inverse_mass_matrix(i,j)*rhs_scalar1[j]*elem_volume*(1.0/(double(1+TDim)));
}
}
//and also to the mass matrix. LUMPED (but for the contribution of the grandient at elemental level.
for (int i=0 ; i!=(TDim+1); i++)
nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim)));
}
// scatter the elemental accumulations to the shared nodes; the node lock
// protects against concurrent writes from neighbouring elements
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
geom[i].UnSetLock();
}
}
}
// normalize: projected value = weighted sum / total weight (YP)
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar1 = inode->FastGetSolutionStepValue(mProjectionVar);
scalar1 /=sum_weights; //resetting the density
}
else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1);
}
}
}
KRATOS_CATCH("")
}
// Applies the nodal correction (DELTA_SCALAR1, computed by
// CalculateDeltaVariables) to every active particle, interpolating the delta
// at the particle position — the particles themselves are NOT moved.
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
    KRATOS_TRY
    //std::cout << "updating particles" << std::endl;
    //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
    //(flag managed only by MoveParticles)

    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

    vector<unsigned int> element_partition;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();

            int & number_of_particles_in_elem = mnumber_of_particles_in_elems[ii];
            ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];

            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                // BUGFIX: this guard previously read 'iii>mmaximum_number_of_particles',
                // which let iii==mmaximum_number_of_particles through and accessed
                // element_particle_pointers[offset+mmaximum_number_of_particles] —
                // one slot past this half of the double-sized pointer array.
                // Every sibling loop in this class uses '==' for the same guard.
                if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                    break;

                Convection_Particle & pparticle = element_particle_pointers[offset+iii];
                bool erase_flag= pparticle.GetEraseFlag();
                if (erase_flag==false)
                {
                    CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                }
            }
        }
    }
    KRATOS_CATCH("")
}
//**************************************************************************************************************
//**************************************************************************************************************
/// Appends 'candidate' to 'v' only if no pointer with the same Id is already stored.
/// Uniqueness is decided by comparing Id() values, not pointer identity.
template< class TDataType > void AddUniqueWeakPointer
    (GlobalPointersVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
    bool already_present = false;
    for (typename GlobalPointersVector< TDataType >::iterator it = v.begin(); it != v.end(); ++it)
    {
        if (it->Id() == candidate->Id())
        {
            already_present = true;
            break;
        }
    }
    if (!already_present)
    {
        v.push_back(candidate);
    }
}
//**************************************************************************************************************
//**************************************************************************************************************
/// Reseeds depleted elements BEFORE the convection step: every element holding fewer
/// than 'minimum_number_of_particles' gets new particles at interior sampling points,
/// whose scalar value is recovered by integrating backwards along the velocity field
/// (MoveParticle_inverse_way). Free slots are recycled from mparticles_vector.
void PreReseed(int minimum_number_of_particles)
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset =moffset; // active half of each element's double-sized particle-pointer array
const int max_results = 1000; // capacity of the bin-search scratch buffer
//tools for the paralelization: manual partition of the element range over threads
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
vector<unsigned int> elem_partition;
int number_of_rows=mr_model_part.Elements().size();
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition)
{
ResultContainerType results(max_results);
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
//ModelPart::NodesContainerType local_list=aux[k];
//PointerVectorSet<Convection_Particle, IndexedObject> & list=aux[k];
//KRATOS_WATCH(k);
BoundedMatrix<double, (TDim+1), 3 > pos;
BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
unsigned int freeparticle=0; //we start with the first position in the particles array
//int local_id=1;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
results.resize(max_results);
//const int & elem_id = ielem->Id();
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
{
//KRATOS_WATCH("elem with little particles")
Geometry< Node<3> >& geom = ielem->GetGeometry();
ComputeGaussPointPositionsForPreReseed(geom, pos, N);
//double conductivity = ielem->GetProperties()[CONDUCTIVITY];
//KRATOS_WATCH(conductivity);
for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
{
// Find a recyclable (erased) slot in the shared particle array. The slot is
// claimed under 'omp critical' with a double-check, since several threads
// scan the same array concurrently.
// NOTE(review): there is no upper-bound check on 'freeparticle' against the
// size of mparticles_vector here; the scan assumes an erased slot always
// exists before the end of the array — TODO confirm.
bool keep_looking = true;
while(keep_looking)
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
// re-check inside the critical section: another thread may have claimed it
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
mparticles_vector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
{
freeparticle++;
}
}
// Create the new particle at the sampling position and sanity-check that the
// position is really inside the element.
Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux2_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
if (is_found==false)
{
KRATOS_WATCH(aux2_N);
}
pparticle.GetEraseFlag()=false;
ResultIteratorType result_begin = results.begin();
Element::Pointer pelement( *ielem.base() );
// Recover the particle scalar by integrating backwards in time from the seed position.
MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results);
//and we copy it to the array:
mparticles_vector[freeparticle] = pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
pparticle.GetEraseFlag()=false;
number_of_particles_in_elem++;
}
}
}
}
KRATOS_CATCH("")
}
//**************************************************************************************************************
//**************************************************************************************************************
/// Reseeds depleted elements AFTER the convection step: every element holding fewer
/// than 'minimum_number_of_particles' gets 3+2*TDim new particles (7 in 2D, 9 in 3D)
/// at predefined interior positions; their scalar is interpolated directly from the
/// current mesh solution (mUnknownVar). Free slots are recycled from mparticles_vector.
void PostReseed(int minimum_number_of_particles) //pooyan's way
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset = moffset; // active half of each element's double-sized particle-pointer array
//TOOLS FOR THE PARALELIZATION
//int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
//KRATOS_WATCH(number_of_threads);
vector<unsigned int> elem_partition;
int number_of_rows=mr_model_part.Elements().size();
//KRATOS_WATCH(number_of_threads);
//KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
//typedef Node < 3 > PointType;
//std::vector<ModelPart::NodesContainerType> aux;// aux;
//aux.resize(number_of_threads);
//ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
//ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
{
unsigned int reused_particles=0; // per-thread counter of recycled slots (not read within this function)
unsigned int freeparticle = 0; //we start by the first position;
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
double mesh_scalar1;
array_1d<int, (3+2*TDim) > positions; // NOTE(review): declared but not used in this function
unsigned int number_of_reseeded_particles;
//unsigned int number_of_water_reseeded_particles;
//array_1d<double, 3 > nodes_distances;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
Geometry< Node<3> >& geom = ielem->GetGeometry();
if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) )
{
//bool reseed_more=false;
number_of_reseeded_particles=0;
//reseed_more=true;
number_of_reseeded_particles= 3+2*TDim;
ComputeGaussPointPositionsForPostReseed(geom, pos, N);
for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
{
//now we have to find an empty space ( a particle that was about to be deleted) in the particles model part. once found. there will be our renewed particle:
// The slot is claimed under 'omp critical' with a double-check, since several
// threads scan the same shared array concurrently.
bool keep_looking = true;
while(keep_looking)
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
// re-check inside the critical section: another thread may have claimed it
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
mparticles_vector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
{
freeparticle++;
}
}
// Seed the particle at the precomputed position; sanity-check it lies inside the element.
Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
if (is_found==false)
{
KRATOS_WATCH(aux_N);
KRATOS_WATCH(j)
KRATOS_WATCH(ielem->Id())
}
// Interpolate the mesh solution at the seed position using the precomputed weights.
mesh_scalar1 = 0.0;
for (unsigned int l = 0; l < (TDim+1); l++)
{
mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(mUnknownVar);
}
pparticle.GetScalar1()=mesh_scalar1;
pparticle.GetEraseFlag()=false;
mparticles_vector[freeparticle]=pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
number_of_particles_in_elem++;
// NOTE(review): 'keep_looking' is always false here — the while loop above only
// terminates once it is false — so this error branch is unreachable as written.
// The out-of-array condition it intends to detect is not actually checked; a
// bound test on 'freeparticle' against the array size would be needed — TODO confirm.
if (keep_looking)
{
KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
}
else
{
reused_particles++;
}
}
}
}
}
KRATOS_CATCH("")
}
/// Copies one out of every 'mfilter_factor' active particles into the nodes of
/// 'lagrangian_model_part' so that they can be printed / post-processed.
/// On the first call the printing model part is populated with enough nodes to hold
/// the filtered particles; it is required to be empty at that point.
/// ("Priting" is kept in the name for backward compatibility with existing callers.)
void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor )
{
    KRATOS_TRY
    //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list
    if(mparticle_printing_tool_initialized==false)
    {
        mfilter_factor=input_filter_factor;
        // BUGFIX: the original check 'NodesBegin()-NodesEnd()>0' can never be true for
        // a valid iterator range (begin <= end), so a non-empty model part was silently
        // accepted. Check the node count directly so the guard actually fires.
        if(lagrangian_model_part.Nodes().size()>0)
            KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", "");
        lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
        lagrangian_model_part.AddNodalSolutionStepVariable(mUnknownVar);
        // Create one printing node per filtered particle slot (in the NEW model part).
        for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++)
        {
            Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0);
            //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize());
            pnode->SetBufferSize(1);
        }
        mparticle_printing_tool_initialized=true;
    }
    // Reset all printing nodes: zero scalar and park them far away so that unused
    // nodes do not clutter the visualization.
    const double inactive_particle_position= -10.0;
    array_1d<double,3>inactive_particle_position_vector;
    inactive_particle_position_vector(0)=inactive_particle_position;
    inactive_particle_position_vector(1)=inactive_particle_position;
    inactive_particle_position_vector(2)=inactive_particle_position;
    ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin();
    for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++)
    {
        ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
        inode->FastGetSolutionStepValue(mUnknownVar) = 0.0;
        inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
    }
    // Copy every mfilter_factor-th active particle into the next free printing node.
    int counter=0;
    for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++)
    {
        Convection_Particle& pparticle =mparticles_vector[i];
        if(pparticle.GetEraseFlag()==false && i%mfilter_factor==0)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node.
            inode->FastGetSolutionStepValue(mUnknownVar) = pparticle.GetScalar1();
            inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
            counter++;
        }
    }
    KRATOS_CATCH("")
}
protected:
private:
///Moves one particle along the streamlines of the mesh velocity field (mVelocityVar)
///during one time step (DELTA_TIME), integrating explicitly in several substeps.
///The particle coordinates are updated in place; if at any substep the particle can
///no longer be located in the mesh its erase flag is set. 'pelement' is updated to
///the element containing the final position. 'elements_in_trajectory' caches the
///elements crossed by earlier particles seeded from the same element, which speeds
///up the search for subsequent particles (see the trajectory-aware FindNodeOnMesh).
void MoveParticle( Convection_Particle & pparticle,
Element::Pointer & pelement,
GlobalPointersVector< Element >& elements_in_trajectory,
unsigned int & number_of_elements_in_trajectory,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool KEEP_INTEGRATING=false;
bool is_found;
//bool have_air_node;
//bool have_water_node;
array_1d<double,3> vel;
array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
KEEP_INTEGRATING=true;
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
// Interpolate the mesh velocity at the particle position with the shape functions.
vel=ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
//calculating substep to get +- courant(substep) = 0.1
nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
// First explicit substep, done with the velocity at the starting position.
position += vel*substep_dt;//weight;
//DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH SEDIMENT_VELOCITY
//////////////////////////////////////////////////////////////////////////////////////////////////////
unsigned int check_from_element_number=0;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{
if (KEEP_INTEGRATING==true)
{
// Relocate the particle; this overload also maintains the trajectory cache.
is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
only_integral += 1.0; //values saved for the current time step
position+=vel*substep_dt;//weight;
}
else
{
// The particle left the mesh: stop integrating; it will be flagged for erasure below.
KEEP_INTEGRATING=false;
break;
}
}
else
break;
}
}
// Particles that left the mesh (or were never found) are flagged for erasure.
// When integration succeeded, locate the final position once more so that
// 'pelement' stores the element actually containing it.
if (KEEP_INTEGRATING==false) (pparticle.GetEraseFlag()=true);
else is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement)
if (is_found==false) ( pparticle.GetEraseFlag()=true);
pparticle.Coordinates() = position;
}
/// Delta (FLIP-style) correction of a single particle: interpolates the nodal
/// DELTA_SCALAR1 at the particle position using the element shape functions and
/// adds it to the particle's scalar. The geometry is passed by reference by the
/// caller, which is cheaper than fetching it from the element again.
void CorrectParticleUsingDeltaVariables(
    Convection_Particle & pparticle,
    Element::Pointer & pelement,
    Geometry< Node<3> >& geom)
{
    array_1d<double,TDim+1> N;
    array_1d<double,3> coords = pparticle.Coordinates();
    float & particle_scalar1 = pparticle.GetScalar1();

    const bool is_found = CalculatePosition(geom, coords[0], coords[1], coords[2], N);
    if (!is_found)
    {
        // Particle fell (slightly) outside its element: log the shape functions and
        // clamp the negative entries so the interpolation below remains usable.
        KRATOS_WATCH(N)
        for (int j = 0; j != (TDim+1); j++)
        {
            if (N[j] < 0.0)
                N[j] = 1e-10;
        }
    }

    // Interpolate the nodal correction at the particle position and apply it.
    double delta_scalar1 = 0.0;
    for (unsigned int j = 0; j < (TDim+1); j++)
        delta_scalar1 += geom[j].FastGetSolutionStepValue(DELTA_SCALAR1) * N[j];
    particle_scalar1 = particle_scalar1 + delta_scalar1;
}
/// Integrates BACKWARDS along the mesh velocity field from the particle's position
/// over one time step, interpolating mUnknownVar at each substep; the last
/// interpolated value is stored as the particle scalar. Used by PreReseed to give
/// freshly seeded particles a physically meaningful value.
/// NOTE(review): despite the comment below, 'pelement' IS taken by reference here;
/// the visible caller (PreReseed) passes a local copy, so the element stored for
/// the seed location is not overwritten — confirm for any other callers.
void MoveParticle_inverse_way(
Convection_Particle & pparticle,
Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool KEEP_INTEGRATING=false;
bool is_found;
array_1d<double,3> vel;
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
double scalar1 = 0.0;
//we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
KEEP_INTEGRATING=true;
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
// Interpolate both the velocity and the scalar at the current position.
vel=ZeroVector(3);
scalar1=0.0;
for(unsigned int j=0; j<(TDim+1); j++)
{
scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
//calculating substep to get +- courant(substep) = 1/4
nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
// Backwards integration: the position is moved AGAINST the velocity.
position -= vel*substep_dt;//weight;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{ if (KEEP_INTEGRATING==true) {
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel=ZeroVector(3);
scalar1=0.0;
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j] ;
scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
}
only_integral += 1.0;//weight ; //values saved for the current time step
position-=vel*substep_dt;//weight;
}
else KEEP_INTEGRATING=false; // left the mesh: keep the last interpolated scalar
}
}
// The scalar found at the (furthest reached) upstream position becomes the particle value.
pparticle.GetScalar1()=scalar1;
}
//else {KRATOS_WATCH(position); }
}
///Finds the element that contains 'position' and the shape-function values of the
///point within it. Search order (cheapest first): (1) the element the particle was
///last found in ('pelement'), (2) that element's neighbours, (3) the spatial bins.
///On success 'pelement' and 'N' are updated and true is returned; false otherwise.
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;
    const array_1d<double,3>& coords = position;

    // (1) Cheapest check: is the point still inside the previous element?
    Geometry<Node<3> >& geom_default = pelement->GetGeometry();
    if (CalculatePosition(geom_default, coords[0], coords[1], coords[2], N))
        return true;

    // (2) Check the neighbour elements of the previous element.
    GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    for (unsigned int i = 0; i < neighb_elems.size(); i++)
    {
        Geometry<Node<3> >& neighbour_geom = neighb_elems[i].GetGeometry();
        if (CalculatePosition(neighbour_geom, coords[0], coords[1], coords[2], N))
        {
            pelement = neighb_elems(i)->shared_from_this();
            return true;
        }
    }

    // (3) Fall back to the bins: gather candidate elements around the point and
    // test each one for containment.
    const SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
    for (SizeType i = 0; i < results_found; i++)
    {
        Geometry<Node<3> >& candidate_geom = (*(result_begin+i))->GetGeometry();
        if (CalculatePosition(candidate_geom, coords[0], coords[1], coords[2], N))
        {
            pelement = Element::Pointer((*(result_begin+i)));
            return true;
        }
    }

    // Nothing contained the point: the particle is outside the mesh.
    return false;
}
// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
/// Trajectory-aware variant of FindNodeOnMesh: before falling back to neighbours
/// and bins, it checks the cached list of elements crossed by previous particles
/// seeded from the same element. The cache protocol:
///  - 'elements_in_trajectory' holds up to 20 elements; 'number_of_elements_in_trajectory'
///    is its current size.
///  - 'check_from_element_number' is the index in the cache from which to resume
///    checking (cache entries before it were already visited by this particle).
///  - when the point is found outside the cache, the containing element is appended
///    (if there is room) and the resume index is advanced past it.
bool FindNodeOnMesh( array_1d<double,3>& position,
array_1d<double,TDim+1>& N,
Element::Pointer & pelement,
GlobalPointersVector< Element >& elements_in_trajectory,
unsigned int & number_of_elements_in_trajectory,
unsigned int & check_from_element_number,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
const array_1d<double,3>& coords = position;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
if(is_found_1 == true)
{
return true; //that was easy!
}
//if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
{
Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=elements_in_trajectory(i)->shared_from_this();
N=aux_N;
check_from_element_number = i+1 ; //now i element matches pelement, so to avoid cheching twice the same element we send the counter to the following element.
return true;
}
}
//now we check the neighbour elements:
auto& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
//the first we check is the one that has negative shape function, because it means it went outside in this direction:
//commented, it is not faster than simply checking all the neighbours (branching)
/*
unsigned int checked_element=0;
for (unsigned int i=0;i!=(TDim+1);i++)
{
if (N[i]<0.0)
{
checked_element=i;
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=Element::Pointer(((neighb_elems(i))));
N=aux_N;
return true;
}
break;
}
}
*/
//we check all the neighbour elements
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if (is_found_2)
{
pelement=neighb_elems(i)->shared_from_this();
// Grow the trajectory cache (capped at 20 elements) so later particles from the
// same seed element can skip straight to this element.
if (number_of_elements_in_trajectory<20)
{
elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
number_of_elements_in_trajectory++;
check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
//if checking all the neighbour elements did not work, we have to use the bins
//ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
if(results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if(is_found == true)
{
pelement=Element::Pointer((*(result_begin+i)));
// Same cache update as above: remember the element for subsequent particles.
if (number_of_elements_in_trajectory<20)
{
elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
number_of_elements_in_trajectory++;
check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
}
//not found case
return false;
}
//***************************************
//***************************************
/// 2D overload: computes the triangle shape functions (barycentric coordinates) of
/// point (xc,yc) in 'geom' and stores them in 'N'. Returns true when the point lies
/// inside the triangle. 'zc' is unused in 2D. Throws on a zero-area element.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 3 > & N
                              )
{
    const double x0 = geom[0].X();
    const double y0 = geom[0].Y();
    const double x1 = geom[1].X();
    const double y1 = geom[1].Y();
    const double x2 = geom[2].X();
    const double y2 = geom[2].Y();

    const double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    const double inv_area = 1.0 / area;

    // Barycentric coordinates: each one is the sub-triangle area over the total area.
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    // Inside the triangle iff every shape function lies in [0,1].
    return (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 &&
            N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0);
}
////////////
//using the pre loaded nodal coordinates
/// 2D overload working on pre-loaded nodal coordinates (layout: x,y,z per node,
/// hence strides of 3). Computes the triangle shape functions of (xc,yc) into 'N'
/// and returns true when the point is inside. 'zc' is unused in 2D.
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 3 > & N
                              )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];

    const double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    const double inv_area = 1.0 / area;

    // Barycentric coordinates: sub-triangle areas normalized by the total area.
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    // Inside the triangle iff every shape function lies in [0,1].
    return (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 &&
            N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0);
}
//***************************************
//***************************************
/// 3D overload: computes the tetrahedron shape functions (barycentric coordinates)
/// of point (xc,yc,zc) in 'geom' and stores them in 'N'. Returns true when the
/// point lies inside. Throws when the element volume is (numerically) zero.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 4 > & N
                              )
{
    const double x0 = geom[0].X();
    const double y0 = geom[0].Y();
    const double z0 = geom[0].Z();
    const double x1 = geom[1].X();
    const double y1 = geom[1].Y();
    const double z1 = geom[1].Z();
    const double x2 = geom[2].X();
    const double y2 = geom[2].Y();
    const double z2 = geom[2].Z();
    const double x3 = geom[3].X();
    const double y3 = geom[3].Y();
    const double z3 = geom[3].Z();

    const double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    if (vol < 1e-30) // numerically zero (or inverted) volume
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    }
    const double inv_vol = 1.0 / vol;

    // Barycentric coordinates: sub-tetrahedron volumes over the total volume.
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    // Inside the tetrahedron iff every shape function lies in [0,1].
    return (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
            N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0);
}
///////////////////
//using the pre loaded nodal coordinates
/// 3D overload working on pre-loaded nodal coordinates (layout: x,y,z per node).
/// Computes the tetrahedron shape functions of (xc,yc,zc) into 'N' and returns true
/// when the point is inside. Throws when the element volume is (numerically) zero.
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
                              const double xc, const double yc, const double zc,
                              array_1d<double, 4 > & N
                              )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& z0 = nodes_positions[2];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& z1 = nodes_positions[5];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];
    const double& z2 = nodes_positions[8];
    const double& x3 = nodes_positions[9];
    const double& y3 = nodes_positions[10];
    const double& z3 = nodes_positions[11];

    const double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    if (vol < 1e-30) // numerically zero (or inverted) volume
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    }
    const double inv_vol = 1.0 / vol;

    // Barycentric coordinates: sub-tetrahedron volumes over the total volume.
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    // Inside the tetrahedron iff every shape function lies in [0,1].
    return (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
            N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0);
}
/// Signed area of triangle (p0,p1,p2): half the cross product of the two edge
/// vectors leaving p0. Positive for counter-clockwise node ordering.
inline double CalculateVol(const double x0, const double y0,
                           const double x1, const double y1,
                           const double x2, const double y2
                           )
{
    const double e10_x = x1 - x0;
    const double e10_y = y1 - y0;
    const double e20_x = x2 - x0;
    const double e20_y = y2 - y0;
    return 0.5 * (e10_x * e20_y - e10_y * e20_x);
}
//***************************************
//***************************************
/// Signed volume of tetrahedron (p0,p1,p2,p3): det(J)/6, where the columns of J
/// are the three edge vectors leaving p0. Positive for a positively oriented node
/// ordering.
inline double CalculateVol(const double x0, const double y0, const double z0,
                           const double x1, const double y1, const double z1,
                           const double x2, const double y2, const double z2,
                           const double x3, const double y3, const double z3
                           )
{
    // Edge vectors from node 0 to nodes 1, 2 and 3.
    const double x10 = x1 - x0, y10 = y1 - y0, z10 = z1 - z0;
    const double x20 = x2 - x0, y20 = y2 - y0, z20 = z2 - z0;
    const double x30 = x3 - x0, y30 = y3 - y0, z30 = z3 - z0;
    // 3x3 determinant expanded by cofactors.
    const double detJ = x10 * y20 * z30 - x10 * y30 * z20
                      + y10 * z20 * x30 - y10 * x20 * z30
                      + z10 * x20 * y30 - z10 * y20 * x30;
    return detJ * 0.1666666666666666666667; // detJ / 6
}
/// Fills the first FOUR rows of 'N' with triangle sampling weights (three points
/// biased towards each corner plus the centroid) and the matching rows of 'pos'
/// with the resulting global coordinates. Rows 4-6 of the 7-row matrices are left
/// untouched by this function. The corner weights are pulled slightly inwards
/// (0.15/0.7 instead of the nominal 1/6 and 2/3).
void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N)
{
    const double one_third = 1.0 / 3.0;
    const double one_sixt = 0.15;  // nominal 1/6, pulled inwards
    const double two_third = 0.7;  // nominal 2/3

    // Sampling weights per point (rows) and node (columns).
    N(0, 0) = one_sixt;  N(0, 1) = one_sixt;  N(0, 2) = two_third;
    N(1, 0) = two_third; N(1, 1) = one_sixt;  N(1, 2) = one_sixt;
    N(2, 0) = one_sixt;  N(2, 1) = two_third; N(2, 2) = one_sixt;
    N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third;

    // Global position of each sampling point: weighted sum of the node coordinates.
    for (unsigned int i = 0; i < 4; i++)
    {
        pos(i, 0) = 0.0;
        pos(i, 1) = 0.0;
        pos(i, 2) = 0.0;
        for (unsigned int j = 0; j < 3; j++)
        {
            pos(i, 0) += N(i, j) * geom[j].X();
            pos(i, 1) += N(i, j) * geom[j].Y();
            pos(i, 2) += N(i, j) * geom[j].Z();
        }
    }
}
// Computes 7 interior sampling points for post-reseeding on a triangle:
// three near the vertices, the barycenter, and three near the edge midpoints.
// Rows 0..6 of `pos` (positions) and `N` (area coordinates) are all written.
// Weights are perturbed from the exact fractions (0.12 vs 1/8, 0.76 vs 3/4)
// — presumably to keep the points strictly interior; TODO confirm.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d
{
double one_third = 1.0 / 3.0;
double one_eight = 0.12; //1.0 / 6.0;
double three_quarters = 0.76; //2.0 * one_third;
// Area coordinates: rows 0-2 vertex-biased, row 3 barycenter, rows 4-6
// edge-biased (0.12 + 0.44 + 0.44 = 1).
N(0, 0) = one_eight;
N(0, 1) = one_eight;
N(0, 2) = three_quarters;
N(1, 0) = three_quarters;
N(1, 1) = one_eight;
N(1, 2) = one_eight;
N(2, 0) = one_eight;
N(2, 1) = three_quarters;
N(2, 2) = one_eight;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
N(4, 0) = one_eight;
N(4, 1) = 0.44;
N(4, 2) = 0.44;
N(5, 0) = 0.44;
N(5, 1) = one_eight;
N(5, 2) = 0.44;
N(6, 0) = 0.44;
N(6, 1) = 0.44;
N(6, 2) = one_eight;
// Cartesian positions as weighted sums of the nodal coordinates.
//first
pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X();
pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y();
pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z();
//second
pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X();
pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y();
pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z();
//third
pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X();
pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y();
pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
//fifth
pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X();
pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y();
pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z();
//sixth
pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X();
pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y();
pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z();
//seventh
pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X();
pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y();
pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z();
}
// Computes 9 interior sampling points for post-reseeding on a tetrahedron:
// four vertex-biased points, the barycenter, and four face-biased points.
// `N` holds the barycentric weights (rows sum to 1: 0.7+3*0.1, 4*0.25,
// 0.1+3*0.3); `pos` is filled by accumulating the weighted nodal coordinates.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D
{
double one_quarter = 0.25;
double small_fraction = 0.1; //1.0 / 6.0;
double big_fraction = 0.7; //2.0 * one_third;
double mid_fraction = 0.3; //2.0 * one_third;
// Rows 0-3: one point close to each of the 4 vertices.
N(0, 0) = big_fraction;
N(0, 1) = small_fraction;
N(0, 2) = small_fraction;
N(0, 3) = small_fraction;
N(1, 0) = small_fraction;
N(1, 1) = big_fraction;
N(1, 2) = small_fraction;
N(1, 3) = small_fraction;
N(2, 0) = small_fraction;
N(2, 1) = small_fraction;
N(2, 2) = big_fraction;
N(2, 3) = small_fraction;
N(3, 0) = small_fraction;
N(3, 1) = small_fraction;
N(3, 2) = small_fraction;
N(3, 3) = big_fraction;
// Row 4: the barycenter.
N(4, 0) = one_quarter;
N(4, 1) = one_quarter;
N(4, 2) = one_quarter;
N(4, 3) = one_quarter;
// Rows 5-8: one point close to each of the 4 faces (low weight on the
// opposite vertex).
N(5, 0) = small_fraction;
N(5, 1) = mid_fraction;
N(5, 2) = mid_fraction;
N(5, 3) = mid_fraction;
N(6, 0) = mid_fraction;
N(6, 1) = small_fraction;
N(6, 2) = mid_fraction;
N(6, 3) = mid_fraction;
N(7, 0) = mid_fraction;
N(7, 1) = mid_fraction;
N(7, 2) = small_fraction;
N(7, 3) = mid_fraction;
N(8, 0) = mid_fraction;
N(8, 1) = mid_fraction;
N(8, 2) = mid_fraction;
N(8, 3) = small_fraction;
// Accumulate pos(j,:) = sum_i N(j,i) * node_i coordinates.
pos=ZeroMatrix(9,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=9; j++) //going through the 9 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
// Computes 3 interior sampling points for pre-reseeding on a triangle:
// each point is biased towards one node (weight 0.5) and equidistant from
// the other two (weight 0.25 each). Fills the shape-function matrix `N`
// (area coordinates, one row per point) and the Cartesian positions `pos`.
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D
{
    // Weight matrix: 0.5 on the diagonal (the "own" node), 0.25 elsewhere.
    for (unsigned int point = 0; point != 3; point++)
        for (unsigned int node = 0; node != 3; node++)
            N(point, node) = (point == node) ? 0.5 : 0.25;
    // Accumulate positions as weighted sums of the nodal coordinates, in
    // node order 0,1,2 so the result matches a direct left-to-right expansion.
    pos = ZeroMatrix(3, 3);
    for (unsigned int node = 0; node != 3; node++)
    {
        array_1d<double, 3 > & coordinates = geom[node].Coordinates();
        for (unsigned int point = 0; point != 3; point++)
            for (unsigned int k = 0; k != 3; k++) //x,y,z
                pos(point, k) += N(point, node) * coordinates[k];
    }
}
// Computes 4 interior sampling points for pre-reseeding on a tetrahedron.
// Each point is biased towards one node (barycentric weight 0.4) and
// equidistant from the remaining three (0.2 each); rows of `N` sum to 1.
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D
{
//creating 4 particles, each will be closer to a node and equidistant to the other nodes
N(0, 0) = 0.4;
N(0, 1) = 0.2;
N(0, 2) = 0.2;
N(0, 3) = 0.2;
N(1, 0) = 0.2;
N(1, 1) = 0.4;
N(1, 2) = 0.2;
N(1, 3) = 0.2;
N(2, 0) = 0.2;
N(2, 1) = 0.2;
N(2, 2) = 0.4;
N(2, 3) = 0.2;
N(3, 0) = 0.2;
N(3, 1) = 0.2;
N(3, 2) = 0.2;
N(3, 3) = 0.4;
// Accumulate pos(j,:) = sum_i N(j,i) * node_i coordinates.
pos=ZeroMatrix(4,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=4; j++) //going through the 4 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
// Computes 45 sampling points on a triangle, laid out as a regular
// triangular lattice: area coordinates 0.05 + 0.1*i and 0.05 + 0.1*j with
// i + j <= 8, the third coordinate being the remainder. The triangular
// double loop yields exactly 9+8+...+1 = 45 points, filling every row of
// the 45-row matrices.
void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N)
{
//std::cout << "NEW ELEMENT" << std::endl;
unsigned int counter=0;
for (unsigned int i=0; i!=9;i++)
{
for (unsigned int j=0; j!=(9-i);j++)
{
N(counter,0)=0.05+double(i)*0.1;
N(counter,1)=0.05+double(j)*0.1;
// Third area coordinate closes the partition of unity.
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
// Computes the 15 initial seeding points on a triangle: a triangular lattice
// with area coordinates 0.05 + 0.2*i and 0.05 + 0.2*j, i + j <= 4; the
// double loop yields exactly 5+4+3+2+1 = 15 points, filling every row.
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D
{
//std::cout << "NEW ELEMENT" << std::endl;
unsigned int counter=0;
for (unsigned int i=0; i!=5;i++)
{
for (unsigned int j=0; j!=(5-i);j++)
{
N(counter,0)=0.05+double(i)*0.2;
N(counter,1)=0.05+double(j)*0.2;
// Third area coordinate closes the partition of unity.
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
// Computes the 20 initial seeding points on a tetrahedron, built as layered
// triangular lattices ("pyramid" of particles): the triple loop over
// i + j + k <= 3 yields 10+6+3+1 = 20 points, filling every row. The first
// three barycentric coordinates are 0.27*(0.175 + index); the fourth closes
// the partition of unity.
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D
{
//std::cout << "NEW ELEMENT" << std::endl;
//double total;
double fraction_increment;
unsigned int counter=0;
for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles
{
//std::cout << "inside i" << i << std::endl;
for (unsigned int j=0; j!=(4-i);j++)
{
//std::cout << "inside j" << j << std::endl;
for (unsigned int k=0; k!=(4-i-j);k++)
{
//std::cout << "inside k" << k << std::endl;
N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1)
//total = 1.0 - N(counter,0);
fraction_increment = 0.27; //
N(counter,1)=fraction_increment * (0.175 + double(j));
N(counter,2)=fraction_increment * (0.175 + double(k));
// Fourth barycentric coordinate makes the row sum to 1.
N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
}
// Generic matrix inversion via Boost.uBLAS LU factorization.
// T must be a uBLAS-compatible matrix type. Returns false if the LU
// factorization fails (singular matrix), in which case `inverse` may be
// partially overwritten but the input is untouched (a copy is factorized).
template<class T>
bool InvertMatrix(const T& input, T& inverse)
{
typedef permutation_matrix<std::size_t> pmatrix;
// create a working copy of the input
T A(input);
// create a permutation matrix for the LU-factorization
pmatrix pm(A.size1());
// perform LU-factorization (non-zero return => singular)
int res = lu_factorize(A, pm);
if (res != 0)
return false;
// create identity matrix of "inverse"
inverse.assign(identity_matrix<double> (A.size1()));
// backsubstitute to get the inverse
lu_substitute(A, pm, inverse);
return true;
}
// Inverts a 3x3 matrix using the explicit cofactor (adjugate) formula.
// Returns false when the determinant is zero (singular matrix), leaving
// `result` unmodified — this mirrors the contract of InvertMatrix above.
// NOTE: despite the TDim+1 x TDim+1 signature, the formula only addresses
// indices 0..2, so this overload is only valid when TDim == 2.
bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result)
{
double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2))
-A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0))
+A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0));
// Bug fix: previously a zero determinant caused a division by zero and the
// function still reported success. Fail early instead.
if (determinant == 0.0)
return false;
double invdet = 1/determinant;
result(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet;
result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet;
result(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet;
result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet;
result(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet;
result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet;
result(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet;
result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet;
result(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet;
return true;
}
// Validates that the model part is correctly configured for the convection
// step: CONVECTION_DIFFUSION_SETTINGS must exist and the unknown, projection
// and velocity variables it names must be present in the nodal database,
// along with MEAN_SIZE and DELTA_SCALAR1. Throws (via KRATOS_THROW_ERROR)
// on any missing requirement; returns 0 on success.
virtual int Check()
{
KRATOS_TRY
ProcessInfo& rCurrentProcessInfo = mr_model_part.GetProcessInfo();
if (rCurrentProcessInfo.Has(CONVECTION_DIFFUSION_SETTINGS)==false)
KRATOS_THROW_ERROR(std::logic_error, "no CONVECTION_DIFFUSION_SETTINGS in model_part", "");
//std::cout << "ConvDiff::Check(). If crashes, check CONVECTION_DIFFUSION_SETTINGS is defined" << std::endl;
ConvectionDiffusionSettings::Pointer my_settings = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
//UNKNOWN VARIABLE
if(my_settings->IsDefinedUnknownVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetUnknownVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable not defined!", "");
//PROJECTION VARIABLE
//used as intermediate variable, is the variable at time n+1 but only accounting for the convective term.
if(my_settings->IsDefinedProjectionVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetProjectionVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Projection Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "No Projection variable assigned for ConvDiff!", "");
//CONVECTION VELOCITY VARIABLE
//CURRENTLY WE ARE USING (VELOCITY -MESH_VELOCITY) TO CONVECT, so the ConvectionVariable must not be used:
//if(my_settings->IsDefinedConvectionVariable()==true)
//{
// if (BaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(my_settings->GetConvectionVariable()) == false)
// KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Convection Variable defined but not contained in the model part", "");
//}
//else
// std::cout << "No Projection variable assigned for ConvDiff. Assuming Convection=0" << std::endl;
// Defining a convection variable is rejected outright — convection is taken
// from (velocity - mesh velocity) instead.
if(my_settings->IsDefinedConvectionVariable()==true)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: ConvectionVariable not used. Use VelocityVariable instead", "");
//VELOCITY VARIABLE
if(my_settings->IsDefinedVelocityVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetVelocityVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Velocity Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "No Velocity variable assigned for ConvDiff!", "");
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(MEAN_SIZE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add MEAN_SIZE variable to model part!", "");
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_SCALAR1) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add DELTA_SCALAR1 variable to model part!", "");
return 0;
KRATOS_CATCH("")
}
// --- Member data (enclosing class declared above) ---
ModelPart& mr_model_part; // model part this utility operates on
int m_nparticles; // NOTE(review): appears to be a particle count — confirm against constructor
int mnelems; // number of elements
int moffset;
//vector<double> mareas_vector; UNUSED SO COMMENTED
int max_nsubsteps; // substep limits used when moving particles
double max_substep_dt;
int mmaximum_number_of_particles; // cap per element
std::vector< Convection_Particle > mparticles_vector; //Point<3>
int mlast_elem_id;
bool modd_timestep;
bool mparticle_printing_tool_initialized; // lazy-init flag for the printing tool
unsigned int mfilter_factor;
unsigned int mlast_node_id;
//ModelPart& mr_particle_model_part;
vector<int> mnumber_of_particles_in_elems; // per-element particle counters
vector<int> mnumber_of_particles_in_elems_aux;
//vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; //pointing to the GetValue of each element
vector<ParticlePointerVector> mvector_of_particle_pointers_vectors;
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; // spatial search structure
// Cached variable references resolved from the convection-diffusion settings.
const Variable<double>& mUnknownVar;
const Variable<double>& mProjectionVar;
const Variable<array_1d<double,3> >& mVelocityVar;
const Variable<array_1d<double,3> >& mMeshVelocityVar;
};
} // namespace Kratos.
#endif // KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED defined
|
vmath.h | #ifndef VMATH
#define VMATH
#include <vector>
#include <stdexcept>
#include <algorithm>
namespace vec {
/// Collection of the four default element-wise binary functors used by Calc.
/// Each functor is stateless and applies a single arithmetic operation.
struct DefaultOperators {
    /// Addition: returns a + b.
    struct Sum {
        template <class V>
        inline V operator()(const V &lhs, const V &rhs) const noexcept {
            return lhs + rhs;
        }
    };
    /// Subtraction: returns a - b (named "Rem" for removal, not remainder).
    struct Rem {
        template <class V>
        inline V operator()(const V &lhs, const V &rhs) const noexcept {
            return lhs - rhs;
        }
    };
    /// Multiplication: returns a * b.
    struct Mul {
        template <class V>
        inline V operator()(const V &lhs, const V &rhs) const noexcept {
            return lhs * rhs;
        }
    };
    /// Division: returns a / b (integer division for integral types).
    struct Div {
        template <class V>
        inline V operator()(const V &lhs, const V &rhs) const noexcept {
            return lhs / rhs;
        }
    };
};
// Applies the binary functor FuncType scalar-wise, vector-wise (element by
// element, OpenMP-parallel), variadically across several vectors, or
// vector-vs-scalar. Inherits from FuncType so the scalar operator() is the
// base functor's.
template <class FuncType>
struct Calc : public FuncType {
/**
* @brief operator () Do single binary operation
* @param a Left value
* @param b Right value
* @return Result of binary operation
*/
template <class T>
inline auto operator()(const T &a, const T &b) const
noexcept -> decltype(FuncType::operator()(a, b)) {
return FuncType::operator()(a, b);
}
/**
* @brief operator () Binary operation for each element in vectors.
* @param result Output container; grown to a.size() if smaller (never shrunk)
* @param a Left argument
* @param b Right argument
* @return True if computation done. Otherwise (vectors have not same size)
* False
*/
template <class VectorType>
inline bool operator()(VectorType &result, const VectorType &a,
const VectorType &b) const noexcept {
if (a.size() != b.size()) return false;
if (result.size() < a.size()) result.resize(a.size());
#pragma omp parallel for
for (size_t i = 0; i < a.size(); ++i)
result[i] = FuncType::operator()(a[i], b[i]);
return true;
}
/**
* @brief operator () Binary operation for each element for each vector
* @param result Result container
* @param a Left argument
* @param b Right argument
* @param vectors Next arguments
* @return True if computation done. Otherwise (vectors have not same size)
* False
*
* Folds left-to-right: result = op(a, b), then op(result, next)...
* && short-circuits, so later vectors are not touched after a size mismatch.
*/
template <class VectorType, class... Vectors>
inline bool operator()(VectorType &result, const VectorType &a,
const VectorType &b, const Vectors &... vectors) const
noexcept {
return operator()(result, a, b) &&operator()(result, result, vectors...);
}
template <class VectorType, class T>
/**
* @brief operator () Binary opertation for each element with constant
* @param result Result container; assumed to be at least a.size() — TODO confirm, it is not resized here
* @param a Left argument
* @param b Right argument (scalar applied to every element)
*/
inline void operator()(VectorType &result, const VectorType &a,
const T &b) const noexcept {
#pragma omp parallel for
for (size_t i = 0; i < a.size(); ++i)
result[i] = FuncType::operator()(a[i], b);
}
};
// Ready-to-use calculators for the four default element-wise operations.
// (internal linkage: each translation unit including this header gets its
// own copy)
const static Calc<DefaultOperators::Sum> sum;
const static Calc<DefaultOperators::Rem> rem;
const static Calc<DefaultOperators::Mul> mul;
const static Calc<DefaultOperators::Div> div;
/**
 * @brief accumulate Sums every element of @p v, starting from @p init,
 *        in parallel (OpenMP reduction).
 * @param result Output parameter; receives the computed sum.
 * @param v Input container (must support size() and operator[]).
 * @param init Starting value of the sum; defaults to T().
 * @return The same value that was stored in @p result.
 */
template <class T, class VectorType>
static inline T accumulate(T &result, const VectorType &v,
                           const T &init = T()) noexcept {
  T total = init;
#pragma omp parallel for reduction(+ : total)
  for (size_t idx = 0; idx < v.size(); ++idx) {
    total += v[idx];
  }
  result = total;
  return result;
}
}
#endif // VMATH
|
relabel.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include "HiParTI.h"
/*function declarations*/
static void ptiLexiOrderPerMode(ptiSparseMatrix * mtx, ptiIndex mode, ptiIndex ** orgIds, int tk);
/* Returns the current wall-clock time in seconds (microsecond resolution);
 * used for coarse timing of the reordering phases.
 * Fix: removed the stray ';' after the function body — a lone semicolon at
 * file scope is not valid ISO C before C23. */
static double u_seconds(void)
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return (double) tp.tv_sec + (double) tp.tv_usec / 1000000.0;
}
/* Computes a relabeling (row and column permutation) of the sparse matrix.
 * newIndices[0][old_row] = new_row, newIndices[1][old_col] = new_col.
 * renumber == 1 selects the iterative Lexi-order scheme (iterations passes
 * over both modes, performed on a scratch copy so mtx is untouched);
 * renumber == 2 is a BFS-like scheme that is currently disabled.
 * tk is the thread count forwarded to the helpers. */
void ptiIndexRelabel(ptiSparseMatrix * mtx, ptiIndex ** newIndices, int renumber, ptiIndex iterations, int tk)
{
/*
newIndices is of size [nmodes][ndims[modes]] and assumed to be allocted.
It will be overwritten. No need to initialize.
We will need to reshuffle nonzeros. In order to not to touch mtx, we copy the indices of nonzeros
to a local variable coords. This is sort of transposed wrt mtx: its size is nnz * n, instead of n * nnz used in mtx.
*/
ptiIndex const nmodes = 2; // for matrices
// ptiNnzIndex const nnz = mtx->nnz;
ptiIndex i, m;
ptiIndex its;
if (renumber == 1) { /* Lexi-order renumbering */
printf("[Lexi-order]\n");
/* copy the indices; the reordering mutates the copy, not the input */
ptiSparseMatrix mtx_temp;
ptiCopySparseMatrix(&mtx_temp, mtx, tk);
/* orgIds[mode][position] = original id currently sitting at position */
ptiIndex ** orgIds = (ptiIndex **) malloc(sizeof(ptiIndex*) * nmodes);
orgIds[0] = (ptiIndex *) malloc(sizeof(ptiIndex) * mtx->nrows);
for (i = 0; i < mtx->nrows; i++)
orgIds[0][i] = i;
orgIds[1] = (ptiIndex *) malloc(sizeof(ptiIndex) * mtx->ncols);
for (i = 0; i < mtx->ncols; i++)
orgIds[1][i] = i;
/* alternate row/column passes; each pass refines the other mode's order */
for (its = 0; its < iterations; its++)
{
printf("[Lexi-order] Optimizing the numbering for its %u\n", its+1);
ptiLexiOrderPerMode(&mtx_temp, 0, orgIds, tk);
// ptiDumpIndexArray(orgIds[0], mtx->nrows, stdout);
ptiLexiOrderPerMode(&mtx_temp, 1, orgIds, tk);
// ptiDumpIndexArray(orgIds[1], mtx->ncols, stdout);
}
/* compute newIndices from orgIds. Reverse perm */
for (i = 0; i < mtx->nrows; i++)
newIndices[0][orgIds[0][i]] = i;
for (i = 0; i < mtx->ncols; i++)
newIndices[1][orgIds[1][i]] = i;
ptiFreeSparseMatrix(&mtx_temp);
for (m = 0; m < nmodes; m++)
free(orgIds[m]);
free(orgIds);
} else if (renumber == 2 ) { /* BFS-like renumbering */
/*
REMARK (10 May 2018): this is the old bfs-like kind of thing. I hoped it would reduce the number of iterations,
but on a few cases it did not help much. Just leaving it in case we want to use it.
*/
printf("[BFS-like]\n");
// ptiBFSLike(tsr, newIndices);
}
}
// void ptiBFSLike(ptiSparseMatrix * mtx, ptiIndex ** newIndices)
// {
// /*PRE: newIndices is allocated
// POST:
// newIndices[0][0...n_0-1] gives the new ids for dim 0
// newIndices[1][0...n_1-1] gives the new ids for dim 1
// ...
// newIndices[d-1][0...n_{d-1}-1] gives the new ids for dim d-1
// This implements a simple idea close to BFS/Cuthill-McKee/Maximum cardinality search.
// */
// ptiIndex const nmodes = 2;
// ptiNnzIndex const nnz = mtx->nnz;
// ptiIndex * ndims = tsr->ndims;
// ptiIndexVector * inds = tsr->inds;
// ptiIndex *dimsPrefixSum;
// basicHypergraph hg;
// ptiIndex *newIndicesHg;
// ptiIndex d, i;
// dimsPrefixSum = (ptiIndex*) calloc(nmodes, sizeof(ptiIndex));
// for (d = 1; d < nmodes; d++)
// dimsPrefixSum[d] = ndims[d-1] + dimsPrefixSum[d-1];
// fillHypergraphFromCoo(&hg, nmodes, nnz, ndims, inds);
// newIndicesHg = (ptiIndex*) malloc(sizeof(ptiIndex) * hg.nvrt);
// for (i = 0; i < hg.nvrt; i++)
// newIndicesHg[i] = i;
// for (d = 0; d < nmodes; d++) /*order d*/
// orderforHiCOOaDim(&hg, newIndicesHg, dimsPrefixSum[d], dimsPrefixSum[d] + ndims[d]-1);
// /*copy from newIndices to newIndicesOut*/
// for (d = 0; d < nmodes; d++)
// for (i = 0; i < ndims[d]; i++)
// newIndices[d][i] = newIndicesHg[dimsPrefixSum[d] + i] - dimsPrefixSum[d];
// free(newIndicesHg);
// freeHypergraphData(&hg);
// free(dimsPrefixSum);
// }
/* Lexicographically orders the n columns of an m-row sparse structure given
 * in 1-based CSR form (ia row pointers, cols column ids); the resulting
 * column permutation is written to cprm[1..n]. Uses partition refinement
 * over "supervariables" (sets of columns whose row patterns are identical
 * so far): each row splits every supervariable it touches. tk (thread
 * count) is currently unused — the parallel init loops are commented out.
 * Fix: freeIdList was allocated but never released (memory leak).
 * m, n are the num of rows and cols, respectively. We lex order cols,
 * given rows.
 * BU notes as of 4 May 2018: I am hoping that I will not be asked the details of this function, and its memory use;) A quick and dirty update from something else I had since some time. I did not think through if the arrays could be reduced. Right now we have 10 arrays of size n each (where n is the length of a single dimension of the tensor.
 */
static void lexOrderThem( ptiNnzIndex m, ptiIndex n, ptiNnzIndex *ia, ptiIndex *cols, ptiIndex *cprm, int const tk)
{
    ptiNnzIndex *flag, j, jcol, jend;
    ptiIndex *svar, *var, numBlocks;
    ptiIndex *prev, *next, *sz, *setnext, *setprev, *tailset;
    ptiIndex *freeIdList, freeIdTop;
    ptiIndex k, s, acol;
    ptiIndex firstset, set, pos;
    svar = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
    flag = (ptiNnzIndex*) calloc(sizeof(ptiNnzIndex),(n+2));
    var = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
    prev = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
    next = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
    sz = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
    setprev = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));
    setnext = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));
    tailset = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));
    freeIdList = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));
    /* initially all n columns form one supervariable (id 1), kept as a
       doubly linked list 1..n via next/prev */
    next[1] = 2;
    prev[0] = prev[1] = 0;
    next[n] = 0;
    prev[n] = n-1;
    svar[1] = svar[n] = 1;
    flag[1] = flag[n] = flag[n+1] = 0;
    cprm[1] = cprm[n] = 2 * n ;
    setprev[1] = setnext[1] = 0;
    // #pragma omp parallel for num_threads(tk)
    for(ptiIndex jj = 2; jj<=n-1; jj++)/*init all in a single svar*/
    {
        svar[jj] = 1;
        next[jj] = jj+1;
        prev[jj] = jj-1;
        flag[jj] = 0;
        sz[jj] = 0;
        setprev[jj] = setnext[jj] = 0;
        cprm[jj] = 2 * n;
    }
    var[1] = 1;
    sz[1] = n;
    sz[n] = sz[n+1] = 0;
    setprev[n] = setnext[n] = 0;
    setprev[n+1] = setnext[n+1] = 0;
    tailset[1] = n;
    firstset = 1;
    freeIdList[0] = 0;
    // #pragma omp parallel for num_threads(tk)
    for(ptiIndex jj= 1; jj<=n; jj++)
        freeIdList[jj] = jj+1;/*1 is used as a set id*/
    freeIdTop = 1;
    /* refinement: process rows one by one; the first time a supervariable is
       seen in a row, one of its columns is split off into a new set */
    for(j=1; j<=m; j++)
    {
        jend = ia[j+1]-1;
        for(jcol = ia[j]; jcol <= jend ; jcol++)
        {
            acol= cols[jcol];
            s = svar[acol];
            if( flag[s] < j)/*first occurence of supervar s in j*/
            {
                flag[s] = j;
                if(sz[s] == 1 && tailset[s] != acol)
                {
                    printf("this should not happen (sz 1 but tailset not ok)\n");
                    exit(12);
                }
                if(sz[s] > 1)
                {
                    ptiIndex newId;
                    /*remove acol from s*/
                    if(tailset[s] == acol) tailset[s] = prev[acol];
                    next[prev[acol]] = next[acol];
                    prev[next[acol]] = prev[acol];
                    sz[s] = sz[s] - 1;
                    /*create a new supervar ns=newId
                    and make i=acol its only var*/
                    if(freeIdTop == n+1) {
                        printf("this should not happen (no index)\n");
                        exit(12);
                    }
                    newId = freeIdList[freeIdTop++];
                    svar[acol] = newId;
                    var[newId] = acol;
                    flag[newId] = j;
                    sz[newId ] = 1;
                    next[acol] = 0;
                    prev[acol] = 0;
                    var[s] = acol;
                    tailset[newId] = acol;
                    /* insert the new set right before s in the set chain */
                    setnext[newId] = s;
                    setprev[newId] = setprev[s];
                    if(setprev[s])
                        setnext[setprev[s]] = newId;
                    setprev[s] = newId;
                    if(firstset == s)
                        firstset = newId;
                }
            }
            else/*second or later occurence of s for row j*/
            {
                k = var[s];
                svar[acol] = svar[k];
                /*remove acol from its current chain*/
                if(tailset[s] == acol) tailset[s] = prev[acol];
                next[prev[acol]] = next[acol];
                prev[next[acol]] = prev[acol];
                sz[s] = sz[s] - 1;
                if(sz[s] == 0)/*s is a free id now..*/
                {
                    freeIdList[--freeIdTop] = s; /*add s to the free id list*/
                    if(setnext[s])
                        setprev[setnext[s]] = setprev[s];
                    if(setprev[s])
                        setnext[setprev[s]] = setnext[s];
                    setprev[s] = setnext[s] = 0;
                    tailset[s] = 0;
                    var[s] = 0;
                    flag[s] = 0;
                }
                /*add to chain containing k (as the last element)*/
                prev[acol] = tailset[svar[k]];
                next[acol] = 0;/*BU next[tailset[svar[k]]];*/
                next[tailset[svar[k]]] = acol;
                tailset[svar[k]] = acol;
                sz[svar[k]] = sz[svar[k]] + 1;
            }
        }
    }
    /* emit the permutation: walk the sets in order, and within each set emit
       the columns head-to-tail (preserving their initial relative order) */
    pos = 1;
    numBlocks = 0;
    for(set = firstset; set != 0; set = setnext[set])
    {
        ptiIndex item = tailset[set];
        ptiIndex headset = 0;
        numBlocks ++;
        while(item != 0 )
        {
            headset = item;
            item = prev[item];
        }
        /*located the head of the set. output them (this is for keeping the initial order*/
        while(headset)
        {
            cprm[pos++] = headset;
            headset = next[headset];
        }
    }
    free(tailset);
    free(sz);
    free(next);
    free(prev);
    free(var);
    free(flag);
    free(svar);
    free(setnext);
    free(setprev);
    free(freeIdList); /* fix: was leaked before */
    if(pos-1 != n){
        printf("**************** Error ***********\n");
        printf("something went wrong and we could not order everyone\n");
        exit(12);
    }
    return ;
}
/**************************************************************/
/* One Lexi-order pass over a single mode (0 = rows, 1 = columns) of mtx.
 * Sorts the nonzeros by the OTHER mode, builds a 1-based CSR view whose
 * "rows" are runs of equal other-mode indices, lex-orders this mode's ids
 * via lexOrderThem, then renames the mode's indices in place and permutes
 * orgIds[mode] accordingly. Exits the process on allocation failure. */
static void ptiLexiOrderPerMode(ptiSparseMatrix * mtx, ptiIndex mode, ptiIndex ** orgIds, int tk)
{
ptiNnzIndex const nnz = mtx->nnz;
// ptiIndex const nmodes = 2; // for matrices
ptiIndex mode_dim;
ptiIndexVector * mode_ind;
/* NOTE: mode must be 0 or 1; any other value leaves these uninitialized */
if (mode == 0) {
mode_dim = mtx->nrows;
mode_ind = &(mtx->rowind);
}
else if (mode == 1) {
mode_dim = mtx->ncols;
mode_ind = &(mtx->colind);
}
ptiNnzIndex * rowPtrs = NULL;
ptiIndex * colIds = NULL;
ptiIndex * cprm = NULL, * invcprm = NULL, * saveOrgIds = NULL;
ptiNnzIndex atRowPlus1, mtxNrows, mtrxNnz;
ptiIndex c;
ptiNnzIndex z;
double t1, t0;
t0 = u_seconds();
ptiIndex sort_mode = 0;
/* reverse to get the sort_mode */
if (mode == 0) sort_mode = 1;
else if (mode == 1) sort_mode = 0;
ptiSparseMatrixSortIndexSingleMode(mtx, 1, sort_mode, tk);
t1 = u_seconds()-t0;
printf("mode %u, sort time %.2f\n", mode, t1); fflush(stdout);
/* we matricize this (others x thisDim), whose columns will be renumbered */
/* on the matrix all arrays are from 1, and all indices are from 1. */
rowPtrs = (ptiNnzIndex *) malloc(sizeof(ptiNnzIndex) * (nnz + 2)); /*large space*/
colIds = (ptiIndex *) malloc(sizeof(ptiIndex) * (nnz + 2)); /*large space*/
if(rowPtrs == NULL || colIds == NULL)
{
printf("could not allocate.exiting \n");
exit(12);
}
rowPtrs[0] = 0; /* we should not access this, that is why. */
rowPtrs [1] = 1;
colIds[1] = mode_ind->data[0] + 1;
atRowPlus1 = 2;
mtrxNnz = 2;/* start filling from the second element */
t0 = u_seconds();
/* each run of nonzeros with equal sort_mode index becomes one CSR row */
for (z = 1; z < nnz; z++)
{
int cmp_res = pti_SparseMatrixCompareIndicesSingleMode(mtx, z, mtx, z-1, sort_mode);
if(cmp_res != 0)
rowPtrs[atRowPlus1++] = mtrxNnz; /* close the previous row and start a new one. */
colIds[mtrxNnz ++] = mode_ind->data[z] + 1;
}
rowPtrs[atRowPlus1] = mtrxNnz;
mtxNrows = atRowPlus1-1;
t1 =u_seconds()-t0;
printf("mode %u, create time %.2f\n", mode, t1); fflush(stdout);
/* shrink rowPtrs to the number of rows actually produced */
rowPtrs = realloc(rowPtrs, (sizeof(ptiNnzIndex) * (mtxNrows + 2)));
cprm = (ptiIndex *) malloc(sizeof(ptiIndex) * (mode_dim + 1));
invcprm = (ptiIndex *) malloc(sizeof(ptiIndex) * (mode_dim + 1));
saveOrgIds = (ptiIndex *) malloc(sizeof(ptiIndex) * (mode_dim + 1));
// printf("rowPtrs: \n");
// ptiDumpNnzIndexArray(rowPtrs, mtxNrows + 2, stdout);
// printf("colIds: \n");
// ptiDumpIndexArray(colIds, nnz + 2, stdout);
t0 = u_seconds();
lexOrderThem(mtxNrows, mode_dim, rowPtrs, colIds, cprm, tk);
t1 =u_seconds()-t0;
printf("mode %u, lexorder time %.2f\n", mode, t1); fflush(stdout);
// printf("cprm: \n");
// ptiDumpIndexArray(cprm, mode_dim + 1, stdout);
/* update orgIds and modify coords; cprm is 1-based, invcprm 0-based */
for (c=0; c < mode_dim; c++)
{
invcprm[cprm[c+1]-1] = c;
saveOrgIds[c] = orgIds[mode][c];
}
for (c=0; c < mode_dim; c++)
orgIds[mode][c] = saveOrgIds[cprm[c+1]-1];
// printf("invcprm: \n");
// ptiDumpIndexArray(invcprm, mode_dim + 1, stdout);
/* rename the dim component of nonzeros */
for (z = 0; z < nnz; z++)
mode_ind->data[z] = invcprm[mode_ind->data[z]];
// ptiAssert(ptiDumpSparseMatrix(mtx, 0, stdout) == 0);
free(saveOrgIds);
free(invcprm);
free(cprm);
free(colIds);
free(rowPtrs);
}
|
GB_unaryop__lnot_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_uint8
// op(A') function: GB_tran__lnot_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = lnot (Ax [p]) for all p: applies the logical-not operator
// (z = !(x != 0)) element-wise to a uint8 array, in parallel. Cx and Ax may
// alias. Returns GrB_NO_VALUE when the operator/type pair is compiled out
// via GB_DISABLE. (Auto-generated file: code left untouched.)
GrB_Info GB_unop__lnot_uint8_uint8
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (A'): transpose A, typecast, and apply logical-not, via the
// shared template in GB_unaryop_transpose.c (phase 2). Returns GrB_NO_VALUE
// when compiled out via GB_DISABLE. (Auto-generated file: code left
// untouched.)
GrB_Info GB_tran__lnot_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
byte_validation.c | /*
* Benchmark to test the measurement of bytes requested by the CPU
*
*
*
*
*
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
#ifdef USE_CALI
#include <caliper/cali.h>
#endif
#ifndef SIZE
#define SIZE 65536 // 4x doubles in Intel L1
#endif
#ifndef NUM_OPS
#define NUM_OPS 2147483616 // 2^31
#endif
#define AVAL 7.0 // initial value of A
#define BVAL 5.0 // initial value of B
#define TOL 0.001 // tolerance used to check the result
#define TYPE double
#define TRUE 1
#define FALSE 0
/* Command-line configurable benchmark parameters. */
struct Inputs {
int simd; /* number of elements handled by the SIMD-friendly first loop */
int r_size; /* reduction block size used by reduction_loop */
};
/* benchmark kernels */
void scalar_simd_loop(TYPE* A, TYPE* B, TYPE* C, size_t size, size_t simd_frac, size_t num_loops);
void reduction_loop(TYPE* A, TYPE* B, TYPE* C, size_t size, size_t r_size, size_t num_loops);
/* setup / teardown helpers */
void get_input(int argc, char **argv, struct Inputs* input);
void vector_init(TYPE** A, TYPE** B, TYPE** C, size_t row_len);
void vector_free(TYPE* A, TYPE* B, TYPE* C, size_t size);
void print_mat(TYPE* C);
// main function
/* Driver: parses options, initializes the vectors, warms the cache, runs
 * the two benchmark kernels and reports their wall-clock times.
 * Fixes: size_t values were printed with %d (undefined behavior on LP64
 * platforms) — now %zu; removed unused locals (i, j, k, r, run_time). */
int main(int argc, char **argv) {
    size_t size, num_loops;
    double t1, t2, t3;
    struct Inputs input;
    get_input(argc, argv, &input);
#ifdef USE_CALI
    cali_init();
    cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS);
    cali_set_int(thread_attr, omp_get_thread_num());
#endif
    TYPE *A, *B, *C;
    size = SIZE;
    num_loops = NUM_OPS / (2*size);
    vector_init(&A, &B, &C, size);
    /* warm up cache (size_t index avoids signed/unsigned comparison) */
#pragma omp simd
    for (size_t j = 0; j < size; j++) {
        A[j] = 0.99*A[j];
        B[j] = 0.99*B[j];
        C[j] = 0.99*C[j];
    }
    printf("Settings:\n");
    printf(" Size: %zu\n", size);
    printf(" SIMD: %d\n", input.simd);
    printf(" Num Ops: %d\n", NUM_OPS);
    printf(" Num Loops: %zu\n", num_loops);
    t1 = omp_get_wtime();
    scalar_simd_loop(A, B, C, size, input.simd, num_loops);
    t2 = omp_get_wtime();
    reduction_loop(A, B, C, size, input.r_size, num_loops);
    t3 = omp_get_wtime();
    vector_free(A,B,C,size);
    printf("Scalar-SIMD time: %f\n", t2 - t1);
    printf("Reduction time: %f\n", t3 - t2);
    return 0;
}
/*
 * Runs num_loops passes over the vectors. The first simd_frac elements get a
 * vectorizable C[j] = A[j] + scale*B[j] update; the remainder runs a
 * loop-carried recurrence (B[j] depends on B[j-1]) that cannot vectorize.
 */
void scalar_simd_loop(TYPE* A, TYPE* B, TYPE* C, size_t size,
                      size_t simd_frac,
                      size_t num_loops) {
  const TYPE scale = 0.99;

  /* clamp simd_frac into [1, size] */
  if (simd_frac < 1) {
    simd_frac = 1;
  }
  if (simd_frac > size) {
    simd_frac = size;
  }

#ifdef USE_CALI
  CALI_MARK_BEGIN("simd_loop");
#endif
  for (int pass = 0; pass < num_loops; pass++) {
    int j = 0;
    #pragma omp simd
    for (; j < simd_frac; j++) {
      C[j] = A[j] + scale*B[j];
    }
    /* serial tail: each element depends on the previous one */
    for (; j < size; j++) {
      B[j] = A[j] - scale*B[j-1];
    }
  }
#ifdef USE_CALI
  CALI_MARK_END("simd_loop");
#endif
}
/*
 * Folds chunks of r_size elements of A[k] + scale*B[k] into C at each chunk
 * base. Runs 2*num_loops passes to match scalar_simd_loop's op count.
 */
void reduction_loop(TYPE* A, TYPE* B, TYPE* C, size_t size,
                    size_t r_size,
                    size_t num_loops) {
  const TYPE scale = 0.99;

  /* clamp r_size into [1, size] */
  if (r_size < 1) {
    r_size = 1;
  }
  if (r_size > size) {
    r_size = size;
  }

#ifdef USE_CALI
  CALI_MARK_BEGIN("reduct_loop");
#endif
  /* NOTE(review): if size is not a multiple of r_size the final chunk reads
   * past 'size'; current callers use power-of-two sizes so this holds. */
  for (int pass = 0; pass < num_loops*2; pass++) {
    for (int base = 0; base < size; base += r_size) {
      double acc = C[base];
      #pragma omp simd reduction(+:acc)
      for (int k = base; k < base + r_size; k++) {
        acc += A[k] + scale*B[k];
      }
      C[base] = acc;
    }
  }
#ifdef USE_CALI
  CALI_MARK_END("reduct_loop");
#endif
}
/*************************************************************\
Utility Functions
\*************************************************************/
/*
 * Parse command-line options into 'input'.
 *   -s / --simd_fraction N   : number of elements handled by the SIMD loop
 *   -r / --reduction_size N  : chunk size used by the reduction loop
 * Exits with status 1 if an option is given without its argument.
 */
void get_input(int argc, char **argv, struct Inputs* input) {
  int i;
  /* defaults: half the vector for the SIMD loop, reduction chunks of 16 */
  input->simd = SIZE/2;
  input->r_size = 16;
  for (i = 1; i < argc; i++) {
    if ( !(strcmp("-s", argv[i])) || !(strcmp("--simd_fraction", argv[i])) ) {
      /* BUG FIX: was 'if (i++ < argc)', which is always true and could
       * dereference argv[argc] (NULL) when -s is the last argument. */
      if (i + 1 < argc) {
        input->simd = atoi(argv[++i]);
      } else {
        printf("Please include a number of simd ops (less than SIZE) with that option\n");
        exit(1);
      }
    } else if ( !(strcmp("-r", argv[i])) || !(strcmp("--reduction_size", argv[i])) ) {
      if (i + 1 < argc) {
        /* BUG FIX: was 'input->simd = atoi(...)', so -r never took effect */
        input->r_size = atoi(argv[++i]);
      } else {
        printf("Please include a reduction size (less than SIZE) with that option\n");
        exit(1);
      }
    }
  }
}
// Initialize the vectors (uniform values to make an easier check)
/*
 * Allocate three 64-byte-aligned vectors of row_len elements and fill them
 * uniformly (A=AVAL, B=BVAL, C=0). Exits on misaligned size or OOM.
 */
void vector_init(TYPE** A, TYPE** B, TYPE** C, size_t row_len) {
  size_t j;
  /* aligned_alloc requires the allocation size to be a multiple of the
   * alignment; row_len % 64 == 0 guarantees that for power-of-two sizeof(TYPE) */
  if ( ((row_len) % 64) != 0 ) {
    printf("ERROR aligning memory; make sure size is multiple of 64 bytes.\n");
    exit(1);
  }
  (*A) = (TYPE*)aligned_alloc(64, row_len*sizeof(TYPE));
  (*B) = (TYPE*)aligned_alloc(64, row_len*sizeof(TYPE));
  (*C) = (TYPE*)aligned_alloc(64, row_len*sizeof(TYPE));
  if( ((*A) == NULL) || ((*B) == NULL) || ((*C) == NULL) ) {
    printf("ERROR allocating memory\n");
    exit(1);
  }
  /* uniform values make the result easy to verify */
  for (j=0; j<row_len; j++) {
    (*A)[j] = AVAL;
    (*B)[j] = BVAL;
    (*C)[j] = 0.0;
  }
}
/* Release the three benchmark vectors. 'size' is unused but kept so the
 * signature mirrors vector_init. */
void vector_free(TYPE* A, TYPE* B, TYPE* C, size_t size) {
  (void)size;
  free(C);
  free(B);
  free(A);
}
|
graph.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "graph.h"
#include "csf.h"
#include "sort.h"
#include "util.h"
#ifdef SPLATT_USE_PATOH
#include <patoh.h>
#endif
#ifdef SPLATT_USE_ASHADO
#include <ashado.h>
#endif
/******************************************************************************
* TYPES
*****************************************************************************/
/**
* @brief Represents a set with a known (and reasonable) maximum value.
*/
typedef struct
{
wgt_t * counts; /** The number of times an element was updated. */
vtx_t * seen; /** The (unsorted) list of elements that have been seen. */
vtx_t nseen; /** The length of seen[]. */
} adj_set;
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Allocate/initialize a set.
*
* @param set The set to allocate.
* @param max_size The maximum element in the set. 2x this memory is allocated.
*/
/* Allocate a set able to hold elements in [0, max_size). Both arrays are
 * zero-initialized and the set starts empty. */
static void p_set_init(
    adj_set * set,
    vtx_t max_size)
{
  set->nseen = 0;
  set->counts = calloc(max_size, sizeof(*(set->counts)));
  set->seen   = calloc(max_size, sizeof(*(set->seen)));
}
/**
* @brief Free all memory allocated for a set.
*
* @param set The set to free.
*/
/* Release the set's arrays and mark it empty. The struct itself is not freed. */
static void p_set_free(
    adj_set * set)
{
  free(set->seen);
  free(set->counts);
  set->nseen = 0;
}
/**
* @brief Remove (but do not free) all elements from a set. This runs in
* O(nseen) time.
*
* @param set the set to clear.
*/
/* Empty the set in O(nseen) time by resetting only the entries that were
 * actually touched, rather than the whole counts[] array. */
static void p_set_clear(
    adj_set * set)
{
  wgt_t * const counts = set->counts;
  vtx_t * const seen = set->seen;
  vtx_t idx = 0;
  while (idx < set->nseen) {
    counts[seen[idx]] = 0;
    seen[idx] = 0;
    ++idx;
  }
  set->nseen = 0;
}
/**
* @brief Add a new element to the set or update its count.
*
* @param set The set to modify.
* @param vid The id of the element.
* @param upd How much to modify counts[] by.
*/
/* Add 'upd' to the element's count, inserting it into the seen[] list the
 * first time its count leaves zero. */
static void p_set_update(
    adj_set * set,
    vtx_t vid,
    wgt_t upd)
{
  wgt_t const prev = set->counts[vid];
  set->counts[vid] = prev + upd;
  /* first sighting -> record membership */
  if (prev == 0) {
    set->seen[set->nseen++] = vid;
  }
}
/**
* @brief Count the number of edges (i.e., the size of adjacency list) of a
* sparse tensor converted to m-partite graph.
*
* @param csf The tensor to convert.
*
* @return The number of edges.
*/
static adj_t p_count_adj_size(
  splatt_csf * const csf)
{
  adj_t ncon = 0;
  /* only untiled CSF is supported here */
  assert(csf->ntiles == 1);
  csf_sparsity * pt = csf->pt;
  vtx_t const nvtxs = pt->nfibs[0];
  /* type better be big enough */
  assert((idx_t) nvtxs == (vtx_t) nvtxs);
  adj_set set;
  /* the set must hold any index of the largest mode */
  p_set_init(&set, csf->dims[argmax_elem(csf->dims, csf->nmodes)]);
  idx_t parent_start = 0;
  idx_t parent_end = 0;
  /* walk each root vertex's subtree, one depth at a time */
  for(vtx_t v=0; v < nvtxs; ++v) {
    parent_start = v;
    parent_end = v+1;
    for(idx_t d=1; d < csf->nmodes; ++d) {
      idx_t const start = pt->fptr[d-1][parent_start];
      idx_t const end = pt->fptr[d-1][parent_end];
      fidx_t const * const fids = pt->fids[d];
      /* gather the distinct indices appearing at this depth of v's subtree */
      for(idx_t f=start; f < end; ++f) {
        p_set_update(&set, fids[f], 1);
      }
      /* each distinct index contributes one edge from vertex v */
      ncon += set.nseen;
      /* prepare for next level in the tree */
      parent_start = start;
      parent_end = end;
      p_set_clear(&set);
    }
  }
  p_set_free(&set);
  return ncon;
}
/**
* @brief Compute the offset of a certain CSF tree depth (when all indices are
* mapped to vertices). This accounts for csf->dim_perm.
*
* For example, with no permutation and depth=2, this returns
* csf->dims[0] + csf->dims[1].
*
* @param csf The tensor to use for calculation.
* @param depth The depth to work on.
*
* @return The offset.
*/
/* Sum the dimensions of all modes preceding the mode stored at CSF depth
 * 'depth' (per csf->dim_perm); this is the global vertex-id offset. */
static idx_t p_calc_offset(
    splatt_csf const * const csf,
    idx_t const depth)
{
  idx_t const target_mode = csf->dim_perm[depth];
  idx_t sum = 0;
  idx_t m = 0;
  while (m < target_mode) {
    sum += csf->dims[m];
    ++m;
  }
  return sum;
}
/**
* @brief Count the nonzeros below a given node in a CSF tensor.
*
* @param fptr The adjacency pointer of the CSF tensor.
* @param nmodes The number of modes in the tensor.
* @param depth The depth of the node
* @param fiber The id of the node.
*
* @return The nonzeros below fptr[depth][fiber].
*/
/* Return the number of nonzeros in the subtree rooted at fptr[depth][fiber]
 * by chasing the left and right boundary pointers down to the leaf level. */
static wgt_t p_count_nnz(
    idx_t * * fptr,
    idx_t const nmodes,
    idx_t depth,
    idx_t const fiber)
{
  /* leaves are individual nonzeros */
  if(depth == nmodes-1) {
    return 1;
  }
  idx_t lo = fptr[depth][fiber];
  idx_t hi = fptr[depth][fiber+1];
  for(idx_t d = depth+1; d < nmodes-1; ++d) {
    lo = fptr[d][lo];
    hi = fptr[d][hi];
  }
  return hi - lo;
}
/**
* @brief Fill the contents of a splatt_graph. The graph must already be
* allocated!
*
* @param csf The tensor to convert.
* @param graph The graph to fill, ALREADY ALLOCATED!
*/
static void p_fill_ijk_graph(
  splatt_csf const * const csf,
  splatt_graph * graph)
{
  csf_sparsity * pt = csf->pt;
  vtx_t const nvtxs = graph->nvtxs;
  adj_set set;
  /* set must hold any index of the largest mode */
  p_set_init(&set, csf->dims[argmax_elem(csf->dims, csf->nmodes)]);
  /* pointing into eind */
  adj_t ncon = 0;
  /* start/end of my subtree */
  idx_t parent_start;
  idx_t parent_end;
  for(vtx_t v=0; v < nvtxs; ++v) {
    parent_start = v;
    parent_end = v+1;
    graph->eptr[v] = ncon;
    for(idx_t d=1; d < csf->nmodes; ++d) {
      idx_t const start = pt->fptr[d-1][parent_start];
      idx_t const end = pt->fptr[d-1][parent_end];
      /* compute adjacency info */
      fidx_t const * const fids = pt->fids[d];
      for(idx_t f=start; f < end; ++f) {
        /* edge weight = nonzeros below node f (multiplicity of the link) */
        p_set_update(&set, fids[f], p_count_nnz(pt->fptr, csf->nmodes, d, f));
      }
      /* things break if vtx size isn't our sorting size... */
      if(sizeof(*(set.seen)) == sizeof(splatt_idx_t)) {
        quicksort((idx_t *) set.seen, set.nseen);
      }
      /* fill in graph->eind, shifting mode-local ids to global vertex ids */
      idx_t const id_offset = p_calc_offset(csf, d);
      for(vtx_t e=0; e < set.nseen; ++e) {
        graph->eind[ncon] = set.seen[e] + id_offset;
        if(graph->ewgts != NULL) {
          graph->ewgts[ncon] = set.counts[set.seen[e]];
        }
        ++ncon;
      }
      /* prepare for next level in the tree */
      parent_start = start;
      parent_end = end;
      p_set_clear(&set);
    }
  }
  p_set_free(&set);
  /* close the CSR-style adjacency list */
  graph->eptr[nvtxs] = graph->nedges;
}
/**
* @brief Takes a list of graphs and returns them stacked on top of each other.
* No adjacency lists are altered, only vertices added.
*
* @param graphs The graphs to merge.
* @param ngraphs The number of graphs.
*
* @return All graphs stacked.
*/
static splatt_graph * p_merge_graphs(
  splatt_graph * * graphs,
  idx_t const ngraphs)
{
  /* count total size */
  vtx_t nvtxs = 0;
  adj_t ncon = 0;
  for(idx_t m=0; m < ngraphs; ++m) {
    nvtxs += graphs[m]->nvtxs;
    ncon += graphs[m]->nedges;
  }
  /* no vertex weights, edge weights on; graph_alloc also sets eptr[nvtxs].
   * NOTE(review): assumes every input graph has ewgts allocated — confirm */
  splatt_graph * ret = graph_alloc(nvtxs, ncon, 0, 1);
  /* fill in ret */
  vtx_t voffset = 0;
  adj_t eoffset = 0;
  for(idx_t m=0; m < ngraphs; ++m) {
    for(vtx_t v=0; v < graphs[m]->nvtxs; ++v) {
      vtx_t const * const eptr = graphs[m]->eptr;
      adj_t const * const eind = graphs[m]->eind;
      wgt_t const * const ewgts = graphs[m]->ewgts;
      /* vertices shift by voffset, adjacency offsets shift by eoffset;
       * the adjacency ids themselves are copied unchanged */
      ret->eptr[v + voffset] = eptr[v] + eoffset;
      for(adj_t e=eptr[v]; e < eptr[v+1]; ++e) {
        ret->eind[e + eoffset] = eind[e];
        ret->ewgts[e + eoffset] = ewgts[e];
      }
    }
    voffset += graphs[m]->nvtxs;
    eoffset += graphs[m]->nedges;
  }
  return ret;
}
/**
* @brief Fill the vertex weights array.
*
* @param ft The CSF tensor to derive vertex weights from.
* @param hg The hypegraph structure to modify.
* @param which Vertex weight model to follow, see graph.h.
*/
/*
 * Fill hg->vwts according to the requested weighting model:
 *   VTX_WT_NONE    — no weights (vwts = NULL)
 *   VTX_WT_FIB_NNZ — each vertex (fiber) weighted by its nonzero count,
 *                    taken from ft->fptr. 'ft' may be NULL for VTX_WT_NONE.
 */
static void p_fill_vwts(
    ftensor_t const * const ft,
    hgraph_t * const hg,
    hgraph_vwt_type const which)
{
  switch(which) {
  case VTX_WT_NONE:
    hg->vwts = NULL;
    break;

  /* weight based on nnz in fiber */
  case VTX_WT_FIB_NNZ:
    hg->vwts = (idx_t *) splatt_malloc(hg->nvtxs * sizeof(idx_t));
    #pragma omp parallel for
    for(idx_t v=0; v < hg->nvtxs; ++v) {
      hg->vwts[v] = ft->fptr[v+1] - ft->fptr[v];
    }
    break; /* defensive: was missing; protects against future fall-through */
  }
}
/**
* @brief Maps an index in a mode of a permuted CSF tensor to a global vertex
* index. This accounts for the mode permutation using the CSF dim-perm.
*
* @param id The index we are converting (local to the mode).
* @param mode The mode the index lies in (LOCAL TO THE CSF TENSOR).
* EXAMPLE: a 3 mode tensor would use mode-0 to represent slices,
* mode-1 to represent fids, and mode-2 to represent the fiber nnz
* @param ft The CSF tensor with dim_perm.
*
* @return 'id', converted to global vertex indices. EXAMPLE: k -> (I+J+k).
*/
/* Convert a mode-local index to a global vertex id by adding the sizes of
 * all modes preceding ft->dim_perm[mode]. Example: k -> (I + J + k). */
static idx_t p_map_idx(
    idx_t id,
    idx_t const mode,
    ftensor_t const * const ft)
{
  idx_t const stop = ft->dim_perm[mode];
  for(idx_t m = 0; m < stop; ++m) {
    id += ft->dims[m];
  }
  return id;
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
hgraph_t * hgraph_nnz_alloc(
  sptensor_t const * const tt)
{
  hgraph_t * hg = (hgraph_t *) splatt_malloc(sizeof(hgraph_t));
  /* one vertex per nonzero; no vertex weights */
  hg->nvtxs = tt->nnz;
  p_fill_vwts(NULL, hg, VTX_WT_NONE);
  /* # hyper-edges = I + J + K + ... */
  hg->hewts = NULL;
  hg->nhedges = 0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    hg->nhedges += tt->dims[m];
  }
  /* fill in eptr shifted by 1 index. */
  hg->eptr = (idx_t *) calloc(hg->nhedges+1, sizeof(idx_t));
  idx_t * const restrict eptr = hg->eptr;
  /* histogram pass: count nonzeros per index of every mode; the +1 shift
   * leaves slot 0 free for the prefix sum below */
  idx_t offset = 1;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    fidx_t const * const restrict ind = tt->ind[m];
    for(idx_t n=0; n < tt->nnz; ++n) {
      eptr[offset+ind[n]] += 1;
    }
    offset += tt->dims[m];
  }
  /* do a shifted prefix sum to get eptr */
  idx_t saved = eptr[1];
  eptr[1] = 0;
  for(idx_t i=2; i <= hg->nhedges; ++i) {
    idx_t tmp = eptr[i];
    eptr[i] = eptr[i-1] + saved;
    saved = tmp;
  }
  /* each nnz causes 'nmodes' connections */
  hg->eind = (idx_t *) splatt_malloc(tt->nnz * tt->nmodes * sizeof(idx_t));
  idx_t * const restrict eind = hg->eind;
  /* scatter pass: eptr[1+idx] acts as a running write cursor, which at the
   * same time restores eptr to its final (unshifted) CSR form */
  offset = 1;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    fidx_t const * const restrict ind = tt->ind[m];
    for(idx_t n=0; n < tt->nnz; ++n) {
      eind[eptr[offset+ind[n]]++] = n;
    }
    offset += tt->dims[m];
  }
  assert(eptr[hg->nhedges] == tt->nnz * tt->nmodes);
  return hg;
}
hgraph_t * hgraph_fib_alloc(
  ftensor_t const * const ft,
  idx_t const mode)
{
  hgraph_t * hg = (hgraph_t *) splatt_malloc(sizeof(hgraph_t));
  /* vertex weights are nnz per fiber */
  hg->nvtxs = ft->nfibs;
  p_fill_vwts(ft, hg, VTX_WT_FIB_NNZ);
  /* # hyper-edges = I + J + K + ... */
  hg->hewts = NULL;
  hg->nhedges = 0;
  for(idx_t m=0; m < ft->nmodes; ++m) {
    hg->nhedges += ft->dims[m];
  }
  /* fill in eptr shifted by 1 idx:
   * a) each nnz induces a hyperedge connection
   * b) each non-fiber mode accounts for a hyperedge connection
   */
  hg->eptr = (idx_t *) calloc(hg->nhedges+1, sizeof(idx_t));
  idx_t * const restrict eptr = hg->eptr;
  /* histogram pass: p_map_idx converts mode-local ids to global edge ids */
  for(idx_t s=0; s < ft->nslcs; ++s) {
    /* the slice hyperedge has nfibers more connections */
    eptr[1+p_map_idx(s, 0, ft)] += ft->sptr[s+1] - ft->sptr[s];
    for(idx_t f=ft->sptr[s]; f < ft->sptr[s+1]; ++f) {
      /* fiber makes another connection with fid */
      eptr[1+p_map_idx(ft->fids[f], 1, ft)] += 1;
      /* each nnz now has a contribution too */
      for(idx_t jj=ft->fptr[f]; jj < ft->fptr[f+1]; ++jj) {
        eptr[1+p_map_idx(ft->inds[jj], 2, ft)] += 1;
      }
    }
  }
  /* do a shifted prefix sum to get eptr; ncon tallies total connections */
  idx_t ncon = eptr[1];
  idx_t saved = eptr[1];
  eptr[1] = 0;
  for(idx_t i=2; i <= hg->nhedges; ++i) {
    ncon += eptr[i];
    idx_t tmp = eptr[i];
    eptr[i] = eptr[i-1] + saved;
    saved = tmp;
  }
  hg->eind = (idx_t *) splatt_malloc(ncon * sizeof(idx_t));
  idx_t * const restrict eind = hg->eind;
  /* now fill in eind while using eptr as a marker; the ++ side effects
   * advance each hyperedge's write cursor and simultaneously restore
   * eptr to its final unshifted form */
  for(idx_t s=0; s < ft->nslcs; ++s) {
    idx_t const sid = p_map_idx(s, 0, ft);
    for(idx_t f = ft->sptr[s]; f < ft->sptr[s+1]; ++f) {
      idx_t const fid = p_map_idx(ft->fids[f], 1, ft);
      eind[eptr[1+sid]++] = f;
      eind[eptr[1+fid]++] = f;
      for(idx_t jj=ft->fptr[f]; jj < ft->fptr[f+1]; ++jj) {
        idx_t const nid = p_map_idx(ft->inds[jj], 2, ft);
        eind[eptr[1+nid]++] = f;
      }
    }
  }
  return hg;
}
/*
 * Return the ids of all hyperedges whose vertices lie entirely inside one
 * partition ("uncut" edges), as a newly-allocated array. *ret_nnotcut
 * receives its length. (The original reused the names 'ncut'/'cut' for
 * these, which was misleading — the logic counted UNCUT edges.)
 */

/* Return 1 if hyperedge h spans more than one partition. */
static int p_hedge_is_cut(
    hgraph_t const * const hg,
    idx_t const * const parts,
    idx_t const h)
{
  idx_t const * const eptr = hg->eptr;
  idx_t const * const eind = hg->eind;
  idx_t const firstpart = parts[eind[eptr[h]]];
  for(idx_t e=eptr[h]+1; e < eptr[h+1]; ++e) {
    if(parts[eind[e]] != firstpart) {
      return 1;
    }
  }
  return 0;
}

idx_t * hgraph_uncut(
    hgraph_t const * const hg,
    idx_t const * const parts,
    idx_t * const ret_nnotcut)
{
  idx_t const nhedges = (idx_t) hg->nhedges;

  /* first pass: count the uncut hyperedges */
  idx_t nuncut = 0;
  for(idx_t h=0; h < nhedges; ++h) {
    if(!p_hedge_is_cut(hg, parts, h)) {
      ++nuncut;
    }
  }
  *ret_nnotcut = nuncut;

  /* second pass: record their ids */
  idx_t * uncut = (idx_t *) splatt_malloc(nuncut * sizeof(idx_t));
  idx_t ptr = 0;
  for(idx_t h=0; h < nhedges; ++h) {
    if(!p_hedge_is_cut(hg, parts, h)) {
      uncut[ptr++] = h;
    }
  }
  return uncut;
}
/* Release a hypergraph and all of its arrays. NULL members are safe to free. */
void hgraph_free(
    hgraph_t * hg)
{
  free(hg->hewts);
  free(hg->vwts);
  free(hg->eind);
  free(hg->eptr);
  free(hg);
}
/* Convert a sparse tensor to a single m-partite graph: build one CSF per
 * mode, turn each into a graph, then stack the per-mode graphs. */
splatt_graph * graph_convert(
  sptensor_t * const tt)
{
  double * opts = splatt_default_opts();
  /* p_count_adj_size asserts an untiled CSF */
  opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE;
  splatt_graph * graphs[MAX_NMODES];
  splatt_csf csf;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    csf_alloc_mode(tt, CSF_INORDER_MINUSONE, m, &csf, opts);
    /* count size of adjacency list */
    adj_t const ncon = p_count_adj_size(&csf);
    /* no vertex weights, edge weights enabled */
    graphs[m] = graph_alloc(tt->dims[m], ncon, 0, 1);
    p_fill_ijk_graph(&csf, graphs[m]);
    csf_free_mode(&csf);
  }
  /* merge graphs and write */
  splatt_graph * full_graph = p_merge_graphs(graphs, tt->nmodes);
  /* cleanup */
  splatt_free_opts(opts);
  for(idx_t m=0; m < tt->nmodes; ++m) {
    graph_free(graphs[m]);
  }
  return full_graph;
}
/* Allocate a graph with room for nvtxs vertices and nedges adjacency
 * entries. eptr[nvtxs] is pre-set to nedges; vertex/edge weight arrays are
 * only allocated when requested, otherwise left NULL. */
splatt_graph * graph_alloc(
    vtx_t nvtxs,
    adj_t nedges,
    int use_vtx_wgts,
    int use_edge_wgts)
{
  splatt_graph * g = splatt_malloc(sizeof(*g));
  g->nvtxs = nvtxs;
  g->nedges = nedges;
  g->eptr = splatt_malloc((nvtxs+1) * sizeof(*(g->eptr)));
  g->eind = splatt_malloc(nedges * sizeof(*(g->eind)));
  g->eptr[nvtxs] = nedges;
  g->vwgts = use_vtx_wgts ? splatt_malloc(nvtxs * sizeof(*(g->vwgts))) : NULL;
  g->ewgts = use_edge_wgts ? splatt_malloc(nedges * sizeof(*(g->ewgts))) : NULL;
  return g;
}
/* Release a graph and its arrays; NULL weight arrays are safe to free. */
void graph_free(
    splatt_graph * graph)
{
  free(graph->ewgts);
  free(graph->vwgts);
  free(graph->eind);
  free(graph->eptr);
  free(graph);
}
#ifdef SPLATT_USE_PATOH
/* Partition the hypergraph into 'nparts' parts with PaToH. Returns a
 * newly-allocated vertex -> part assignment of length hg->nvtxs.
 * PaToH's API is int-based, so all arrays are converted to int first. */
idx_t * patoh_part(
    hgraph_t const * const hg,
    idx_t const nparts)
{
  PaToH_Parameters args;
  /* minimize cut, speed-oriented preset */
  PaToH_Initialize_Parameters(&args, PATOH_CUTPART, PATOH_SUGPARAM_SPEED);
  int const nvtxs = hg->nvtxs;
  int const nnets = hg->nhedges;
  int const ncon = 1; /* single constraint (one weight per vertex) */
  /* vertex weights: copy if present, else unit weights */
  int * vwts = (int *) splatt_malloc(nvtxs * sizeof(int));
  if(hg->vwts != NULL) {
    for(int v=0; v < nvtxs; ++v) {
      vwts[v] = (int) hg->vwts[v];
    }
  } else {
    for(int v=0; v < nvtxs; ++v) {
      vwts[v] = 1;
    }
  }
  /* edge weights: NULL means unweighted nets */
  int * hwts = NULL;
  if(hg->hewts != NULL) {
    hwts = (int *) splatt_malloc(nnets * sizeof(int));
    for(int h=0; h < nnets; ++h) {
      hwts[h] = (int) hg->hewts[h];
    }
  }
  /* net start/end */
  int * eptr = (int *) splatt_malloc((nnets+1) * sizeof(int));
  for(int v=0; v <= nnets; ++v) {
    eptr[v] = (int) hg->eptr[v];
  }
  /* netted vertices */
  int * eind = (int *) splatt_malloc(eptr[nnets] * sizeof(int));
  for(int v=0; v < eptr[nnets]; ++v) {
    eind[v] = (int) hg->eind[v];
  }
  int * pvec = (int *) splatt_malloc(nvtxs * sizeof(int)); /* out: part of each vtx */
  int * pwts = (int *) splatt_malloc(nparts * sizeof(int)); /* out: part weights */
  int cut; /* out: cut size (unused here) */
  args._k = (int) nparts;
  PaToH_Alloc(&args, nvtxs, nnets, ncon, vwts, hwts, eptr, eind);
  /* do the partitioning! */
  PaToH_Part(&args, nvtxs, nnets, ncon, 0, vwts, hwts, eptr, eind, NULL, pvec,
      pwts, &cut);
  /* copy patoh output to idx_t */
  idx_t * parts = (idx_t *) splatt_malloc(nvtxs * sizeof(idx_t));
  for(idx_t p=0; p < hg->nvtxs; ++p) {
    parts[p] = (idx_t) pvec[p];
  }
  PaToH_Free();
  free(vwts);
  free(hwts); /* free(NULL) is a no-op when hg->hewts was NULL */
  free(eptr);
  free(eind);
  free(pvec);
  free(pwts);
  return parts;
}
#endif
#ifdef SPLATT_USE_ASHADO
/* Partition the hypergraph into 'nparts' parts with Ashado. Returns a
 * newly-allocated vertex -> part assignment of length hg->nvtxs. */
idx_t * ashado_part(
    hgraph_t const * const hg,
    idx_t const nparts)
{
  double * opts = ashado_default_opts();
  idx_t * part = (idx_t *) splatt_malloc(hg->nvtxs * sizeof(idx_t));
  /* NOTE(review): the literal 5 is an undocumented Ashado argument
   * (presumably a pass/seed count) — confirm against the Ashado API */
  ashado_partition(nparts, hg->nvtxs, hg->nhedges, hg->eptr, hg->eind,
      hg->vwts, hg->hewts, opts, 5, part);
  free(opts);
  return part;
}
#endif
|
manage_pressure_gradient.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
In this file, the explicit component of the pressure gradient acceleration is managed.
*/
#include <stdlib.h>
#include <stdio.h>
#include "../game_types.h"
#include "../thermodynamics/thermodynamics.h"
#include "../spatial_operators/spatial_operators.h"
double pressure_gradient_1_damping_factor(double);
int manage_pressure_gradient(State *state, Grid *grid, Dualgrid *dualgrid, Diagnostics *diagnostics, Forcings *forcings, Irreversible_quantities *irrev, Config *config)
{
  /*
  This function computes the pressure gradient acceleration.
  It fills forcings->pressure_gradient_acc_neg_nl (nonlinear term) and
  forcings->pressure_gradient_acc_neg_l (linear background term), and keeps
  forcings->pgrad_acc_old up to date for time extrapolation.
  */
  // 2.) the nonlinear pressure gradient term
  // Before calculating the pressure gradient acceleration, the old one must be saved for extrapolation.
  if (config -> totally_first_step_bool == 0)
  {
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_VECTORS; ++i)
    {
      // note the sign flip: the stored fields are the NEGATIVE accelerations
      forcings -> pgrad_acc_old[i] = -forcings -> pressure_gradient_acc_neg_nl[i] - forcings -> pressure_gradient_acc_neg_l[i];
    }
  }
  // diagnozing c_g_p and multiplying by the full potential tempertature
  double c_p = spec_heat_capacities_p_gas(0);
  #pragma omp parallel for
  for (int i = 0; i < NO_OF_SCALARS; ++i)
  {
    if (config -> assume_lte == 0)
    {
      // moisture-dependent heat capacity when not assuming LTE
      diagnostics -> c_g_p_field[i] = spec_heat_cap_diagnostics_p(state, i, config);
      diagnostics -> scalar_field_placeholder[i] = diagnostics -> c_g_p_field[i]*(grid -> theta_bg[i] + state -> theta_pert[i]);
    }
    else
    {
      // dry heat capacity times full potential temperature (background + perturbation)
      diagnostics -> scalar_field_placeholder[i] = c_p*(grid -> theta_bg[i] + state -> theta_pert[i]);
    }
  }
  // nonlinear term: c_g_p * theta * grad(Exner perturbation)
  grad(state -> exner_pert, forcings -> pressure_gradient_acc_neg_nl, grid);
  scalar_times_vector(diagnostics -> scalar_field_placeholder, forcings -> pressure_gradient_acc_neg_nl, forcings -> pressure_gradient_acc_neg_nl, grid);
  // 3.) the linear pressure gradient term
  // -------------------------------------
  #pragma omp parallel for
  for (int i = 0; i < NO_OF_SCALARS; ++i)
  {
    if (config -> assume_lte == 0)
    {
      diagnostics -> scalar_field_placeholder[i] = diagnostics -> c_g_p_field[i]*state -> theta_pert[i];
    }
    else
    {
      diagnostics -> scalar_field_placeholder[i] = c_p*state -> theta_pert[i];
    }
  }
  // linear term: c_g_p * theta' * grad(background Exner pressure)
  scalar_times_vector(diagnostics -> scalar_field_placeholder, grid -> exner_bg_grad, forcings -> pressure_gradient_acc_neg_l, grid);
  // 4.) The pressure gradient has to get a deceleration factor due to condensates.
  // --------------------------------------------------------------------------------
  if (config -> assume_lte == 0)
  {
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
      // gas fraction of the total density damps the acceleration
      irrev -> pressure_gradient_decel_factor[i] = density_gas(state, i)/density_total(state, i);
    }
    scalar_times_vector(irrev -> pressure_gradient_decel_factor, forcings -> pressure_gradient_acc_neg_nl, forcings -> pressure_gradient_acc_neg_nl, grid);
    scalar_times_vector(irrev -> pressure_gradient_decel_factor, forcings -> pressure_gradient_acc_neg_l, forcings -> pressure_gradient_acc_neg_l, grid);
  }
  // at the very fist step, the old time step pressure gradient acceleration must be saved here
  if (config -> totally_first_step_bool == 1)
  {
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_VECTORS; ++i)
    {
      forcings -> pgrad_acc_old[i] = -forcings -> pressure_gradient_acc_neg_nl[i] - forcings -> pressure_gradient_acc_neg_l[i];
    }
  }
  return 0;
}
/*
 * Map a density to a damping factor: densities at or above 1e-8 give 1
 * (no damping); smaller values scale the factor down linearly.
 */
double pressure_gradient_1_damping_factor(double density_value)
{
  const double safe_density = 1e-8;
  double factor = density_value/safe_density;
  return factor > 1 ? 1 : factor;
}
|
reduction.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Sum 0..9 with an OpenMP parallel-for reduction and print the result.
 * Returns 0 on success, 1 if the allocation fails.
 */
int main(int argc, char *argv[]) {
  int size_vec = 10;
  int global_sum = 0;

  int *vec = (int *)malloc(size_vec * sizeof(int));
  if (vec == NULL) { /* was unchecked */
    fprintf(stderr, "malloc failed\n");
    return 1;
  }
  for (int i = 0; i < size_vec; i++) {
    vec[i] = i;
  }

  /* each thread accumulates a private partial sum; OpenMP combines them */
  #pragma omp parallel for reduction(+:global_sum)
  for (int i = 0; i < size_vec; i++) {
    global_sum += vec[i];
  }

  printf("sum = %i\n", global_sum);
  free(vec); /* was leaked */
  return 0;
}
|
atax.c | /**
* atax.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define NX SIZE
#define NY SIZE
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Fill x with row*pi and A with (row*col)/NX, row-major. */
void init_array(DATA_TYPE *x, DATA_TYPE *A) {
  for (int row = 0; row < NX; row++) {
    x[row] = row * M_PI;
    for (int col = 0; col < NY; col++) {
      A[row * NY + col] = ((DATA_TYPE)row * (col)) / NX;
    }
  }
}
/* Count the entries where CPU and GPU outputs diverge beyond the percent
 * threshold, report the count, and return it (0 means a clean match). */
int compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu) {
  int mismatches = 0;
  for (int idx = 0; idx < NY; idx++) {
    if (percentDiff(z[idx], z_outputFromGpu[idx]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      mismatches++;
    }
  }
  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, mismatches);
  return mismatches;
}
/* Reference implementation of y = A^T * (A * x). tmp[i] receives the i-th
 * element of A*x; y is accumulated row by row. */
void atax_cpu(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) {
  for (int j = 0; j < NY; j++) {
    y[j] = 0;
  }
  for (int i = 0; i < NX; i++) {
    DATA_TYPE dot = 0;
    for (int j = 0; j < NY; j++) {
      dot += A[i * NY + j] * x[j];
    }
    tmp[i] = dot;
    for (int j = 0; j < NY; j++) {
      y[j] += A[i * NY + j] * dot;
    }
  }
}
/* Offloaded y = A^T * (A * x). A and x are copied to the device; tmp and y
 * are copied both ways. y must be zeroed on the host before the mapping. */
void atax_OMP(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) {
  for (int i = 0; i < NY; i++) {
    y[i] = 0;
  }
#pragma omp target teams map(to : A[ : NX *NY], x[ : NY]) map(tofrom : tmp[ : NX], y[ : NY]) device(DEVICE_ID)
  {
    /* tmp = A * x : one independent dot product per row */
#pragma omp distribute parallel for
    for (int i = 0; i < NX; i++) {
      tmp[i] = 0;
      for (int j = 0; j < NY; j++) {
        tmp[i] += A[i * NY + j] * x[j];
      }
    }
    // Note that the Loop has been reversed
    /* iterating j (columns) outermost makes each y[j] private to one
     * thread, avoiding the atomics the CPU loop order would need */
#pragma omp distribute parallel for
    for (int j = 0; j < NY; j++) {
      for (int i = 0; i < NX; i++) {
        y[j] += A[i * NY + j] * tmp[i];
      }
    }
  }
}
int main(int argc, char **argv) {
double t_start, t_end;
int fail = 0;
DATA_TYPE *A;
DATA_TYPE *x;
DATA_TYPE *y;
DATA_TYPE *y_outputFromGpu;
DATA_TYPE *tmp;
A = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
x = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
y = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE));
tmp = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE));
fprintf(stdout, "<< Matrix Transpose and Vector Multiplication >>\n");
init_array(x, A);
t_start = rtclock();
atax_OMP(A, x, y_outputFromGpu, tmp);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
fail = compareResults(y, y_outputFromGpu);
#endif
free(A);
free(x);
free(y);
free(y_outputFromGpu);
free(tmp);
return fail;
}
|
insider_runtime.c | #define _GNU_SOURCE
#include <assert.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <insider_macros.h>
#include <omp.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <limits.h>
#define MAX_CMD_LEN (4096)
#define MAX_PATH_LEN (4096)
#define MAX_CMD_OUTPUT_LEN (4096)
#define MAX_LINE_LEN (4096)
#define PAGE_SIZE (1 << 22)
#define MMIO_SPACE_SIZE (1 << 25)
#define ALLOCATED_BUF_NUM (8)
#define VIRT_FILE_FD (0xFFFF)
#define BUF_METADATA_IDX (1 << 21)
#define PHYSICAL_SECTOR_SIZE (4096)
#ifndef PAR_MEMCPY_WORKERS
#define PAR_MEMCPY_WORKERS (4)
#endif
struct ioctl_req {
char *real_file_path_ptr;
uint32_t real_file_path_len;
int8_t is_lock;
};
#define IOCTL_CMD _IOW('a', 'a', struct ioctl_req *)
const char DISK_NAME[] = "/dev/nvme_fpga";
const char DRIVER_DEVICE_NAME[] = "/dev/insider_runtime";
const char FILEFRAG_CMD[] = "filefrag -v ";
const char FILEFRAG_FILTER_CMD[] =
" | cut -d \":\" -f 3,4 | awk 'NR > 3' | sed \"s/.* "
"\\([0-9]\\+\\)\\.\\..*:\\(.*\\)/\\1 \\2/g\"";
const char DF_CMD[] = "df ";
const char DF_FILTER_DEVICE_NAME_CMD[] =
" | sed \"2, 2p\" -n | awk '{print $1}'";
const char DF_FILTER_MOUNT_POINT_CMD[] = " | sed -n 2p | awk '{print $6}'";
const char LS_CMD[] = "ls -l ";
const char LS_FILTER_CMD[] = " | awk '{print $5}'";
const char TOUCH_CMD[] = "touch ";
const char REALPATH_CMD[] = "realpath --relative-to=";
int mmio_fd;
void *mmio_space;
void *app_bufs[ALLOCATED_BUF_NUM];
int app_buf_fds[ALLOCATED_BUF_NUM];
unsigned long long app_buf_phy_addrs[ALLOCATED_BUF_NUM];
char mount_point_path[MAX_PATH_LEN];
char *locked_real_files_paths[MAX_EXTENT_NUM];
int num_locked_real_files = 0;
int app_bufs_ptr = 0;
int is_eop = 0;
int buf_idx = 0;
int buf_len = 0;
int file_finish_reading = 0;
int first = 1;
int is_write;
unsigned long long host_written_bytes = 0;
/*
 * Ask the kernel to drop the page cache for the given file via
 * posix_fadvise(DONTNEED). Returns the last fstat/fadvise result; note that
 * a failed open() still returns 0 (best effort, preserved behavior).
 */
static int drop_cache(const char *file_path) {
  int ret = 0;
  int fd = open(file_path, O_RDONLY);
  if (fd < 0) {
    goto cleanup;
  }
  struct stat buf;
  ret = fstat(fd, &buf);
  if (ret < 0) {
    goto cleanup;
  }
  off_t size = buf.st_size;
  /* round the length up to a whole number of PAGE_SIZE blocks
   * (NOTE: PAGE_SIZE here is the project's 4 MiB constant, not 4 KiB) */
  off_t size_round_up = ((size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
  ret = posix_fadvise(fd, 0, size_round_up, POSIX_FADV_DONTNEED);
cleanup:
  if (fd >= 0) { /* was fd > 0: descriptor 0 is valid and would have leaked */
    close(fd);
  }
  return ret;
}
/*
 * Issue the driver's lock/unlock ioctl for the given file path.
 * Returns the ioctl result, or the negative fd if the driver device
 * cannot be opened.
 */
static int general_file_blocks_op(char *real_file_path, int8_t is_lock) {
  struct ioctl_req req;
  req.real_file_path_ptr = real_file_path;
  req.real_file_path_len = strlen(real_file_path);
  req.is_lock = is_lock;
  int fd = open(DRIVER_DEVICE_NAME, O_RDWR);
  /* BUG FIX: the original was 'if (fd > 0) return fd;', which returned on a
   * SUCCESSFUL open — the ioctl was never issued and the fd leaked. */
  if (fd < 0) {
    return fd;
  }
  int ret = ioctl(fd, IOCTL_CMD, (struct ioctl_req *)(&req));
  close(fd);
  return ret;
}
/* Pin the file's on-disk blocks via the kernel driver (is_lock = 1). */
static int lock_file_blocks(char *real_file_path) {
  return general_file_blocks_op(real_file_path, 1);
}
/* Release a previous block lock for the file (is_lock = 0). */
static int unlock_file_blocks(char *real_file_path) {
  return general_file_blocks_op(real_file_path, 0);
}
/* Restore every piece of per-run global state to its initial value so a new
 * virtual-file session starts from scratch. */
static void reset_all(void) {
  app_bufs_ptr = 0;
  is_eop = 0;
  buf_idx = 0;
  buf_len = 0;
  file_finish_reading = 0;
  first = 1;
  host_written_bytes = 0;
  num_locked_real_files = 0;
}
/* Push one 32-bit application parameter to the accelerator through the
 * MMIO parameter register (volatile write, not cached/reordered). */
void send_input_params(unsigned int data) {
  *((volatile unsigned int *)mmio_space + APP_INPUT_PARAM_TAG) = data;
}
/*
 * Push an array of 32-bit application parameters to the accelerator, one
 * volatile MMIO write per element (the device consumes them in order).
 */
void send_input_params_array(unsigned int *data_arr, size_t arr_len) {
  /* size_t index matches arr_len; was 'int i', a signed/unsigned mismatch */
  for (size_t i = 0; i < arr_len; i++) {
    *((volatile unsigned int *)mmio_space + APP_INPUT_PARAM_TAG) = data_arr[i];
  }
}
/* Write one control word to the accelerator's MMIO register 'tag'. */
__inline__ static void send_control_msg(int tag, unsigned int data) {
  *((volatile unsigned int *)mmio_space + tag) = data;
}
/* Read one control word from the accelerator's MMIO register 'tag'. */
__inline__ static unsigned int receive_control_msg(int tag) {
  return *((volatile unsigned int *)mmio_space + tag);
}
/* Probe access rights by actually opening the path with the given flags.
 * Returns 1 when the open succeeds (and closes the fd), 0 otherwise. */
static int has_permission(const char *pathname, int flags) {
  int const fd = open(pathname, flags);
  if (fd >= 0) {
    close(fd);
    return 1;
  }
  return 0;
}
/* Build the per-user mappings file name ".<login>.insider" into the
 * caller-provided buffer (must hold at least MAX_PATH_LEN bytes).
 * NOTE(review): assumes the login name leaves room for the 9-byte suffix
 * within MAX_PATH_LEN — confirm for very long user names. */
static void get_mappings_file_name(char *mappings_file_name) {
  mappings_file_name[0] = '.';
  /* getlogin_r writes from offset 1, keeping the leading dot */
  assert(getlogin_r(mappings_file_name + 1, MAX_PATH_LEN - 1) == 0);
  char suffix[] = ".insider";
  strcat(mappings_file_name, suffix);
}
/* Map one PAGE_SIZE DMA buffer from the FPGA driver. The driver fd is
 * returned through *configfd; the process exits on any failure. */
static void *allocate_kernel_buf(int *configfd) {
  *configfd = open("/dev/fpga_dma", O_RDWR);
  if (*configfd < 0) {
    perror("Error in dma driver.");
    exit(-1);
  }
  void *address = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                       *configfd, 0);
  if (address == MAP_FAILED) {
    perror("Mmap operation failed.");
    exit(-1);
  }
  return address;
}
/*
 * Open the FPGA's PCI BAR0 resource and map it as the global MMIO window
 * (mmio_fd / mmio_space). Exits the process on failure.
 */
static void setup_mmio(void) {
  mmio_fd = open("/sys/devices/pci0000:00/0000:00:1d.0/resource0", O_RDWR);
  if (mmio_fd < 0) {
    perror("Error for mmapping the mmio region,");
    /* was missing: previously fell through and called mmap on a bad fd */
    exit(-1);
  }
  mmio_space = mmap(NULL, MMIO_SPACE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    mmio_fd, 0);
  if (mmio_space == MAP_FAILED) {
    perror("Mmap operation failed.");
    exit(-1);
  }
}
/*
 * Return the file's size in bytes by shelling out to `ls -l | awk`.
 * Returns 0 if the command cannot be run or produces no parsable output
 * (the original returned an uninitialized value in that case).
 */
static size_t get_file_length(const char *real_file_name) {
  char *cmd = malloc(MAX_CMD_LEN);
  char *buf = malloc(MAX_CMD_OUTPUT_LEN);
  size_t file_size = 0; /* was uninitialized on parse failure */
  cmd[0] = 0;
  strcat(cmd, LS_CMD);
  strcat(cmd, real_file_name);
  strcat(cmd, LS_FILTER_CMD);
  FILE *fp = popen(cmd, "r");
  /* was: fgets ran before fp was NULL-checked and its result was ignored */
  if (fp && fgets(buf, MAX_CMD_OUTPUT_LEN, fp)) {
    sscanf(buf, "%zu", &file_size);
  }
  if (fp)
    pclose(fp);
  free(buf); /* was leaked */
  free(cmd);
  return file_size;
}
/* Query the file's physical extents via `filefrag -v` and return them as
 * (start byte, length byte) pairs. The last extent is trimmed so the total
 * equals the exact file length. Exits if MAX_EXTENT_NUM is exceeded. */
static void get_file_extents(const char *real_file_name,
                             unsigned int *num_extents,
                             unsigned long long *extents_physical_start_arr,
                             unsigned long long *extents_len_arr) {
  FILE *fp;
  char *buf = malloc(MAX_CMD_OUTPUT_LEN);
  char *cmd = malloc(MAX_CMD_LEN);
  // get file extents
  cmd[0] = 0;
  strcat(cmd, FILEFRAG_CMD);
  strcat(cmd, real_file_name);
  strcat(cmd, FILEFRAG_FILTER_CMD);
  unsigned int start, len;
  fp = popen(cmd, "r");
  /* NOTE(review): fread runs before fp is NULL-checked — a popen failure
   * crashes here; the `if (fp)` guard below comes too late */
  fread(buf, 1, MAX_CMD_OUTPUT_LEN, fp);
  *num_extents = 0;
  char *filefrag_output = buf;
  size_t total_extents_length = 0;
  /* parse "<start> <len>" pairs; units are PHYSICAL_SECTOR_SIZE blocks */
  while (sscanf(filefrag_output, "%u %u", &start, &len) > 0) {
    extents_physical_start_arr[*num_extents] =
        (unsigned long long)(start) * PHYSICAL_SECTOR_SIZE;
    extents_len_arr[*num_extents] =
        (unsigned long long)(len) * PHYSICAL_SECTOR_SIZE;
    total_extents_length += (unsigned long long)(len)*PHYSICAL_SECTOR_SIZE;
    (*num_extents)++;
    if ((*num_extents) > MAX_EXTENT_NUM) {
      puts("Error: the mapped file has too many extents (too fragmented).");
      exit(-1);
    }
    /* advance to the next output line, or stop at the end */
    filefrag_output = strstr(filefrag_output, "\n");
    if (filefrag_output) {
      filefrag_output++;
    } else {
      break;
    }
  }
  /* trim the final extent: block-granular totals overshoot the true length */
  extents_len_arr[(*num_extents) - 1] -=
      total_extents_length - get_file_length(real_file_name);
  if (fp)
    pclose(fp);
  free(buf);
  free(cmd);
}
/* Narrow an extent list in place to the byte window [offset, offset+len).
 * On entry the arrays describe a whole file; on exit they describe only
 * the requested slice, and *num_extents is updated to match.
 * NOTE(review): if the window reaches past the end of the mapped
 * extents, the loop exits without setting output_num_extents for the
 * tail, so *num_extents can come back 0 -- confirm callers handle this. */
static void extract_file_extents(size_t offset, size_t len,
                                 unsigned int *num_extents,
                                 unsigned long long *extents_physical_start_arr,
                                 unsigned long long *extents_len_arr) {
  size_t i;
  size_t starting_extent_num = 0;
  // Walk extents until the remaining offset falls inside extent i;
  // afterwards `offset` is the residual offset within that extent.
  for (i = 0; i < (*num_extents); i++) {
    if (offset < extents_len_arr[i]) {
      starting_extent_num = i;
      break;
    } else {
      offset -= extents_len_arr[i];
    }
  }
  unsigned int output_num_extents = 0;
  unsigned long long output_extents_physical_start_arr[MAX_EXTENT_NUM];
  unsigned long long output_extents_len_arr[MAX_EXTENT_NUM];
  unsigned long long total_extents_size = 0;
  for (i = 0; i < (*num_extents) - starting_extent_num; i++) {
    if (i == 0) {
      // First extent of the slice: skip the residual in-extent offset.
      output_extents_physical_start_arr[i] =
          extents_physical_start_arr[starting_extent_num] + offset;
      output_extents_len_arr[i] = extents_len_arr[starting_extent_num] - offset;
    } else {
      output_extents_physical_start_arr[i] =
          extents_physical_start_arr[starting_extent_num + i];
      output_extents_len_arr[i] = extents_len_arr[starting_extent_num + i];
    }
    total_extents_size += output_extents_len_arr[i];
    if (total_extents_size >= len) {
      // Window satisfied: trim the final extent down to exactly len.
      output_num_extents = i + 1;
      output_extents_len_arr[i] -= total_extents_size - len;
      break;
    }
  }
  // Copy the narrowed list back over the caller's arrays.
  *num_extents = output_num_extents;
  for (i = 0; i < output_num_extents; i++) {
    extents_physical_start_arr[i] = output_extents_physical_start_arr[i];
    extents_len_arr[i] = output_extents_len_arr[i];
  }
}
/* Return 1 iff pathname resides on the INSIDER NVMe/FPGA drive,
 * determined by comparing `df` output with DISK_NAME. Side effect on a
 * hit: caches the filesystem mount point (with a trailing '/') in the
 * global mount_point_path for later path construction. */
static int is_from_nvme_fpga(const char *pathname) {
  // NOTE(review): sized for DF_CMD + path, but the same buffer is later
  // reused with DF_FILTER_MOUNT_POINT_CMD appended -- confirm the two
  // filter suffixes fit within sizeof(DF_CMD) + MAX_PATH_LEN.
  char *cmd = malloc(sizeof(DF_CMD) + MAX_PATH_LEN);
  cmd[0] = 0;
  strcat(cmd, DF_CMD);
  strcat(cmd, pathname);
  strcat(cmd, DF_FILTER_DEVICE_NAME_CMD);
  FILE *fp = popen(cmd, "r");
  char *buf = malloc(MAX_CMD_OUTPUT_LEN);
  fgets(buf, MAX_CMD_OUTPUT_LEN, fp);
  pclose(fp);
  int ret = 0;
  // Compare the reported device name, ignoring the trailing newline.
  if (strncmp(buf, DISK_NAME, strlen(buf) - 1) == 0) {
    cmd[0] = 0;
    strcat(cmd, DF_CMD);
    strcat(cmd, pathname);
    strcat(cmd, DF_FILTER_MOUNT_POINT_CMD);
    fp = popen(cmd, "r");
    fgets(mount_point_path, MAX_CMD_OUTPUT_LEN, fp);
    // Overwrite the trailing newline with '/' so names can be appended.
    mount_point_path[strlen(mount_point_path) - 1] = '/';
    pclose(fp);
    ret = 1;
  }
  free(cmd);
  free(buf);
  return ret;
}
static const char *get_absolute_path(const char *path) {
return realpath(path, NULL);
}
/* Compute comparing_path relative to compared_path by shelling out to
 * REALPATH_CMD. Returns a heap-allocated string the caller must free;
 * the string is empty if the command fails. */
static const char *calculate_relative_path(const char *comparing_path,
                                           const char *compared_path) {
  FILE *fp;
  char *cmd = malloc(MAX_CMD_LEN);
  char *buf = malloc(MAX_CMD_OUTPUT_LEN);
  cmd[0] = 0;
  strcat(cmd, REALPATH_CMD);
  strcat(cmd, compared_path);
  strcat(cmd, " ");
  strcat(cmd, comparing_path);
  fp = popen(cmd, "r");
  buf[0] = 0;
  if (fp) {
    if (fgets(buf, MAX_CMD_OUTPUT_LEN, fp) && buf[0] != 0)
      buf[strlen(buf) - 1] = 0; /* strip trailing newline */
    /* BUGFIX: a stream opened with popen() must be closed with pclose();
     * the old fclose() left the child process unreaped. */
    pclose(fp);
  }
  free(cmd);
  return buf;
}
/* Look up pathname in the per-user mappings file on the INSIDER mount.
 * On a hit, for every backing real-file segment: checks permission,
 * drops its page cache, locks its blocks, adds its length to *file_size
 * (which the caller must initialize to 0), and appends its physical
 * extents to the output arrays. Returns 1 if pathname is a registered,
 * accessible virtual file; 0 otherwise. */
static int is_registered(const char *pathname, unsigned int *num_extents,
                         unsigned long long *extents_physical_start_arr,
                         unsigned long long *extents_len_arr,
                         unsigned long long *file_size, int flags) {
  char *mappings_path = malloc(MAX_PATH_LEN);
  char *virt_file_name = malloc(MAX_PATH_LEN);
  char *mapping_file_name = malloc(MAX_PATH_LEN);
  // mappings_path = mount_point_path + "." + user + ".insider"
  strcpy(mappings_path, mount_point_path);
  get_mappings_file_name(mapping_file_name);
  strncpy(mappings_path + strlen(mount_point_path), mapping_file_name,
          strlen(mapping_file_name));
  mappings_path[strlen(mount_point_path) + strlen(mapping_file_name)] = '\0';
  FILE *fp = fopen(mappings_path, "r");
  int ret = 0;
  *num_extents = 0;
  char *buf = malloc(MAX_LINE_LEN);
  const char *relative_path_to_mount_point =
      (const char *)calculate_relative_path(pathname, mount_point_path);
  char *real_file_relative_path = malloc(MAX_PATH_LEN);
  char *real_file_absolute_path = malloc(MAX_PATH_LEN);
  unsigned int cur_file_num_extents;
  unsigned long long cur_file_extents_physical_start_arr[MAX_EXTENT_NUM];
  unsigned long long cur_file_extents_len_arr[MAX_EXTENT_NUM];
  if (fp) {
    size_t sg_list_len, off, len;
    // Each mapping record: <virt name> <sg_len> then sg_len
    // (<real path> <off> <len>) triples.
    while (fscanf(fp, "%s %zu", virt_file_name, &sg_list_len) != EOF) {
      if (!strcmp(virt_file_name, relative_path_to_mount_point)) {
        ret = 1;
        size_t i;
        for (i = 0; i < sg_list_len; i++) {
          fscanf(fp, "%s %zu %zu", real_file_relative_path, &off, &len);
          real_file_absolute_path[0] = 0;
          strcat(real_file_absolute_path, mount_point_path);
          strcat(real_file_absolute_path, "/");
          strcat(real_file_absolute_path, real_file_relative_path);
          /* NOTE(review): the early `return 0` paths below leak every
           * heap buffer allocated above and leave fp open -- TODO route
           * them through the common cleanup at the end. */
          if (!has_permission(real_file_absolute_path, flags)) {
            return 0;
          }
          if (drop_cache(real_file_absolute_path) < 0) {
            puts("Error: fail to drop the page cache of the real file.");
          }
          // Remember the path so vclose() can unlock it later.
          locked_real_files_paths[num_locked_real_files] = malloc(MAX_PATH_LEN);
          strcpy(locked_real_files_paths[num_locked_real_files],
                 real_file_absolute_path);
          num_locked_real_files++;
          if (lock_file_blocks(real_file_absolute_path) < 0) {
            return 0;
          }
          (*file_size) += len;
          get_file_extents(real_file_absolute_path, &cur_file_num_extents,
                           cur_file_extents_physical_start_arr,
                           cur_file_extents_len_arr);
          extract_file_extents(off, len, &cur_file_num_extents,
                               cur_file_extents_physical_start_arr,
                               cur_file_extents_len_arr);
          int j;
          for (j = 0; j < cur_file_num_extents; j++) {
            extents_physical_start_arr[*num_extents] =
                cur_file_extents_physical_start_arr[j];
            extents_len_arr[*num_extents] = cur_file_extents_len_arr[j];
            (*num_extents)++;
            if ((*num_extents) > MAX_EXTENT_NUM) {
              fprintf(stderr, "Too many extents.\n");
              return 0;
            }
          }
        }
        break;
      } else {
        // Consume the rest of a non-matching line.
        // BUGFIX: bound the read by MAX_LINE_LEN (the size of buf); the
        // previous MAX_CMD_OUTPUT_LEN bound could overflow the buffer.
        fgets(buf, MAX_LINE_LEN, fp);
      }
    }
  }
  free(mappings_path);
  free((void *)relative_path_to_mount_point);
  free(virt_file_name);
  free(mapping_file_name);
  free(buf);
  free(real_file_relative_path);
  free(real_file_absolute_path);
  if (fp)
    fclose(fp);
  return ret;
}
/* A path denotes a usable virtual file when it lives on the INSIDER
 * drive AND appears in the mappings file; is_registered() also fills in
 * the extent arrays and total file size as a side effect. */
static int is_virtual_file(const char *pathname, unsigned int *num_extents,
                           unsigned long long *extents_physical_start_arr,
                           unsigned long long *extents_len_arr,
                           unsigned long long *file_size, int flags) {
  if (!is_from_nvme_fpga(pathname))
    return 0;
  return is_registered(pathname, num_extents, extents_physical_start_arr,
                       extents_len_arr, file_size, flags);
}
/* Register a virtual file composed of sg_list_len (path, offset, length)
 * segments of real files on the INSIDER drive: appends a record to the
 * per-user mappings file and creates an empty placeholder file.
 * Returns the heap-allocated absolute path of the new virtual file
 * (caller frees), or NULL if validation fails. */
const char *reg_virt_file_sg(size_t sg_list_len, const char **real_file_paths,
                             size_t *offs, size_t *lens) {
  if (sg_list_len <= 0 || sg_list_len > MAX_EXTENT_NUM) {
    return NULL;
  }
  // Check whether all real files are at INSIDER drive.
  size_t i;
  for (i = 0; i < sg_list_len; i++) {
    if (!is_from_nvme_fpga(real_file_paths[i])) {
      return NULL;
    }
    if (offs[i] + lens[i] > get_file_length(real_file_paths[i])) {
      return NULL;
    }
  }
  // Find a proper virtual file name: "virt_" + basename of the first
  // segment, appending '_' until no existing file has that name.
  char *virt_file_name = malloc(MAX_PATH_LEN);
  char *absolute_virt_file_path = malloc(MAX_PATH_LEN);
  virt_file_name[0] = 0;
  strcat(virt_file_name, "virt_");
  const char *pos = strrchr(real_file_paths[0], '/');
  char *relative_real_path = malloc(MAX_PATH_LEN);
  /* BUGFIX: pos is NULL for a bare file name; the old code dereferenced
   * pos + 1 unconditionally even though prefix_len below handles NULL. */
  strcpy(relative_real_path, (pos == NULL) ? real_file_paths[0] : pos + 1);
  strcat(virt_file_name, relative_real_path);
  size_t prefix_len = (pos == NULL) ? 0 : pos - real_file_paths[0] + 1;
  while (1) {
    strncpy(absolute_virt_file_path, real_file_paths[0], prefix_len);
    absolute_virt_file_path[prefix_len] = '\0';
    strcat(absolute_virt_file_path, virt_file_name);
    if (access(absolute_virt_file_path, F_OK)) {
      break;
    }
    strcat(virt_file_name, "_");
  }
  // Open the mapping file.
  char *mappings_path = malloc(MAX_PATH_LEN);
  strcpy(mappings_path, mount_point_path);
  char *mappings_file_name = malloc(MAX_PATH_LEN);
  get_mappings_file_name(mappings_file_name);
  strncpy(mappings_path + strlen(mount_point_path), mappings_file_name,
          strlen(mappings_file_name));
  mappings_path[strlen(mount_point_path) + strlen(mappings_file_name)] = '\0';
  // NOTE(review): fopen may return NULL (read-only mount, etc.); the
  // fprintf calls below would then crash -- confirm and add handling.
  FILE *fp = fopen(mappings_path, "a");
  // Update the mapping file.
  const char *relative_path_to_mount_point =
      calculate_relative_path(absolute_virt_file_path, mount_point_path);
  fprintf(fp, "%s %zu ", relative_path_to_mount_point, sg_list_len);
  /* BUGFIX: this string was leaked. */
  free((void *)relative_path_to_mount_point);
  for (i = 0; i < sg_list_len; i++) {
    const char *absolute_file_path = get_absolute_path(real_file_paths[i]);
    const char *relative_file_path =
        calculate_relative_path(absolute_file_path, mount_point_path);
    fprintf(fp, "%s %zu %zu ", relative_file_path, offs[i], lens[i]);
    free((void *)absolute_file_path);
    free((void *)relative_file_path);
  }
  fprintf(fp, "\n");
  fclose(fp);
  // touch virtual file
  FILE *cmd_fp;
  char *cmd = malloc(MAX_CMD_LEN);
  cmd[0] = 0;
  strcat(cmd, TOUCH_CMD);
  strcat(cmd, absolute_virt_file_path);
  cmd_fp = popen(cmd, "r");
  /* BUGFIX: pclose(NULL) is undefined; guard the popen result. */
  if (cmd_fp)
    pclose(cmd_fp);
  free(mappings_file_name);
  free(virt_file_name);
  free(relative_real_path);
  free(mappings_path);
  free(cmd);
  return absolute_virt_file_path;
}
/* Convenience wrapper: register a virtual file backed by one real file
 * in its entirety (offset 0, full length). */
const char *reg_virt_file(const char *real_path) {
  size_t offset = 0;
  size_t length = get_file_length(real_path);
  const char *paths[1] = {real_path};
  return reg_virt_file_sg(1, paths, &offset, &length);
}
/* open(2)-style entry point for virtual files. Only O_RDONLY or
 * O_WRONLY (exactly) are accepted. On success: maps the MMIO window,
 * allocates and announces the DMA buffers, pushes the file's extent
 * list to the FPGA, and returns VIRT_FILE_FD. Returns -1 on failure. */
int vopen(const char *pathname, int flags) {
  if (flags != O_RDONLY && flags != O_WRONLY) {
    return -1;
  }
  is_write = (O_WRONLY == flags);
  unsigned int num_extents;
  unsigned long long *extents_physical_start_arr =
      malloc(sizeof(unsigned long long) * MAX_EXTENT_NUM);
  unsigned long long *extents_len_arr =
      malloc(sizeof(unsigned long long) * MAX_EXTENT_NUM);
  /* BUGFIX: is_registered() only accumulates into *file_size, so length
   * must start at 0; it was previously read uninitialized. */
  unsigned long long length = 0;
  int ret = VIRT_FILE_FD;
  if (!is_virtual_file(pathname, &num_extents, extents_physical_start_arr,
                       extents_len_arr, &length, flags)) {
    ret = -1;
  } else {
    setup_mmio();
    send_control_msg(APP_IS_WRITE_MODE_TAG, is_write);
    int i;
    for (i = 0; i < ALLOCATED_BUF_NUM; i++) {
      app_bufs[i] = allocate_kernel_buf(&app_buf_fds[i]);
      // The driver publishes each buffer's physical address in its
      // first 8 bytes; relay it to the FPGA as two 32-bit words.
      app_buf_phy_addrs[i] = *((unsigned long long *)app_bufs[i]);
      memset(app_bufs[i], 0, PAGE_SIZE);
      send_control_msg(APP_BUF_ADDRS_TAG, app_buf_phy_addrs[i] >> 32);
      send_control_msg(APP_BUF_ADDRS_TAG, app_buf_phy_addrs[i] & 0xFFFFFFFF);
    }
    send_control_msg(APP_FILE_INFO_TAG, num_extents);
    send_control_msg(APP_FILE_INFO_TAG, length >> 32);
    send_control_msg(APP_FILE_INFO_TAG, length & 0xFFFFFFFF);
    for (i = 0; i < num_extents; i++) {
      unsigned long long extents_start_in_byte = extents_physical_start_arr[i];
      send_control_msg(APP_FILE_INFO_TAG, extents_start_in_byte >> 32);
      send_control_msg(APP_FILE_INFO_TAG, extents_start_in_byte & 0xFFFFFFFF);
      unsigned long long extents_len_in_byte = extents_len_arr[i];
      send_control_msg(APP_FILE_INFO_TAG, extents_len_in_byte >> 32);
      send_control_msg(APP_FILE_INFO_TAG, extents_len_in_byte & 0xFFFFFFFF);
    }
    file_finish_reading = 0;
    first = 1;
  }
  /* BUGFIX: the extent arrays were leaked on every call (both paths). */
  free(extents_physical_start_arr);
  free(extents_len_arr);
  return ret;
}
/* Clear the per-file streaming state back to its post-open defaults. */
static void reset(void) {
  app_bufs_ptr = 0;
  is_eop = 0;
  buf_idx = 0;
  buf_len = 0;
  first = 1;
}
/* Copy n bytes from src to dest using PAR_MEMCPY_WORKERS OpenMP threads,
 * each handling one contiguous chunk. */
static void parallel_memcpy(void *dest, const void *src, size_t n) {
  size_t size_per_worker = (n + PAR_MEMCPY_WORKERS - 1) / PAR_MEMCPY_WORKERS;
#pragma omp parallel num_threads(PAR_MEMCPY_WORKERS)
  {
    int tid = omp_get_thread_num();
    size_t off = size_per_worker * tid;
    /* BUGFIX: the last worker's size used to be computed as
     * n - size_per_worker * (workers - 1), which goes negative for
     * small n and, converted to size_t, produced an enormous memcpy.
     * Clamp each thread's offset and length to n instead. */
    if (off < n) {
      size_t copy_size = n - off;
      if (copy_size > size_per_worker)
        copy_size = size_per_worker;
      memcpy((unsigned char *)dest + off, (const unsigned char *)src + off,
             copy_size);
    }
  }
}
/* Spin until the FPGA marks the current buffer ready, then latch its
 * metadata into the globals buf_len (payload byte count) and is_eop
 * (end-of-payload marker), clearing the ready flag before returning.
 * Layout at BUF_METADATA_IDX: a 4-byte metadata word followed by a
 * 4-byte ready flag, both device-written. */
__inline__ static void update_read_metadata(void) {
  unsigned int metadata = 0, flag = 0;
  volatile unsigned char *flag_ptr;
  volatile unsigned char *metadata_ptr;
  do {
    metadata_ptr =
        (volatile unsigned char *)(app_bufs[app_bufs_ptr] + BUF_METADATA_IDX);
    flag_ptr =
        (volatile unsigned char *)(app_bufs[app_bufs_ptr] + BUF_METADATA_IDX +
                                   sizeof(unsigned int));
    // Byte-wise little-endian loads through volatile pointers so the
    // compiler re-reads device-written memory on every spin.
    flag = ((*(flag_ptr + 3)) << 24) | ((*(flag_ptr + 2)) << 16) |
           ((*(flag_ptr + 1)) << 8) | ((*(flag_ptr + 0)) << 0);
    metadata = ((*(metadata_ptr + 3)) << 24) | ((*(metadata_ptr + 2)) << 16) |
               ((*(metadata_ptr + 1)) << 8) | ((*(metadata_ptr + 0)) << 0);
  } while (!(flag));
  // Acknowledge by clearing all four ready-flag bytes.
  *flag_ptr = *(flag_ptr + 1) = *(flag_ptr + 2) = *(flag_ptr + 3) = 0;
  // Bit 0 marks end-of-payload; the remaining bits are the byte count.
  buf_len = metadata >> 1;
  is_eop = metadata & 0x1;
}
/* read(2)-style entry point for the virtual file. Copies up to count
 * bytes from the current DMA buffer into buf; when the buffer is
 * drained (and more data remains) it is returned to the FPGA and the
 * next buffer is awaited. Returns bytes read, 0 at EOF, or -1 on
 * misuse (wrong fd, or file opened for writing). */
ssize_t vread(int fd, void *buf, size_t count) {
  if (is_write) {
    return -1; // file was opened write-only
  }
  if (fd == VIRT_FILE_FD) {
    if (file_finish_reading) {
      return 0; // EOF already reached on a previous call
    } else if (first) {
      // Lazily wait for the first buffer's metadata on the initial read.
      update_read_metadata();
      first = 0;
    }
    unsigned char *app_buf = (unsigned char *)app_bufs[app_bufs_ptr];
    ssize_t read_size = 0;
    if (count >= buf_len - buf_idx) {
      // The request drains what remains of the current buffer.
      read_size = buf_len - buf_idx;
      if (is_eop) {
        parallel_memcpy(buf, app_buf + buf_idx, read_size);
        file_finish_reading = 1;
        reset();
      } else {
        parallel_memcpy(buf, app_buf + buf_idx, read_size);
        // Hand the drained buffer back and rotate to the next one
        // (ALLOCATED_BUF_NUM is assumed to be a power of two).
        send_control_msg(APP_FREE_BUF_TAG, 0);
        app_bufs_ptr = (app_bufs_ptr + 1) & (ALLOCATED_BUF_NUM - 1);
        buf_idx = 0;
        update_read_metadata();
      }
    } else {
      // Partial read that stays within the current buffer.
      read_size = count;
      parallel_memcpy(buf, app_buf + buf_idx, read_size);
      buf_idx += read_size;
    }
    return read_size;
  } else {
    return -1;
  }
}
/* Mark the current write buffer as filled (all four metadata flag
 * bytes set to 1) and notify the FPGA of its payload length. */
__inline__ static void commit_write_buf(unsigned int len) {
  volatile unsigned char *metadata_ptr;
  metadata_ptr =
      (volatile unsigned char *)(app_bufs[app_bufs_ptr]) + BUF_METADATA_IDX;
  *metadata_ptr = *(metadata_ptr + 1) = *(metadata_ptr + 2) =
      *(metadata_ptr + 3) = 1;
  send_control_msg(APP_COMMIT_WRITE_BUF_TAG, len);
}
static ssize_t real_written_bytes_count(int fd) {
if (fd == VIRT_FILE_FD) {
unsigned long long real_written_bytes = 0;
real_written_bytes =
((unsigned long long)receive_control_msg(APP_REAL_WRITTEN_BYTES_TAG))
<< 32;
real_written_bytes |=
((unsigned long long)receive_control_msg(APP_REAL_WRITTEN_BYTES_TAG));
return real_written_bytes;
} else {
return -1;
}
}
static ssize_t virt_written_bytes_count(int fd) {
if (fd == VIRT_FILE_FD) {
unsigned long long virt_written_bytes = 0;
virt_written_bytes =
((unsigned long long)receive_control_msg(APP_VIRT_WRITTEN_BYTES_TAG))
<< 32;
virt_written_bytes |=
((unsigned long long)receive_control_msg(APP_VIRT_WRITTEN_BYTES_TAG));
return virt_written_bytes;
} else {
return -1;
}
}
/* Close the virtual file. In write mode: flushes the partially filled
 * buffer, reports the total byte count, and waits for the FPGA to
 * finish draining. Optionally reports the real written byte count,
 * releases kernel buffers and the MMIO fd, resets the device, then
 * unlocks and drops the cache of every real file vopen() locked.
 * Returns 0 on success, the last negative unlock/drop-cache error
 * code, or -1 for a bad fd. */
int vclose_with_rsize(int fd, size_t *rfile_written_bytes) {
  if (fd == VIRT_FILE_FD) {
    if (is_write) {
      if (buf_idx) {
        // Flush the final, partially filled write buffer.
        commit_write_buf(buf_idx);
      }
      send_control_msg(APP_WRITE_TOTAL_LEN_TAG, host_written_bytes >> 32);
      send_control_msg(APP_WRITE_TOTAL_LEN_TAG,
                       host_written_bytes & 0xFFFFFFFF);
      // Busy-wait until the FPGA confirms all data reached storage.
      while (!receive_control_msg(APP_WRITE_FINISHED_TAG))
        ;
    }
    if (rfile_written_bytes != NULL) {
      *rfile_written_bytes = real_written_bytes_count(fd);
    }
    reset_all();
    int i;
    for (i = 0; i < ALLOCATED_BUF_NUM; i++) {
      if (app_buf_fds[i] > 0) {
        close(app_buf_fds[i]);
      }
    }
    // NOTE(review): fd 0 is a valid descriptor; this assumes the MMIO
    // fd is never 0 -- confirm.
    if (mmio_fd) {
      close(mmio_fd);
    }
    send_control_msg(RESET_TAG, 0);
    int ret = 0;
    // Unlock and uncache every real file locked during vopen();
    // remember only the last failure code.
    for (i = 0; i < num_locked_real_files; i++) {
      int tmp;
      if ((tmp = unlock_file_blocks(locked_real_files_paths[i])) < 0) {
        ret = tmp;
      }
      if ((tmp = drop_cache(locked_real_files_paths[i])) < 0) {
        ret = tmp;
      }
      free(locked_real_files_paths[i]);
    }
    return ret;
  } else {
    return -1;
  }
  return 0; // unreachable; retained from the original
}
/* Close the virtual file without asking for the real written count. */
int vclose(int fd) {
  return vclose_with_rsize(fd, NULL);
}
/* Spin until the FPGA clears the current buffer's metadata word,
 * meaning the buffer has been consumed and may be refilled. */
__inline__ static void wait_write_buf(void) {
  unsigned int metadata = 0;
  volatile unsigned char *metadata_ptr;
  do {
    metadata_ptr =
        (volatile unsigned char *)(app_bufs[app_bufs_ptr]) + BUF_METADATA_IDX;
    // Volatile byte-wise load so each spin re-reads device memory.
    metadata = ((*(metadata_ptr + 3)) << 24) | ((*(metadata_ptr + 2)) << 16) |
               ((*(metadata_ptr + 1)) << 8) | ((*(metadata_ptr + 0)) << 0);
  } while (metadata);
}
/* write(2)-style entry point for the virtual file. Stages bytes into
 * the current DMA buffer; when its payload area (BUF_METADATA_IDX
 * bytes) fills, the buffer is committed to the FPGA, the ring advances,
 * and the next buffer is awaited. Returns bytes accepted, or -1 on
 * misuse (wrong fd, or file opened read-only). */
int vwrite(int fd, void *buf, size_t count) {
  if (!is_write) {
    return -1; // file was opened read-only
  }
  if (fd == VIRT_FILE_FD) {
    unsigned char *app_buf = (unsigned char *)app_bufs[app_bufs_ptr];
    ssize_t write_size = 0;
    if (count >= BUF_METADATA_IDX - buf_idx) {
      // This write fills the current buffer: commit and rotate
      // (ALLOCATED_BUF_NUM is assumed to be a power of two).
      write_size = BUF_METADATA_IDX - buf_idx;
      parallel_memcpy(app_buf + buf_idx, buf, write_size);
      commit_write_buf(BUF_METADATA_IDX);
      app_bufs_ptr = (app_bufs_ptr + 1) & (ALLOCATED_BUF_NUM - 1);
      buf_idx = 0;
      wait_write_buf();
    } else {
      // Partial write that stays within the current buffer.
      write_size = count;
      parallel_memcpy(app_buf + buf_idx, buf, write_size);
      buf_idx += write_size;
    }
    host_written_bytes += write_size;
    return write_size;
  } else {
    return -1;
  }
}
/* Block until the FPGA has consumed every byte the host has written,
 * i.e. its virtual-written counter catches up with host_written_bytes. */
int vsync(int fd) {
  if (fd != VIRT_FILE_FD)
    return -1;
  while (virt_written_bytes_count(fd) != host_written_bytes) {
    /* busy-wait */
  }
  return 0;
}
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image *images,const int argc,
% char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  analyzeImage() computes the brightness and saturation mean, standard
  deviation, kurtosis and skewness for every image in the list and
  stores them as image properties (filter:brightness:*,
  filter:saturation:*). Returns MagickImageFilterSignature.
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  char
    text[MaxTextExtent];

  double
    area,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    MagickBooleanType
      status;

    ssize_t
      y;

    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireCacheView(image);
    /*
      BUGFIX: the accumulators below were previously updated by all
      OpenMP threads with no synchronization, and hue/saturation/
      brightness were shared, so the statistics were racy and varied
      run to run. The sums are now combined with reduction(+:...) and
      the per-pixel HSB values are iteration-local.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status) \
    reduction(+:area,brightness_sum_x,brightness_sum_x2,brightness_sum_x3, \
      brightness_sum_x4,saturation_sum_x,saturation_sum_x2, \
      saturation_sum_x3,saturation_sum_x4)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const PixelPacket
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          brightness,
          hue,
          saturation;

        ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),
          GetPixelBlue(p),&hue,&saturation,&brightness);
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p++;
      }
    }
    image_view=DestroyCacheView(image_view);
    if (area <= 0.0)
      break;
    brightness_mean=brightness_sum_x/area;
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text);
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatMagickString(text,MaxTextExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
    if (brightness_standard_deviation != 0)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text);
    if (brightness_standard_deviation != 0)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text);
    saturation_mean=saturation_sum_x/area;
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatMagickString(text,MaxTextExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
    if (saturation_standard_deviation != 0)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text);
    if (saturation_standard_deviation != 0)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text);
  }
  return(MagickImageFilterSignature);
}
|
computepi.c | #include <stdio.h>
#include <immintrin.h>
#include <omp.h>
#include <math.h>
#include "computepi.h"
/* Approximate pi by a left Riemann sum of 4/(1+x^2) over [0,1] with N
 * rectangles of width 1/N. */
double compute_pi_baseline(size_t N)
{
    const double step = 1.0 / N;
    double acc = 0.0;
    size_t k = 0;
    while (k < N) {
        const double t = (double) k / N;   /* sample point t_k = k/N */
        acc += step / (1.0 + t * t);
        ++k;
    }
    return 4.0 * acc;
}
/* Approximate pi via the Basel problem: sum 1/i^2 = pi^2/6, so
 * pi = sqrt(6 * partial sum) for i in [1, N). */
double compute_pi_euler(size_t N)
{
    double partial = 0.0;
    for (size_t i = 1; i < N; i++) {
        const double di = (double) i;
        partial += 1.0 / (di * di);
    }
    return sqrt(6.0 * partial);
}
/* Approximate pi by integrating 4/(1+x^2) over [0,1], parallelized
 * across `threads` OpenMP threads with a sum reduction on pi. */
double compute_pi_openmp(size_t N, int threads)
{
    double pi = 0.0;
    double dt = 1.0 / N;
    double x;
    #pragma omp parallel num_threads(threads)
    {
        /* x is thread-private; partial pi values are combined by the
           reduction(+) clause after the loop. */
        #pragma omp for private(x) reduction(+:pi)
        for (size_t i = 0; i < N; i++) {
            x = (double) i / N;
            pi += dt / (1.0 + x * x);
        }
    }
    return pi * 4.0;
}
/* Approximate pi by integrating 4/(1+x^2) over [0,1] with AVX, four
 * doubles per iteration. Any trailing N mod 4 samples are ignored, as
 * in the original formulation. */
double compute_pi_avx(size_t N)
{
    double pi = 0.0;
    double dt = 1.0 / N;
    __m256d ymm0, ymm1, ymm2, ymm3, ymm4;
    ymm0 = _mm256_set1_pd(1.0);
    ymm1 = _mm256_set1_pd(dt);
    ymm2 = _mm256_set_pd(dt * 3, dt * 2, dt * 1, 0.0);
    ymm4 = _mm256_setzero_pd(); // sum of pi
    /* BUGFIX: the old bound `i <= N - 4` underflows for size_t N < 4
     * (N - 4 wraps to a huge value), running the loop far out of range;
     * `i + 4 <= N` is wrap-safe. Also use size_t for i to avoid the
     * signed/unsigned mismatch and int overflow for very large N. */
    for (size_t i = 0; i + 4 <= N; i += 4) {
        ymm3 = _mm256_set1_pd(i * dt);    // i*dt broadcast to all lanes
        ymm3 = _mm256_add_pd(ymm3, ymm2); // x = i*dt + {0,1,2,3}*dt
        ymm3 = _mm256_mul_pd(ymm3, ymm3); // x^2
        ymm3 = _mm256_add_pd(ymm0, ymm3); // 1 + x^2
        ymm3 = _mm256_div_pd(ymm1, ymm3); // dt/(1+x^2)
        ymm4 = _mm256_add_pd(ymm4, ymm3); // accumulate
    }
    double tmp[4] __attribute__((aligned(32)));
    _mm256_store_pd(tmp, ymm4); // spill the four partial sums
    pi += tmp[0] + tmp[1] + tmp[2] + tmp[3];
    return pi * 4.0;
}
/* AVX integration of 4/(1+x^2) over [0,1], unrolled 4x (16 samples per
 * iteration) with four independent accumulators to hide division
 * latency. Trailing N mod 16 samples are ignored, as before. */
double compute_pi_avx_unroll(size_t N)
{
    double pi = 0.0;
    double dt = 1.0 / N;
    __m256d ymm0, ymm1, ymm2, ymm3, ymm4,
            ymm5, ymm6, ymm7, ymm8, ymm9,
            ymm10, ymm11, ymm12, ymm13, ymm14;
    ymm0 = _mm256_set1_pd(1.0);
    ymm1 = _mm256_set1_pd(dt);
    ymm2 = _mm256_set_pd(dt * 3, dt * 2, dt * 1, 0.0);
    ymm3 = _mm256_set_pd(dt * 7, dt * 6, dt * 5, dt * 4);
    ymm4 = _mm256_set_pd(dt * 11, dt * 10, dt * 9, dt * 8);
    ymm5 = _mm256_set_pd(dt * 15, dt * 14, dt * 13, dt * 12);
    ymm6 = _mm256_setzero_pd(); // first sum of pi
    ymm7 = _mm256_setzero_pd(); // second sum of pi
    ymm8 = _mm256_setzero_pd(); // third sum of pi
    ymm9 = _mm256_setzero_pd(); // fourth sum of pi
    /* BUGFIX: `i <= N - 16` underflows for size_t N < 16, sending the
     * loop far out of range; `i + 16 <= N` is wrap-safe. size_t i also
     * avoids int overflow for very large N. */
    for (size_t i = 0; i + 16 <= N; i += 16) {
        ymm14 = _mm256_set1_pd(i * dt);
        ymm10 = _mm256_add_pd(ymm14, ymm2);
        ymm11 = _mm256_add_pd(ymm14, ymm3);
        ymm12 = _mm256_add_pd(ymm14, ymm4);
        ymm13 = _mm256_add_pd(ymm14, ymm5);
        ymm10 = _mm256_mul_pd(ymm10, ymm10);
        ymm11 = _mm256_mul_pd(ymm11, ymm11);
        ymm12 = _mm256_mul_pd(ymm12, ymm12);
        ymm13 = _mm256_mul_pd(ymm13, ymm13);
        ymm10 = _mm256_add_pd(ymm0, ymm10);
        ymm11 = _mm256_add_pd(ymm0, ymm11);
        ymm12 = _mm256_add_pd(ymm0, ymm12);
        ymm13 = _mm256_add_pd(ymm0, ymm13);
        ymm10 = _mm256_div_pd(ymm1, ymm10);
        ymm11 = _mm256_div_pd(ymm1, ymm11);
        ymm12 = _mm256_div_pd(ymm1, ymm12);
        ymm13 = _mm256_div_pd(ymm1, ymm13);
        ymm6 = _mm256_add_pd(ymm6, ymm10);
        ymm7 = _mm256_add_pd(ymm7, ymm11);
        ymm8 = _mm256_add_pd(ymm8, ymm12);
        ymm9 = _mm256_add_pd(ymm9, ymm13);
    }
    double tmp1[4] __attribute__((aligned(32)));
    double tmp2[4] __attribute__((aligned(32)));
    double tmp3[4] __attribute__((aligned(32)));
    double tmp4[4] __attribute__((aligned(32)));
    _mm256_store_pd(tmp1, ymm6);
    _mm256_store_pd(tmp2, ymm7);
    _mm256_store_pd(tmp3, ymm8);
    _mm256_store_pd(tmp4, ymm9);
    pi += tmp1[0] + tmp1[1] + tmp1[2] + tmp1[3] +
          tmp2[0] + tmp2[1] + tmp2[2] + tmp2[3] +
          tmp3[0] + tmp3[1] + tmp3[2] + tmp3[3] +
          tmp4[0] + tmp4[1] + tmp4[2] + tmp4[3];
    return pi * 4.0;
}
/* Approximate pi with the Leibniz series:
 * pi/4 = sum_{i=0}^{inf} (-1)^i / (2i+1). */
double compute_pi_leibniz(size_t N)
{
    double pi = 0.0;
    /* BUGFIX: the loop must start at i = 0; starting at i = 1 dropped
     * the leading term 1/1, so the function converged to pi - 4
     * (about -0.858) instead of pi. */
    for (size_t i = 0; i < N; i++) {
        double sign = ((i % 2) == 0) ? 1.0 : -1.0;
        pi += sign / (2.0 * (double) i + 1.0);
    }
    return pi * 4.0;
}
|
GB_binop__iseq_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int64)
// A*D function (colscale): GB (_AxD__iseq_int64)
// D*A function (rowscale): GB (_DxB__iseq_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int64)
// C=scalar+B GB (_bind1st__iseq_int64)
// C=scalar+B' GB (_bind1st_tran__iseq_int64)
// C=A+scalar GB (_bind2nd__iseq_int64)
// C=A'+scalar GB (_bind2nd_tran__iseq_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT64 || GxB_NO_ISEQ_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated dense ewise3 kernel (C = A+B, no accumulator),
// specialized for the ISEQ operator on int64; the loops come from the
// included template, driven by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__iseq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C += B, accumulating sparse B into dense C,
// specialized for iseq_int64. Returns GrB_NO_VALUE when this operator
// is disabled at compile time (see GB_DISABLE above).
GrB_Info GB (_Cdense_accumB__iseq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C += b, accumulating a scalar into dense C,
// specialized for iseq_int64. Returns GrB_NO_VALUE when disabled.
GrB_Info GB (_Cdense_accumb__iseq_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // Unreachable duplicate return emitted by the generator; kept as-is.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each column j of A by the diagonal entry D(j,j); the loop lives in
// the included template. A_ek_slicing/A_ntasks/A_nthreads give the task split.
GrB_Info GB (_AxD__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the output value array, written by the template.
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each row i of B by the diagonal entry D(i,i).
GrB_Info GB (_DxB__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Set-union element-wise add with optional mask M (structural and/or
// complemented). When is_eWiseUnion is true, entries present in only one of
// A/B are paired with the alpha/beta scalars instead of being copied through.
GrB_Info GB (_AaddB__iseq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
// alpha/beta are only initialized when is_eWiseUnion; the included template
// is presumed to read them only in that case — NOTE(review): confirm.
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Set-intersection element-wise multiply; mask handling and the task
// decomposition live in the included meta template.
GrB_Info GB (_AemultB_08__iseq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// flipxy requests z = op(y,x) instead of z = op(x,y); it only takes effect
// when GB_BINOP_FLIP says the operator is non-commutative with no prebuilt
// flipped variant.
GrB_Info GB (_AemultB_02__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Masked element-wise multiply driven by the sparse/hyper mask M; the
// M_ek_slicing arrays give the parallel task split over M.
GrB_Info GB (_AemultB_04__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Element-wise multiply producing a bitmap C, with optional (possibly
// complemented) mask M.
GrB_Info GB (_AemultB_bitmap__iseq_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [k] = (x == Bx [k]) for every entry present in B, with the
// scalar x bound as the first operand. Cx and Bx may be aliased; entry
// presence is tested through the GBB macro on B's bitmap Bb.
GrB_Info GB (_bind1st__iseq_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    const int64_t x = (*((int64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap are written
        if (GBB (Bb, k))
        {
            Cx [k] = (x == GBX (Bx, k, false)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [k] = (Ax [k] == y) for every entry present in A, with the
// scalar y bound as the second operand. Cx and Ax may be aliased; entry
// presence is tested through the GBB macro on A's bitmap Ab.
GrB_Info GB (_bind2nd__iseq_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    const int64_t y = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are written
        if (GBB (Ab, k))
        {
            Cx [k] = (GBX (Ax, k, false) == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// Transposes A while applying z = (x == aij), x bound as the first operand;
// the transpose machinery lives in the included GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__iseq_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for any generated code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// Transposes A while applying z = (aij == y), y bound as the second operand.
GrB_Info GB (_bind2nd_tran__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
jacobi-openacc-mdev.c | // An optimization on top of naive coding:
// promoting data handling outside the while loop
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
/* Wall-clock time in seconds (microsecond resolution) via gettimeofday. */
double time_stamp()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double) tv.tv_sec + (double) tv.tv_usec * 1.0e-6;
}
double time1, time2;
void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 512
int n,m,mits;
#define REAL float // flexible between float and double
REAL tol,relax=1.0,alpha=0.0543;
REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
REAL dx,dy;
/* Entry point: runs the Jacobi/Helmholtz benchmark with fixed parameters. */
int main (void)
{
// float toler;
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
// Interactive input (above) is disabled; use fixed benchmark parameters.
n=MSIZE;
m=MSIZE;
tol=0.0000000001;
mits=5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp single
printf("Running using %d threads...\n",omp_get_num_threads());
}
#endif
#endif
driver ( ) ;
return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver( )
{
initialize();
/* time only the solver; initialization is excluded */
time1 = time_stamp();
/* Solve Helmholtz equation */
jacobi ();
time2 = time_stamp();
printf("------------------------\n");
printf("Execution time = %f\n",time2-time1);
/* error_check (n,m,alpha,dx,dy,u,f)*/
error_check ( );
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
int i,j, xx,yy;
//double PI=3.1415926;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
/* NOTE(review): xx/yy are ints, so these casts truncate toward zero,
* while error_check() computes the same coordinates in REAL.
* Presumably a translation artifact kept for result compatibility —
* confirm before "fixing", since changing it alters f[][] and the
* reported error. */
xx =(int)( -1.0 + dx * (i-1));
yy = (int)(-1.0 + dy * (j-1)) ;
u[i][j] = 0.0;
f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
- 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
REAL omega;
int i,j,k;
REAL error,resid,ax,ay,b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega=relax;
/*
* Initialize coefficients */
ax = 1.0/(dx*dx); /* X-direction coef */
ay = 1.0/(dy*dy); /* Y-direction coef */
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
error = 10.0 * tol;
k = 1;
// An optimization on top of naive coding: promoting data handling outside the while loop
// data properties may change since the scope is bigger:
// NOTE(review): the map(...){...}>>(:), dist_iteration and "halo exchange"
// clauses below are an experimental multi-device extension, not standard
// OpenMP — this file needs the matching research compiler to build.
#pragma omp target data device(*) map(to:n, m, omega, ax, ay, b, f[0:n]{0:m}>>(:)) map(tofrom:u[0:n]{0:m}>>(:)) map(alloc:uold[0:n|1]{0:m}>>(:))
while ((k<=mits)&&(error>tol))
{
error = 0.0;
/* Copy new solution into old */
//#pragma omp parallel
// {
#pragma omp target device(*)//map(in:n, m, u[0:n][0:m]) map(out:uold[0:n][0:m])
#pragma omp parallel for private(j,i) dist_iteration match_range u[0:n]{}
/* #pragma omp parallel for private(j,i) dist_iteration >>(*) */
for(i=0;i<n;i++)
for(j=0;j<m;j++)
uold[i][j] = u[i][j];
#pragma omp halo exchange uold[:]{:}
/* 5-point stencil update with relaxation factor omega; error accumulates
* the squared residual over all interior points */
#pragma omp target device(*)//map(in:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(out:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) dist_iteration >>(*) // nowait
for (i=1;i<(n-1);i++)
for (j=1;j<(m-1);j++)
{
resid = (ax*(uold[i-1][j] + uold[i+1][j])\
+ ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;
u[i][j] = uold[i][j] - omega * resid;
error = error + resid*resid ;
}
// }
/* omp end parallel */
/* Error check (note: prints the raw squared-residual sum, before sqrt) */
if (k%500==0)
printf("Finished %d iteration with error =%f\n",k, error);
error = sqrt(error)/(n*m);
k = k + 1;
} /* End iteration loop */
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n", error);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
int i,j;
REAL xx,yy,temp,error;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
error = 0.0 ;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
error = error + temp*temp;
}
error = sqrt(error)/(n*m);
printf("Solution Error :%E \n",error);
}
|
mmul.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <stdio.h>
/**
 * SpMM, C = AB, A is sparse (COO), B and C are dense.
 * Iterates once over each nonzero A(row,col)=val and accumulates
 * val * B[col,:] into C[row,:]; C is expected to be pre-initialized
 * (the kernel accumulates with +=). Always returns 0.
 */
int ptiSparseMatrixMulMatrix(ptiMatrix * C, const ptiSparseMatrix *spA, ptiMatrix * B)
{
    for(ptiNnzIndex z = 0; z < spA->nnz; ++z) {
        ptiIndex row = spA->rowind.data[z]; // C[row,:]
        ptiIndex col = spA->colind.data[z]; // B[col,:]
        ptiValue val = spA->values.data[z];
        // column counter is a ptiIndex (ncols is a column count, not an nnz
        // count) — consistent with the OpenMP variants below
        for(ptiIndex c = 0; c < B->ncols; ++c) {
            C->values[row * C->stride + c] += val * B->values[col * B->stride + c];
        }
    }
    return 0;
}
#ifdef HIPARTI_USE_OPENMP
// Parallel SpMM over nonzeros: C += A*B with one OpenMP iteration per nonzero
// of A. Different nonzeros can target the same row of C, so every scalar
// update is protected with an atomic.
int ptiOmpSparseMatrixMulMatrix(ptiMatrix * C, const ptiSparseMatrix *spA, ptiMatrix * B)
{
#pragma omp parallel for // schedule(static)
for(ptiNnzIndex z = 0; z < spA->nnz; ++z) {
ptiIndex row = spA->rowind.data[z]; // C[row,:]
ptiIndex col = spA->colind.data[z]; // B[col,:]
ptiValue val = spA->values.data[z];
ptiValue * restrict cval_row = C->values + row * C->stride;
for(ptiIndex c = 0; c < B->ncols; ++c) {
#pragma omp atomic update
cval_row[c] += val * B->values[col * B->stride + c];
// C->values[row * C->stride+c] += val * B->values[col * B->stride + c]; // slower
}
}
return 0;
}
// Parallel SpMM with per-thread accumulation buffers: each thread accumulates
// into its private Cbufs[tid] with no atomics, then all buffers are reduced
// into C row-by-row in parallel. Cbufs must contain one zero-initialized
// C-shaped matrix per OpenMP thread. Always returns 0.
int ptiOmpSparseMatrixMulMatrix_Reduce(ptiMatrix * C, ptiMatrix * Cbufs, const ptiSparseMatrix *spA, ptiMatrix * B)
{
    int nthreads = 1;
    #pragma omp parallel
    {
        // have a single thread record the team size; the previous code let
        // every thread write it concurrently, which is a data race even
        // though the stored value is identical
        #pragma omp single
        nthreads = omp_get_num_threads();
    }
    #pragma omp parallel for // schedule(static)
    for(ptiNnzIndex z = 0; z < spA->nnz; ++z) {
        int tid = omp_get_thread_num();
        ptiIndex row = spA->rowind.data[z]; // C[row,:]
        ptiIndex col = spA->colind.data[z]; // B[col,:]
        ptiValue val = spA->values.data[z];
        #pragma omp simd
        for(ptiIndex c = 0; c < B->ncols; ++c) {
            Cbufs[tid].values[row * C->stride + c] += val * B->values[col * B->stride + c];
        }
    }
    /* Reduction: C += sum of all per-thread buffers */
    #pragma omp parallel for schedule(static)
    for(ptiIndex r=0; r<C->nrows; ++r) {
        for(int t=0; t<nthreads; ++t) {
            #pragma omp simd
            for(ptiIndex c = 0; c < C->ncols; ++c) {
                C->values[r * C->stride + c] += Cbufs[t].values[r * C->stride + c];
            }
        }
    }
    return 0;
}
#endif
|
GB_dense_subassign_22_template.c | //------------------------------------------------------------------------------
// GB_dense_subassign_22_template: C += b where C is dense and b is a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ;
const int64_t cnz = GB_NNZ (C) ;
//--------------------------------------------------------------------------
// C += b where C is dense and b is a scalar
//--------------------------------------------------------------------------
// One flat parallel pass over all cnz entries. GB_BINOP applies the
// generated operator to GB_CX(pC) and the scalar bwork; C, bwork and
// nthreads are supplied by the kernel that #includes this template.
int64_t pC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
GB_BINOP (GB_CX (pC), GB_CX (pC), bwork, 0, 0) ;
}
}
|
trmv_x_dia_u_lo.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// OpenMP kernel for a DIA-format unit lower-triangular matrix-vector product:
// y = alpha*(L*x) + beta*y. Each thread accumulates the contribution of whole
// stored diagonals into a private length-m buffer; the buffers are then
// summed into y together with the unit-diagonal term.
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
// triangular multiply requires a square matrix
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
// one private accumulator per thread.
// NOTE(review): malloc results are not NULL-checked, and the buffers are
// allocated with malloc but released with alpha_free — presumably
// alpha_free wraps free; confirm the pairing.
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
for(int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Number) * m);
memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
}
const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
// only strictly-lower diagonals (distance < 0) contribute here; the unit
// main diagonal is applied separately in the combine loop below
for (ALPHA_INT i = 0; i < diags; ++i)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT dis = A->distance[i];
if(dis < 0)
{
const ALPHA_INT row_start = -dis;
const ALPHA_INT col_start = 0;
const ALPHA_INT nnz = m + dis;
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < nnz; ++j)
{
ALPHA_Number v;
alpha_mul(v, alpha, A->values[start + row_start + j]);
alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
// combine: y[i] = beta*y[i] + alpha*x[i] (unit diagonal) + per-thread sums
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], beta, y[i]);
alpha_madde(y[i], alpha, x[i]);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
// release the per-thread buffers
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
// Public entry point: dispatch the unit lower-triangular DIA trmv to the
// OpenMP implementation above.
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
return ONAME_omp(alpha, A, x, beta, y);
}
|
rpmio_internal.h | #ifndef H_RPMIO_INTERNAL
#define H_RPMIO_INTERNAL
/** \ingroup rpmio
* \file rpmio/rpmio_internal.h
*/
#include <rpmiotypes.h>
#include <rpmlog.h>
#include <rpmio.h>
#include <rpmurl.h>
#define _RPMPGP_INTERNAL
#include <rpmpgp.h>
#include <rpmxar.h>
/*@access pgpDig @*/ /* XXX FIXME: (by refactoring to foo.c) */
/*@access rpmxar @*/ /* XXX FIXME: (by refactoring to foo.c) */
/** \ingroup rpmio
 * One layer of an FD_t's stacked I/O: vtable, cookie pointer, and fileno.
 */
typedef struct _FDSTACK_s {
/*@exposed@*/
FDIO_t io;
/*@dependent@*/
void * fp;
int fdno;
} FDSTACK_t;
/** \ingroup rpmio
 * Identify per-descriptor I/O operation statistics.
 */
typedef enum fdOpX_e {
FDSTAT_READ = 0, /*!< Read statistics index. */
FDSTAT_WRITE = 1, /*!< Write statistics index. */
FDSTAT_SEEK = 2, /*!< Seek statistics index. */
FDSTAT_CLOSE = 3, /*!< Close statistics index */
FDSTAT_DIGEST = 4, /*!< Digest statistics index. */
FDSTAT_MAX = 5
} fdOpX;
/** \ingroup rpmio
 * Cumulative statistics for a descriptor.
 */
typedef /*@abstract@*/ struct {
struct rpmop_s ops[FDSTAT_MAX]; /*!< Cumulative statistics. */
} * FDSTAT_t;
/** \ingroup rpmio
 * A single message-digest context attached to a descriptor.
 */
typedef struct _FDDIGEST_s {
DIGEST_CTX hashctx;
} * FDDIGEST_t;
/** \ingroup rpmio
 * The FD_t File Handle data structure.
 */
struct _FD_s {
struct rpmioItem_s _item; /*!< usage mutex and pool identifier. */
int flags;
#define RPMIO_DEBUG_IO 0x40000000
#define RPMIO_DEBUG_REFS 0x20000000
int magic; /*!< sanity-check value, see FDSANE(). */
#define FDMAGIC 0x04463138
int nfps; /*!< index of the top of the fps[] layer stack. */
FDSTACK_t fps[8]; /*!< stacked I/O layers (see fdPush/fdPop). */
/*@dependent@*/ /*@relnull@*/
void * u; /* ufdio: URL info */
/*@relnull@*/
void * req; /* ufdio: HTTP request */
int rd_timeoutsecs; /* ufdRead: per FD_t timer */
ssize_t bytesRemain; /* ufdio: */
ssize_t contentLength; /* ufdio: */
int persist; /* ufdio: */
int wr_chunked; /* ufdio: */
int syserrno; /* last system errno encountered */
/*@observer@*/
const void *errcookie; /* gzdio/bzdio/ufdio: */
/*null@*/
const char *opath; /* open(2) args. */
int oflags;
mode_t omode;
/*@refcounted@*/ /*@relnull@*/
rpmxar xar; /* xar archive wrapper */
/*@refcounted@*/ /*@relnull@*/
pgpDig dig; /* signature parameters */
FDSTAT_t stats; /* I/O statistics */
size_t ndigests; /* number of attached digest contexts */
DIGEST_CTX *digests;
/*null@*/
const char *contentType; /* ufdio: (HTTP) */
/*null@*/
const char *contentDisposition; /* ufdio: (HTTP) */
time_t lastModified; /* ufdio: (HTTP) */
int ftpFileDoneNeeded; /* ufdio: (FTP) */
unsigned long long fd_cpioPos; /* cpio: */
#if defined(__LCLINT__)
/*@refs@*/
int nrefs; /*!< (unused) keep splint happy */
#endif
};
/*@access FD_t@*/
#define FDSANE(fd) assert(fd != NULL && fd->magic == FDMAGIC)
#define DBG(_f, _m, _x) \
/*@-modfilesys@*/ \
if ((_rpmio_debug | ((_f) ? ((FD_t)(_f))->flags : 0)) & (_m)) fprintf _x \
/*@=modfilesys@*/
#if defined(__LCLINT__XXX)
#define DBGIO(_f, _x)
#define DBGREFS(_f, _x)
#else
#define DBGIO(_f, _x) DBG((_f), RPMIO_DEBUG_IO, _x)
#define DBGREFS(_f, _x) DBG((_f), RPMIO_DEBUG_REFS, _x)
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** \ingroup rpmio
*/
/*@observer@*/ const char * fdbg(/*@null@*/ FD_t fd)
/*@*/;
/** \ingroup rpmio
*/
int fdFgets(FD_t fd, char * buf, size_t len)
/*@globals errno, fileSystem @*/
/*@modifies *buf, fd, errno, fileSystem @*/;
/** \ingroup rpmio
*/
/*@null@*/ FD_t ftpOpen(const char *url, /*@unused@*/ int flags,
/*@unused@*/ mode_t mode, /*@out@*/ urlinfo *uret)
/*@globals h_errno, fileSystem, internalState @*/
/*@modifies *uret, fileSystem, internalState @*/;
/** \ingroup rpmio
*/
int ftpReq(FD_t data, const char * ftpCmd, const char * ftpArg)
/*@globals fileSystem, internalState @*/
/*@modifies data, fileSystem, internalState @*/;
/** \ingroup rpmio
*/
int ftpCmd(const char * cmd, const char * url, const char * arg2)
/*@globals h_errno, fileSystem, internalState @*/
/*@modifies fileSystem, internalState @*/;
/** \ingroup rpmio
*/
int ufdClose( /*@only@*/ void * cookie)
/*@globals fileSystem, internalState @*/
/*@modifies cookie, fileSystem, internalState @*/;
/** \ingroup rpmio
 * Record the open(2) path/flags/mode on an FD_t, replacing any prior path.
 */
/*@unused@*/ static inline
void fdSetOpen(FD_t fd, const char * path, int flags, mode_t mode)
/*@modifies fd @*/
{
FDSANE(fd);
if (fd->opath != NULL) {
free((void *)fd->opath);
fd->opath = NULL;
}
fd->opath = xstrdup(path);
fd->oflags = flags;
fd->omode = mode;
}
/** \ingroup rpmio
 * Return the recorded open(2) path (NULL if never set).
 */
/*@unused@*/ static inline
/*@null@*/ /*@observer@*/ const char * fdGetOPath(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->opath;
}
/** \ingroup rpmio
 * Return the recorded open(2) flags.
 */
/*@unused@*/ static inline
int fdGetOFlags(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->oflags;
}
/** \ingroup rpmio
 * Return the recorded open(2) mode.
 */
/*@unused@*/ static inline
mode_t fdGetOMode(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->omode;
}
/** \ingroup rpmio
 * Attach signature parameters to fd, releasing any previous reference.
 */
/*@unused@*/ static inline
void fdSetDig(FD_t fd, pgpDig dig)
/*@globals fileSystem @*/
/*@modifies fd, dig, fileSystem @*/
{
FDSANE(fd);
/*@-assignexpose -castexpose @*/
fd->dig = pgpDigFree(fd->dig);
fd->dig = pgpDigLink(dig);
/*@=assignexpose =castexpose @*/
}
/** \ingroup rpmio
 * Return the signature parameters attached to fd (NULL if none).
 */
/*@unused@*/ static inline
/*@null@*/ pgpDig fdGetDig(FD_t fd)
/*@*/
{
FDSANE(fd);
/*@-compdef -retexpose -refcounttrans -usereleased @*/
return fd->dig;
/*@=compdef =retexpose =refcounttrans =usereleased @*/
}
/** \ingroup rpmio
 * Attach an xar archive wrapper to fd (takes a new reference).
 * NOTE(review): unlike fdSetDig, any previous fd->xar is not released here —
 * presumably callers set it only once; confirm before reuse.
 */
/*@unused@*/ static inline
void fdSetXAR(FD_t fd, rpmxar xar)
/*@globals fileSystem @*/
/*@modifies fd, xar, fileSystem @*/
{
FDSANE(fd);
/*@-assignexpose -castexpose @*/
fd->xar = rpmxarLink(xar, "fdSetXAR");
/*@=assignexpose =castexpose @*/
}
/** \ingroup rpmio
 * Return the xar archive wrapper attached to fd (NULL if none).
 */
/*@unused@*/ static inline
/*@null@*/ rpmxar fdGetXAR(FD_t fd)
/*@*/
{
FDSANE(fd);
/*@-compdef -refcounttrans -retexpose -usereleased @*/
return fd->xar;
/*@=compdef =refcounttrans =retexpose =usereleased @*/
}
/** \ingroup rpmio
 * Return the I/O vtable of the current (top) stack layer.
 */
/*@unused@*/ static inline
/*@null@*/ FDIO_t fdGetIo(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->fps[fd->nfps].io;
}
/** \ingroup rpmio
 * Set the I/O vtable of the current (top) stack layer.
 */
/*@-nullstate@*/ /* FIX: io may be NULL */
/*@unused@*/ static inline
void fdSetIo(FD_t fd, /*@kept@*/ /*@null@*/ FDIO_t io)
/*@modifies fd @*/
{
FDSANE(fd);
/*@-assignexpose@*/
fd->fps[fd->nfps].io = io;
/*@=assignexpose@*/
}
/*@=nullstate@*/
/** \ingroup rpmio
 * Return the current layer's cookie as a stdio FILE pointer.
 */
/*@unused@*/ static inline
/*@exposed@*/ /*@dependent@*/ /*@null@*/ FILE * fdGetFILE(FD_t fd)
/*@*/
{
FDSANE(fd);
/*@+voidabstract@*/
return ((FILE *)fd->fps[fd->nfps].fp);
/*@=voidabstract@*/
}
/** \ingroup rpmio
 * Return the current layer's raw cookie pointer.
 */
/*@unused@*/ static inline
/*@exposed@*/ /*@dependent@*/ /*@null@*/ void * fdGetFp(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->fps[fd->nfps].fp;
}
/** \ingroup rpmio
 * Set the current layer's cookie pointer.
 */
/*@-nullstate@*/ /* FIX: fp may be NULL */
/*@unused@*/ static inline
void fdSetFp(FD_t fd, /*@kept@*/ /*@null@*/ void * fp)
/*@modifies fd @*/
{
FDSANE(fd);
/*@-assignexpose@*/
fd->fps[fd->nfps].fp = fp;
/*@=assignexpose@*/
}
/*@=nullstate@*/
/** \ingroup rpmio
 * Return the current layer's file descriptor number.
 */
/*@unused@*/ static inline
int fdGetFdno(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->fps[fd->nfps].fdno;
}
/** \ingroup rpmio
 * Set the current layer's file descriptor number.
 */
/*@unused@*/ static inline
void fdSetFdno(FD_t fd, int fdno)
/*@modifies fd @*/
{
FDSANE(fd);
fd->fps[fd->nfps].fdno = fdno;
}
/** \ingroup rpmio
 * Set the expected content length; bytesRemain is reset to the same value.
 */
/*@unused@*/ static inline
void fdSetContentLength(FD_t fd, ssize_t contentLength)
/*@modifies fd @*/
{
FDSANE(fd);
fd->contentLength = fd->bytesRemain = contentLength;
}
/** \ingroup rpmio
 * Push a new I/O layer (io/fp/fdno) onto the descriptor stack.
 * Silently ignored when the fixed-size stack is full.
 */
/*@unused@*/ static inline
void fdPush(FD_t fd, FDIO_t io, void * fp, int fdno)
/*@modifies fd @*/
{
FDSANE(fd);
if (fd->nfps >= (int)(sizeof(fd->fps)/sizeof(fd->fps[0]) - 1))
return;
fd->nfps++;
fdSetIo(fd, io);
fdSetFp(fd, fp);
fdSetFdno(fd, fdno);
}
/** \ingroup rpmio
 * Pop the top I/O layer, clearing its slots first.
 * NOTE(review): the guard is (nfps < 0), so popping at nfps == 0 leaves
 * nfps at -1 — presumably intentional legacy behavior; confirm.
 */
/*@unused@*/ static inline
void fdPop(FD_t fd)
/*@modifies fd @*/
{
FDSANE(fd);
if (fd->nfps < 0) return;
fdSetIo(fd, NULL);
fdSetFp(fd, NULL);
fdSetFdno(fd, -1);
fd->nfps--;
}
/** \ingroup rpmio
 * Return the stats slot for operation opx, or NULL if fd/stats are absent
 * or opx is out of range.
 */
/*@unused@*/ static inline /*@null@*/
rpmop fdstat_op(/*@null@*/ FD_t fd, fdOpX opx)
/*@*/
{
rpmop op = NULL;
if (fd != NULL && fd->stats != NULL && (int)opx >= 0 && opx < FDSTAT_MAX)
op = fd->stats->ops + opx;
return op;
}
/** \ingroup rpmio
 * Start timing an I/O operation (no-op when fd or its stats are NULL).
 */
/*@unused@*/ static inline
void fdstat_enter(/*@null@*/ FD_t fd, int opx)
/*@globals internalState @*/
/*@modifies internalState @*/
{
if (fd == NULL) return;
if (fd->stats != NULL)
(void) rpmswEnter(fdstat_op(fd, opx), 0);
}
/** \ingroup rpmio
 * Finish timing an I/O operation; rc is the byte count, or -1 on error
 * (which records errno into fd->syserrno). Read/write byte counts are
 * also debited from fd->bytesRemain.
 */
/*@unused@*/ static inline
void fdstat_exit(/*@null@*/ FD_t fd, int opx, ssize_t rc)
/*@globals internalState @*/
/*@modifies fd, internalState @*/
{
if (fd == NULL) return;
if (rc == -1)
fd->syserrno = errno;
else if (rc > 0 && fd->bytesRemain > 0)
switch (opx) {
case FDSTAT_READ:
case FDSTAT_WRITE:
fd->bytesRemain -= rc;
break;
default:
break;
}
if (fd->stats != NULL)
(void) rpmswExit(fdstat_op(fd, opx), rc);
}
/** \ingroup rpmio
 * Print cumulative I/O statistics for fd to fp; msg is an optional prefix.
 * Only READ and WRITE are reported; SEEK/CLOSE cases are skipped.
 */
/*@unused@*/ static inline
void fdstat_print(/*@null@*/ FD_t fd, const char * msg, FILE * fp)
/*@globals fileSystem @*/
/*@modifies *fp, fileSystem @*/
{
static int usec_scale = (1000*1000);
int opx;
if (fd == NULL || fd->stats == NULL) return;
for (opx = 0; opx < 4; opx++) {
rpmop op = &fd->stats->ops[opx];
if (op->count <= 0) continue;
switch (opx) {
case FDSTAT_READ:
if (msg != NULL) fprintf(fp, "%s:", msg);
fprintf(fp, "%8d reads, %8lu total bytes in %d.%06d secs\n",
op->count, (unsigned long)op->bytes,
(int)(op->usecs/usec_scale), (int)(op->usecs%usec_scale));
/*@switchbreak@*/ break;
case FDSTAT_WRITE:
if (msg != NULL) fprintf(fp, "%s:", msg);
fprintf(fp, "%8d writes, %8lu total bytes in %d.%06d secs\n",
op->count, (unsigned long)op->bytes,
(int)(op->usecs/usec_scale), (int)(op->usecs%usec_scale));
/*@switchbreak@*/ break;
case FDSTAT_SEEK:
/*@switchbreak@*/ break;
case FDSTAT_CLOSE:
/*@switchbreak@*/ break;
}
}
}
/** \ingroup rpmio
 * Record the last system errno and an opaque error cookie on fd.
 */
/*@unused@*/ static inline
void fdSetSyserrno(FD_t fd, int syserrno, /*@kept@*/ const void * errcookie)
/*@modifies fd @*/
{
FDSANE(fd);
fd->syserrno = syserrno;
/*@-assignexpose@*/
fd->errcookie = errcookie;
/*@=assignexpose@*/
}
/** \ingroup rpmio
 * Return the per-descriptor read timeout in seconds.
 */
/*@unused@*/ static inline
int fdGetRdTimeoutSecs(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->rd_timeoutsecs;
}
/** \ingroup rpmio
 * Return the current cpio archive position.
 */
/*@unused@*/ static inline
unsigned long long fdGetCpioPos(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->fd_cpioPos;
}
/** \ingroup rpmio
 * Set the cpio archive position.
 * NOTE(review): the parameter is long int while the field is unsigned long
 * long — negative values would wrap; confirm callers never pass them.
 */
/*@unused@*/ static inline
void fdSetCpioPos(FD_t fd, long int cpioPos)
/*@modifies fd @*/
{
FDSANE(fd);
fd->fd_cpioPos = cpioPos;
}
/** \ingroup rpmio
 * Convert a stdio-style cookie back into a sanity-checked FD_t.
 */
/*@mayexit@*/ /*@unused@*/ static inline
FD_t c2f(/*@null@*/ void * cookie)
/*@*/
{
/*@-castexpose@*/
FD_t fd = (FD_t) cookie;
/*@=castexpose@*/
FDSANE(fd);
/*@-refcounttrans -retalias@*/ return fd; /*@=refcounttrans =retalias@*/
}
/** \ingroup rpmio
 * Attach digest to fd.
 * Grows the digests array and starts a new digest context for hashalgo.
 */
/*@unused@*/ static inline
void fdInitDigest(FD_t fd, pgpHashAlgo hashalgo, int flags)
/*@globals internalState @*/
/*@modifies fd, internalState @*/
{
/*@+voidabstract@*/
fd->digests = xrealloc(fd->digests,
(fd->ndigests + 1) * sizeof(*fd->digests));
/*@=voidabstract@*/
fdstat_enter(fd, FDSTAT_DIGEST);
fd->digests[fd->ndigests++] = rpmDigestInit(hashalgo, flags);
fdstat_exit(fd, FDSTAT_DIGEST, 0);
}
/** \ingroup rpmio
 * Attach digest to fd.
 * Keys the most recently attached digest as an HMAC; no-op when there are
 * no digests or key is NULL.
 */
/*@unused@*/ static inline
void fdInitHmac(FD_t fd, const void * key, size_t keylen)
/*@globals internalState @*/
/*@modifies internalState @*/
{
if (fd->digests != NULL && fd->ndigests > 0 && key != NULL)
(void) rpmHmacInit(fd->digests[fd->ndigests-1], key, keylen);
}
/** \ingroup rpmio
 * Update digest(s) attached to fd.
 * Feeds buf to every active digest; updates run in parallel when more than
 * one digest is attached and OpenMP is available.
 */
/*@unused@*/ static inline
void fdUpdateDigests(FD_t fd, const unsigned char * buf, ssize_t buflen)
/*@globals internalState @*/
/*@modifies fd, internalState @*/
{
int i;
if (fd->ndigests > 0 && buf != NULL && buflen > 0) {
fdstat_enter(fd, FDSTAT_DIGEST);
#if defined(_OPENMP)
#pragma omp parallel for if (fd->ndigests > 1)
#endif
for (i = fd->ndigests - 1; i >= 0; i--) {
DIGEST_CTX ctx = fd->digests[i];
if (ctx == NULL)
continue;
(void) rpmDigestUpdate(ctx, buf, buflen);
}
fdstat_exit(fd, FDSTAT_DIGEST, buflen);
}
}
/** \ingroup rpmio
 * Finalize and detach the digest context matching hashalgo.
 * Scans the attached digests from newest to oldest, finalizes the first
 * one whose algorithm matches (ownership of the result passes to the
 * caller through datap/lenp), and clears its slot. If no context
 * matches, *datap is set to NULL and *lenp to 0.
 * @param fd		file handle
 * @param hashalgo	algorithm selecting which digest to finalize
 * @param[out] datap	digest result (NULL if no match), may be NULL
 * @param[out] lenp	digest length (0 if no match), may be NULL
 * @param asAscii	non-zero to return a hex string instead of bytes
 */
/*@unused@*/ static inline
void fdFiniDigest(FD_t fd, pgpHashAlgo hashalgo,
/*@null@*/ /*@out@*/ void * datap,
/*@null@*/ /*@out@*/ size_t * lenp,
int asAscii)
/*@globals internalState @*/
/*@modifies fd, *datap, *lenp, internalState @*/
{
int i = -1;
if (fd->ndigests > 0) {
fdstat_enter(fd, FDSTAT_DIGEST);
for (i = fd->ndigests - 1; i >= 0; i--) {
DIGEST_CTX ctx = fd->digests[i];
if (ctx == NULL)
continue;
if (rpmDigestAlgo(ctx) != hashalgo)
continue;
fd->digests[i] = NULL;
(void) rpmDigestFinal(ctx, datap, lenp, asAscii);
break;
}
fdstat_exit(fd, FDSTAT_DIGEST, 0);
}
/* i < 0 means either no digests at all or none matched hashalgo */
if (i < 0) {
if (datap != NULL) *(void **)datap = NULL;
if (lenp != NULL) *lenp = 0;
}
}
/** \ingroup rpmio
 * Transfer ownership of attached digest contexts to a pgpDig.
 * MD5 contexts move to dig->md5ctx; SHA-family and RIPEMD-160 contexts
 * move to dig->sha1ctx. Each moved slot in fd->digests is cleared so the
 * fd no longer finalizes/frees it. Asserts that the destination slot in
 * dig is empty before the move.
 * @param fd	file handle whose digests are stolen
 * @param dig	destination signature/digest container
 */
/*@-mustmod@*/
/*@unused@*/ static inline
void fdStealDigest(FD_t fd, pgpDig dig)
/*@modifies fd, dig @*/
{
int i;
/*@-type@*/ /* FIX: getters for pgpDig internals */
if (fd->ndigests > 0)
for (i = fd->ndigests - 1; i >= 0; i--) {
DIGEST_CTX ctx = fd->digests[i];
if (ctx != NULL)
switch (rpmDigestAlgo(ctx)) {
case PGPHASHALGO_MD5:
assert(dig->md5ctx == NULL);
/*@-assignexpose -onlytrans@*/
dig->md5ctx = ctx;
/*@=assignexpose =onlytrans@*/
fd->digests[i] = NULL;
/*@switchbreak@*/ break;
case PGPHASHALGO_SHA1:
case PGPHASHALGO_RIPEMD160:
case PGPHASHALGO_SHA256:
case PGPHASHALGO_SHA384:
case PGPHASHALGO_SHA512:
assert(dig->sha1ctx == NULL);
/*@-assignexpose -onlytrans@*/
dig->sha1ctx = ctx;
/*@=assignexpose =onlytrans@*/
fd->digests[i] = NULL;
/*@switchbreak@*/ break;
default:
/*@switchbreak@*/ break;
}
}
/*@=type@*/
}
/*@=mustmod@*/
/*@-shadow@*/
/** \ingroup rpmio
 * Return the OS file descriptor number behind a stdio cookie.
 * Reads fps[0].fdno — presumably the first layer of the fd's I/O stack;
 * confirm against the FD_t layering elsewhere in this header.
 * @param cookie	opaque cookie (may be NULL)
 * @return		fd number, or -2 when cookie is NULL
 */
/*@unused@*/ static inline
int fdFileno(/*@null@*/ void * cookie)
/*@*/
{
FD_t fd;
if (cookie == NULL) return -2;
fd = c2f(cookie);
return fd->fps[0].fdno;
}
/*@=shadow@*/
#ifdef __cplusplus
}
#endif
#endif /* H_RPMIO_INTERNAL */
|
Example_target.6.c | /*
* @@name: target.6c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.5
*/
#define THRESHOLD1 1000000
#define THRESHOLD2 1000
extern void init(float*, float*, int);
extern void output(float*, int);
/* Elementwise product p[i] = v1[i] * v2[i] over N elements.
 * Demonstrates OpenMP 4.5 clause-specific if() modifiers: the target
 * region is offloaded to the device only when N > THRESHOLD1, and the
 * parallel region is forked only when N > THRESHOLD2; v1/v2 are mapped
 * to the device and p is mapped back. */
void vec_mult(float *p, float *v1, float *v2, int N)
{
int i;
init(v1, v2, N);
#pragma omp target parallel for \
if(target: N>THRESHOLD1) if(parallel: N>THRESHOLD2) \
map(to: v1[0:N], v2[:N]) map(from: p[0:N])
for (i=0; i<N; i++)
p[i] = v1[i] * v2[i];
output(p, N);
}
|
pacset_rf_classifier.h | #ifndef PACSET_RF_CLASS
#define PACSET_RF_CLASS
// C++ standard library
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <random>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
// C / system headers
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
// project headers
#include "pacset_base_model.h"
#include "packer.h"
#include "config.h"
#include "json_reader.h"
#include "utils.h"
//#include "node.h"
#include "MemoryMapped.h"
#define LAT_LOGGING 2
#define BLOCK_LOGGING 1
#define BLOCK_SIZE 2048
using std::uint32_t;
// Nodes per blob: used by transformIndex() to split a flat node index
// into a (blob index, within-blob offset) pair.
const int blob_size = 10000;
template <typename T, typename F>
// Random-forest classifier over a PACSET-packed model: trees are grouped
// into "bins", each bin's nodes laid out contiguously for cache-friendly
// traversal.  Inference walks all trees of all bins in lock step and
// tallies one class vote per tree.
class PacsetRandomForestClassifier: public PacsetBaseModel<T, F> {
    public:
    // Install the bin metadata (trees per bin, nodes per bin, per-tree
    // root offsets).  All three members are cleared first so repeated
    // calls (e.g. deserialize() run twice) replace rather than append —
    // the original cleared only bin_sizes, silently corrupting the rest.
    inline void setMembers(const std::vector<int> &bin_sizes,
                           const std::vector<int> &bin_node_sizes,
                           const std::vector<std::vector<int>> &bin_start){
        PacsetBaseModel<T, F>::bin_sizes.clear();
        PacsetBaseModel<T, F>::bin_node_sizes.clear();
        PacsetBaseModel<T, F>::bin_start.clear();
        std::copy(bin_sizes.begin(), bin_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_sizes));
        std::copy(bin_node_sizes.begin(), bin_node_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_node_sizes));
        for (const auto &i: bin_start)
            PacsetBaseModel<T, F>::bin_start.push_back(i);
    }

    // Record the packed node count of bin `pos`.
    inline void setBinNodeSizes(int pos, int siz){
        PacsetBaseModel<T, F>::bin_node_sizes[pos] = siz;
    }

    // Load a scikit-learn model (JSON) into the bin structures.
    inline void loadModel() {
        JSONReader<T, F> J;
        J.convertSklToBinsRapidJson(PacsetBaseModel<T, F>::bins,
                                    PacsetBaseModel<T, F>::bin_sizes,
                                    PacsetBaseModel<T, F>::bin_start,
                                    PacsetBaseModel<T, F>::bin_node_sizes);
    }

    // Reorder every bin's nodes in place according to the configured
    // layout ("layout" config key), optionally intertwining the top
    // levels of the trees ("intertwine" = depth).
    inline void pack(){
        std::string layout = Config::getValue("layout");
        int num_bins = std::stoi(Config::getValue("numthreads"));
        for(int i=0; i<num_bins; ++i){
            Packer<T, F> packer_obj(layout);
            if(Config::getValue("intertwine") != std::string("notfound"))
                packer_obj.setDepthIntertwined(std::atoi(Config::getValue("intertwine").c_str()));
            // packs in place
            packer_obj.pack(PacsetBaseModel<T, F>::bins[i],
                            PacsetBaseModel<T, F>::bin_sizes[i],
                            PacsetBaseModel<T, F>::bin_start[i]);
            setBinNodeSizes(i, PacsetBaseModel<T, F>::bins[i].size());
        }
    }

    // Classify a single observation: every tree of every bin walks from
    // its root to a leaf and votes, incrementing preds[class].  The
    // packed model is accessed either through mmap (mmap == true) or by
    // reading the node array from disk.
    // Returns 0 on success, -1 if the model file could not be opened.
    inline int mmapAndPredict(const std::vector<T>& observation, std::vector<int>& preds, int obsnum, bool mmap) {
        int num_bins = PacsetBaseModel<T, F>::bin_sizes.size();
        std::string modelfname = Config::getValue("modelfilename");
        Node<T, F> *data;
        // Must live for the whole prediction loop: in the non-mmap path
        // `data` points into this vector.  (The original declared it
        // inside the else-branch, leaving `data` dangling — use-after-free.)
        std::vector<Node<T, F>> bin_elements;
        MemoryMapped mmapped_obj(modelfname.c_str(), 0);
        if (mmap){
            data = (Node<T, F>*)mmapped_obj.getData();
        } else {
            FILE* fp = fopen(modelfname.c_str(), "rb");
            if (fp == NULL)
                return -1;          // model file missing/unreadable
            Node<T, F> node;
            // Check fread's return instead of feof-before-read: the
            // original's while(!feof(fp)) appended one stale duplicate
            // of the last node after EOF.
            while (fread((char*)&node, sizeof(node), 1, fp) == 1)
                bin_elements.push_back(node);
            fclose(fp);             // the original leaked this handle
            data = bin_elements.data();
        }
        // Prefix sums: node index where each bin starts inside `data`.
        std::vector<int> offsets;
        int curr_offset = 0;
        for (auto val: PacsetBaseModel<T, F>::bin_node_sizes){
            offsets.push_back(curr_offset);
            curr_offset += val;
        }
        for(int bin_counter=0; bin_counter<num_bins; ++bin_counter){
            Node<T, F> *bin = data + offsets[bin_counter];
            std::vector<int> curr_node(PacsetBaseModel<T, F>::bin_node_sizes[bin_counter]);
            int i, feature_num=0, number_not_in_leaf=0;
            T feature_val;
            int siz = PacsetBaseModel<T, F>::bin_sizes[bin_counter];
            // Start every tree of this bin at its root; prefetch the roots.
            for(i=0; i<siz; ++i){
                curr_node[i] = PacsetBaseModel<T, F>::bin_start[bin_counter][i];
                __builtin_prefetch(&bin[curr_node[i]], 0, 3);
            }
            // Advance all trees in lock step until each reaches a leaf.
            do{
                number_not_in_leaf = 0;
                for( i=0; i<siz; ++i){
                    if(bin[curr_node[i]].isInternalNodeFront()){
                        feature_num = bin[curr_node[i]].getFeature();
                        feature_val = observation[feature_num];
                        curr_node[i] = bin[curr_node[i]].nextNode(feature_val);
                        __builtin_prefetch(&bin[curr_node[i]], 0, 3);
                        ++number_not_in_leaf;
                    }
                }
            }while(number_not_in_leaf);
            // One vote per tree for the class stored at the reached leaf.
            for(i=0; i<siz; ++i){
                ++preds[bin[curr_node[i]].getClass()];
            }
        }
        mmapped_obj.close();
        return 0;
    }

    // Translate a flat node index into a (blob index, within-blob offset)
    // pair using the global blob_size.
    std::pair<int, int> transformIndex(int node_number, int bin_start_list, int bin_number){
        return std::make_pair(bin_start_list + node_number/blob_size, node_number % blob_size);
    }

    // Regression-style overload: intentionally unsupported by the
    // classifier (kept so the base interface is satisfied).
    inline void predict(const std::vector<std::vector<T>> &observations,
                        std::vector<double> &preds, std::vector<double> &result, bool mmap){}

    // Predict the class of every observation: tally per-class votes via
    // mmapAndPredict(), push the argmax class into `results`, then reset
    // the vote counters for the next observation.  `preds` is (re)sized
    // to numclasses and zeroed up front — the original appended zeros,
    // which mis-indexed the tallies when a non-empty vector was passed.
    inline void predict(const std::vector<std::vector<T>>& observation,
                        std::vector<int>& preds, std::vector<int>&results, bool mmap) {
        int num_classes = std::stoi(Config::getValue("numclasses"));
        preds.assign(num_classes, 0);
        int ct = 0;
        for(const auto &single_obs : observation){
            mmapAndPredict(single_obs, preds, ct+1, mmap);
            // argmax over the vote counters
            int max = -1, maxid = -1;
            for(int i=0; i<num_classes; ++i){
                if(preds[i]>max){
                    maxid = i;
                    max = preds[i];
                }
            }
            ct++;
            results.push_back(maxid);
            std::fill(preds.begin(), preds.end(), 0);
        }
    }

    // Write the metadata needed to reconstruct the bins: class count,
    // bin count, trees per bin, nodes per bin, and each tree's start
    // offset.  File comes from "metadatafilename" (default metadata.txt).
    inline void serializeMetadata() {
        int num_classes = std::stoi(Config::getValue("numclasses"));
        int num_bins = PacsetBaseModel<T, F>::bins.size();
        // const refs: the original deep-copied bins and every vector here
        const std::vector<int> &bin_sizes = PacsetBaseModel<T, F>::bin_sizes;
        const std::vector<int> &bin_node_sizes = PacsetBaseModel<T, F>::bin_node_sizes;
        const std::vector<std::vector<int>> &bin_start = PacsetBaseModel<T, F>::bin_start;
        std::string filename;
        std::string modelfname = Config::getValue("metadatafilename");
        if(modelfname != std::string("notfound"))
            filename = modelfname;
        else
            filename = "metadata.txt";
        std::fstream fout;
        fout.open(filename, std::ios::out );
        //Number of classes
        fout<<num_classes<<"\n";
        //Number of bins
        fout<<num_bins<<"\n";
        //Number of trees in each bin
        for(auto i: bin_sizes){
            fout<<i<<"\n";
        }
        //Number of nodes in each bin
        for(auto i: bin_node_sizes){
            fout<<i<<"\n";
        }
        //start position of each bin
        for(const auto &i: bin_start){
            for(auto tree_start: i){
                fout<<tree_start<<"\n";
            }
        }
        fout.close();
    }

    // Serialize every node of every bin as raw bytes to the pack file
    // ("packfilename" config value, default packedmodel.bin).
    inline void serializeModelBinary() {
        std::string modelfname = Config::getValue("packfilename");
        std::string filename;
        if(modelfname != std::string("notfound"))
            filename = modelfname;
        else
            filename = "packedmodel.bin";
        std::fstream fout;
        fout.open(filename, std::ios::binary | std::ios::out );
        // iterate by reference: the original copied each bin and each
        // node (twice) before writing the identical bytes
        for(auto &bin: PacsetBaseModel<T, F>::bins){
            for(auto &node: bin){
                fout.write((char*)&node, sizeof(node));
            }
        }
        fout.close();
    }

    // Serialize every node as "left, right, feature, threshold" text
    // lines ("packfilename" config value, default packedmodel.txt).
    inline void serializeModelText(){
        std::string modelfname = Config::getValue("packfilename");
        std::string filename;
        if(modelfname != std::string("notfound"))
            filename = modelfname;
        else
            filename = "packedmodel.txt";
        std::fstream fout;
        fout.open(filename, std::ios::out );
        for(auto &bin: PacsetBaseModel<T, F>::bins){
            for(auto &node: bin){
                fout<<node.getLeft()<<", "<<node.getRight()
                    <<", "<<node.getFeature()<<", "<<node.getThreshold()<<"\n";
            }
        }
        fout.close();
    }

    // Write metadata plus the packed model, binary or text depending on
    // the "format" config value (anything other than "binary" → text).
    inline void serialize() {
        std::string format = Config::getValue("format");
        serializeMetadata();
        if(format == std::string("binary")){
            serializeModelBinary();
        }
        else {
            serializeModelText();
        }
    }

    // Read back the metadata written by serializeMetadata() and install
    // it via setMembers(); also publishes "numclasses"/"numthreads" into
    // the global config.
    inline void deserialize(){
        int num_classes, num_bins;
        std::string filename = Config::getValue("metadatafilename");
        std::fstream f;
        f.open(filename, std::ios::in );
        //Number of classes
        f>>num_classes;
        Config::setConfigItem("numclasses", std::to_string(num_classes));
        //Number of bins
        f>>num_bins;
        Config::setConfigItem("numthreads", std::to_string(num_bins));
        std::vector<int> num_trees_bin;
        std::vector<int> num_nodes_bin;
        std::vector<std::vector<int>> bin_tree_start;
        int val;
        //Number of trees in each bin
        for(int i=0; i<num_bins; ++i){
            f>>val;
            num_trees_bin.push_back(val);
        }
        //Number of nodes in each bin
        for(int i=0; i<num_bins; ++i){
            f>>val;
            num_nodes_bin.push_back(val);
        }
        std::vector<int> temp;
        //start position of each bin
        for(int i=0; i<num_bins; ++i){
            for(int j=0; j<num_trees_bin[i]; ++j){
                f>>val;
                temp.push_back(val);
            }
            bin_tree_start.push_back(temp);
            temp.clear();
        }
        f.close();
        setMembers(num_trees_bin, num_nodes_bin, bin_tree_start);
    }
};
#endif
|
implicitBarrierIssue.c | void abc() {
}
int main() {
    /* A single team of threads executes this region. */
#pragma omp parallel
    {
        /* Worksharing loop: the 100 iterations are divided among the
         * team, and the implicit barrier at the end of the for construct
         * holds every thread until all iterations have completed. */
#pragma omp for
        for (int iter = 0; iter < 100; ++iter) {
            abc();
        }
    }
    return 0;
}
|
GB_binop__islt_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_fp32)
// A*D function (colscale): GB (_AxD__islt_fp32)
// D*A function (rowscale): GB (_DxB__islt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_fp32)
// C=scalar+B GB (_bind1st__islt_fp32)
// C=scalar+B' GB (_bind1st_tran__islt_fp32)
// C=A+scalar GB (_bind2nd__islt_fp32)
// C=A'+scalar GB (_bind2nd_tran__islt_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_FP32 || GxB_NO_ISLT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLT is none of those, so the generator compiles this dense
// C += A+B kernel out; the stub is kept (under #if 0) so all generated
// files share the same shape.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator.  The loop
// body lives in the shared template; this file only supplies the GB_*
// type/operator macros defined above.
void GB (_Cdense_ewise3_noaccum__islt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  Returns
// GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_Cdense_accumB__islt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  (The second
// "return (GrB_SUCCESS)" is unreachable generator boilerplate: the
// template's enclosing block already returns.)
GrB_Info GB (_Cdense_accumb__islt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__islt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__islt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked with M or !M).  For eWiseUnion,
// alpha/beta scalars replace missing entries of A/B respectively; they
// are only dereferenced when is_eWiseUnion is true.
GrB_Info GB (_AaddB__islt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse.
GrB_Info GB (_AemultB_08__islt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for ISLT (it was rewritten by the
// builder rather than flipped at run time), so only the unflipped
// template is compiled here.
GrB_Info GB (_AemultB_02__islt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full.
GrB_Info GB (_AemultB_04__islt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is bitmap.
GrB_Info GB (_AemultB_bitmap__islt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]): bind the scalar x as the first operand.  Bb is
// B's bitmap (GBB treats NULL as "all present"); skipped entries are
// left untouched.  The comparison result (0 or 1) is stored as float,
// as ISLT is defined to do.
GrB_Info GB (_bind1st__islt_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y): bind the scalar y as the second operand.  Ab is
// A's bitmap (GBB treats NULL as "all present").
GrB_Info GB (_bind2nd__islt_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound as the
// first operand (via GB_CAST_OP defined just above).  GB_ATYPE is
// temporarily redefined because the transpose template reads it for the
// second operand's type; it is restored after the template.
GrB_Info GB (_bind1st_tran__islt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound as the
// second operand (via GB_CAST_OP defined just above).
GrB_Info GB (_bind2nd_tran__islt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int8_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int8_fc32
// op(A') function: GB_unop_tran__identity_int8_fc32
// C type: int8_t
// A type: GxB_FC32_t
// cast: int8_t cij = GB_cast_to_int8_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int8_t) creal (Ax [p]): apply the identity operator with an
// FC32 -> int8 typecast (real part, saturating cast).  Ab is A's bitmap
// (NULL when A is not bitmap).  The memcpy fast path is compiled out
// here because GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0 for this pair.
GrB_Info GB_unop_apply__identity_int8_fc32
(
int8_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying the typecast and
// identity operator (loop body in the shared transpose template).
GrB_Info GB_unop_tran__identity_int8_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
professor_challenge.c | /*
The number of primes up to 75000 is 7393 and took it 1s to solve it in parallel and 2s sequential
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <omp.h>
/*
 * Count the primes in the half-open interval [start, start+chunck).
 * Trial division now stops at sqrt(i) (j <= i/j, written division-style
 * to avoid j*j overflow) instead of running all the way to i-1, which is
 * dramatically faster and yields identical counts: 0 and 1 are rejected
 * explicitly, 2 and 3 have no candidate divisor and are accepted.
 */
static const unsigned int verify_subdomain(const unsigned int start, const unsigned int chunck) {
	unsigned int i = 0, j = 0, counter = 0;
	for(i = start; i < start+chunck; i++) {
		if(i < 2)
			continue; /* 0 and 1 are not prime */
		int is_prime = 1;
		/* a composite i must have a divisor no larger than sqrt(i) */
		for(j = 2; j <= i/j; j++) {
			if(0 == i%j) {
				is_prime = 0;
				break;
			}
		}
		if(is_prime)
			counter += 1;
	}
	return counter;
}
/*
 * Count the primes below `limit` with a fixed team of 8 OpenMP threads.
 * Each thread checks one contiguous sub-range; the last thread also
 * takes the remainder when `limit` is not divisible by the team size.
 *
 * BUG FIX: the original accumulated into the shared `counter` with a
 * plain `+=` inside the parallel region — a data race that could drop
 * updates.  The accumulation now uses an OpenMP reduction.  The printf
 * format also now uses %u for the unsigned arguments.
 */
static const unsigned int n_primes_parallel(const unsigned int limit) {
	unsigned int counter = 0, this_thread = 0, n_threads = 0, start = 0, chunck = 0;
	/* Dynamic don't show good results to this particular case */
	omp_set_dynamic(0);
	#pragma omp parallel default(shared) private(this_thread, n_threads, chunck, start) reduction(+:counter) num_threads(8)
	{
		this_thread = omp_get_thread_num();
		n_threads = omp_get_num_threads();
		chunck = limit/n_threads;
		start = this_thread*chunck;
		if(n_threads-1 == this_thread)
			chunck = limit-start; /* last thread picks up the remainder */
		counter += verify_subdomain(start, chunck);
		printf("#%u out of %u\tstart at %u and checking up to %u\n", this_thread+1, n_threads, start, start+chunck);
	}
	return counter;
}
/*
 * Count the primes below `limit` sequentially.  Trial division stops at
 * sqrt(i) (j <= i/j) instead of i-1 — same counts, far fewer divisions.
 * The loop starts at 2 because 0 and 1 are not prime (the original
 * started at 1, which the j==i test rejected anyway).
 */
static const unsigned int n_primes_sequential(const unsigned int limit) {
	unsigned int i = 0, j = 0, counter = 0;
	for(i = 2; i < limit; i++) {
		int is_prime = 1;
		for(j = 2; j <= i/j; j++) {
			if(0 == i%j) {
				is_prime = 0;
				break;
			}
		}
		if(is_prime)
			counter += 1;
	}
	return counter;
}
int main(int argc, char **argv) {
	/* Above that number the parallel doesn't perform better than the sequential */
	const unsigned int limit = 75000;
	unsigned int primes_parallel = 0, primes_sequential = 0;
	/* BUG FIX: clock() returns clock_t, so clock()/CLOCKS_PER_SEC was an
	 * INTEGER division that truncated every timestamp to whole seconds
	 * before it ever reached the float — cast to double first.  Formats
	 * switched to %.2f/%u accordingly.
	 * NOTE(review): clock() measures CPU time summed over all threads, so
	 * the parallel figure overstates wall time; omp_get_wtime() would
	 * measure wall clock — clock() kept to preserve the original metric. */
	double start_parallel = 0.0, end_parallel = 0.0, start_sequential = 0.0, end_sequential = 0.0;
	start_parallel = (double)clock()/CLOCKS_PER_SEC;
	primes_parallel = n_primes_parallel(limit);
	end_parallel = (double)clock()/CLOCKS_PER_SEC;
	start_sequential = (double)clock()/CLOCKS_PER_SEC;
	primes_sequential = n_primes_sequential(limit);
	end_sequential = (double)clock()/CLOCKS_PER_SEC;
	if(primes_parallel == primes_sequential)
		printf("\nThe number of primes up to %u is %u and took it %.2fs to solve it in parallel and %.2fs sequential\n", limit, primes_sequential, end_parallel - start_parallel, end_sequential - start_sequential);
	else
		printf("\nERROR\tWrong number of primes up to %u calculated:\n\tParallel: %u\n\tSequential: %u\n", limit, primes_parallel, primes_sequential);
	return 0;
}
|
omp.c | #define _XOPEN_SOURCE 700
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#include "../include/fs.h"
#include "../include/keygen.h"
#include "../include/encryptor.h"
#include "../include/commons.h"
#define NUM_THREADS 2
/* Brute-force key search: try every numeric key in [0, cant_keys) against a
 * Blowfish and a CAST5 decryptor until the plaintext starts with "Frase". */
int main(int argc, char** argv)
{
    check_parameters( argc, argv );

    unsigned char encrypted_text[ BLOCK_SIZE ];
    unsigned char iv[ IV_LENGTH ] = {1,2,3,4,5,6,7,8};
    unsigned char **key = NULL;
    int keygen_characters[10] = {'0','1','2','3','4','5','6','7','8','9'};
    long cant_keys;
    long success_key = -1;
    int encryption_method = -1;
    int num_threads = NUM_THREADS;
    char * omp_num_threads = getenv("OMP_NUM_THREADS");
    int thread_id;
    time_t start_time, end_time;

    // get the time stamp
    start_time = time( NULL );
    read_parameters( argv, encrypted_text, &cant_keys );

    // get the number of threads from the OMP_NUM_THREADS environment
    // variable, falling back to NUM_THREADS when unset or invalid
    if( omp_num_threads != NULL ){
        num_threads = strtol( omp_num_threads, NULL, 0 );
        if( num_threads <= 0 )
            num_threads = NUM_THREADS;
    }
    omp_set_num_threads( num_threads );

    // one blowfish decryptor per thread so threads never share cipher state
    Encryptor *bf = (Encryptor *) malloc( num_threads * sizeof( Encryptor ) );
    if( bf == NULL ){
        printf("\nerror on encryptor memory allocation\n");
        exit(-1);
    }
    // one cast5 decryptor per thread
    Encryptor *cast5 = (Encryptor *) malloc( num_threads * sizeof( Encryptor ) );
    if( cast5 == NULL ){
        printf("error on encryptor memory allocation\n");
        exit(-1);
    }
    for( int i = 0; i < num_threads; i++ ){
        init_decryptor( &bf[i], DECRYPT, BLOWFISH, iv, encrypted_text );
    }
    for( int i = 0; i < num_threads; i++){
        init_decryptor( &cast5[i], DECRYPT, CAST5, iv, encrypted_text );
    }

    // one candidate-key buffer per thread, space-padded
    key = ( unsigned char ** ) malloc( num_threads * sizeof( unsigned char * ) );
    if( key == NULL ) {
        printf("\nerror on key memory allocation\n");
        exit(-1);
    }
    for( int i = 0; i < num_threads; i++ ) {
        key[i] = ( unsigned char * ) malloc( KEY_LENGTH );
        if( key[i] == NULL ) {
            printf("\nerror on key memory allocation\n");
            exit(-1);
        }
        memset(key[i],ASCII_SPACE,KEY_LENGTH);
    }

    // Candidate i is rendered as its right-aligned decimal digits over a
    // space-padded buffer. The `success_key == -1` test is a best-effort
    // early exit; the winning (key, method) pair is written inside a
    // critical section so concurrent finders cannot interleave the two
    // writes (the original code raced on both shared variables).
    #pragma omp parallel for private( thread_id ) shared( success_key, encryption_method )
    for(long i = 0; i < cant_keys ; i++ ) {
        if( success_key == -1 ) {
            thread_id = omp_get_thread_num();
            key[ thread_id ][KEY_LENGTH-1] = keygen_characters[i % 10];
            key[ thread_id ][KEY_LENGTH-2] = i/10? keygen_characters[(i/10) % 10] : ASCII_SPACE;
            key[ thread_id ][KEY_LENGTH-3] = i/100? keygen_characters[(i/100) % 10] : ASCII_SPACE;
            key[ thread_id ][KEY_LENGTH-4] = i/1000? keygen_characters[(i/1000) % 10] : ASCII_SPACE;
            key[ thread_id ][KEY_LENGTH-5] = i/10000? keygen_characters[(i/10000) % 10] : ASCII_SPACE;
            key[ thread_id ][KEY_LENGTH-6] = i/100000? keygen_characters[(i/100000) % 10] : ASCII_SPACE;
            key[ thread_id ][KEY_LENGTH-7] = i/1000000? keygen_characters[(i/1000000) % 10] : ASCII_SPACE;
            key[ thread_id ][KEY_LENGTH-8] = i/10000000? keygen_characters[(i/10000000) % 10] : ASCII_SPACE;

            // BLOWFISH
            encryptor_execute( &bf[ thread_id ], key[ thread_id ]);
            if( memcmp( (char *)bf[ thread_id ].output, "Frase", 5 ) == 0 ) {
                #pragma omp critical
                {
                    success_key = i;
                    encryption_method = BLOWFISH;
                }
            }

            // CAST5
            encryptor_execute( &cast5[ thread_id ], key[ thread_id ] );
            if( memcmp( (char *)cast5[ thread_id ].output, "Frase", 5 ) == 0 ) {
                #pragma omp critical
                {
                    success_key = i;
                    encryption_method = CAST5;
                }
            }
        }
    }

    // take the timestamp
    end_time = time(NULL);
    print_result( success_key, encryption_method, difftime( end_time, start_time ) );

    // release every allocation before exiting (the original leaked all of them)
    for( int i = 0; i < num_threads; i++ )
        free( key[i] );
    free( key );
    free( bf );
    free( cast5 );
    exit(0);
}
|
symmetry.c | /* symmetry.c */
/* Copyright (C) 2008 Atsushi Togo */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
//#include <omp.h>
#include "cell.h"
#include "debug.h"
#include "lattice.h"
#include "mathfunc.h"
#include "pointgroup.h"
#include "primitive.h"
#include "symmetry.h"
#include "debug.h"
#define NUM_ATOMS_CRITERION_FOR_OPENMP 1000
#define REDUCE_RATE 0.95
#define PI 3.14159265358979323846
/* Tolerance of angle between lattice vectors in degrees */
/* Negative value invokes converter from symprec. */
static double angle_tolerance = -1.0;
static int relative_axes[][3] = {
{ 1, 0, 0},
{ 0, 1, 0},
{ 0, 0, 1},
{-1, 0, 0},
{ 0,-1, 0}, /* 5 */
{ 0, 0,-1},
{ 0, 1, 1},
{ 1, 0, 1},
{ 1, 1, 0},
{ 0,-1,-1}, /* 10 */
{-1, 0,-1},
{-1,-1, 0},
{ 0, 1,-1},
{-1, 0, 1},
{ 1,-1, 0}, /* 15 */
{ 0,-1, 1},
{ 1, 0,-1},
{-1, 1, 0},
{ 1, 1, 1},
{-1,-1,-1}, /* 20 */
{-1, 1, 1},
{ 1,-1, 1},
{ 1, 1,-1},
{ 1,-1,-1},
{-1, 1,-1}, /* 25 */
{-1,-1, 1},
};
static int identity[3][3] = {{1, 0, 0},
{0, 1, 0},
{0, 0, 1}};
static int get_index_with_least_atoms(const Cell *cell);
static VecDBL * get_translation(SPGCONST int rot[3][3],
SPGCONST Cell *cell,
const double symprec,
const int is_identity);
static Symmetry * get_operations(SPGCONST Cell * cell,
const double symprec);
static Symmetry * reduce_operation(SPGCONST Cell * cell,
SPGCONST Symmetry * symmetry,
const double symprec);
static void search_translation_part(int lat_point_atoms[],
SPGCONST Cell * cell,
SPGCONST int rot[3][3],
const int min_atom_index,
const double origin[3],
const double symprec,
const int is_identity);
static int is_overlap_all_atoms(const double test_trans[3],
SPGCONST int rot[3][3],
SPGCONST Cell * cell,
const double symprec,
const int is_identity);
static PointSymmetry
transform_pointsymmetry(SPGCONST PointSymmetry * point_sym_prim,
SPGCONST double new_lattice[3][3],
SPGCONST double original_lattice[3][3]);
static Symmetry *
get_space_group_operations(SPGCONST PointSymmetry *lattice_sym,
SPGCONST Cell *primitive,
const double symprec);
static Symmetry * recover_operations_original(SPGCONST Symmetry *symmetry,
const VecDBL * pure_trans,
SPGCONST Cell *cell,
SPGCONST Cell *primitive);
static void set_axes(int axes[3][3],
const int a1, const int a2, const int a3);
static PointSymmetry get_lattice_symmetry(SPGCONST Cell *cell,
const double symprec);
static int is_identity_metric(SPGCONST double metric_rotated[3][3],
SPGCONST double metric_orig[3][3],
const double symprec);
static double get_angle(SPGCONST double metric[3][3],
const int i,
const int j);
/* Allocate a Symmetry holding `size` (rotation | translation) pairs.
 * Aborts via exit(1) on allocation failure, matching the member-array
 * handling below. For size <= 0 the arrays are left NULL. */
Symmetry * sym_alloc_symmetry(const int size)
{
  Symmetry *symmetry;

  /* BUG FIX: the container allocation itself was never checked. */
  if ((symmetry = (Symmetry*) malloc(sizeof(Symmetry))) == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    exit(1);
  }
  symmetry->size = size;
  /* Keep the pointers well-defined even when no arrays are allocated. */
  symmetry->rot = NULL;
  symmetry->trans = NULL;
  if (size > 0) {
    if ((symmetry->rot =
         (int (*)[3][3]) malloc(sizeof(int[3][3]) * size)) == NULL) {
      warning_print("spglib: Memory could not be allocated ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      exit(1);
    }
    if ((symmetry->trans =
         (double (*)[3]) malloc(sizeof(double[3]) * size)) == NULL) {
      warning_print("spglib: Memory could not be allocated ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      exit(1);
    }
  }
  return symmetry;
}
/* Release a Symmetry allocated by sym_alloc_symmetry(); NULL is tolerated
 * like free() does. */
void sym_free_symmetry(Symmetry *symmetry)
{
  if (symmetry == NULL) {
    return;
  }
  if (symmetry->size > 0) {
    free(symmetry->rot);
    symmetry->rot = NULL;
    free(symmetry->trans);
    symmetry->trans = NULL;
  }
  free(symmetry);
  /* The original also assigned NULL to the local parameter `symmetry`,
   * which has no effect on the caller's pointer; that dead store was
   * removed. */
}
/* Public entry point: compute all space group operations of `cell` at
 * tolerance `symprec`. Thin wrapper around get_operations(). */
Symmetry * sym_get_operation(SPGCONST Cell *cell,
                             const double symprec) {
  return get_operations(cell, symprec);
}
/* Number of operations may be reduced with smaller symprec. */
/* Public wrapper around reduce_operation(): keep only the operations of
 * `symmetry` that remain valid for `cell` at tolerance `symprec`. */
Symmetry * sym_reduce_operation(SPGCONST Cell * cell,
SPGCONST Symmetry * symmetry,
const double symprec)
{
return reduce_operation(cell, symmetry, symprec);
}
/* Number of pure translations of `cell` (i.e. how many primitive cells
 * the input cell contains). */
int sym_get_multiplicity(SPGCONST Cell *cell,
                         const double symprec)
{
  int num_trans;
  VecDBL *pure_trans;

  pure_trans = get_translation(identity, cell, symprec, 1);
  num_trans = pure_trans->size;
  mat_free_VecDBL(pure_trans);
  return num_trans;
}
/* Find all pure translations (identity rotation) of `cell`. A consistent
 * result must divide cell->size evenly; otherwise a warning is emitted and
 * the (suspect) translations are returned anyway. */
VecDBL * sym_get_pure_translation(SPGCONST Cell *cell,
                                  const double symprec)
{
  int multi;
  VecDBL * pure_trans;

  pure_trans = get_translation(identity, cell, symprec, 1);
  multi = pure_trans->size;
  /* Divisibility check rewritten with %; also guard multi == 0, which in
   * the original `(cell->size / multi)` would have divided by zero. */
  if (multi > 0 && cell->size % multi == 0) {
    debug_print("sym_get_pure_translation: pure_trans->size = %d\n", multi);
  } else {
    /* (a stray empty statement `;` sat here in the original) */
    warning_print("spglib: Finding pure translation failed (line %d, %s).\n", __LINE__, __FILE__);
    warning_print(" cell->size %d, multi %d\n", cell->size, multi);
  }
  return pure_trans;
}
/* Re-validate a set of pure translations against `cell` at tolerance
 * `symprec` and return only the surviving ones. */
VecDBL * sym_reduce_pure_translation(SPGCONST Cell * cell,
                                     const VecDBL * pure_trans,
                                     const double symprec)
{
  int idx, num_trans;
  Symmetry *sym_full, *sym_red;
  VecDBL *reduced;

  /* Wrap every pure translation as an (identity | t) operation ... */
  num_trans = pure_trans->size;
  sym_full = sym_alloc_symmetry(num_trans);
  for (idx = 0; idx < num_trans; idx++) {
    mat_copy_matrix_i3(sym_full->rot[idx], identity);
    mat_copy_vector_d3(sym_full->trans[idx], pure_trans->vec[idx]);
  }

  /* ... filter through the generic operation reducer ... */
  sym_red = reduce_operation(cell, sym_full, symprec);
  sym_free_symmetry(sym_full);

  /* ... and unwrap the translation parts of the survivors. */
  num_trans = sym_red->size;
  reduced = mat_alloc_VecDBL(num_trans);
  for (idx = 0; idx < num_trans; idx++) {
    mat_copy_vector_d3(reduced->vec[idx], sym_red->trans[idx]);
  }
  sym_free_symmetry(sym_red);
  return reduced;
}
/* Set the file-global tolerance (in degrees) used by is_identity_metric()
 * when comparing lattice-vector angles. A negative value selects the
 * symprec-derived criterion instead. */
void sym_set_angle_tolerance(double tolerance)
{
angle_tolerance = tolerance;
}
/* Return the current angle tolerance in degrees (negative means "derive
 * from symprec"). */
double sym_get_angle_tolerance(void)
{
return angle_tolerance;
}
/* 1) A primitive cell of the input cell is searched. */
/* 2) Pointgroup operations of the primitive cell are obtained. */
/* These are constrained by the input cell lattice pointgroup, */
/* i.e., even if the lattice of the primitive cell has higher */
/* symmetry than that of the input cell, it is not considered. */
/* 3) Spacegroup operations are searched for the primitive cell */
/* using the constrained point group operations. */
/* 4) The spacegroup operations for the primitive cell are */
/* transformed to those of original input cells, if the input cell */
/* was not a primitive cell. */
static Symmetry * get_operations(SPGCONST Cell *cell,
const double symprec)
{
int i, j, attempt;
double tolerance;
PointSymmetry lattice_sym;
Symmetry *symmetry, *symmetry_orig, *symmetry_reduced;
Primitive primitive;
debug_print("get_operations:\n");
symmetry_orig = NULL;
/* Step 1 (see header comment): point symmetry of the input lattice. */
lattice_sym = get_lattice_symmetry(cell, symprec);
if (lattice_sym.size == 0) {
debug_print("get_lattice_symmetry failed.\n");
goto end;
}
/* Step 2: search a primitive cell and its pure translations. */
primitive = prm_get_primitive_and_pure_translations(cell, symprec);
if (primitive.cell->size == 0) {
goto deallocate_and_end;
}
/* Re-express the lattice point group in the primitive-cell basis. */
lattice_sym = transform_pointsymmetry(&lattice_sym,
primitive.cell->lattice,
cell->lattice);
if (lattice_sym.size == 0) {
goto deallocate_and_end;
}
/* Step 3: space group operations of the primitive cell. */
symmetry = get_space_group_operations(&lattice_sym,
primitive.cell,
symprec);
/* More than 48 operations for a primitive cell means the tolerance is
too loose; repeatedly shrink it by REDUCE_RATE and re-reduce. */
if (symmetry->size > 48) {
tolerance = symprec;
for (attempt = 0; attempt < 100; attempt++) {
tolerance *= REDUCE_RATE;
warning_print("spglib: number of symmetry operations for primitive cell > 48 was found. (line %d, %s).\n", __LINE__, __FILE__);
warning_print("tolerance is reduced to %f\n", tolerance);
symmetry_reduced = reduce_operation(primitive.cell,
symmetry,
tolerance);
sym_free_symmetry(symmetry);
symmetry = symmetry_reduced;
if (symmetry_reduced->size > 48) {
;
} else {
break;
}
}
}
/* Step 4: map the primitive-cell operations back onto the original cell. */
symmetry_orig = recover_operations_original(symmetry,
primitive.pure_trans,
cell,
primitive.cell);
sym_free_symmetry(symmetry);
/* Reduce each translation component to the value nearest zero
(x minus its nearest integer). */
for (i = 0; i < symmetry_orig->size; i++) {
for (j = 0; j < 3; j++) {
symmetry_orig->trans[i][j] -= mat_Nint(symmetry_orig->trans[i][j]);
}
}
deallocate_and_end:
cel_free_cell(primitive.cell);
mat_free_VecDBL(primitive.pure_trans);
end:
/* On any failure path return an empty Symmetry rather than NULL. */
if (! symmetry_orig) {
symmetry_orig = sym_alloc_symmetry(0);
}
return symmetry_orig;
}
/* Keep only those operations of `symmetry` whose rotation part belongs to
 * the lattice point group at `symprec` AND whose full (rot | trans) pair
 * still maps every atom of `cell` onto an equivalent atom. */
static Symmetry * reduce_operation(SPGCONST Cell * cell,
SPGCONST Symmetry * symmetry,
const double symprec)
{
int i, j, num_sym;
Symmetry * sym_reduced;
PointSymmetry point_symmetry;
MatINT *rot;
VecDBL *trans;
debug_print("reduce_operation:\n");
point_symmetry = get_lattice_symmetry(cell, symprec);
/* Worst case every operation survives, so size the scratch arrays to
symmetry->size. */
rot = mat_alloc_MatINT(symmetry->size);
trans = mat_alloc_VecDBL(symmetry->size);
num_sym = 0;
for (i = 0; i < point_symmetry.size; i++) {
for (j = 0; j < symmetry->size; j++) {
if (mat_check_identity_matrix_i3(point_symmetry.rot[i],
symmetry->rot[j])) {
if (is_overlap_all_atoms(symmetry->trans[j],
symmetry->rot[j],
cell,
symprec,
0)) {
mat_copy_matrix_i3(rot->mat[num_sym], symmetry->rot[j]);
mat_copy_vector_d3(trans->vec[num_sym], symmetry->trans[j]);
num_sym++;
}
}
}
}
/* Copy the survivors into a right-sized Symmetry. */
sym_reduced = sym_alloc_symmetry(num_sym);
for (i = 0; i < num_sym; i++) {
mat_copy_matrix_i3(sym_reduced->rot[i], rot->mat[i]);
mat_copy_vector_d3(sym_reduced->trans[i], trans->vec[i]);
}
mat_free_MatINT(rot);
mat_free_VecDBL(trans);
debug_print(" num_sym %d -> %d\n", symmetry->size, num_sym);
return sym_reduced;
}
/* Look for the translations which satisfy the input symmetry operation. */
/* This function is heaviest in this code. */
static VecDBL * get_translation(SPGCONST int rot[3][3],
SPGCONST Cell *cell,
const double symprec,
const int is_identity)
{
int i, j, min_atom_index, num_trans = 0;
int *is_found;
double origin[3];
VecDBL *trans;
#ifdef _OPENMP
int num_min_type_atoms;
int *min_type_atoms;
double vec[3];
#endif
/* is_found[i] == 1 marks atom i as the endpoint of a valid translation. */
is_found = (int*) malloc(sizeof(int)*cell->size);
for (i = 0; i < cell->size; i++) {
is_found[i] = 0;
}
/* Look for the atom index with least number of atoms within same type */
min_atom_index = get_index_with_least_atoms(cell);
/* Set min_atom_index as the origin to measure the distance between atoms. */
mat_multiply_matrix_vector_id3(origin, rot, cell->position[min_atom_index]);
#ifdef _OPENMP
/* Below this size the serial scan is cheaper than spawning threads. */
if (cell->size < NUM_ATOMS_CRITERION_FOR_OPENMP) {
search_translation_part(is_found,
cell,
rot,
min_atom_index,
origin,
symprec,
is_identity);
} else {
/* Collect indices of atoms with the type where the minimum number */
/* of atoms belong. */
min_type_atoms = (int*) malloc(sizeof(int)*cell->size);
num_min_type_atoms = 0;
for (i = 0; i < cell->size; i++) {
if (cell->types[i] == cell->types[min_atom_index]) {
min_type_atoms[num_min_type_atoms] = i;
num_min_type_atoms++;
}
}
/* Each candidate translation is tested independently and each iteration
writes a distinct is_found entry, so no synchronization is needed. */
#pragma omp parallel for private(j, vec)
for (i = 0; i < num_min_type_atoms; i++) {
for (j = 0; j < 3; j++) {
vec[j] = cell->position[min_type_atoms[i]][j] - origin[j];
}
if (is_overlap_all_atoms(vec,
rot,
cell,
symprec,
is_identity)) {
is_found[min_type_atoms[i]] = 1;
}
}
free(min_type_atoms);
}
#else
search_translation_part(is_found,
cell,
rot,
min_atom_index,
origin,
symprec,
is_identity);
#endif
/* Count the survivors, then pack their translations into a VecDBL. */
for (i = 0; i < cell->size; i++) {
num_trans += is_found[i];
}
trans = mat_alloc_VecDBL(num_trans);
num_trans = 0;
for (i = 0; i < cell->size; i++) {
if (is_found[i]) {
for (j = 0; j < 3; j++) {
trans->vec[num_trans][j] = cell->position[i][j] - origin[j];
}
num_trans++;
}
}
free(is_found);
is_found = NULL;
return trans;
}
/* Serial scan used by get_translation(): every atom of the reference type
 * yields a candidate translation (atom position minus rotated origin);
 * atoms whose candidate maps the whole cell onto itself are flagged in
 * lat_point_atoms[]. */
static void search_translation_part(int lat_point_atoms[],
                                    SPGCONST Cell * cell,
                                    SPGCONST int rot[3][3],
                                    const int min_atom_index,
                                    const double origin[3],
                                    const double symprec,
                                    const int is_identity)
{
  int atom, axis;
  double cand_trans[3];

  for (atom = 0; atom < cell->size; atom++) {
    if (cell->types[atom] == cell->types[min_atom_index]) {
      for (axis = 0; axis < 3; axis++) {
        cand_trans[axis] = cell->position[atom][axis] - origin[axis];
      }
      if (is_overlap_all_atoms(cand_trans,
                               rot,
                               cell,
                               symprec,
                               is_identity)) {
        lat_point_atoms[atom] = 1;
      }
    }
  }
}
/* Return 1 when applying (rot | trans) maps every atom of `cell` onto an
 * atom of the same type within `symprec` (minimum-image convention in
 * fractional coordinates); 0 otherwise. */
static int is_overlap_all_atoms(const double trans[3],
                                SPGCONST int rot[3][3],
                                SPGCONST Cell * cell,
                                const double symprec,
                                const int is_identity)
{
  int i, j, k, overlap_found;
  double tol2;
  double moved[3], diff[3];

  tol2 = symprec * symprec;
  for (i = 0; i < cell->size; i++) {
    /* Apply the operation to atom i; the identity rotation is
       special-cased for speed. */
    if (is_identity) {
      for (j = 0; j < 3; j++) {
        moved[j] = cell->position[i][j] + trans[j];
      }
    } else {
      mat_multiply_matrix_vector_id3(moved, rot, cell->position[i]);
      for (j = 0; j < 3; j++) {
        moved[j] += trans[j];
      }
    }

    /* Search for a same-type atom overlapping the moved position.
       (cel_is_overlap could be used, but this is rewritten inline for
       tuning, as in the original.) */
    overlap_found = 0;
    for (j = 0; j < cell->size; j++) {
      if (cell->types[i] != cell->types[j]) {
        continue;
      }
      for (k = 0; k < 3; k++) {
        diff[k] = moved[k] - cell->position[j][k];
        diff[k] -= mat_Nint(diff[k]);
      }
      mat_multiply_matrix_vector_d3(diff, cell->lattice, diff);
      if (diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2] < tol2) {
        overlap_found = 1;
        break;
      }
    }

    if (! overlap_found) {
      return 0;  /* some atom has no image: not a symmetry operation */
    }
  }
  return 1;  /* every atom overlaps an equivalent one */
}
/* Return the index of an atom belonging to the chemical type with the
 * fewest atoms in the cell (used as the cheapest reference for the
 * translation search). */
static int get_index_with_least_atoms(const Cell *cell)
{
  int i, j, min, min_index;
  int *mapping;

  /* Guard: for an empty cell the original read mapping[0] from a
   * zero-byte allocation (undefined behavior). Index 0 is the only sane
   * answer in that case. */
  if (cell->size < 1) {
    return 0;
  }
  mapping = (int *) malloc(sizeof(int) * cell->size);
  for (i = 0; i < cell->size; i++) {
    mapping[i] = 0;
  }
  /* Count each type's atoms at the index of its first occurrence; other
   * indices stay 0 and are skipped below. */
  for (i = 0; i < cell->size; i++) {
    for (j = 0; j < cell->size; j++) {
      if (cell->types[i] == cell->types[j]) {
        mapping[j]++;
        break;
      }
    }
  }
  min = mapping[0];
  min_index = 0;
  for (i = 0; i < cell->size; i++) {
    if (min > mapping[i] && mapping[i] > 0) {
      min = mapping[i];
      min_index = i;
    }
  }
  free(mapping);
  mapping = NULL;
  return min_index;
}
/* For each point group operation of `lattice_sym`, find all compatible
 * translations in `cell` and assemble the full list of
 * (rotation | translation) pairs. */
static Symmetry *
get_space_group_operations(SPGCONST PointSymmetry *lattice_sym,
                           SPGCONST Cell *cell,
                           const double symprec)
{
  /* (unused local `k` from the original declaration removed) */
  int i, j, num_sym, total_num_sym;
  VecDBL **trans;
  Symmetry *symmetry;

  debug_print("get_space_group_operations:\n");
  /* trans[i] holds the translations compatible with rotation i. */
  trans = (VecDBL**) malloc(sizeof(VecDBL*) * lattice_sym->size);
  total_num_sym = 0;
  for (i = 0; i < lattice_sym->size; i++) {
    trans[i] = get_translation(lattice_sym->rot[i], cell, symprec, 0);
    total_num_sym += trans[i]->size;
  }

  /* Flatten into one Symmetry: rotation i is repeated once per compatible
   * translation. */
  symmetry = sym_alloc_symmetry(total_num_sym);
  num_sym = 0;
  for (i = 0; i < lattice_sym->size; i++) {
    for (j = 0; j < trans[i]->size; j++) {
      mat_copy_vector_d3(symmetry->trans[num_sym + j], trans[i]->vec[j]);
      mat_copy_matrix_i3(symmetry->rot[num_sym + j], lattice_sym->rot[i]);
    }
    num_sym += trans[i]->size;
  }

  for (i = 0; i < lattice_sym->size; i++) {
    mat_free_VecDBL(trans[i]);
  }
  free(trans);
  trans = NULL;
  return symmetry;
}
/* Transform the operations found for the primitive cell back into the
 * basis of the original input cell, replicating each one once per pure
 * translation (so the result has symmetry->size * pure_trans->size
 * operations). */
static Symmetry * recover_operations_original(SPGCONST Symmetry *symmetry,
const VecDBL * pure_trans,
SPGCONST Cell *cell,
SPGCONST Cell *primitive)
{
int i, j, k, multi;
double inv_prim_lat[3][3], drot[3][3], trans_mat[3][3], trans_mat_inv[3][3];
Symmetry *symmetry_orig, *sym_tmp;
debug_print("recover_operations_original:\n");
multi = pure_trans->size;
sym_tmp = sym_alloc_symmetry(symmetry->size);
symmetry_orig = sym_alloc_symmetry(symmetry->size * multi);
/* trans_mat maps original-cell coordinates into the primitive basis. */
mat_inverse_matrix_d3(inv_prim_lat, primitive->lattice, 0);
mat_multiply_matrix_d3(trans_mat, inv_prim_lat, cell->lattice);
mat_inverse_matrix_d3(trans_mat_inv, trans_mat, 0);
/* Work on a copy so the input symmetry stays untouched. */
for(i = 0; i < symmetry->size; i++) {
mat_copy_matrix_i3(sym_tmp->rot[i], symmetry->rot[i]);
mat_copy_vector_d3(sym_tmp->trans[i], symmetry->trans[i]);
}
/* Similarity-transform rotation and translation of each operation into
the original-cell basis. */
for(i = 0; i < symmetry->size; i++) {
mat_cast_matrix_3i_to_3d(drot, sym_tmp->rot[i]);
mat_get_similar_matrix_d3(drot, drot, trans_mat, 0);
mat_cast_matrix_3d_to_3i(sym_tmp->rot[i], drot);
mat_multiply_matrix_vector_d3(sym_tmp->trans[i],
trans_mat_inv,
sym_tmp->trans[i]);
}
/* Replicate every operation once per pure translation. */
for(i = 0; i < symmetry->size; i++) {
for(j = 0; j < multi; j++) {
mat_copy_matrix_i3(symmetry_orig->rot[i * multi + j], sym_tmp->rot[i]);
for (k = 0; k < 3; k++) {
symmetry_orig->trans[i * multi + j][k] =
sym_tmp->trans[i][k] + pure_trans->vec[j][k];
}
}
}
sym_free_symmetry(sym_tmp);
return symmetry_orig;
}
/* Enumerate the point symmetry of the lattice: build every unimodular
 * matrix from the 26 candidate relative axes and keep those that leave
 * the metric tensor invariant. Returns a PointSymmetry with size 0 on
 * failure. */
static PointSymmetry get_lattice_symmetry(SPGCONST Cell *cell,
                                          const double symprec)
{
  int i, j, k, num_sym;
  int axes[3][3];
  double lattice[3][3], min_lattice[3][3];
  double metric[3][3], metric_orig[3][3];
  PointSymmetry lattice_sym;

  debug_print("get_lattice_symmetry:\n");
  if (! lat_smallest_lattice_vector(min_lattice,
                                    cell->lattice,
                                    symprec)) {
    goto err;
  }
  mat_get_metric(metric_orig, min_lattice);
  num_sym = 0;
  for (i = 0; i < 26; i++) {
    for (j = 0; j < 26; j++) {
      for (k = 0; k < 26; k++) {
        set_axes(axes, i, j, k);
        if (! ((mat_get_determinant_i3(axes) == 1) ||
               (mat_get_determinant_i3(axes) == -1))) {
          continue;
        }
        mat_multiply_matrix_di3(lattice, min_lattice, axes);
        mat_get_metric(metric, lattice);
        if (is_identity_metric(metric, metric_orig, symprec)) {
          /* BUG FIX: a crystallographic point group has at most 48
           * operations. The original wrote rot[num_sym] first and only
           * then tested `num_sym > 48`, so on finding a 49th operation it
           * stored one element past the 48 legitimate slots before
           * bailing out (assumes rot[] holds 48 entries -- confirm in
           * the PointSymmetry definition). Check BEFORE storing. */
          if (num_sym >= 48) {
            warning_print("spglib: Too many lattice symmetries was found.\n");
            warning_print(" Tolerance may be too large ");
            warning_print("(line %d, %s).\n", __LINE__, __FILE__);
            goto err;
          }
          mat_copy_matrix_i3(lattice_sym.rot[num_sym], axes);
          num_sym++;
        }
      }
    }
  }
  lattice_sym.size = num_sym;
  /* Express the operations back in the original (non-reduced) basis. */
  return transform_pointsymmetry(&lattice_sym,
                                 cell->lattice,
                                 min_lattice);

err:
  lattice_sym.size = 0;
  return lattice_sym;
}
/* Return 1 when the rotated metric tensor matches the original within
 * tolerance: the three basis-vector lengths must agree within symprec and
 * the three pairwise angles either within the file-global angle_tolerance
 * (degrees, when positive) or via a symprec-derived displacement
 * criterion. Returns 0 otherwise. */
static int is_identity_metric(SPGCONST double metric_rotated[3][3],
SPGCONST double metric_orig[3][3],
const double symprec)
{
int i, j, k;
int elem_sets[3][2] = {{0, 1},
{0, 2},
{1, 2}};
double cos1, cos2, x, length_ave2, sin_dtheta2;
double length_orig[3], length_rot[3];
/* Compare basis-vector lengths (sqrt of the metric diagonal). */
for (i = 0; i < 3; i++) {
length_orig[i] = sqrt(metric_orig[i][i]);
length_rot[i] = sqrt(metric_rotated[i][i]);
if (mat_Dabs(length_orig[i] - length_rot[i]) > symprec) {
goto fail;
}
}
/* Compare the three inter-vector angles. */
for (i = 0; i < 3; i++) {
j = elem_sets[i][0];
k = elem_sets[i][1];
if (angle_tolerance > 0) {
if (mat_Dabs(get_angle(metric_orig, j, k) -
get_angle(metric_rotated, j, k)) > angle_tolerance) {
goto fail;
}
} else {
/* dtheta = arccos(cos(theta1) - arccos(cos(theta2))) */
/* = arccos(c1) - arccos(c2) */
/* = arccos(c1c2 + sqrt((1-c1^2)(1-c2^2))) */
/* sin(dtheta) = sin(arccos(x)) = sqrt(1 - x^2) */
cos1 = metric_orig[j][k] / length_orig[j] / length_orig[k];
cos2 = metric_rotated[j][k] / length_rot[j] / length_rot[k];
x = cos1 * cos2 + sqrt(1 - cos1 * cos1) * sqrt(1 - cos2 * cos2);
sin_dtheta2 = 1 - x * x;
length_ave2 = ((length_orig[j] + length_rot[j]) *
(length_orig[k] + length_rot[k])) / 4;
/* Reject when the arc swept by the angle difference exceeds the
displacement tolerance (skip the test for tiny dtheta). */
if (sin_dtheta2 > 1e-12) {
if (sin_dtheta2 * length_ave2 > symprec * symprec) {
goto fail;
}
}
}
}
return 1;
fail:
return 0;
}
/* Angle, in degrees, between basis vectors i and j of the given metric
 * tensor: cos(theta) = G_ij / (|a_i| |a_j|). The original division order
 * is preserved for bit-identical results. */
static double get_angle(SPGCONST double metric[3][3],
                        const int i,
                        const int j)
{
  const double norm_i = sqrt(metric[i][i]);
  const double norm_j = sqrt(metric[j][j]);
  return acos(metric[i][j] / norm_i / norm_j) / PI * 180;
}
/* Re-express the point symmetry operations (given in `original_lattice`
 * coordinates) in the basis of `new_lattice`. Operations that become
 * non-integer in the new basis are dropped; a non-unimodular result is an
 * error (size 0 returned). */
static PointSymmetry
transform_pointsymmetry(SPGCONST PointSymmetry * lat_sym_orig,
                        SPGCONST double new_lattice[3][3],
                        SPGCONST double original_lattice[3][3])
{
  int i, size;
  double trans_mat[3][3], inv_mat[3][3], drot[3][3];
  PointSymmetry lat_sym_new;

  mat_inverse_matrix_d3(inv_mat, original_lattice, 0);
  mat_multiply_matrix_d3(trans_mat, inv_mat, new_lattice);
  size = 0;
  for (i = 0; i < lat_sym_orig->size; i++) {
    mat_cast_matrix_3i_to_3d(drot, lat_sym_orig->rot[i]);
    mat_get_similar_matrix_d3(drot, drot, trans_mat, 0);
    /* new_lattice may have lower point symmetry than original_lattice.*/
    /* The operations that have non-integer elements are not counted. */
    if (mat_is_int_matrix(drot, mat_Dabs(mat_get_determinant_d3(trans_mat)) / 10)) {
      mat_cast_matrix_3d_to_3i(lat_sym_new.rot[size], drot);
      /* BUG FIX: the original test `! abs(det) == 1` parses as
       * `(!abs(det)) == 1` because unary ! binds tighter than ==, so it
       * only fired when det == 0. The intended unimodularity check is
       * |det| != 1. */
      if (abs(mat_get_determinant_i3(lat_sym_new.rot[size])) != 1) {
        warning_print("spglib: A point symmetry operation is not unimodular.");
        warning_print("(line %d, %s).\n", __LINE__, __FILE__);
        goto err;
      }
      size++;
    }
  }

#ifdef SPGWARNING
  if (! (lat_sym_orig->size == size)) {
    warning_print("spglib: Some of point symmetry operations were dropped.");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
  }
#endif
  lat_sym_new.size = size;
  return lat_sym_new;

err:
  lat_sym_new.size = 0;
  return lat_sym_new;
}
/* Build a trial transformation matrix whose three columns are the
 * candidate relative axes selected by indices a1, a2 and a3. */
static void set_axes(int axes[3][3],
                     const int a1, const int a2, const int a3)
{
  int row;

  for (row = 0; row < 3; row++) {
    axes[row][0] = relative_axes[a1][row];
    axes[row][1] = relative_axes[a2][row];
    axes[row][2] = relative_axes[a3][row];
  }
}
|
GB_unaryop__identity_int64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int64_uint64
// op(A') function: GB_tran__identity_int64_uint64
// C type: int64_t
// A type: uint64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
// type of the input values Ax
#define GB_ATYPE \
uint64_t
// type of the output values Cx
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// entry p of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with a typecast: Cx [p] = (int64_t) Ax [p]
// for all anz entries, split statically across nthreads OpenMP threads.
GrB_Info GB_unop__identity_int64_uint64
(
int64_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// iterations are independent, so a static schedule is sufficient
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose template (GB_unaryop_transpose.c)
// performs the work, instantiated through the GB_* macros defined in this
// file.
GrB_Info GB_tran__identity_int64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
variational_distance_calculation_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Ruben Zorrilla
//
//
#if !defined(KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED )
#define KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "containers/model.h"
#include "includes/kratos_flags.h"
#include "elements/distance_calculation_element_simplex.h"
#include "linear_solvers/linear_solver.h"
#include "processes/process.h"
#include "modeler/connectivity_preserve_modeler.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "utilities/variable_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/**Takes a model part full of SIMPLICIAL ELEMENTS (triangles and tetras) and recomputes a signed distance function,
maintaining as much as possible the position of the zero of the function prior to the call.
This is achieved by minimizing the functional ( 1 - norm( gradient( distance ) ) )**2
with the restriction that "distance" is a finite element function.
*/
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
class VariationalDistanceCalculationProcess : public Process
{
public:
KRATOS_DEFINE_LOCAL_FLAG(PERFORM_STEP1);
KRATOS_DEFINE_LOCAL_FLAG(DO_EXPENSIVE_CHECKS);
KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE);
///@name Type Definitions
///@{
typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
typedef typename SchemeType::Pointer SchemePointerType;
typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverPointerType;
typedef SolvingStrategy< TSparseSpace, TDenseSpace, TLinearSolver > SolvingStrategyType;
///@}
///@name Pointer Definitions
/// Pointer definition of VariationalDistanceCalculationProcess
KRATOS_CLASS_POINTER_DEFINITION(VariationalDistanceCalculationProcess);
///@}
///@name Life Cycle
///@{
/**This process recomputes the distance function maintaining the zero of the existing distance distribution.
* For this reason the DISTANCE should be initialized to values distinct from zero in at least some portions of the domain.
* Alternatively, the DISTANCE shall be fixed to zero at least on some nodes, and the process will compute a positive distance
* respecting that zero.
* @param rBaseModelPart - the model part on top of which the calculation will be performed
* @param pLinearSolver - linear solver to be used internally
* @param MaxIterations - maximum number of iterations to be employed in the nonlinear optimization process.
* - can also be set to 0 if a (very) rough approximation is enough
*
* EXAMPLE OF USAGE FROM PYTHON:
*
class distance_linear_solver_settings:
solver_type = "AMGCL"
tolerance = 1E-3
max_iteration = 200
scaling = False
krylov_type = "CG"
smoother_type = "SPAI0"
verbosity = 0
import linear_solver_factory
distance_linear_solver = linear_solver_factory.ConstructSolver(distance_linear_solver_settings)
max_iterations=1
distance_calculator = VariationalDistanceCalculationProcess2D(fluid_model_part, distance_linear_solver, max_iterations)
distance_calculator.Execute()
*/
/// Standard constructor: validates the input, builds the auxiliary distance
/// ModelPart and wires a default block builder-and-solver around the given
/// linear solver.
VariationalDistanceCalculationProcess(
ModelPart& rBaseModelPart,
typename TLinearSolver::Pointer pLinearSolver,
unsigned int MaxIterations = 10,
Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
std::string AuxPartName = "RedistanceCalculationPart" )
:
mDistancePartIsInitialized(false),
mMaxIterations(MaxIterations),
mrModel( rBaseModelPart.GetModel() ),
mrBaseModelPart (rBaseModelPart),
mOptions( Options ),
mAuxModelPartName( AuxPartName )
{
KRATOS_TRY
ValidateInput();
// Generate an auxiliary model part and populate it by elements of type DistanceCalculationElementSimplex
ReGenerateDistanceModelPart(rBaseModelPart);
// Default builder-and-solver (the other constructor accepts a custom one)
auto p_builder_solver = Kratos::make_shared<ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> >(pLinearSolver);
InitializeSolutionStrategy(pLinearSolver, p_builder_solver);
KRATOS_CATCH("")
}
/// Constructor with custom Builder And Solver
/** To be used in the trilinos version, since the trilinos builder and
* solver needs additional data (the EpetraComm).
* @param rBaseModelPart Reference ModelPart for distance calculation.
* @param pLinearSolver Linear solver for the distance system.
* @param MaxIterations Maximum number of non-linear optimization iterations.
* @param Options Configuration flags for the procedure.
* @param AuxPartName Name to be used for the internal distance calculation ModelPart.
*/
VariationalDistanceCalculationProcess(
ModelPart& rBaseModelPart,
typename TLinearSolver::Pointer pLinearSolver,
BuilderSolverPointerType pBuilderAndSolver,
unsigned int MaxIterations = 10,
Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
std::string AuxPartName = "RedistanceCalculationPart" )
:
mDistancePartIsInitialized(false),
mMaxIterations(MaxIterations),
mrModel( rBaseModelPart.GetModel() ),
mrBaseModelPart (rBaseModelPart),
mOptions( Options ),
mAuxModelPartName( AuxPartName )
{
KRATOS_TRY
ValidateInput();
// Generate an auxiliary model part and populate it by elements of type DistanceCalculationElementSimplex
ReGenerateDistanceModelPart(rBaseModelPart);
// Use the caller-supplied builder-and-solver (e.g. the Trilinos one, which
// needs the EpetraComm) instead of constructing a default block one.
InitializeSolutionStrategy(pLinearSolver, pBuilderAndSolver);
KRATOS_CATCH("")
}
/// Destructor.
/// Destructor. Delegates cleanup to Clear().
~VariationalDistanceCalculationProcess() override
{
    Clear();
} // note: redundant trailing ';' after the function body removed
///@}
///@name Operators
///@{
/// Function-call operator: lets the process be used as a functor by
/// forwarding to Execute().
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
// Recomputes a signed distance field on the base ModelPart in two steps:
//   Step 1: fix the DISTANCE of nodes belonging to elements cut by the zero
//           level-set (using exact geometric distances) and solve a Poisson-like
//           problem whose source depends on the sign of the existing field;
//   Step 2: iteratively minimize the Eikonal-type residual (|grad d| = 1).
// FLAG_VARIABLE is used as a transportable fixity marker (-1 fixed / 1 free)
// so that fixity can be synchronized across MPI partitions.
void Execute() override
{
    KRATOS_TRY;
    if(mDistancePartIsInitialized == false){
        ReGenerateDistanceModelPart(mrBaseModelPart);
    }
    ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
    // TODO: check flag PERFORM_STEP1
    // Step1 - solve a poisson problem with a source term which depends on the sign of the existing distance function
    r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,1);
    // Unfix the distances
    const int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
    #pragma omp parallel for
    for(int i_node = 0; i_node < nnodes; ++i_node){
        auto it_node = r_distance_model_part.NodesBegin() + i_node;
        double& d = it_node->FastGetSolutionStepValue(DISTANCE);
        double& fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);
        // Free the DISTANCE values (fix_flag 1.0 marks "free")
        fix_flag = 1.0;
        it_node->Free(DISTANCE);
        // Save the original distances in the non-historical database (sign is recovered from here later)
        it_node->SetValue(DISTANCE, d);
        if(d == 0){
            // Exactly-zero nodes sit on the interface: pin them to a tiny positive value
            d = 1.0e-15;
            fix_flag = -1.0;
            it_node->Fix(DISTANCE);
        } else {
            if(d > 0.0){
                d = 1.0e15; // Set to a large number, to make sure that that the minimal distance is computed according to CaculateTetrahedraDistances
            } else {
                d = -1.0e15;
            }
        }
    }
    const int nelem = static_cast<int>(r_distance_model_part.NumberOfElements());
    #pragma omp parallel for
    for(int i_elem = 0; i_elem < nelem; ++i_elem){
        auto it_elem = r_distance_model_part.ElementsBegin() + i_elem;
        array_1d<double,TDim+1> distances;
        auto& geom = it_elem->GetGeometry();
        // Work on the saved (original) nodal distances, not the modified historical ones
        for(unsigned int i=0; i<TDim+1; i++){
            distances[i] = geom[i].GetValue(DISTANCE);
        }
        const array_1d<double,TDim+1> original_distances = distances;
        // The element is cut by the interface
        if(this->IsSplit(distances)){
            // Compute the unsigned distance using GeometryUtils
            if (mOptions.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE)) {
                GeometryUtils::CalculateExactDistancesToPlane(geom, distances);
            }
            else {
                if(TDim==3){
                    GeometryUtils::CalculateTetrahedraDistances(geom, distances);
                }
                else {
                    GeometryUtils::CalculateTriangleDistances(geom, distances);
                }
            }
            // Assign the sign using the original distance values
            for(unsigned int i = 0; i < TDim+1; ++i){
                if(original_distances[i] < 0){
                    distances[i] = -distances[i];
                }
            }
            // Write back the minimal-magnitude distance per node and fix it
            // (nodes are shared between elements, hence the lock)
            for(unsigned int i = 0; i < TDim+1; ++i){
                double &d = geom[i].FastGetSolutionStepValue(DISTANCE);
                double &fix_flag = geom[i].FastGetSolutionStepValue(FLAG_VARIABLE);
                geom[i].SetLock();
                if(std::abs(d) > std::abs(distances[i])){
                    d = distances[i];
                }
                fix_flag = -1.0;
                geom[i].Fix(DISTANCE);
                geom[i].UnSetLock();
            }
        }
    }
    // SHALL WE SYNCHRONIZE SOMETHING IN HERE?¿?¿??¿ WE'VE CHANGED THE NODAL DISTANCE VALUES FROM THE ELEMENTS...
    this->SynchronizeFixity();
    this->SynchronizeDistance();
    // Compute the maximum and minimum distance for the fixed nodes
    double max_dist = 0.0;
    double min_dist = 0.0;
    for(int i_node = 0; i_node < nnodes; ++i_node){
        auto it_node = r_distance_model_part.NodesBegin() + i_node;
        if(it_node->IsFixed(DISTANCE)){
            const double& d = it_node->FastGetSolutionStepValue(DISTANCE);
            if(d > max_dist){
                max_dist = d;
            }
            if(d < min_dist){
                min_dist = d;
            }
        }
    }
    // Synchronize the maximum and minimum distance values across MPI partitions
    const auto &r_communicator = r_distance_model_part.GetCommunicator().GetDataCommunicator();
    max_dist = r_communicator.MaxAll(max_dist);
    min_dist = r_communicator.MinAll(min_dist);
    // Assign the max dist to all of the non-fixed positive nodes
    // and the minimum one to the non-fixed negatives
    #pragma omp parallel for
    for(int i_node = 0; i_node < nnodes; ++i_node){
        auto it_node = r_distance_model_part.NodesBegin() + i_node;
        if(!it_node->IsFixed(DISTANCE)){
            double& d = it_node->FastGetSolutionStepValue(DISTANCE);
            if(d>0){
                d = max_dist;
            } else {
                d = min_dist;
            }
        }
    }
    // Step 1 solve
    mpSolvingStrategy->Solve();
    // Step2 - minimize the target residual
    r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,2);
    for(unsigned int it = 0; it<mMaxIterations; it++){
        mpSolvingStrategy->Solve();
    }
    // Unfix the distances so the base ModelPart is left without artificial constraints
    #pragma omp parallel for
    for(int i_node = 0; i_node < nnodes; ++i_node){
        auto it_node = (r_distance_model_part.NodesBegin()) + i_node;
        it_node->Free(DISTANCE);
    }
    KRATOS_CATCH("")
}
// Removes the auxiliary distance ModelPart from the Model and resets the
// solving strategy, so the next Execute() regenerates everything from scratch.
// Called from the destructor, so it must be safe on a partially constructed
// instance (e.g. if the constructor threw before the strategy was created).
virtual void Clear()
{
    if(mrModel.HasModelPart( mAuxModelPartName ))
        mrModel.DeleteModelPart( mAuxModelPartName );
    mDistancePartIsInitialized = false;
    // Guard against a null strategy pointer: the destructor invokes Clear()
    // even when InitializeSolutionStrategy() never ran.
    if (mpSolvingStrategy)
        mpSolvingStrategy->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    return std::string("VariationalDistanceCalculationProcess");
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "VariationalDistanceCalculationProcess";
}
/// Print object's data. Intentionally a no-op: there is no extra state to report.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
bool mDistancePartIsInitialized;      // true once the auxiliary distance ModelPart has been generated
unsigned int mMaxIterations;          // number of step-2 residual-minimization solves
Model& mrModel;                       // model owning both the base and the auxiliary parts
ModelPart& mrBaseModelPart;           // ModelPart on which the distance field is recomputed
Flags mOptions;                       // configuration flags (e.g. CALCULATE_EXACT_DISTANCES_TO_PLANE)
std::string mAuxModelPartName;        // name of the internal redistancing ModelPart
typename SolvingStrategyType::UniquePointer mpSolvingStrategy; // linear strategy used in both steps
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
// Validates the base ModelPart: correct simplex geometry for TDim, globally
// non-empty node/element sets (checked with an MPI sum so empty local
// partitions are allowed), and presence of the required nodal variables.
void ValidateInput()
{
    const DataCommunicator& r_comm = mrBaseModelPart.GetCommunicator().GetDataCommunicator();
    int num_elements = mrBaseModelPart.NumberOfElements();
    int num_nodes = mrBaseModelPart.NumberOfNodes();
    if (num_elements > 0)
    {
        // Only the first element's geometry family is inspected; the mesh is
        // assumed homogeneous in element type.
        const auto geometry_family = mrBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily();
        KRATOS_ERROR_IF( (TDim == 2) && (geometry_family != GeometryData::Kratos_Triangle) )
            << "In 2D the element type is expected to be a triangle." << std::endl;
        KRATOS_ERROR_IF( (TDim == 3) && (geometry_family != GeometryData::Kratos_Tetrahedra) )
            << "In 3D the element type is expected to be a tetrahedron" << std::endl;
    }
    KRATOS_ERROR_IF(r_comm.SumAll(num_nodes) == 0) << "The model part has no nodes." << std::endl;
    KRATOS_ERROR_IF(r_comm.SumAll(num_elements) == 0) << "The model Part has no elements." << std::endl;
    // Check that required nodal variables are present
    VariableUtils().CheckVariableExists<Variable<double > >(DISTANCE, mrBaseModelPart.Nodes());
    VariableUtils().CheckVariableExists<Variable<double > >(FLAG_VARIABLE, mrBaseModelPart.Nodes());
}
// Builds the linear solving strategy (static scheme + linear strategy) on the
// auxiliary distance ModelPart and runs its consistency Check().
// @param pLinearSolver Linear solver used for the distance system.
// @param pBuilderAndSolver Builder-and-solver (may carry MPI-specific data).
void InitializeSolutionStrategy(
    typename TLinearSolver::Pointer pLinearSolver,
    BuilderSolverPointerType pBuilderAndSolver)
{
    // Generate a linear strategy
    auto p_scheme = Kratos::make_shared< ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace,TDenseSpace > >();
    ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
    bool CalculateReactions = false;
    bool ReformDofAtEachIteration = false;
    bool CalculateNormDxFlag = false;
    mpSolvingStrategy = Kratos::make_unique<ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver> >(
        r_distance_model_part,
        p_scheme,
        pLinearSolver,
        pBuilderAndSolver,
        CalculateReactions,
        ReformDofAtEachIteration,
        CalculateNormDxFlag);
    // TODO: check flag DO_EXPENSIVE_CHECKS
    mpSolvingStrategy->Check();
}
// (Re)creates the auxiliary distance ModelPart: same connectivity as the base
// part but with DistanceCalculationElementSimplex elements, and marks boundary
// nodes (from the base part's conditions) with the BOUNDARY flag.
virtual void ReGenerateDistanceModelPart(ModelPart& rBaseModelPart)
{
    KRATOS_TRY
    if(mrModel.HasModelPart( mAuxModelPartName ))
        mrModel.DeleteModelPart( mAuxModelPartName );
    // Ensure that the nodes have distance as a DOF
    VariableUtils().AddDof<Variable<double> >(DISTANCE, rBaseModelPart);
    // Generate
    ModelPart& r_distance_model_part = mrModel.CreateModelPart( mAuxModelPartName );
    Element::Pointer p_distance_element = Kratos::make_intrusive<DistanceCalculationElementSimplex<TDim> >();
    ConnectivityPreserveModeler modeler;
    modeler.GenerateModelPart(rBaseModelPart, r_distance_model_part, *p_distance_element);
    // Using the conditions to mark the boundary with the flag boundary
    // Note that we DO NOT add the conditions to the model part
    VariableUtils().SetFlag<ModelPart::NodesContainerType>(BOUNDARY, false, r_distance_model_part.Nodes());
    // Note that above we have assigned the same geometry. Thus the flag is
    // set in the distance model part despite we are iterating the base one
    for (auto it_cond = rBaseModelPart.ConditionsBegin(); it_cond != rBaseModelPart.ConditionsEnd(); ++it_cond){
        Geometry< Node<3> >& geom = it_cond->GetGeometry();
        for(unsigned int i=0; i<geom.size(); i++){
            geom[i].Set(BOUNDARY,true);
        }
    }
    // Make the flag consistent across MPI interfaces (logical OR)
    rBaseModelPart.GetCommunicator().SynchronizeOrNodalFlags(BOUNDARY);
    mDistancePartIsInitialized = true;
    KRATOS_CATCH("")
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Returns true when the nodal distance values change sign over the element,
// i.e. the zero level-set (interface) crosses it. A value of exactly zero
// is counted on the positive side.
bool IsSplit(const array_1d<double,TDim+1> &rDistances){
    unsigned int n_pos = 0, n_neg = 0;
    for (unsigned int i = 0; i < TDim + 1; ++i) {
        if (rDistances[i] >= 0) {
            ++n_pos;
        } else {
            ++n_neg;
        }
    }
    return (n_pos > 0) && (n_neg > 0);
}
// In MPI runs, makes the DISTANCE values consistent on partition interfaces:
// the minimum absolute value wins, and the sign is restored from the original
// distances saved in the non-historical database. No-op in serial.
void SynchronizeDistance(){
    ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
    auto &r_communicator = r_distance_model_part.GetCommunicator();
    // Only required in the MPI case
    if(r_communicator.TotalProcesses() != 1){
        int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
        // Set the distance absolute value
        #pragma omp parallel for
        for(int i_node = 0; i_node < nnodes; ++i_node){
            auto it_node = r_distance_model_part.NodesBegin() + i_node;
            it_node->FastGetSolutionStepValue(DISTANCE) = std::abs(it_node->FastGetSolutionStepValue(DISTANCE));
        }
        // Synchronize the unsigned value to minimum
        r_communicator.SynchronizeCurrentDataToMin(DISTANCE);
        // Set the distance sign again by retrieving it from the non-historical database
        #pragma omp parallel for
        for(int i_node = 0; i_node < nnodes; ++i_node){
            auto it_node = r_distance_model_part.NodesBegin() + i_node;
            if(it_node->GetValue(DISTANCE) < 0.0){
                it_node->FastGetSolutionStepValue(DISTANCE) = -it_node->FastGetSolutionStepValue(DISTANCE);
            }
        }
    }
}
// In MPI runs, propagates the DISTANCE fixity across partition interfaces via
// FLAG_VARIABLE (-1.0 means fixed, 1.0 means free): the minimum is taken, so
// a node fixed on any partition becomes fixed everywhere. No-op in serial.
void SynchronizeFixity(){
    ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
    auto &r_communicator = r_distance_model_part.GetCommunicator();
    // Only required in the MPI case
    if(r_communicator.TotalProcesses() != 1){
        int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
        // Synchronize the fixity flag variable to minimum
        // (-1.0 means fixed and 1.0 means free)
        r_communicator.SynchronizeCurrentDataToMin(FLAG_VARIABLE);
        // Set the fixity according to the synchronized flag
        #pragma omp parallel for
        for(int i_node = 0; i_node < nnodes; ++i_node){
            auto it_node = r_distance_model_part.NodesBegin() + i_node;
            const double &r_fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);
            if (r_fix_flag == -1.0){
                it_node->Fix(DISTANCE);
            }
        }
    }
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
VariationalDistanceCalculationProcess& operator=(VariationalDistanceCalculationProcess const& rOther);
/// Copy constructor.
//VariationalDistanceCalculationProcess(VariationalDistanceCalculationProcess const& rOther);
///@}
}; // Class VariationalDistanceCalculationProcess
// Out-of-class definitions of the process' local flags. The KRATOS_CREATE_LOCAL_FLAG
// macro cannot be used here because the class is templated; if there was no
// template please use the KRATOS_CREATE_LOCAL_FLAG macro instead.
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::PERFORM_STEP1(Kratos::Flags::Create(0));
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::DO_EXPENSIVE_CHECKS(Kratos::Flags::Create(1));
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(2));
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function (declaration only; no definition is provided here)
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::istream& operator >> (std::istream& rIStream,
                                  VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis);
/// output stream function: prints the process info followed by its data
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::ostream& operator << (std::ostream& rOStream,
                                  const VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED defined
|
Parallelizer2OpenMP.h | #ifndef PARALLELIZER2OPENMP_H
#define PARALLELIZER2OPENMP_H
#include "Vector.h"
#include <omp.h>
#include "CodeSectionParams.h"
namespace PsimagLite {
// OpenMP backend for the Parallelizer2 facade: distributes a [start, end)
// index range over an OpenMP thread team.
template<typename = int>
class Parallelizer2 {

public:

	// Configures the global OpenMP thread count from the code-section
	// parameters and remembers it for numberOfThreads().
	Parallelizer2(const CodeSectionParams& codeParams)
	    : threads_(codeParams.npthreads)
	{
		// omp_set_num_threads takes an int
		omp_set_num_threads(static_cast<int>(codeParams.npthreads));
	}

	// Runs lambda(i, threadNum) for every i in [start, end), in parallel.
	template<typename SomeLambdaType>
	void parallelFor(SizeType start, SizeType end, const SomeLambdaType& lambda)
	{
#pragma omp parallel for
		for (SizeType i = start; i < end; ++i)
			lambda(i, omp_get_thread_num());
	}

	// Returns the configured thread count.
	// BUG FIX: the previous implementation returned omp_get_num_threads(),
	// which yields 1 when called outside a parallel region, so callers never
	// saw the actual pool size. The stored value is authoritative.
	SizeType numberOfThreads() const
	{
		return threads_;
	}

	String name() const { return "openmp"; }

private:

	SizeType threads_;
};
}
#endif // PARALLELIZER2OPENMP_H
|
params.c | /*
*
* c Ivo Hofacker
*
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/io.h"
#include "ViennaRNA/params/basic.h"
/**
*** \file ViennaRNA/params/basic.c
*** <P>
*** This file provides functions that return temperature scaled energy parameters and
*** Boltzmann weights packed in datastructures
*** </P>
***/
/*------------------------------------------------------------------------*/
#define SCALE 10
/**
*** dangling ends should never be destabilizing, i.e. expdangle>=1<BR>
*** specific heat needs smooth function (2nd derivative)<BR>
*** we use a*(sin(x+b)+1)^2, with a=2/(3*sqrt(3)), b=Pi/6-sqrt(3)/2,
*** in the interval b<x<sqrt(3)/2
*/
#define CLIP_NEGATIVE(X) ((X) < 0 ? 0 : (X))
#define SMOOTH(X) ((!pf_smooth) ? CLIP_NEGATIVE(X) : \
((X) / SCALE < -1.2283697) ? 0 : \
((X) / SCALE > 0.8660254) ? (X) : \
SCALE *0.38490018 \
* (sin((X) / SCALE - 0.34242663) + 1) \
* (sin((X) / SCALE - 0.34242663) + 1) \
)
/* #define SMOOTH(X) ((X)<0 ? 0 : (X)) */
/*
* If the global use_mfelike_energies flag is set, truncate doubles to int
* values and cast back to double. This makes the energy parameters of the
* partition (folding get_scaled_exp_params()) compatible with the mfe folding
* parameters (get_scaled_exp_params()), e.g. for explicit partition function
* computations.
*/
#define TRUNC_MAYBE(X) ((!pf_smooth) ? (double)((int)(X)) : (X))
/* Rescale Free energy contribution according to deviation of temperature from measurement conditions */
#define RESCALE_dG(dG, dH, dT) ((dH) - ((dH) - (dG)) * dT)
/*
* Rescale Free energy contribution according to deviation of temperature from measurement conditions
* and convert it to Boltzmann Factor for specific kT
*/
#define RESCALE_BF(dG, dH, dT, kT) ( \
exp( \
-TRUNC_MAYBE((double)RESCALE_dG((dG), (dH), (dT))) \
* 10. \
/ kT \
) \
)
#define RESCALE_BF_SMOOTH(dG, dH, dT, kT) ( \
exp( \
SMOOTH( \
-TRUNC_MAYBE((double)RESCALE_dG((dG), (dH), (dT))) \
) \
* 10. \
/ kT \
) \
)
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE vrna_param_t p;
PRIVATE int id = -1;
/* variables for partition function */
PRIVATE vrna_exp_param_t pf;
PRIVATE int pf_id = -1;
#ifdef _OPENMP
#pragma omp threadprivate(id, pf_id)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE vrna_param_t *
get_scaled_params(vrna_md_t *md);
PRIVATE vrna_exp_param_t *
get_scaled_exp_params(vrna_md_t *md,
double pfs);
PRIVATE vrna_exp_param_t *
get_exp_params_ali(vrna_md_t *md,
unsigned int n_seq,
double pfs);
PRIVATE void
rescale_params(vrna_fold_compound_t *vc);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/* Return freshly allocated, temperature-scaled free-energy parameters.
 * If md is NULL, the global default model settings are used instead. */
PUBLIC vrna_param_t *
vrna_params(vrna_md_t *md)
{
  vrna_md_t md_default;

  if (!md) {
    vrna_md_set_default(&md_default);
    md = &md_default;
  }

  return get_scaled_params(md);
}
/* Return freshly allocated Boltzmann-factor (partition function) parameters.
 * If md is NULL, the global default model settings are used instead. */
PUBLIC vrna_exp_param_t *
vrna_exp_params(vrna_md_t *md)
{
  vrna_md_t md_default;

  if (!md) {
    vrna_md_set_default(&md_default);
    md = &md_default;
  }

  /* -1. requests recomputation of the scaling factor later on */
  return get_scaled_exp_params(md, -1.);
}
/* Return Boltzmann-factor parameters for comparative (alignment) folding of
 * n_seq sequences. If md is NULL, the global defaults are used. */
PUBLIC vrna_exp_param_t *
vrna_exp_params_comparative(unsigned int n_seq,
                            vrna_md_t *md)
{
  vrna_md_t md_default;

  if (!md) {
    vrna_md_set_default(&md_default);
    md = &md_default;
  }

  return get_exp_params_ali(md, n_seq, -1.);
}
/* Deep-copy a vrna_param_t structure (flat struct, so memcpy suffices).
 * Returns NULL when par is NULL. */
PUBLIC vrna_param_t *
vrna_params_copy(vrna_param_t *par)
{
  vrna_param_t *copy;

  if (!par)
    return NULL;

  copy = (vrna_param_t *)vrna_alloc(sizeof(vrna_param_t));
  memcpy(copy, par, sizeof(*copy));

  return copy;
}
/* Deep-copy a vrna_exp_param_t structure (flat struct, so memcpy suffices).
 * Returns NULL when par is NULL. */
PUBLIC vrna_exp_param_t *
vrna_exp_params_copy(vrna_exp_param_t *par)
{
  vrna_exp_param_t *copy;

  if (!par)
    return NULL;

  copy = (vrna_exp_param_t *)vrna_alloc(sizeof(vrna_exp_param_t));
  memcpy(copy, par, sizeof(*copy));

  return copy;
}
/* Replace the fold compound's MFE parameter set. If parameters is non-NULL a
 * private copy is stored; otherwise a fresh default set is generated. */
PUBLIC void
vrna_params_subst(vrna_fold_compound_t *vc,
                  vrna_param_t *parameters)
{
  if (vc) {
    /* discard any previously attached parameter set */
    if (vc->params)
      free(vc->params);

    if (parameters) {
      vc->params = vrna_params_copy(parameters);
    } else {
      switch (vc->type) {
        case VRNA_FC_TYPE_SINGLE: /* fall through */
        case VRNA_FC_TYPE_COMPARATIVE:
          /* NULL means: rebuild from default model details */
          vc->params = vrna_params(NULL);
          break;
        default:
          break;
      }
    }
  }
}
/* Reset the fold compound's MFE parameters from the model details md_p
 * (or defaults when NULL). Partition-function parameters are refreshed too,
 * but only when the compound already had some attached. */
PUBLIC void
vrna_params_reset(vrna_fold_compound_t *vc,
                  vrna_md_t *md_p)
{
  if (vc) {
    switch (vc->type) {
      case VRNA_FC_TYPE_SINGLE: /* fall through */
      case VRNA_FC_TYPE_COMPARATIVE:
        if (vc->params)
          free(vc->params);

        vc->params = vrna_params(md_p);

        /* keep exp_params in sync only if they existed before */
        if (vc->exp_params) {
          free(vc->exp_params);
          vc->exp_params = vrna_exp_params(md_p);
        }

        break;
      default:
        break;
    }
  }
}
/* Reset the fold compound's partition-function parameters from the model
 * details md_p (or defaults when NULL). */
PUBLIC void
vrna_exp_params_reset(vrna_fold_compound_t *vc,
                      vrna_md_t *md_p)
{
  if (!vc)
    return;

  switch (vc->type) {
    case VRNA_FC_TYPE_SINGLE: /* fall through */
    case VRNA_FC_TYPE_COMPARATIVE:
      free(vc->exp_params);   /* free(NULL) is a no-op */
      vc->exp_params = vrna_exp_params(md_p);
      break;
    default:
      break;
  }
}
/* Replace the fold compound's partition-function parameter set. A non-NULL
 * params is copied; otherwise a fresh set is generated according to the
 * compound type. Scaling helper arrays are recomputed afterwards. */
PUBLIC void
vrna_exp_params_subst(vrna_fold_compound_t *vc,
                      vrna_exp_param_t *params)
{
  if (vc) {
    if (vc->exp_params)
      free(vc->exp_params);

    if (params) {
      vc->exp_params = vrna_exp_params_copy(params);
    } else {
      switch (vc->type) {
        case VRNA_FC_TYPE_SINGLE:
          vc->exp_params = vrna_exp_params(NULL);
          /* multi-strand input: no minimal hairpin size between strands */
          if (vc->strands > 1)
            vc->exp_params->model_details.min_loop_size = 0;

          break;
        case VRNA_FC_TYPE_COMPARATIVE:
          vc->exp_params = vrna_exp_params_comparative(vc->n_seq, NULL);
          break;
        default:
          break;
      }
    }

    /* fill additional helper arrays for scaling etc. */
    vrna_exp_params_rescale(vc, NULL);
  }
}
/* Recompute the partition-function scaling factor pf_scale for the fold
 * compound. If mfe is given it is used as the per-nucleotide energy estimate;
 * otherwise a mean-energy heuristic for random sequences is applied. Creates
 * exp_params on demand and re-syncs them when the model details diverged from
 * the MFE parameter set. */
PUBLIC void
vrna_exp_params_rescale(vrna_fold_compound_t *vc,
                        double *mfe)
{
  vrna_exp_param_t *pf;
  double e_per_nt, kT;
  vrna_md_t *md;

  if (vc) {
    if (!vc->exp_params) {
      /* lazily create the Boltzmann-factor parameters from the MFE model */
      switch (vc->type) {
        case VRNA_FC_TYPE_SINGLE:
          vc->exp_params = vrna_exp_params(&(vc->params->model_details));
          break;
        case VRNA_FC_TYPE_COMPARATIVE:
          vc->exp_params = vrna_exp_params_comparative(vc->n_seq, &(vc->params->model_details));
          break;
      }
    } else if (memcmp(&(vc->params->model_details),
                      &(vc->exp_params->model_details),
                      sizeof(vrna_md_t)) != 0) {
      /* make sure that model details are matching */
      (void)vrna_md_copy(&(vc->exp_params->model_details), &(vc->params->model_details));
      /* we probably need some mechanism to check whether DP matrices still match the new model settings! */
    }

    pf = vc->exp_params;
    if (pf) {
      kT = pf->kT;
      md = &(pf->model_details);

      /* comparative folding distributes kT over the alignment */
      if (vc->type == VRNA_FC_TYPE_COMPARATIVE)
        kT /= vc->n_seq;

      /* re-compute scaling factor if necessary */
      if ((mfe) || (pf->pf_scale < 1.)) {
        if (mfe) /* use largest known Boltzmann factor for scaling */
          e_per_nt = *mfe * 1000. / vc->length;
        else /* use mean energy for random sequences: 184.3*length cal for scaling */
          e_per_nt = -185 + (pf->temperature - 37.) * 7.27;

        /* apply user-defined scaling factor to allow scaling for unusually stable/unstable structure ensembles */
        pf->pf_scale = exp(-(md->sfact * e_per_nt) / kT);
      }

      /* scaling factors below 1 would amplify rather than damp the recursions */
      if (pf->pf_scale < 1.)
        pf->pf_scale = 1.;

      rescale_params(vc);
    }
  }
}
/* Make sure the fold compound carries the parameter sets required by the
 * given option bitmask. Currently only VRNA_OPTION_PF is handled: stale
 * exp_params (model details differing from the reference) are dropped and
 * regenerated. */
PUBLIC void
vrna_params_prepare(vrna_fold_compound_t *fc,
                    unsigned int options)
{
  if (fc) {
    vrna_md_t *md_p;

    /*
     * every vrna_fold_compound_t must have a vrna_paramt_t structure attached
     * to it that holds the current model details. So we just use this here as
     * the reference model
     */
    md_p = &(fc->params->model_details);

    if (options & VRNA_OPTION_PF) {
      /* remove previous parameters if present and they differ from reference model */
      if (fc->exp_params) {
        if (memcmp(md_p, &(fc->exp_params->model_details), sizeof(vrna_md_t)) != 0) {
          free(fc->exp_params);
          fc->exp_params = NULL;
        }
      }

      if (!fc->exp_params)
        fc->exp_params = (fc->type == VRNA_FC_TYPE_SINGLE) ? \
                         vrna_exp_params(md_p) : \
                         vrna_exp_params_comparative(fc->n_seq, md_p);
    }
  }
}
/*
#####################################
# BEGIN OF STATIC HELPER FUNCTIONS #
#####################################
*/
/* Allocate and fill a vrna_param_t with all free-energy contributions rescaled
 * from the 37C measurement temperature to md->temperature, using
 * RESCALE_dG(dG37, dH, T/Tmeasure). Each call increments the global id. */
PRIVATE vrna_param_t *
get_scaled_params(vrna_md_t *md)
{
  unsigned int i, j, k, l;
  double tempf;         /* temperature ratio (T + K0) / Tmeasure */
  vrna_param_t *params;

  params = (vrna_param_t *)vrna_alloc(sizeof(vrna_param_t));

  memset(params->param_file, '\0', 256);
  if (last_parameter_file() != NULL)
    strncpy(params->param_file, last_parameter_file(), 255);

  params->model_details = *md; /* copy over the model details */
  params->temperature = md->temperature;
  tempf = ((params->temperature + K0) / Tmeasure);

  /* scalar contributions */
  params->ninio[2] = RESCALE_dG(ninio37, niniodH, tempf);
  params->lxc = lxc37 * tempf;
  params->TripleC = RESCALE_dG(TripleC37, TripleCdH, tempf);
  params->MultipleCA = RESCALE_dG(MultipleCA37, MultipleCAdH, tempf);
  params->MultipleCB = RESCALE_dG(MultipleCB37, MultipleCBdH, tempf);
  params->TerminalAU = RESCALE_dG(TerminalAU37, TerminalAUdH, tempf);
  params->DuplexInit = RESCALE_dG(DuplexInit37, DuplexInitdH, tempf);
  params->MLbase = RESCALE_dG(ML_BASE37, ML_BASEdH, tempf);
  params->MLclosing = RESCALE_dG(ML_closing37, ML_closingdH, tempf);
  params->gquadLayerMismatch = RESCALE_dG(GQuadLayerMismatch37, GQuadLayerMismatchH, tempf);
  params->gquadLayerMismatchMax = GQuadLayerMismatchMax;

  /* G-quadruplex energies: linear in stack size, logarithmic in linker length */
  for (i = VRNA_GQUAD_MIN_STACK_SIZE; i <= VRNA_GQUAD_MAX_STACK_SIZE; i++)
    for (j = 3 * VRNA_GQUAD_MIN_LINKER_LENGTH; j <= 3 * VRNA_GQUAD_MAX_LINKER_LENGTH; j++) {
      double GQuadAlpha_T = RESCALE_dG(GQuadAlpha37, GQuadAlphadH, tempf);
      double GQuadBeta_T = RESCALE_dG(GQuadBeta37, GQuadBetadH, tempf);
      params->gquad[i][j] = (int)GQuadAlpha_T * (i - 1) + (int)(((double)GQuadBeta_T) * log(j - 2));
    }

  /* loop-length tables: tabulated up to 30, logarithmic extrapolation beyond */
  for (i = 0; i < 31; i++)
    params->hairpin[i] = RESCALE_dG(hairpin37[i], hairpindH[i], tempf);
  for (i = 0; i <= MIN2(30, MAXLOOP); i++) {
    params->bulge[i] = RESCALE_dG(bulge37[i], bulgedH[i], tempf);
    params->internal_loop[i] = RESCALE_dG(internal_loop37[i], internal_loopdH[i], tempf);
  }
  for (; i <= MAXLOOP; i++) {
    params->bulge[i] = params->bulge[30] +
                       (int)(params->lxc * log((double)(i) / 30.));
    params->internal_loop[i] = params->internal_loop[30] +
                               (int)(params->lxc * log((double)(i) / 30.));
  }

  /* special loop sequences (7/5/9 = record width incl. closing pair in the string tables) */
  for (i = 0; (i * 7) < strlen(Tetraloops); i++)
    params->Tetraloop_E[i] = RESCALE_dG(Tetraloop37[i], TetraloopdH[i], tempf);
  for (i = 0; (i * 5) < strlen(Triloops); i++)
    params->Triloop_E[i] = RESCALE_dG(Triloop37[i], TriloopdH[i], tempf);
  for (i = 0; (i * 9) < strlen(Hexaloops); i++)
    params->Hexaloop_E[i] = RESCALE_dG(Hexaloop37[i], HexaloopdH[i], tempf);

  for (i = 0; i <= NBPAIRS; i++)
    params->MLintern[i] = RESCALE_dG(ML_intern37, ML_interndH, tempf);

  /* stacks G(T) = H - [H - G(T0)]*T/T0 */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      params->stack[i][j] = RESCALE_dG(stack37[i][j],
                                       stackdH[i][j],
                                       tempf);

  /* mismatches */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j < 5; j++)
      for (k = 0; k < 5; k++) {
        int mm;
        params->mismatchI[i][j][k] = RESCALE_dG(mismatchI37[i][j][k],
                                                mismatchIdH[i][j][k],
                                                tempf);
        params->mismatchH[i][j][k] = RESCALE_dG(mismatchH37[i][j][k],
                                                mismatchHdH[i][j][k],
                                                tempf);
        params->mismatch1nI[i][j][k] = RESCALE_dG(mismatch1nI37[i][j][k],
                                                  mismatch1nIdH[i][j][k],
                                                  tempf);
        params->mismatch23I[i][j][k] = RESCALE_dG(mismatch23I37[i][j][k],
                                                  mismatch23IdH[i][j][k],
                                                  tempf);
        if (md->dangles) {
          /* multiloop/exterior mismatches are clamped to be non-destabilizing */
          mm = RESCALE_dG(mismatchM37[i][j][k],
                          mismatchMdH[i][j][k],
                          tempf);
          params->mismatchM[i][j][k] = (mm > 0) ? 0 : mm;
          mm = RESCALE_dG(mismatchExt37[i][j][k],
                          mismatchExtdH[i][j][k],
                          tempf);
          params->mismatchExt[i][j][k] = (mm > 0) ? 0 : mm;
        } else {
          params->mismatchM[i][j][k] = params->mismatchExt[i][j][k] = 0;
        }
      }

  /* dangles */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j < 5; j++) {
      int dd;
      dd = RESCALE_dG(dangle5_37[i][j],
                      dangle5_dH[i][j],
                      tempf);
      params->dangle5[i][j] = (dd > 0) ? 0 : dd; /* must be <= 0 */
      dd = RESCALE_dG(dangle3_37[i][j],
                      dangle3_dH[i][j],
                      tempf);
      params->dangle3[i][j] = (dd > 0) ? 0 : dd; /* must be <= 0 */
    }

  /* interior 1x1 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++)
          params->int11[i][j][k][l] = RESCALE_dG(int11_37[i][j][k][l],
                                                 int11_dH[i][j][k][l],
                                                 tempf);

  /* interior 2x1 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m;
          for (m = 0; m < 5; m++)
            params->int21[i][j][k][l][m] = RESCALE_dG(int21_37[i][j][k][l][m],
                                                      int21_dH[i][j][k][l][m],
                                                      tempf);
        }

  /* interior 2x2 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m, n;
          for (m = 0; m < 5; m++)
            for (n = 0; n < 5; n++)
              params->int22[i][j][k][l][m][n] = RESCALE_dG(int22_37[i][j][k][l][m][n],
                                                           int22_dH[i][j][k][l][m][n],
                                                           tempf);
        }

  strncpy(params->Tetraloops, Tetraloops, 281);
  strncpy(params->Triloops, Triloops, 241);
  strncpy(params->Hexaloops, Hexaloops, 361);

  params->id = ++id;
  return params;
}
/* Allocate and fill a vrna_exp_param_t with temperature-rescaled energies
 * converted to Boltzmann weights exp(-dG/kT) (RESCALE_BF). With pf_smooth the
 * dangle/mismatch terms are smoothed for a differentiable specific heat;
 * otherwise energies are truncated to ints to match the MFE parameters.
 * pfs is stored as the initial pf_scale (callers pass -1. to defer). */
PRIVATE vrna_exp_param_t *
get_scaled_exp_params(vrna_md_t *md,
                      double pfs)
{
  unsigned int i, j, k, l;
  int pf_smooth;
  double kT, TT;        /* kT in cal/mol, TT = temperature ratio */
  double GT;            /* scratch free energy value */
  vrna_exp_param_t *pf;

  pf = (vrna_exp_param_t *)vrna_alloc(sizeof(vrna_exp_param_t));

  memset(pf->param_file, '\0', 256);
  if (last_parameter_file() != NULL)
    strncpy(pf->param_file, last_parameter_file(), 255);

  pf->model_details = *md;
  pf->temperature = md->temperature;
  pf->alpha = md->betaScale;
  pf->kT = kT = md->betaScale * (md->temperature + K0) * GASCONST; /* kT in cal/mol */
  pf->pf_scale = pfs;
  pf_smooth = md->pf_smooth;
  TT = (md->temperature + K0) / (Tmeasure);

  pf->lxc = lxc37 * TT;
  pf->expDuplexInit = RESCALE_BF(DuplexInit37, DuplexInitdH, TT, kT);
  pf->expTermAU = RESCALE_BF(TerminalAU37, TerminalAUdH, TT, kT);
  pf->expMLbase = RESCALE_BF(ML_BASE37, ML_BASEdH, TT, kT);
  pf->expMLclosing = RESCALE_BF(ML_closing37, ML_closingdH, TT, kT);
  pf->expgquadLayerMismatch = RESCALE_BF(GQuadLayerMismatch37, GQuadLayerMismatchH, TT, kT);
  pf->gquadLayerMismatchMax = GQuadLayerMismatchMax;

  /* G-quadruplex weights: linear in stack size, logarithmic in linker length */
  for (i = VRNA_GQUAD_MIN_STACK_SIZE; i <= VRNA_GQUAD_MAX_STACK_SIZE; i++)
    for (j = 3 * VRNA_GQUAD_MIN_LINKER_LENGTH; j <= 3 * VRNA_GQUAD_MAX_LINKER_LENGTH; j++) {
      double GQuadAlpha_T = RESCALE_dG(GQuadAlpha37, GQuadAlphadH, TT);
      double GQuadBeta_T = RESCALE_dG(GQuadBeta37, GQuadBetadH, TT);
      GT = ((double)GQuadAlpha_T) * ((double)(i - 1)) + ((double)GQuadBeta_T) *
           log(((double)j) - 2.);
      pf->expgquad[i][j] = exp(-TRUNC_MAYBE(GT) * 10. / kT);
    }

  /* loop energies: hairpins, bulges, interior, mulit-loops */
  for (i = 0; i < 31; i++)
    pf->exphairpin[i] = RESCALE_BF(hairpin37[i], hairpindH[i], TT, kT);

  for (i = 0; i <= MIN2(30, MAXLOOP); i++) {
    pf->expbulge[i] = RESCALE_BF(bulge37[i], bulgedH[i], TT, kT);
    pf->expinternal[i] = RESCALE_BF(internal_loop37[i], internal_loopdH[i], TT, kT);
  }

  /* special case of size 2 interior loops (single mismatch) */
  if (james_rule)
    pf->expinternal[2] = exp(-80 * 10. / kT);

  /* logarithmic extrapolation beyond tabulated length 30 */
  GT = RESCALE_dG(bulge37[30],
                  bulgedH[30],
                  TT);
  for (i = 31; i <= MAXLOOP; i++)
    pf->expbulge[i] = exp(-TRUNC_MAYBE(GT + (pf->lxc * log(i / 30.))) * 10. / kT);

  GT = RESCALE_dG(internal_loop37[30],
                  internal_loopdH[30],
                  TT);
  for (i = 31; i <= MAXLOOP; i++)
    pf->expinternal[i] = exp(-TRUNC_MAYBE(GT + (pf->lxc * log(i / 30.))) * 10. / kT);

  /* asymmetry penalty (ninio), capped at MAX_NINIO */
  GT = RESCALE_dG(ninio37, niniodH, TT);
  for (j = 0; j <= MAXLOOP; j++)
    pf->expninio[2][j] = exp(-MIN2(MAX_NINIO, j * TRUNC_MAYBE(GT)) * 10. / kT);

  /* special loop sequences (7/5/9 = record width in the string tables) */
  for (i = 0; (i * 7) < strlen(Tetraloops); i++)
    pf->exptetra[i] = RESCALE_BF(Tetraloop37[i], TetraloopdH[i], TT, kT);
  for (i = 0; (i * 5) < strlen(Triloops); i++)
    pf->exptri[i] = RESCALE_BF(Triloop37[i], TriloopdH[i], TT, kT);
  for (i = 0; (i * 9) < strlen(Hexaloops); i++)
    pf->exphex[i] = RESCALE_BF(Hexaloop37[i], HexaloopdH[i], TT, kT);

  for (i = 0; i <= NBPAIRS; i++)
    pf->expMLintern[i] = RESCALE_BF(ML_intern37, ML_interndH, TT, kT);

  /* if dangles==0 just set their energy to 0,
   * don't let dangle energies become > 0 (at large temps),
   * but make sure go smoothly to 0 */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= 4; j++) {
      if (md->dangles) {
        pf->expdangle5[i][j] = RESCALE_BF_SMOOTH(dangle5_37[i][j], dangle5_dH[i][j], TT, kT);
        pf->expdangle3[i][j] = RESCALE_BF_SMOOTH(dangle3_37[i][j], dangle3_dH[i][j], TT, kT);
      } else {
        pf->expdangle3[i][j] = pf->expdangle5[i][j] = 1;
      }
    }

  /* stacking energies */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      pf->expstack[i][j] = RESCALE_BF(stack37[i][j], stackdH[i][j], TT, kT);

  /* mismatch energies */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j < 5; j++)
      for (k = 0; k < 5; k++) {
        pf->expmismatchI[i][j][k] = RESCALE_BF(mismatchI37[i][j][k],
                                               mismatchIdH[i][j][k],
                                               TT,
                                               kT);
        pf->expmismatch1nI[i][j][k] = RESCALE_BF(mismatch1nI37[i][j][k],
                                                 mismatch1nIdH[i][j][k],
                                                 TT,
                                                 kT);
        pf->expmismatchH[i][j][k] = RESCALE_BF(mismatchH37[i][j][k],
                                               mismatchHdH[i][j][k],
                                               TT,
                                               kT);
        pf->expmismatch23I[i][j][k] = RESCALE_BF(mismatch23I37[i][j][k],
                                                 mismatch23IdH[i][j][k],
                                                 TT,
                                                 kT);
        if (md->dangles) {
          /* multiloop/exterior mismatches use the smoothed Boltzmann factor */
          pf->expmismatchM[i][j][k] = RESCALE_BF_SMOOTH(mismatchM37[i][j][k],
                                                        mismatchMdH[i][j][k],
                                                        TT,
                                                        kT);
          pf->expmismatchExt[i][j][k] = RESCALE_BF_SMOOTH(mismatchExt37[i][j][k],
                                                          mismatchExtdH[i][j][k],
                                                          TT,
                                                          kT);
        } else {
          pf->expmismatchM[i][j][k] = pf->expmismatchExt[i][j][k] = 1.;
        }
      }

  /* interior lops of length 2 (1x1) */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          pf->expint11[i][j][k][l] = RESCALE_BF(int11_37[i][j][k][l],
                                                int11_dH[i][j][k][l],
                                                TT,
                                                kT);
        }

  /* interior 2x1 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m;
          for (m = 0; m < 5; m++) {
            pf->expint21[i][j][k][l][m] = RESCALE_BF(int21_37[i][j][k][l][m],
                                                     int21_dH[i][j][k][l][m],
                                                     TT,
                                                     kT);
          }
        }

  /* interior 2x2 loops */
  for (i = 0; i <= NBPAIRS; i++)
    for (j = 0; j <= NBPAIRS; j++)
      for (k = 0; k < 5; k++)
        for (l = 0; l < 5; l++) {
          int m, n;
          for (m = 0; m < 5; m++)
            for (n = 0; n < 5; n++) {
              pf->expint22[i][j][k][l][m][n] = RESCALE_BF(int22_37[i][j][k][l][m][n],
                                                          int22_dH[i][j][k][l][m][n],
                                                          TT,
                                                          kT);
            }
        }

  strncpy(pf->Tetraloops, Tetraloops, 281);
  strncpy(pf->Triloops, Triloops, 241);
  strncpy(pf->Hexaloops, Hexaloops, 361);

  return pf;
}
/*
 * Build a Boltzmann-factor (partition function) parameter set for the
 * comparative (alignment) folding case.
 *
 * Structurally identical to the single-sequence variant, except kT is
 * multiplied by the number of sequences n_seq so that energies summed
 * over all sequences of the alignment are weighted consistently.
 *
 * @param md     model details (temperature, betaScale, dangles, ...)
 * @param n_seq  number of sequences in the alignment
 * @param pfs    partition function scaling factor stored in the result
 * @return       freshly allocated parameter set (caller owns/frees it)
 */
PRIVATE vrna_exp_param_t *
get_exp_params_ali(vrna_md_t *md,
unsigned int n_seq,
double pfs)
{
/* scale energy parameters and pre-calculate Boltzmann weights */
unsigned int i, j, k, l;
int pf_smooth;
double kTn, TT;
double GT;
vrna_exp_param_t *pf;
pf = (vrna_exp_param_t *)vrna_alloc(sizeof(vrna_exp_param_t));
pf->model_details = *md;
pf->alpha = md->betaScale;
pf->temperature = md->temperature;
pf->pf_scale = pfs;
pf->kT = kTn = ((double)n_seq) * md->betaScale * (md->temperature + K0) * GASCONST; /* kT in cal/mol */
/* NOTE(review): pf_smooth is read from md but never used below, although
 * RESCALE_BF_SMOOTH variants are applied -- confirm intended behavior */
pf_smooth = md->pf_smooth;
/* TT = T/Tmeasure, the rescaling factor from measurement temperature */
TT = (md->temperature + K0) / (Tmeasure);
pf->lxc = lxc37 * TT;
pf->expDuplexInit = RESCALE_BF(DuplexInit37, DuplexInitdH, TT, kTn);
pf->expTermAU = RESCALE_BF(TerminalAU37, TerminalAUdH, TT, kTn);
/* multiloop base penalty is applied per sequence, hence kTn / n_seq */
pf->expMLbase = RESCALE_BF(ML_BASE37, ML_BASEdH, TT, kTn / n_seq);
pf->expMLclosing = RESCALE_BF(ML_closing37, ML_closingdH, TT, kTn);
pf->expgquadLayerMismatch = RESCALE_BF(GQuadLayerMismatch37, GQuadLayerMismatchH, TT, kTn);
pf->gquadLayerMismatchMax = GQuadLayerMismatchMax;
/* G-quadruplex weights for every stack size / linker length combination */
for (i = VRNA_GQUAD_MIN_STACK_SIZE; i <= VRNA_GQUAD_MAX_STACK_SIZE; i++)
for (j = 3 * VRNA_GQUAD_MIN_LINKER_LENGTH; j <= 3 * VRNA_GQUAD_MAX_LINKER_LENGTH; j++) {
double GQuadAlpha_T = RESCALE_dG(GQuadAlpha37, GQuadAlphadH, TT);
double GQuadBeta_T = RESCALE_dG(GQuadBeta37, GQuadBetadH, TT);
GT = ((double)GQuadAlpha_T) * ((double)(i - 1)) + ((double)GQuadBeta_T) *
log(((double)j) - 2.);
pf->expgquad[i][j] = exp(-TRUNC_MAYBE(GT) * 10. / kTn);
}
/* loop energies: hairpins, bulges, interior, multi-loops */
for (i = 0; i < 31; i++)
pf->exphairpin[i] = RESCALE_BF(hairpin37[i], hairpindH[i], TT, kTn);
/*add penalty for too short hairpins*/
for (i = 0; i < 3; i++) {
GT = 600 /*Penalty*/ * TT;
pf->exphairpin[i] = exp(-GT * 10. / kTn);
}
for (i = 0; i <= MIN2(30, MAXLOOP); i++) {
pf->expbulge[i] = RESCALE_BF(bulge37[i], bulgedH[i], TT, kTn);
pf->expinternal[i] = RESCALE_BF(internal_loop37[i], internal_loopdH[i], TT, kTn);
}
/* special case of size 2 interior loops (single mismatch) */
if (james_rule)
pf->expinternal[2] = exp(-80 * 10. / kTn);
/* loops longer than 30 nt: logarithmic extrapolation from the size-30 value */
GT = RESCALE_dG(bulge37[30], bulgedH[30], TT);
for (i = 31; i <= MAXLOOP; i++)
pf->expbulge[i] = exp(-(GT + (pf->lxc * log(i / 30.))) * 10. / kTn);
GT = RESCALE_dG(internal_loop37[30], internal_loopdH[30], TT);
for (i = 31; i <= MAXLOOP; i++)
pf->expinternal[i] = exp(-(GT + (pf->lxc * log(i / 30.))) * 10. / kTn);
/* loop-asymmetry (Ninio) penalty, capped at MAX_NINIO */
GT = RESCALE_dG(ninio37, niniodH, TT);
for (j = 0; j <= MAXLOOP; j++)
pf->expninio[2][j] = exp(-MIN2(MAX_NINIO, j * GT) * 10. / kTn);
/* tabulated special hairpins; entries are 7/5/9 characters each */
for (i = 0; (i * 7) < strlen(Tetraloops); i++)
pf->exptetra[i] = RESCALE_BF(Tetraloop37[i], TetraloopdH[i], TT, kTn);
for (i = 0; (i * 5) < strlen(Triloops); i++)
pf->exptri[i] = RESCALE_BF(Triloop37[i], TriloopdH[i], TT, kTn);
for (i = 0; (i * 9) < strlen(Hexaloops); i++)
pf->exphex[i] = RESCALE_BF(Hexaloop37[i], HexaloopdH[i], TT, kTn);
for (i = 0; i <= NBPAIRS; i++)
/* includes AU penalty */
pf->expMLintern[i] = RESCALE_BF(ML_intern37, ML_interndH, TT, kTn);
/* if dangle_model==0 just set their energy to 0,
 * don't let dangle energies become > 0 (at large temps),
 * but make sure go smoothly to 0 */
for (i = 0; i <= NBPAIRS; i++)
for (j = 0; j <= 4; j++) {
if (md->dangles) {
pf->expdangle5[i][j] = RESCALE_BF_SMOOTH(dangle5_37[i][j],
dangle5_dH[i][j],
TT,
kTn);
pf->expdangle3[i][j] = RESCALE_BF_SMOOTH(dangle3_37[i][j],
dangle3_dH[i][j],
TT,
kTn);
} else {
pf->expdangle3[i][j] = pf->expdangle5[i][j] = 1;
}
}
/* stacking energies */
for (i = 0; i <= NBPAIRS; i++)
for (j = 0; j <= NBPAIRS; j++) {
pf->expstack[i][j] = RESCALE_BF(stack37[i][j],
stackdH[i][j],
TT,
kTn);
}
/* mismatch energies */
for (i = 0; i <= NBPAIRS; i++)
for (j = 0; j < 5; j++)
for (k = 0; k < 5; k++) {
pf->expmismatchI[i][j][k] = RESCALE_BF(mismatchI37[i][j][k],
mismatchIdH[i][j][k],
TT,
kTn);
pf->expmismatch1nI[i][j][k] = RESCALE_BF(mismatch1nI37[i][j][k],
mismatch1nIdH[i][j][k],
TT,
kTn);
pf->expmismatchH[i][j][k] = RESCALE_BF(mismatchH37[i][j][k],
mismatchHdH[i][j][k],
TT,
kTn);
pf->expmismatch23I[i][j][k] = RESCALE_BF(mismatch23I37[i][j][k],
mismatch23IdH[i][j][k],
TT,
kTn);
if (md->dangles) {
pf->expmismatchM[i][j][k] = RESCALE_BF_SMOOTH(mismatchM37[i][j][k],
mismatchMdH[i][j][k],
TT,
kTn);
pf->expmismatchExt[i][j][k] = RESCALE_BF_SMOOTH(mismatchExt37[i][j][k],
mismatchExtdH[i][j][k],
TT,
kTn);
} else {
pf->expmismatchM[i][j][k] = pf->expmismatchExt[i][j][k] = 1.;
}
}
/* interior loops of length 2 (1x1) */
for (i = 0; i <= NBPAIRS; i++)
for (j = 0; j <= NBPAIRS; j++)
for (k = 0; k < 5; k++)
for (l = 0; l < 5; l++) {
pf->expint11[i][j][k][l] = RESCALE_BF(int11_37[i][j][k][l],
int11_dH[i][j][k][l],
TT,
kTn);
}
/* interior 2x1 loops */
for (i = 0; i <= NBPAIRS; i++)
for (j = 0; j <= NBPAIRS; j++)
for (k = 0; k < 5; k++)
for (l = 0; l < 5; l++) {
int m;
for (m = 0; m < 5; m++) {
pf->expint21[i][j][k][l][m] = RESCALE_BF(int21_37[i][j][k][l][m],
int21_dH[i][j][k][l][m],
TT,
kTn);
}
}
/* interior 2x2 loops */
for (i = 0; i <= NBPAIRS; i++)
for (j = 0; j <= NBPAIRS; j++)
for (k = 0; k < 5; k++)
for (l = 0; l < 5; l++) {
int m, n;
for (m = 0; m < 5; m++)
for (n = 0; n < 5; n++) {
pf->expint22[i][j][k][l][m][n] = RESCALE_BF(int22_37[i][j][k][l][m][n],
int22_dH[i][j][k][l][m][n],
TT,
kTn);
}
}
/* NOTE(review): copy sizes 281/241/361 assumed to match the fixed char
 * arrays in vrna_exp_param_t -- confirm against the struct definition */
strncpy(pf->Tetraloops, Tetraloops, 281);
strncpy(pf->Triloops, Triloops, 241);
strncpy(pf->Hexaloops, Hexaloops, 361);
return pf;
}
/*
 * Recompute the per-length scaling vectors of the partition function
 * matrices from the current exponentiated parameter set.
 *
 * scale[i] holds pf_scale^-i (built by doubling to limit FP error) and
 * expMLbase[i] the pre-scaled multiloop base contribution for length i.
 * A fold compound without matrices or parameters is left untouched.
 */
PRIVATE void
rescale_params(vrna_fold_compound_t *vc)
{
  vrna_exp_param_t *params = vc->exp_params;
  vrna_mx_pf_t *matrices = vc->exp_matrices;
  int len;
  if ((matrices == NULL) || (params == NULL))
    return;
  /* seed lengths 0 and 1 */
  matrices->scale[0] = 1.;
  matrices->scale[1] = (FLT_OR_DBL)(1. / params->pf_scale);
  matrices->expMLbase[0] = 1;
  matrices->expMLbase[1] = (FLT_OR_DBL)(params->expMLbase / params->pf_scale);
  /* each longer length is the product of two shorter halves */
  for (len = 2; len <= vc->length; len++) {
    matrices->scale[len] = matrices->scale[len / 2] * matrices->scale[len - (len / 2)];
    matrices->expMLbase[len] = (FLT_OR_DBL)pow(params->expMLbase, (double)len) * matrices->scale[len];
  }
}
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/*
*###########################################
*# deprecated functions below #
*###########################################
*/
/*
 * Deprecated wrapper: build a free-energy parameter set from the current
 * global model settings. Use vrna_params() with an explicit vrna_md_t
 * instead.
 */
PUBLIC vrna_param_t *
scale_parameters(void)
{
vrna_md_t md;
set_model_details(&md);
return vrna_params(&md);
}
/*
 * Deprecated wrapper: build a free-energy parameter set for temperature
 * `temp` from a copy of model details `md` (passed by value, so the
 * caller's struct is not modified).
 */
PUBLIC vrna_param_t *
get_scaled_parameters(double temp,
vrna_md_t md)
{
md.temperature = temp;
return get_scaled_params(&md);
}
/*
 * Deprecated wrapper: build Boltzmann factors for the given temperature,
 * betaScale and scaling factor.
 * Side effect: overwrites the global `pf_scale` with `pfs` (legacy
 * behavior kept for backward compatibility).
 */
PUBLIC vrna_exp_param_t *
get_boltzmann_factors(double temp,
double betaScale,
vrna_md_t md,
double pfs)
{
md.temperature = temp;
md.betaScale = betaScale;
pf_scale = pfs;
return get_scaled_exp_params(&md, pfs);
}
/*
 * Deprecated wrapper: build Boltzmann factors from the current global
 * model settings, stamping the global `pf_scale` into the result.
 */
PUBLIC vrna_exp_param_t *
get_scaled_pf_parameters(void)
{
vrna_md_t md;
vrna_exp_param_t *pf;
set_model_details(&md);
pf = vrna_exp_params(&md);
pf->pf_scale = pf_scale;
return pf;
}
/*
 * Deprecated wrapper: alignment (comparative) variant of
 * get_boltzmann_factors(); kT is scaled by `n_seq` inside
 * get_exp_params_ali().
 * Side effect: overwrites the global `pf_scale` with `pfs`.
 */
PUBLIC vrna_exp_param_t *
get_boltzmann_factors_ali(unsigned int n_seq,
double temp,
double betaScale,
vrna_md_t md,
double pfs)
{
md.temperature = temp;
md.betaScale = betaScale;
pf_scale = pfs;
return get_exp_params_ali(&md, n_seq, pfs);
}
/*
 * Deprecated wrapper: alignment Boltzmann factors from the current
 * global model settings and the global `pf_scale`.
 */
PUBLIC vrna_exp_param_t *
get_scaled_alipf_parameters(unsigned int n_seq)
{
vrna_md_t md;
set_model_details(&md);
return get_exp_params_ali(&md, n_seq, pf_scale);
}
/* Deprecated alias for vrna_exp_params_copy(). */
PUBLIC vrna_exp_param_t *
get_boltzmann_factor_copy(vrna_exp_param_t *par)
{
return vrna_exp_params_copy(par);
}
/* Deprecated alias for vrna_params_copy(). */
PUBLIC vrna_param_t *
get_parameter_copy(vrna_param_t *par)
{
return vrna_params_copy(par);
}
/*
 * Deprecated: return a heap copy of the global parameter set `p`.
 * If the cached set is stale (its id no longer matches the global `id`),
 * a freshly scaled set is built from the current global model settings
 * instead of copying.
 */
PUBLIC vrna_param_t *
copy_parameters(void)
{
vrna_param_t *copy;
if (p.id != id) {
/* cached set out of date -> rebuild rather than copy */
vrna_md_t md;
set_model_details(&md);
return vrna_params(&md);
} else {
copy = (vrna_param_t *)vrna_alloc(sizeof(vrna_param_t));
memcpy(copy, &p, sizeof(vrna_param_t));
}
return copy;
}
/*
 * Deprecated: overwrite the global parameter set `p` with *dest and
 * return a pointer to the global (not a copy).
 */
PUBLIC vrna_param_t *
set_parameters(vrna_param_t *dest)
{
memcpy(&p, dest, sizeof(vrna_param_t));
return &p;
}
/*
 * Deprecated: return a heap copy of the global Boltzmann factor set `pf`.
 * If the cached set is stale (id mismatch with `pf_id`), a fresh set is
 * built from the current global model settings and the global `pf_scale`.
 */
PUBLIC vrna_exp_param_t *
copy_pf_param(void)
{
vrna_exp_param_t *copy;
if (pf.id != pf_id) {
/* cached set out of date -> rebuild rather than copy */
vrna_md_t md;
set_model_details(&md);
copy = vrna_exp_params(&md);
copy->pf_scale = pf_scale;
return copy;
} else {
copy = (vrna_exp_param_t *)vrna_alloc(sizeof(vrna_exp_param_t));
memcpy(copy, &pf, sizeof(vrna_exp_param_t));
}
return copy;
}
/*
 * Deprecated: overwrite the global Boltzmann factor set `pf` and return
 * a pointer to the global.
 * NOTE(review): the parameter is declared vrna_param_t* but is copied as
 * sizeof(vrna_exp_param_t) -- this mismatched signature is part of the
 * historical public API and must be kept; callers are expected to pass a
 * vrna_exp_param_t disguised behind this pointer type. Confirm upstream.
 */
PUBLIC vrna_exp_param_t *
set_pf_param(vrna_param_t *dest)
{
memcpy(&pf, dest, sizeof(vrna_exp_param_t));
return &pf;
}
/*
 * Deprecated: same behavior as get_scaled_pf_parameters() -- fresh
 * Boltzmann factors from the global model settings, with the global
 * `pf_scale` stamped in.
 */
PUBLIC vrna_exp_param_t *
scale_pf_parameters(void)
{
vrna_md_t md;
vrna_exp_param_t *pf;
set_model_details(&md);
pf = vrna_exp_params(&md);
pf->pf_scale = pf_scale;
return pf;
}
#endif
|
convolution_1x1_pack1to4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// A 1x1 stride-1 convolution is a plain matrix multiplication: collapse
// the spatial dimensions into a single row and delegate to the packed
// sgemm kernel. The blob's underlying storage is shared, not copied.
static void conv1x1s1_sgemm_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int spatial = bottom_blob.w * bottom_blob.h;
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = spatial;
    bottom_im2col.h = 1;
    im2col_sgemm_pack1to4_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
static void conv1x1s2_sgemm_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const unsigned short* r0 = bottom_blob.channel(p);
unsigned short* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack1to4_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
1.norace3.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
// Fixture for the LLOV static race checker. The inner j-loop carries a
// dependence (A[i][j] reads A[i][j-1]) but it stays entirely within row i,
// which is owned by a single thread of the parallel i-loop -- hence the
// expected verdict "Data Race Free". A is deliberately left uninitialized:
// only the dependence structure matters here, not the values.
int main() {
int A[N][N];
#pragma omp parallel for schedule(static)
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
A[i][j] = A[i][j - 1];
}
// CHECK: Region is Data Race Free.
// END
|
erodr.c | #include <time.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "vector.h"
#include "io.h"
#include "image.h"
#include "params.h"
#include "util.h"
/*
* Particle type.
*/
typedef struct particle {
vec2 pos;
vec2 dir;
double vel;
double sediment;
double water;
} particle;
/*
* gradient & height tuple.
*/
typedef struct hg_tuple {
vec2 gradient;
double height;
} hg_tuple;
/*
* Bilinearly interpolate double value at (x, y) in map.
*/
/*
 * Bilinearly interpolate the double value at the (fractional) position
 * `pos` in `map`. The caller must guarantee pos lies strictly inside the
 * map so that the (x+1, y+1) neighbours exist.
 */
double bil_interpolate_map_double(const image *map, vec2 pos) {
    const double *cells = (double *) map->buffer;
    const int stride = map->width;
    const int col = (int)pos.x;
    const int row = (int)pos.y;
    const double u = pos.x - col;   /* horizontal fraction */
    const double v = pos.y - row;   /* vertical fraction */
    double top_l = cells[row*stride + col];
    double top_r = cells[row*stride + col + 1];
    double bot_l = cells[(row + 1)*stride + col];
    double bot_r = cells[(row + 1)*stride + col + 1];
    /* interpolate vertically on each edge, then horizontally between them */
    double left  = (1 - v) * top_l + v * bot_l;
    double right = (1 - v) * top_r + v * bot_r;
    return (1 - u) * left + u * right;
}
/*
 * Deposits sediment at position `pos` in heightmap `hmap`.
 * Deposition only affects the four grid points immediately
 * neighbouring `pos`, weighted bilinearly by proximity.
 *
 * This function is reached from the OpenMP-parallel particle loop in
 * simulate_particles(); two particles may touch the same grid point, so
 * each read-modify-write below is made atomic to avoid lost updates.
 * (The pragmas are no-ops in a non-OpenMP build.)
 */
void deposit(image *hmap, vec2 pos, double amount) {
    double *hmap_buffer = (double *) hmap->buffer;
    int x_i = (int)pos.x;
    int y_i = (int)pos.y;
    double u = pos.x - x_i;
    double v = pos.y - y_i;
    #pragma omp atomic
    hmap_buffer[y_i*hmap->width + x_i] += amount * (1 - u) * (1 - v);
    #pragma omp atomic
    hmap_buffer[y_i*hmap->width + x_i + 1] += amount * u * (1 - v);
    #pragma omp atomic
    hmap_buffer[(y_i + 1)*hmap->width + x_i] += amount * (1 - u) * v;
    #pragma omp atomic
    hmap_buffer[(y_i + 1)*hmap->width + x_i + 1] += amount * u * v;
}
/*
 * Erodes heightmap `hmap` at position `pos` by amount `amount`.
 * Erosion is distributed over a circular area of radius `radius` with a
 * linear falloff kernel; radius < 1 degrades to a plain (negative)
 * bilinear deposit on the four surrounding grid points.
 *
 * Called from the OpenMP-parallel particle loop in simulate_particles();
 * the heightmap update is made atomic because kernels of concurrent
 * particles may overlap. (The pragma is a no-op without OpenMP.)
 */
void erode(image *hmap, vec2 pos, double amount, int radius) {
    double *hmap_buffer = (double *) hmap->buffer;
    if(radius < 1){
        deposit(hmap, pos, -amount);
        return;
    }
    /* kernel bounding box, clipped to the map extents */
    int x0 = (int)pos.x - radius;
    int y0 = (int)pos.y - radius;
    int x_start = max(0, x0);
    int y_start = max(0, y0);
    int x_end = min(hmap->width, x0+2*radius+1);
    int y_end = min(hmap->height, y0+2*radius+1);
    // construct erosion/deposition kernel: weight falls linearly with
    // distance from `pos`, reaching 0 at `radius`.
    double kernel[2*radius + 1][2*radius + 1];
    double kernel_sum = 0;
    for(int y = y_start; y < y_end; y++) {
        for(int x = x_start; x < x_end; x++) {
            double d_x = x - pos.x;
            double d_y = y - pos.y;
            double distance = sqrt(d_x*d_x + d_y*d_y);
            double w = fmax(0, radius - distance);
            kernel_sum += w;
            kernel[y-y0][x-x0] = w;
        }
    }
    // normalize weights and apply changes on heightmap.
    for(int y = y_start; y < y_end; y++) {
        for(int x = x_start; x < x_end; x++) {
            kernel[y-y0][x-x0] /= kernel_sum;
            #pragma omp atomic
            hmap_buffer[y*hmap->width + x] -= amount * kernel[y-y0][x-x0];
        }
    }
}
/*
* Returns gradient at (int x, int y) on heightmap `hmap`.
*/
/*
 * Forward-difference gradient at integer grid point (x, y) of `hmap`.
 * At the right/bottom border (no forward neighbour) the current cell is
 * reused, which yields a 0 component there.
 */
vec2 gradient_at(image *hmap, int x, int y) {
    const double *cells = (double *) hmap->buffer;
    const int w = hmap->width;
    int here = y * w + x;
    int right = here;
    if (x <= w - 2)
        right += 1;
    int below = here;
    if (y <= hmap->height - 2)
        below += w;
    vec2 grad;
    grad.x = cells[right] - cells[here];
    grad.y = cells[below] - cells[here];
    return grad;
}
/*
* Returns interpolated gradient and height at (double x, double y) on
* heightmap `hmap`.
*/
hg_tuple height_gradient_at(image *hmap, vec2 pos) {
hg_tuple ret;
vec2 ul, ur, ll, lr, ipl_l, ipl_r;
// integer cell and fractional offsets within it
int x_i = (int)pos.x;
int y_i = (int)pos.y;
double u = pos.x - x_i;
double v = pos.y - y_i;
// gradients at the four surrounding grid points
// (gradient_at clamps its forward differences at the map border)
ul = gradient_at(hmap, x_i, y_i);
ur = gradient_at(hmap, x_i + 1, y_i);
ll = gradient_at(hmap, x_i, y_i + 1);
lr = gradient_at(hmap, x_i + 1, y_i + 1);
// bilinear blend: vertical on each edge, then horizontal between edges
ipl_l = add(scalar_mul(1 - v, ul), scalar_mul(v, ll));
ipl_r = add(scalar_mul(1 - v, ur), scalar_mul(v, lr));
ret.gradient = add(scalar_mul(1 - u, ipl_l), scalar_mul(u, ipl_r));
ret.height = bil_interpolate_map_double(hmap, pos);
return ret;
}
/*
 * Runs the hydraulic erosion simulation: spawns params->n water
 * particles at random positions and lets each one roll downhill for at
 * most params->ttl steps, eroding or depositing sediment on `hmap`.
 *
 * Particles are simulated in parallel. rand() is not required to be
 * thread-safe (it mutates hidden global state), so each iteration uses
 * its own rand_r() state seeded from the wall clock and the particle
 * index instead of a single shared generator.
 *
 * NOTE(review): deposit()/erode() mutate the shared heightmap and may be
 * called concurrently for overlapping cells -- those updates must be
 * synchronized (e.g. with `omp atomic`) for the parallel loop to be safe.
 */
void simulate_particles(image *hmap, sim_params *params) {
    unsigned int base_seed = (unsigned int)time(NULL);
    // simulate each particle
    #pragma omp parallel for
    for(int i = 0; i < params->n; i++) {
        if(!((i+1) % 10000))
            printf("Particles simulated: %d\n", i+1);
        // per-iteration PRNG state; the large odd multiplier (Knuth's
        // 2654435761) decorrelates neighbouring iterations
        unsigned int seed = base_seed ^ ((unsigned int)i * 2654435761u);
        // spawn particle at a random position on the map.
        particle p;
        double denom = (RAND_MAX / ((double)hmap->width - 1.0));
        p.pos = (vec2){(double)rand_r(&seed) / denom, (double)rand_r(&seed) / denom};
        p.dir = (vec2){0, 0};
        p.vel = 0;
        p.sediment = 0;
        p.water = 1;
        for(int j = 0; j < params->ttl; j++) {
            // interpolate gradient g and height h_old at p's position.
            vec2 pos_old = p.pos;
            hg_tuple hg = height_gradient_at(hmap, pos_old);
            vec2 g = hg.gradient;
            double h_old = hg.height;
            // calculate new dir vector (inertia blended against gradient)
            p.dir = sub(
                scalar_mul(params->p_enertia, p.dir),
                scalar_mul(1 - params->p_enertia, g)
            );
            normalize(&p.dir);
            // calculate new pos
            p.pos = add(p.pos, p.dir);
            // kill the particle once it leaves the map
            vec2 pos_new = p.pos;
            if(pos_new.x > (hmap->width-1) || pos_new.x < 0 ||
               pos_new.y > (hmap->height-1) || pos_new.y < 0)
                break;
            // new height
            double h_new = bil_interpolate_map_double(hmap, pos_new);
            double h_diff = h_new - h_old;
            // sediment capacity
            double c = fmax(-h_diff, params->p_min_slope) * p.vel * p.water * params->p_capacity;
            // decide whether to erode or deposit depending on particle properties
            if(h_diff > 0 || p.sediment > c) {
                double to_deposit = (h_diff > 0) ?
                        fmin(p.sediment, h_diff) :
                        (p.sediment - c) * params->p_deposition;
                p.sediment -= to_deposit;
                deposit(hmap, pos_old, to_deposit);
            } else {
                double to_erode = fmin((c - p.sediment) * params->p_erosion, -h_diff);
                p.sediment += to_erode;
                erode(hmap, pos_old, to_erode, params->p_radius);
            }
            // update `vel` and `water`
            p.vel = sqrt(p.vel*p.vel + h_diff*params->p_gravity);
            p.water *= (1 - params->p_evaporation);
        }
    }
}
/*
* Main.
*/
int main(int argc, char *argv[]) {
sim_params params = DEFAULT_PARAM;
image img;
// parse args (input path, optional output path, simulation parameters,
// ASCII vs binary PGM output). Non-zero return means bad usage.
char filepath[FILEPATH_MAXLEN];
char outputfilepath[FILEPATH_MAXLEN];
strcpy(outputfilepath, OUTPUTFILEPATH_DEFAULT);
bool ascii_out = false;
if(parse_args(argc, argv, filepath, outputfilepath, &params, &ascii_out))
exit_with_info(1);
// load pgm heightmap.
if(load_pgm(filepath, &img))
exit_with_info(1);
// simulate hydraulic erosion
simulate_particles(&img, &params);
// clamp out-of-range heights; warn if anything was clipped
if (maybe_clamp(&img))
print_clipping_warning();
// Save results
save_pgm(outputfilepath, &img, ascii_out);
// free memory
release_image(&img);
}
|
serial_tree_learner2.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER2_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER2_H_
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/random.h>
#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "split_info.hpp"
// #include "serial_tree_learner.h"
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
using namespace json11;
namespace LightGBM {
/*! \brief forward declaration */
class CostEfficientGradientBoosting2;
/*!
* \brief Used for learning a tree by single machine
*/
/*!
 * \brief Single-machine tree learner: grows one tree leaf-by-leaf,
 * maintaining per-leaf histograms and a data partition over the
 * training rows. Variant ("2") of LightGBM's SerialTreeLearner.
 */
class SerialTreeLearner2: public TreeLearner {
public:
friend CostEfficientGradientBoosting2;
explicit SerialTreeLearner2(const Config* config);
~SerialTreeLearner2();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingData(const Dataset* train_data) override;
void ResetConfig(const Config* config) override;
/*! \brief Learn a whole tree from the given gradients/hessians;
 * forced_split_json optionally prescribes the first splits. */
Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian,
const Json& forced_split_json) override;
void Train_serial2(Tree* tree, const score_t* gradients, const score_t* hessians) override;
/*! \brief Refit leaf outputs of an existing tree structure. */
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
const score_t* gradients, const score_t* hessians) override;
void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
data_partition_->SetUsedDataIndices(used_indices, num_data);
}
void SetBaggingData2(const data_size_t* used_indices, data_size_t num_data) override {
data_partition_->SetUsedDataIndices2(used_indices, num_data);
}
/*! \brief Add each leaf's output to the scores of the rows assigned to
 * that leaf; leaves are independent, so the loop parallelizes safely. */
void AddPredictionToScore(const Tree* tree, double* out_score) const override {
if (tree->num_leaves() <= 1) { return; }
CHECK(tree->num_leaves() <= data_partition_->num_leaves());
#pragma omp parallel for schedule(static)
for (int i = 0; i < tree->num_leaves(); ++i) {
double output = static_cast<double>(tree->LeafOutput(i));
data_size_t cnt_leaf_data = 0;
auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
out_score[tmp_idx[j]] += output;
// Log::Info("addscore %d %d %f %d",i,j,output,tmp_idx[j]);
}
}
}
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
protected:
/*! \brief Sample the features used for the current tree/node. */
virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level);
/*!
* \brief Some initial works before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial works before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
virtual void FindBestSplits();
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
/*!
* \brief Partition tree and data according best split.
* \param tree Current tree, will be splitted on this function.
* \param best_leaf The index of leaf that will be splitted.
* \param left_leaf The index of left leaf after splitted.
* \param right_leaf The index of right leaf after splitted.
*/
virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
/* Force splits with forced_split_json dict and then return num splits forced.*/
virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf,
int* right_leaf, int* cur_depth,
bool *aborted_last_force_split);
/*!
* \brief Get the number of data in a leaf
* \param leaf_idx The index of leaf
* \return The number of data in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief used for generate used features */
Random random_;
/*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
std::vector<int8_t> is_feature_used_;
/*! \brief used feature indices in current tree */
std::vector<int> used_feature_indices_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief store best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief store best split per feature for all leaves */
std::vector<SplitInfo> splits_per_leaf_;
/*! \brief stores best thresholds for all feature for smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all feature for larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
/*! \brief gradients of current iteration, ordered for cache optimized */
std::vector<score_t> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized */
std::vector<score_t> ordered_hessians_;
#endif
/*! \brief Store ordered bin */
std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
/*! \brief True if has ordered bin */
bool has_ordered_bin_ = false;
/*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
std::vector<char> is_data_in_leaf_;
/*! \brief used to cache historical histogram to speed up*/
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const Config* config_;
int num_threads_;
std::vector<int> ordered_bin_indices_;
bool is_constant_hessian_;
/*! \brief optional cost-efficient gradient boosting state */
std::unique_ptr<CostEfficientGradientBoosting2> cegb_;
};
// Number of training rows currently assigned to leaf `leaf_idx`.
// Negative indices denote "no leaf" and report an empty count.
inline data_size_t SerialTreeLearner2::GetGlobalDataCountInLeaf(int leaf_idx) const {
  return leaf_idx >= 0 ? data_partition_->leaf_count(leaf_idx) : 0;
}
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER2_H_
|
GB_binop__cmplx_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__cmplx_fp32
// A.*B function (eWiseMult): GB_AemultB__cmplx_fp32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__cmplx_fp32
// C+=b function (dense accum): GB_Cdense_accumb__cmplx_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__cmplx_fp32
// C=scalar+B GB_bind1st__cmplx_fp32
// C=scalar+B' GB_bind1st_tran__cmplx_fp32
// C=A+scalar GB_bind2nd__cmplx_fp32
// C=A'+scalar GB_bind2nd_tran__cmplx_fp32
// C type: GxB_FC32_t
// A type: float
// B,b type: float
// BinaryOp: cij = GxB_CMPLXF (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = GxB_CMPLXF (Ax [pA], 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = GxB_CMPLXF (Bx [pB], 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GxB_CMPLXF (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CMPLX || GxB_NO_FP32 || GxB_NO_CMPLX_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/*
 * C = A+B where C, A and B are all dense; the actual loop lives in the
 * included template, specialized via the GB_* macros above. Returns
 * GrB_NO_VALUE when this operator/type combination is compiled out.
 */
GrB_Info GB_Cdense_ewise3_noaccum__cmplx_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/*
 * C += B (accumulate a sparse matrix into a dense matrix).
 * Note: for this generated type/operator combination the template is
 * compiled out (#if 0), so the function returns GrB_SUCCESS without
 * touching C -- an artifact of the code generator, not a bug to fix here.
 */
GrB_Info GB_Cdense_accumB__cmplx_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/*
 * C += b (accumulate a scalar into a dense matrix).
 * As with GB_Cdense_accumB above, the template is compiled out (#if 0)
 * for this type/operator combination, so the call is a successful no-op.
 */
GrB_Info GB_Cdense_accumb__cmplx_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
/*
 * eWiseAdd: C = A+B or C<M> = A+B, where "+" is GxB_CMPLXF(aij,bij)
 * (the CMPLX operator builds a complex number from two real inputs).
 * The sparse union loop is supplied by the included template.
 */
GrB_Info GB_AaddB__cmplx_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
/*
 * eWiseMult: C = A.*B or C<M> = A.*B on the intersection pattern of A
 * and B, applying GxB_CMPLXF(aij,bij). Loop supplied by the template.
 */
GrB_Info GB_AemultB__cmplx_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/*
 * Cx [p] = CMPLXF (x, Bx [p]) for all p: bind the scalar x as the first
 * (real-part) operand and take each entry of B as the second operand.
 * Safe even when Cx aliases Bx, since each Bx[p] is read before Cx[p]
 * is written.
 */
GrB_Info GB_bind1st__cmplx_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float bij = Bx [p] ;
Cx [p] = GxB_CMPLXF (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/*
 * Cx [p] = CMPLXF (Ax [p], y) for all p: take each entry of A as the
 * first (real-part) operand and bind the scalar y as the second.
 * Safe when Cx aliases Ax (read-before-write per element).
 */
GrB_Info GB_bind2nd__cmplx_fp32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
Cx [p] = GxB_CMPLXF (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = GxB_CMPLXF (x, aij) ; \
}
// C = op (x, A'): transpose A while applying the operator with the scalar x
// bound as the 1st input. The transpose loop itself is generated by the
// included GB_unop_transpose.c template, which invokes the GB_CAST_OP macro
// defined above for every entry.
GrB_Info GB_bind1st_tran__cmplx_fp32
(
GrB_Matrix C,
const GB_void *x_input, // scalar x, bound as the 1st operand (float)
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
// this kernel was disabled at compile time
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
// the included template performs the transpose, applying GB_CAST_OP
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code generated after this function
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = GxB_CMPLXF (aij, y) ; \
}
// C = op (A', y): transpose A while applying the operator with the scalar y
// bound as the 2nd input. The transpose loop itself is generated by the
// included GB_unop_transpose.c template, which invokes the GB_CAST_OP macro
// defined above for every entry.
GrB_Info GB_bind2nd_tran__cmplx_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input, // scalar y, bound as the 2nd operand (float)
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this kernel was disabled at compile time
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
// the included template performs the transpose, applying GB_CAST_OP
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
QuadNode.h | /*
* QuadNode.h
*
* Created on: 21.05.2014
* Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
*/
#ifndef QUADNODE_H_
#define QUADNODE_H_
#include <vector>
#include <algorithm>
#include <functional>
#include <assert.h>
#include "../../auxiliary/Log.h"
#include "../../auxiliary/Parallel.h"
#include "../../geometric/HyperbolicSpace.h"
using std::vector;
using std::min;
using std::max;
using std::cos;
namespace NetworKit {
template <class T, bool poincare = true>
class QuadNode {
friend class QuadTreeGTest;
private:
double leftAngle;
double minR;
double rightAngle;
double maxR;
Point2D<double> a,b,c,d;
unsigned capacity;
static const unsigned coarsenLimit = 4;
count subTreeSize;
std::vector<T> content;
std::vector<Point2D<double> > positions;
std::vector<double> angles;
std::vector<double> radii;
bool isLeaf;
bool splitTheoretical;
double alpha;
double balance;
index ID;
double lowerBoundR;
public:
std::vector<QuadNode> children;
QuadNode() {
// Default constructor: produces an empty, zero-area dummy node so that
// QuadNode can be held in standard containers.
//This should never be called.
leftAngle = 0;
rightAngle = 0;
minR = 0;
maxR = 0;
capacity = 20;
isLeaf = true;
subTreeSize = 0;
balance = 0.5;
splitTheoretical = false;
alpha = 1;
lowerBoundR = maxR; // no point inserted yet, so the bound starts at maxR
ID = 0;
}
/**
* Construct a QuadNode for polar coordinates.
*
*
* @param leftAngle Minimal angular coordinate of region, in radians from 0 to 2\pi
* @param minR Minimal radial coordinate of region, between 0 and 1
* @param rightAngle Maximal angular coordinate of region, in radians from 0 to 2\pi
* @param maxR Maximal radial coordinate of region, between 0 and 1
* @param capacity Number of points a leaf cell can store before splitting
* @param minDiameter Minimal diameter of a quadtree node. If the node is already smaller, don't split even if over capacity. Default is 0
* @param splitTheoretical Whether to split in a theoretically optimal way or in a way to decrease measured running times
* @param alpha dispersion Parameter of the point distribution. Only has an effect if theoretical split is true
* @param diagnostics Count how many necessary and unnecessary comparisons happen in leaf cells? Will cause race condition and false sharing in parallel use
*
*/
QuadNode(double leftAngle, double minR, double rightAngle, double maxR, unsigned capacity = 1000, bool splitTheoretical = false, double alpha = 1, double balance = 0.5) {
// validate parameters before committing any state
if (balance <= 0 || balance >= 1) throw std::runtime_error("Quadtree balance parameter must be between 0 and 1.");
if (poincare && maxR > 1) throw std::runtime_error("The Poincare disk has a radius of 1, cannot create quadtree larger than that!");
this->leftAngle = leftAngle;
this->minR = minR;
this->maxR = maxR;
this->rightAngle = rightAngle;
// cache the Cartesian positions of the region's four corners
this->a = HyperbolicSpace::polarToCartesian(leftAngle, minR);
this->b = HyperbolicSpace::polarToCartesian(rightAngle, minR);
this->c = HyperbolicSpace::polarToCartesian(rightAngle, maxR);
this->d = HyperbolicSpace::polarToCartesian(leftAngle, maxR);
this->capacity = capacity;
this->alpha = alpha;
this->splitTheoretical = splitTheoretical;
this->balance = balance;
this->lowerBoundR = maxR; // no point inserted yet, so the bound starts at maxR
this->ID = 0;
// a fresh node is an empty leaf
isLeaf = true;
subTreeSize = 0;
}
// Split this leaf into four children covering the four quadrants of its
// polar region; the node itself becomes an inner node. The stored points
// are NOT moved here - addContent() pushes them down after calling split().
void split() {
assert(isLeaf);
//heavy lifting: split up!
// the angular midpoint is a plain halving
double middleAngle = (rightAngle - leftAngle) / 2 + leftAngle;
/**
* we want to make sure the space is evenly divided to obtain a balanced tree
* Simply halving the radius will cause a larger space for the outer Quadnode, resulting in an unbalanced tree
*/
double middleR;
if (poincare) {
if (splitTheoretical) {
// theoretically optimal split: balance the expected point mass
// (dispersion alpha) between inner and outer part per `balance`
double hyperbolicOuter = HyperbolicSpace::EuclideanRadiusToHyperbolic(maxR);
double hyperbolicInner = HyperbolicSpace::EuclideanRadiusToHyperbolic(minR);
double hyperbolicMiddle = acosh((1-balance)*cosh(alpha*hyperbolicOuter) + balance*cosh(alpha*hyperbolicInner))/alpha;
middleR = HyperbolicSpace::hyperbolicRadiusToEuclidean(hyperbolicMiddle);
} else {
// heuristic split tuned for measured running times
double nom = maxR - minR;
double denom = pow((1-maxR*maxR)/(1-minR*minR), 0.5)+1;
middleR = nom/denom + minR;
}
} else {
// native hyperbolic coordinates: same balancing formula, no conversion
middleR = acosh((1-balance)*cosh(alpha*maxR) + balance*cosh(alpha*minR))/alpha;
}
//one could also use the median here. Results in worse asymptotical complexity, but maybe better runtime?
assert(middleR < maxR);
assert(middleR > minR);
// create the four quadrants with the same configuration as this node
QuadNode<index,poincare> southwest(leftAngle, minR, middleAngle, middleR, capacity, splitTheoretical, alpha, balance);
QuadNode<index,poincare> southeast(middleAngle, minR, rightAngle, middleR, capacity, splitTheoretical, alpha, balance);
QuadNode<index,poincare> northwest(leftAngle, middleR, middleAngle, maxR, capacity, splitTheoretical, alpha, balance);
QuadNode<index,poincare> northeast(middleAngle, middleR, rightAngle, maxR, capacity, splitTheoretical, alpha, balance);
children = {southwest, southeast, northwest, northeast};
isLeaf = false;
}
/**
* Add a point at polar coordinates (angle, R) with content input. May split node if capacity is full
*
* @param input arbitrary content, in our case an index
* @param angle angular coordinate of point, between 0 and 2 pi.
* @param R radial coordinate of point, between 0 and 1.
*/
void addContent(T input, double angle, double R) {
assert(this->responsible(angle, R));
// maintain the minimal radial coordinate seen in this subtree
if (lowerBoundR > R) lowerBoundR = R;
if (isLeaf) {
// NOTE(review): the strict < means a leaf holds at most capacity-1
// points before splitting - confirm the off-by-one is intended.
if (content.size() + 1 < capacity) {
// room left: store the point in the leaf's parallel arrays
content.push_back(input);
angles.push_back(angle);
radii.push_back(R);
Point2D<double> pos = HyperbolicSpace::polarToCartesian(angle, R);
positions.push_back(pos);
} else {
// leaf is full: split and push the stored points into the children
split();
for (index i = 0; i < content.size(); i++) {
this->addContent(content[i], angles[i], radii[i]);
}
// each recursive call above incremented subTreeSize exactly once
assert(subTreeSize == content.size());//we have added everything twice
subTreeSize = content.size();
// the points now live in the children; clear the local arrays
content.clear();
angles.clear();
radii.clear();
positions.clear();
// finally insert the new point itself (now via the inner-node path)
this->addContent(input, angle, R);
}
}
else {
// inner node: delegate to the (unique) responsible child
assert(children.size() > 0);
for (index i = 0; i < children.size(); i++) {
if (children[i].responsible(angle, R)) {
children[i].addContent(input, angle, R);
break;
}
}
subTreeSize++;
}
}
/**
* Remove content at polar coordinates (angle, R). May cause coarsening of the quadtree
*
* @param input Content to be removed
* @param angle Angular coordinate
* @param R Radial coordinate
*
* @return True if content was found and removed, false otherwise
*/
bool removeContent(T input, double angle, double R) {
// the point cannot be in this subtree if its coordinates are outside
if (!responsible(angle, R)) return false;
if (isLeaf) {
// linear scan over the leaf's elements
index i = 0;
for (; i < content.size(); i++) {
if (content[i] == input) break;
}
if (i < content.size()) {
// the stored coordinates must match the query coordinates exactly
assert(angles[i] == angle);
assert(radii[i] == R);
//remove element
content.erase(content.begin()+i);
positions.erase(positions.begin()+i);
angles.erase(angles.begin()+i);
radii.erase(radii.begin()+i);
return true;
} else {
return false;
}
}
else {
bool removed = false;
bool allLeaves = true;
assert(children.size() > 0);
// ask every child; at most one can succeed (checked by the assert)
for (index i = 0; i < children.size(); i++) {
if (!children[i].isLeaf) allLeaves = false;
if (children[i].removeContent(input, angle, R)) {
assert(!removed);
removed = true;
}
}
if (removed) subTreeSize--;
//coarsen?
// if all children are leaves and the subtree shrank below the limit,
// merge the children back into this node
if (removed && allLeaves && size() < coarsenLimit) {
//coarsen!!
//why not assert empty containers and then insert directly?
vector<T> allContent;
vector<Point2D<double> > allPositions;
vector<double> allAngles;
vector<double> allRadii;
for (index i = 0; i < children.size(); i++) {
allContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end());
allPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end());
allAngles.insert(allAngles.end(), children[i].angles.begin(), children[i].angles.end());
allRadii.insert(allRadii.end(), children[i].radii.begin(), children[i].radii.end());
}
assert(subTreeSize == allContent.size());
assert(subTreeSize == allPositions.size());
assert(subTreeSize == allAngles.size());
assert(subTreeSize == allRadii.size());
// this node becomes a leaf holding the union of its children's points
children.clear();
content.swap(allContent);
positions.swap(allPositions);
angles.swap(allAngles);
radii.swap(allRadii);
isLeaf = true;
}
return removed;
}
}
/**
* Check whether the region managed by this node lies outside of an Euclidean circle.
*
* @param query Center of the Euclidean query circle, given in Cartesian coordinates
* @param radius Radius of the Euclidean query circle
*
* @return True if the region managed by this node lies completely outside of the circle
*/
bool outOfReach(Point2D<double> query, double radius) const {
double phi, r;
HyperbolicSpace::cartesianToPolar(query, phi, r);
// a point inside the region is trivially within reach
if (responsible(phi, r)) return false;
//if using native coordinates, call distance calculation
if (!poincare) return hyperbolicDistances(phi, r).first > radius;
//get four edge points
// check the four borders in turn; if any border is within `radius`
// of the query point, the region is reachable
double topDistance, bottomDistance, leftDistance, rightDistance;
// outer (top) arc: nearest corner if the query angle is outside the
// node's angular range, otherwise the radial difference
if (phi < leftAngle || phi > rightAngle) {
topDistance = min(c.distance(query), d.distance(query));
} else {
topDistance = abs(r - maxR);
}
if (topDistance <= radius) return false;
// inner (bottom) arc, analogously
if (phi < leftAngle || phi > rightAngle) {
bottomDistance = min(a.distance(query), b.distance(query));
} else {
bottomDistance = abs(r - minR);
}
if (bottomDistance <= radius) return false;
// left radial border: use the perpendicular foot if it lies on the
// border segment, otherwise the nearer of the two left corners
// NOTE(review): the foot is evaluated at angle phi rather than
// leftAngle - verify this is the intended approximation.
double minDistanceR = r*cos(abs(phi-leftAngle));
if (minDistanceR > minR && minDistanceR < maxR) {
leftDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR));
} else {
leftDistance = min(a.distance(query), d.distance(query));
}
if (leftDistance <= radius) return false;
// right radial border, analogously
minDistanceR = r*cos(abs(phi-rightAngle));
if (minDistanceR > minR && minDistanceR < maxR) {
rightDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR));
} else {
rightDistance = min(b.distance(query), c.distance(query));
}
if (rightDistance <= radius) return false;
// all four borders are farther away than the query radius
return true;
}
/**
* Check whether the region managed by this node lies outside of an Euclidean circle.
* Functionality is the same as in the method above, but it takes polar coordinates instead of Cartesian ones
*
* @param angle_c Angular coordinate of the Euclidean query circle's center
* @param r_c Radial coordinate of the Euclidean query circle's center
* @param radius Radius of the Euclidean query circle
*
* @return True if the region managed by this node lies completely outside of the circle
*/
bool outOfReach(double angle_c, double r_c, double radius) const {
if (responsible(angle_c, r_c)) return false;
Point2D<double> query = HyperbolicSpace::polarToCartesian(angle_c, r_c);
return outOfReach(query, radius);
}
/**
* @param phi Angular coordinate of query point
* @param r_h radial coordinate of query point in poincare disk
*/
// Returns (minimum, maximum) hyperbolic distance from the query point to
// this node's region, by examining all four borders. Works on cosh values
// throughout and converts back with acosh at the end.
std::pair<double, double> hyperbolicDistances(double phi, double r) const {
double minRHyper, maxRHyper, r_h;
if (poincare) {
// convert the radial bounds and the query radius to native
// hyperbolic radii first
minRHyper=HyperbolicSpace::EuclideanRadiusToHyperbolic(this->minR);
maxRHyper=HyperbolicSpace::EuclideanRadiusToHyperbolic(this->maxR);
r_h = HyperbolicSpace::EuclideanRadiusToHyperbolic(r);
} else {
minRHyper=this->minR;
maxRHyper=this->maxR;
r_h = r;
}
// precompute the terms of the hyperbolic law of cosines:
// cosh d = cosh r1 cosh r2 - sinh r1 sinh r2 cos(phi1 - phi2)
double coshr = cosh(r_h);
double sinhr = sinh(r_h);
double coshMinR = cosh(minRHyper);
double coshMaxR = cosh(maxRHyper);
double sinhMinR = sinh(minRHyper);
double sinhMaxR = sinh(maxRHyper);
double cosDiffLeft = cos(phi - leftAngle);
double cosDiffRight = cos(phi - rightAngle);
/**
* If the query point is not within the quadnode, the distance minimum is on the border.
* Need to check whether extremum is between corners:
*/
double coshMinDistance, coshMaxDistance;
//Left border
double lowerLeftDistance = coshMinR*coshr-sinhMinR*sinhr*cosDiffLeft;
double upperLeftDistance = coshMaxR*coshr-sinhMaxR*sinhr*cosDiffLeft;
if (responsible(phi, r)) coshMinDistance = 1; //strictly speaking, this is wrong
else coshMinDistance = min(lowerLeftDistance, upperLeftDistance);
coshMaxDistance = max(lowerLeftDistance, upperLeftDistance);
//double a = cosh(r_h);
// candidate extremum in the interior of the left border; only valid if
// it falls between the border's two corners
double b = sinhr*cosDiffLeft;
double extremum = log((coshr+b)/(coshr-b))/2;
if (extremum < maxRHyper && extremum >= minRHyper) {
double extremeDistance = cosh(extremum)*coshr-sinh(extremum)*sinhr*cosDiffLeft;
coshMinDistance = min(coshMinDistance, extremeDistance);
coshMaxDistance = max(coshMaxDistance, extremeDistance);
}
/**
* cosh is a function from [0,\infty) to [1, \infty)
* Variables thus need
*/
assert(coshMaxDistance >= 1);
assert(coshMinDistance >= 1);
//Right border
double lowerRightDistance = coshMinR*coshr-sinhMinR*sinhr*cosDiffRight;
double upperRightDistance = coshMaxR*coshr-sinhMaxR*sinhr*cosDiffRight;
coshMinDistance = min(coshMinDistance, lowerRightDistance);
coshMinDistance = min(coshMinDistance, upperRightDistance);
coshMaxDistance = max(coshMaxDistance, lowerRightDistance);
coshMaxDistance = max(coshMaxDistance, upperRightDistance);
// interior extremum of the right border, analogous to the left one
b = sinhr*cosDiffRight;
extremum = log((coshr+b)/(coshr-b))/2;
if (extremum < maxRHyper && extremum >= minRHyper) {
double extremeDistance = cosh(extremum)*coshr-sinh(extremum)*sinhr*cosDiffRight;
coshMinDistance = min(coshMinDistance, extremeDistance);
coshMaxDistance = max(coshMaxDistance, extremeDistance);
}
assert(coshMaxDistance >= 1);
assert(coshMinDistance >= 1);
//upper and lower borders
// if the query angle lies within the node's angular range, the nearest
// point of each arc lies at the query's own angle
if (phi >= leftAngle && phi < rightAngle) {
double lower = cosh(abs(r_h-minRHyper));
double upper = cosh(abs(r_h-maxRHyper));
coshMinDistance = min(coshMinDistance, lower);
coshMinDistance = min(coshMinDistance, upper);
coshMaxDistance = max(coshMaxDistance, upper);
coshMaxDistance = max(coshMaxDistance, lower);
}
assert(coshMaxDistance >= 1);
assert(coshMinDistance >= 1);
//again with mirrored phi
double mirrorphi;
if (phi >= M_PI) mirrorphi = phi - M_PI;
else mirrorphi = phi + M_PI;
if (mirrorphi >= leftAngle && mirrorphi < rightAngle) {
// cos of the angular difference is -1 here, hence the + signs
double lower = coshMinR*coshr+sinhMinR*sinhr;
double upper = coshMaxR*coshr+sinhMaxR*sinhr;
coshMinDistance = min(coshMinDistance, lower);
coshMinDistance = min(coshMinDistance, upper);
coshMaxDistance = max(coshMaxDistance, upper);
coshMaxDistance = max(coshMaxDistance, lower);
}
assert(coshMaxDistance >= 1);
assert(coshMinDistance >= 1);
// convert the cosh values back to actual distances
double minDistance, maxDistance;
minDistance = acosh(coshMinDistance);
maxDistance = acosh(coshMaxDistance);
assert(maxDistance >= 0);
assert(minDistance >= 0);
return std::pair<double, double>(minDistance, maxDistance);
}
/**
* Does the point at (angle, r) fall inside the region managed by this QuadNode?
*
* @param angle Angular coordinate of input point
* @param r Radial coordinate of input points
*
* @return True if input point lies within the region of this QuadNode
*/
bool responsible(double angle, double r) const {
    // Half-open membership test: [leftAngle, rightAngle) x [minR, maxR).
    const bool angleInRange = (angle >= leftAngle) && (angle < rightAngle);
    const bool radiusInRange = (r >= minR) && (r < maxR);
    return angleInRange && radiusInRange;
}
/**
* Get all Elements in this QuadNode or a descendant of it
*
* @return vector of content type T
*/
std::vector<T> getElements() const {
if (isLeaf) {
return content;
} else {
assert(content.size() == 0);
assert(angles.size() == 0);
assert(radii.size() == 0);
vector<T> result;
for (index i = 0; i < children.size(); i++) {
std::vector<T> subresult = children[i].getElements();
result.insert(result.end(), subresult.begin(), subresult.end());
}
return result;
}
}
void getCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const {
    // The two coordinate arrays are always kept in lock step.
    assert(angles.size() == radii.size());
    if (!isLeaf) {
        // Inner nodes hold no coordinates of their own; recurse instead.
        assert(content.size() == 0);
        assert(angles.size() == 0);
        assert(radii.size() == 0);
        for (const auto &child : children) {
            child.getCoordinates(anglesContainer, radiiContainer);
        }
        return;
    }
    // Leaf: append this node's coordinate arrays to the output containers.
    anglesContainer.insert(anglesContainer.end(), angles.begin(), angles.end());
    radiiContainer.insert(radiiContainer.end(), radii.begin(), radii.end());
}
/**
* Don't use this!
* Code is still in here for a unit test.
*
* Get copy of the leaf cell responsible for a point at (angle, r).
* Expensive because it copies the whole subtree, causes assertion failure if called with the wrong arguments
*
* @param angle Angular coordinate of point
* @param r Radial coordinate of point
*
* @return Copy of leaf cell containing point, or dummy cell not responsible for point
*
*/
QuadNode<T>& getAppropriateLeaf(double angle, double r) {
    // Caller contract: the point must lie inside this node's region.
    // NOTE(review): the declared return type QuadNode<T>& relies on the
    // default poincare=true; verify for poincare=false instantiations.
    assert(this->responsible(angle, r));
    if (isLeaf) return *this;//will this return the reference to the subtree itself or to a copy?
    // The children's regions partition this node's region, so at most one
    // child can be responsible; descend into the first (and only) match.
    // (The previous code kept a `foundResponsibleChild` flag that was
    // re-declared every iteration and checked before the immediate return,
    // so it could never fire - removed as dead code.)
    for (index i = 0; i < children.size(); i++) {
        if (children[i].responsible(angle, r)) {
            return children[i].getAppropriateLeaf(angle, r);
        }
    }
    // Unreachable if the child regions truly cover this node's region.
    throw std::runtime_error("No responsible child found.");
}
/**
* Main query method, get points lying in a Euclidean circle around the center point.
* Optional limits can be given to get a different result or to reduce unnecessary comparisons
*
* Elements are pushed onto a vector which is a required argument. This is done to reduce copying
*
* Safe to call in parallel if diagnostics are disabled
*
* @param center Center of the query circle
* @param radius Radius of the query circle
* @param result Reference to the vector where the results will be stored
* @param minAngle Optional value for the minimum angular coordinate of the query region
* @param maxAngle Optional value for the maximum angular coordinate of the query region
* @param lowR Optional value for the minimum radial coordinate of the query region
* @param highR Optional value for the maximum radial coordinate of the query region
*/
void getElementsInEuclideanCircle(Point2D<double> center, double radius, vector<T> &result, double minAngle=0, double maxAngle=2*M_PI, double lowR=0, double highR = 1) const {
if (!poincare) throw std::runtime_error("Euclidean query circles not yet implemented for native hyperbolic coordinates.");
// prune: node region entirely outside the optional polar query window?
if (minAngle >= rightAngle || maxAngle <= leftAngle || lowR >= maxR || highR < lowerBoundR) return;
// prune: node region entirely outside the query circle?
if (outOfReach(center, radius)) {
return;
}
if (isLeaf) {
// compare squared Euclidean distances to avoid a sqrt per point
const double rsq = radius*radius;
const double queryX = center[0];
const double queryY = center[1];
const count cSize = content.size();
for (index i = 0; i < cSize; i++) {
const double deltaX = positions[i].getX() - queryX;
const double deltaY = positions[i].getY() - queryY;
if (deltaX*deltaX + deltaY*deltaY < rsq) {
result.push_back(content[i]);
}
}
} else {
// inner node: recurse into all children with the same bounds
for (index i = 0; i < children.size(); i++) {
children[i].getElementsInEuclideanCircle(center, radius, result, minAngle, maxAngle, lowR, highR);
}
}
}
// Sample elements: each point is included in `result` with probability
// prob(distance to euQuery). Uses geometric jumps plus rejection sampling
// so that not every point has to be tested. Returns the number of
// candidates actually tested.
count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, bool suppressLeft, vector<T> &result) const {
double phi_q, r_q;
HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q);
// optional pruning of subtrees right of the query angle
// NOTE(review): semantics inferred from the flag name - confirm with caller
if (suppressLeft && phi_q > rightAngle) return 0;
TRACE("Getting hyperbolic distances");
// bound the acceptance probability using the nearest and farthest
// possible distances from the query to this node's region
auto distancePair = hyperbolicDistances(phi_q, r_q);
double probUB = prob(distancePair.first);
double probLB = prob(distancePair.second);
#ifndef NDEBUG
assert(probLB <= probUB);
#else
((void)(probLB));
#endif
if (probUB > 0.5) probUB = 1;//if we are going to take every second element anyway, no use in calculating expensive jumps
if (probUB == 0) return 0;
//TODO: return whole if probLB == 1
// denominator of the geometric jump-length distribution
double probdenom = std::log(1-probUB);
if (probdenom == 0) {
DEBUG(probUB, " not zero, but too small too process. Ignoring.");
return 0;
}
TRACE("probUB: ", probUB, ", probdenom: ", probdenom);
count expectedNeighbours = probUB*size();
count candidatesTested = 0;
if (isLeaf) {
const count lsize = content.size();
TRACE("Leaf of size ", lsize);
for (index i = 0; i < lsize; i++) {
//jump!
// skip a geometrically distributed number of elements
if (probUB < 1) {
double random = Aux::Random::real();
double delta = std::log(random) / probdenom;
assert(delta == delta);
assert(delta >= 0);
i += delta;
if (i >= lsize) break;
TRACE("Jumped with delta ", delta, " arrived at ", i);
}
//see where we've arrived
candidatesTested++;
// exact distance of the candidate we landed on
double distance;
if (poincare) {
distance = HyperbolicSpace::poincareMetric(positions[i], euQuery);
} else {
distance = HyperbolicSpace::nativeDistance(angles[i], radii[i], phi_q, r_q);
}
assert(distance >= distancePair.first);
double q = prob(distance);
q = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities
assert(q <= 1);
assert(q >= 0);
//accept?
double acc = Aux::Random::real();
if (acc < q) {
TRACE("Accepted node ", i, " with probability ", q, ".");
result.push_back(content[i]);
}
}
} else {
if (expectedNeighbours < 1) {//select candidates directly instead of calling recursively
TRACE("probUB = ", probUB, ", switching to direct candidate selection.");
assert(probUB < 1);
const count stsize = size();
for (index i = 0; i < stsize; i++) {
double delta = std::log(Aux::Random::real()) / probdenom;
assert(delta >= 0);
i += delta;
TRACE("Jumped with delta ", delta, " arrived at ", i, ". Calling maybeGetKthElement.");
if (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. As of now, the offset is subtracted separately for each point
else break;
candidatesTested++;
}
} else {//carry on as normal
for (index i = 0; i < children.size(); i++) {
TRACE("Recursively calling child ", i);
candidatesTested += children[i].getElementsProbabilistically(euQuery, prob, suppressLeft, result);
}
}
}
//DEBUG("Expected at most ", expectedNeighbours, " neighbours, got ", result.size() - offset);
return candidatesTested;
}
// Locate the k-th element of this subtree and accept it into
// `circleDenizens` with probability prob(distance)/upperBound
// (rejection-sampling step for getElementsProbabilistically).
void maybeGetKthElement(double upperBound, Point2D<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const {
TRACE("Maybe get element ", k, " with upper Bound ", upperBound);
assert(k < size());
if (isLeaf) {
// compute the true distance of element k to the query point
double distance;
if (poincare) {
distance = HyperbolicSpace::poincareMetric(positions[k], euQuery);
} else {
double phi_q, r_q;
HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q);
distance = HyperbolicSpace::nativeDistance(angles[k], radii[k], phi_q, r_q);
}
double acceptance = prob(distance)/upperBound;
TRACE("Is leaf, accept with ", acceptance);
if (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]);
} else {
TRACE("Call recursively.");
// descend into the child whose index range contains k
index offset = 0;
for (index i = 0; i < children.size(); i++) {
count childsize = children[i].size();
if (k - offset < childsize) {
children[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens);
break;
}
offset += childsize;
}
}
}
/**
* Shrink all vectors in this subtree to fit the content.
* Call after quadtree construction is complete, causes better memory usage and cache efficiency
*/
void trim() {
content.shrink_to_fit();
positions.shrink_to_fit();
angles.shrink_to_fit();
radii.shrink_to_fit();
if (!isLeaf) {
for (index i = 0; i < children.size(); i++) {
children[i].trim();
}
}
}
/**
* Number of points lying in the region managed by this QuadNode
*/
count size() const {
    // Leaves count their stored points; inner nodes use the cached total.
    if (isLeaf) {
        return content.size();
    }
    return subTreeSize;
}
void recount() {
subTreeSize = 0;
for (index i = 0; i < children.size(); i++) {
children[i].recount();
subTreeSize += children[i].size();
}
}
/**
* Height of subtree hanging from this QuadNode
*/
count height() const {
    count result = 1;//if leaf node, the children loop will not execute
    // Iterate by const reference: the previous `for (auto child : children)`
    // copied each child BY VALUE, deep-copying its entire subtree (all
    // vectors, recursively) just to compute a number.
    for (const auto &child : children) result = std::max(result, child.height()+1);
    return result;
}
/**
* Leaf cells in the subtree hanging from this QuadNode
*/
count countLeaves() const {
    // A leaf contributes exactly one cell.
    if (isLeaf) {
        return 1;
    }
    // Otherwise sum the leaf counts of all child subtrees.
    count total = 0;
    for (const auto &child : children) {
        total += child.countLeaves();
    }
    return total;
}
// Minimal angular coordinate of this node's region.
double getLeftAngle() const {
return leftAngle;
}
// Maximal angular coordinate of this node's region.
double getRightAngle() const {
return rightAngle;
}
// Minimal radial coordinate of this node's region.
double getMinR() const {
return minR;
}
// Maximal radial coordinate of this node's region.
double getMaxR() const {
return maxR;
}
// Identifier assigned by indexSubtree(); 0 until the tree is indexed.
index getID() const {
return ID;
}
index indexSubtree(index nextID) {
    // Post-order numbering: all children receive their IDs first, then
    // this node takes the next free one. Returns the first unused ID.
    assert(children.size() == 4 || children.size() == 0);
    index currentID = nextID;
    for (auto &child : children) {
        currentID = child.indexSubtree(currentID);
    }
    this->ID = currentID;
    return currentID + 1;
}
index getCellID(double phi, double r) const {
    // Points outside this node's region are signalled with `none`.
    if (!responsible(phi, r)) {
        return NetworKit::none;
    }
    // A leaf answers with its own ID.
    if (isLeaf) {
        return getID();
    }
    // Otherwise exactly one child must claim the point.
    for (const auto &child : children) {
        const index childResult = child.getCellID(phi, r);
        if (childResult != NetworKit::none) {
            return childResult;
        }
    }
    throw std::runtime_error("No responsible child node found even though this node is responsible.");
}
index getMaxIDInSubtree() const {
    if (isLeaf) return getID();
    // Seed the maximum with this node's own ID. The previous code seeded
    // with -1; NetworKit's `index` is an unsigned type, so -1 wrapped to
    // the maximum representable value and every std::max call then
    // returned that bogus sentinel instead of a real ID.
    index result = getID();
    // Iterate over the actual children rather than a hard-coded 4.
    for (const auto &child : children) {
        result = std::max(child.getMaxIDInSubtree(), result);
    }
    return result;
}
// Relabel the content of all leaves with consecutive indices starting at
// `offset`; returns the first index after this subtree.
// NOTE(review): the OpenMP task presumably expects an enclosing parallel
// region (with a single producer thread) - confirm with the caller.
count reindex(count offset) {
if (isLeaf)
{
#pragma omp task
{
// fill this leaf's content with offset, offset+1, ...
index p = offset;
std::generate(content.begin(), content.end(), [&p](){return p++;});
}
offset += size();
} else {
// inner nodes always have exactly four children (see split())
for (int i = 0; i < 4; i++) {
offset = children[i].reindex(offset);
}
}
return offset;
}
};
}
#endif /* QUADNODE_H_ */
|
pmtv-OpenMP.c | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>
/*
 * Benchmark: triangular matrix-vector product v2 = M * v1 with a
 * runtime-selectable OpenMP schedule.
 *
 * Usage: program tamaño_matriz sched_var chunk
 *   sched_var: 1 = static, 2 = dynamic, 3 = guided, 4 = auto
 */
int main(int argc, char** argv){
    int i, f, c, intkind, chunk; /* removed unused `j` */
    double t1, t2, total;
    omp_sched_t kind;
    srand(time(NULL));

    //Leer argumento de entrada (no de componentes del vector)
    if (argc<4){
        printf("Formato: programa tamaño_matriz sched_var chunk\n");
        exit(-1);
    }
    unsigned int N = atoi(argv[1]); intkind=atoi(argv[2]); chunk=atoi(argv[3]);
    // Máximo N =2^32-1=4294967295 (sizeof(unsigned int) = 4 B)

    double *v1, *v2, **M;
    v1 = (double*) malloc(N*sizeof(double));// malloc necesita el tamaño en bytes
    /* BUG FIX: the product loop accumulates with +=, so v2 must start at
     * zero. The original malloc left it uninitialized; calloc zero-fills. */
    v2 = (double*) calloc(N, sizeof(double));
    M = (double**) malloc(N*sizeof(double *));
    if ( (v1==NULL) || (v2==NULL) || (M==NULL) ){
        printf("Error en la reserva de espacio para los vectores\n");
        exit(-2);
    }

    /* Map the numeric argument onto an OpenMP schedule kind. */
    switch(intkind){
        case 1: kind = omp_sched_static;  break;
        case 2: kind = omp_sched_dynamic; break;
        case 3: kind = omp_sched_guided;  break;
        case 4: kind = omp_sched_auto;    break;
        default:
            /* BUG FIX: `kind` was left uninitialized for out-of-range input */
            printf("Formato: programa tamaño_matriz sched_var chunk\n");
            exit(-1);
    }
    omp_set_schedule(kind,chunk);

    for (i=0; i<N; i++){
        M[i] = (double*) malloc(N*sizeof(double));
        if ( M[i]==NULL ){
            printf("Error en la reserva de espacio para los vectores\n");
            exit(-2);
        }
    }
    //A partir de aqui se pueden acceder las componentes de la matriz como M[i][j]

    /* Initialize v1 (v2 was already zeroed by calloc above). */
    #pragma omp parallel for schedule(runtime)
    for (i=0; i<N; i++)
    {
        v1[i]=i;
    }

    /* Initialize the upper-triangular matrix.
     * BUG FIX: `c` is declared at function scope and only the associated
     * loop variable `f` is privatized automatically, so without private(c)
     * all threads raced on the shared `c`.
     * NOTE(review): rand() is not thread-safe; concurrent calls may be
     * serialized or correlated - kept for comparability with the original. */
    #pragma omp parallel for schedule(runtime) private(c)
    for (f=0; f<N; f++)
    {
        for (c=0; c<f; c++){
            M[f][c] = 0;
        }
        for (c=f; c<N; c++)
        {
            /* Random value in [1,10]. BUG FIX: the original wrote
             * rand()%(1-10 + 1) + 1 == rand()%(-8) + 1, i.e. only [1,8]. */
            M[f][c] = rand()%(10-1 + 1) + 1;
        }
    }

    //Medida de tiempo
    t1 = omp_get_wtime();
    //Calcular producto de matriz triangular por vector v2 = M · v1
    /* BUG FIX: private(c) for the same reason as above. */
    #pragma omp parallel for schedule(runtime) private(c)
    for (f=0; f<N; f++)
        for (c=f; c<N; c++)
            v2[f] += M[f][c] * v1[c];
    //Medida de tiempo
    t2 = omp_get_wtime();
    total = t2 - t1;

    //Imprimir el resultado y el tiempo de ejecución
    printf("Tiempo(seg.):%11.9f\t / Tamaño:%u\t/ V2[0]=%8.6f V2[%d]=%8.6f\n", total,N,v2[0],N-1,v2[N-1]);
    if (N<15)
    {
        printf("\nv2=[");
        for (i=0; i<N; i++)
            printf("%.0lf ",v2[i]);
        printf("]\n");
    }

    free(v1); // libera el espacio reservado para v1
    free(v2); // libera el espacio reservado para v2
    for (i=0; i<N; i++)
        free(M[i]);
    free(M);
    return 0;
}
|
anis.c | #include "micro_clib.h"
/* Uniaxial anisotropy field and energy density.
 *
 * For each cell i with easy axis u = axis[3i..3i+2] and unit magnetization m:
 *   field  = 2 Ku (m.u) Ms_inv MU0_INV * u
 *   energy = Ku (1 - (m.u)^2)
 * Cells with Ms_inv == 0 (empty cells) get zero field and energy. */
void compute_uniaxial_anis(double *restrict m, double *restrict field, double *restrict energy, double *restrict Ms_inv,
                           double *restrict Ku, double *restrict axis, int nx, int ny, int nz) {
    int n = nx * ny * nz;
    #pragma omp parallel for
    for (int i = 0; i < n; i++) {
        int j = 3 * i;
        if (Ms_inv[i] == 0.0){
            field[j] = 0;
            field[j + 1] = 0;
            field[j + 2] = 0;
            energy[i] = 0;
            continue;
        }
        /* m.u: projection of the magnetization onto the easy axis */
        double m_u = m[j] * axis[j] + m[j + 1] * axis[j + 1] + m[j + 2] * axis[j + 2];
        /* Hoist the scalar prefactor: it is identical for the three field
         * components and was previously recomputed per component. The
         * left-to-right multiplication order matches the original
         * expression, so the floating-point result is unchanged. */
        double coeff = 2 * Ku[i] * m_u * Ms_inv[i] * MU0_INV;
        field[j] = coeff * axis[j];
        field[j + 1] = coeff * axis[j + 1];
        field[j + 2] = coeff * axis[j + 2];
        energy[i] = Ku[i] * (1 - m_u * m_u);
    }
}
/* Fourth-order uniaxial anisotropy (constants K1 and K2): field and energy
 * density per cell. Cells with Ms_inv == 0 (empty) get zero field/energy.
 * Follows the calculation of the OOMMF extension by Hans Fangohr and
 * Richard Boardman:
 * http://www.soton.ac.uk/~fangohr/software/oxs_uniaxial4/download/uniaxialanisotropy4.cc */
void compute_uniaxial4_anis(double *restrict m, double *restrict field, double *restrict energy, double *restrict Ms_inv,
                            double *restrict K1, double *restrict K2, double *restrict axis, int nx, int ny, int nz) {
    int n = nx * ny * nz;
    #pragma omp parallel for
    for (int i = 0; i < n; i++) {
        int j = 3 * i;
        if (Ms_inv[i] == 0.0) {
            field[j] = 0;
            field[j + 1] = 0;
            field[j + 2] = 0;
            energy[i] = 0;
            continue;
        }
        double k1 = K1[i];
        double k2 = K2[i];
        /* scalar prefactors of the two field terms:
         * H = c1 (m.u) u + c2 (m.u)^3 u */
        double field_mult1 = MU0_INV * 2.0 * k1 * Ms_inv[i];
        double field_mult2 = MU0_INV * 4.0 * k2 * Ms_inv[i];
        double m_dot_u = m[j] * axis[j] + m[j + 1] * axis[j + 1] + m[j + 2] * axis[j + 2];
        if (k1 <= 0) {
            /* k1 <= 0 branch: energy expressed directly through m.u */
            field[j + 0] = (field_mult1*m_dot_u) * axis[j + 0] + (field_mult2 * m_dot_u*m_dot_u*m_dot_u) * axis[j + 0];
            field[j + 1] = (field_mult1*m_dot_u) * axis[j + 1] + (field_mult2 * m_dot_u*m_dot_u*m_dot_u) * axis[j + 1];
            field[j + 2] = (field_mult1*m_dot_u) * axis[j + 2] + (field_mult2 * m_dot_u*m_dot_u*m_dot_u) * axis[j + 2];
            energy[i] = -k1*m_dot_u*m_dot_u - k2*m_dot_u*m_dot_u*m_dot_u*m_dot_u;
        }
        else {
            /* k1 > 0 branch: energy expressed through |u x m|^2, matching
             * the OOMMF extension (presumably for numerical accuracy near
             * the easy axis - see reference above). */
            double u_x_m[3];
            u_x_m[0] = cross_x(axis[j], axis[j+1], axis[j+2], m[j], m[j+1], m[j+2]);
            u_x_m[1] = cross_y(axis[j], axis[j+1], axis[j+2], m[j], m[j+1], m[j+2]);
            u_x_m[2] = cross_z(axis[j], axis[j+1], axis[j+2], m[j], m[j+1], m[j+2]);
            /* BUG FIX: the original summed u_x_m[1]*u_x_m[1] twice and
             * dropped the x component, underestimating |u x m|^2 and
             * therefore the anisotropy energy. */
            double u_x_m_mag2 = u_x_m[0]*u_x_m[0] + u_x_m[1]*u_x_m[1] + u_x_m[2]*u_x_m[2];
            field[j + 0] = (field_mult1*m_dot_u) * axis[j + 0] + (field_mult2*m_dot_u*m_dot_u*m_dot_u) * axis[j + 0];
            field[j + 1] = (field_mult1*m_dot_u) * axis[j + 1] + (field_mult2*m_dot_u*m_dot_u*m_dot_u) * axis[j + 1];
            field[j + 2] = (field_mult1*m_dot_u) * axis[j + 2] + (field_mult2*m_dot_u*m_dot_u*m_dot_u) * axis[j + 2];
            energy[i] = (k1 + 2*k2)*u_x_m_mag2 - k2*u_x_m_mag2*u_x_m_mag2;
        }
    }
} |
tim.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_TIM_H
#define RIPPLES_TIM_H
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <deque>
#include <iterator>
#include <queue>
#include <random>
#include <stdexcept>
#include <type_traits>
#include <unordered_set>
#include <utility>

#include <omp.h>
#include "ripples/diffusion_simulation.h"
#include "ripples/find_most_influential.h"
#include "ripples/generate_rrr_sets.h"
#include "ripples/utility.h"
#include "CLI/CLI.hpp"
#include "trng/lcg64.hpp"
#include "trng/uniform01_dist.hpp"
#include "trng/uniform_int_dist.hpp"
namespace ripples {
//! \brief The configuration data structure for the TIM+ algorithm.
struct TIMConfiguration : public AlgorithmConfiguration {
  double epsilon{0.50};  //!< The epsilon (approximation factor) of the IM algorithm.

  //! \brief Add command line options to configure TIM+.
  //!
  //! \param app The command-line parser object.
  void addCmdOptions(CLI::App &app) {
    AlgorithmConfiguration::addCmdOptions(app);
    // Bug fix: the help text used to read "The size of the seed set.",
    // which describes -k, not the approximation parameter epsilon.
    app.add_option("-e,--epsilon", epsilon,
                   "The approximation factor (epsilon) of the TIM+ algorithm.")
        ->required()
        ->group("Algorithm Options");
  }
};
//! TIM+ execution record.
struct TIMExecutionRecord {
  //! Number of threads used during the execution.
  //! NOTE(review): TIM() in this header does not assign this field --
  //! confirm the driver populates it before reporting.
  size_t NumThreads;
  //! Number of RRR sets generated (the theta of the TIM+ paper).
  size_t Theta;
  //! Execution time of the Kpt Estimation phase (Algorithm 2).
  std::chrono::duration<double, std::milli> KptEstimation;
  //! Execution time of the Kpt Refinement phase (Algorithm 3).
  std::chrono::duration<double, std::milli> KptRefinement;
  //! Execution time of the RRR sets generation phase.
  std::chrono::duration<double, std::milli> GenerateRRRSets;
  //! Execution time of the maximum coverage phase.
  std::chrono::duration<double, std::milli> FindMostInfluentialSet;
  //! Total execution time.
  //! NOTE(review): TIM() in this header does not assign Total -- confirm the
  //! caller computes it.
  std::chrono::duration<double, std::milli> Total;
};
//! \brief Compute the number of elements in the RRR set starting at r.
//!
//! \tparam GraphTy The type of the Graph.
//! \tparam PNRG The type of the random number generator.
//! \tparam diff_model_tag The Type-Tag selecting the diffusion model.
//!
//! \param G The original graph.
//! \param r The start vertex.
//! \param generator The random number generator used to sample G.
//!
//! \return The number of elements in the RRR set computed starting from r.
template <typename GraphTy, typename PRNG, typename diff_model_tag>
size_t WR(GraphTy &G, typename GraphTy::vertex_type r, PRNG &generator,
          diff_model_tag &&) {
  using vertex_type = typename GraphTy::vertex_type;
  // Bug fix: diff_model_tag is a forwarding reference, so it may deduce to an
  // lvalue-reference type.  Strip the reference before comparing against the
  // tag types; otherwise both comparisons are false for lvalue tags and the
  // function always hits the error branch below.
  using model_t = typename std::remove_reference<diff_model_tag>::type;

  trng::uniform01_dist<float> value;

  // BFS from r over sampled edges; wr accumulates the degrees of the
  // visited vertices.
  std::queue<vertex_type> queue;
  std::vector<bool> visited(G.num_nodes(), false);

  queue.push(r);
  visited[r] = true;
  size_t wr = 0;

  while (!queue.empty()) {
    vertex_type v = queue.front();
    queue.pop();

    wr += G.degree(v);

    if (std::is_same<model_t, ripples::independent_cascade_tag>::value) {
      // Independent Cascade: each neighbor is activated independently with
      // probability equal to the edge weight.
      for (auto u : G.neighbors(v)) {
        if (!visited[u.vertex] && value(generator) <= u.weight) {
          queue.push(u.vertex);
          visited[u.vertex] = true;
        }
      }
    } else if (std::is_same<model_t, ripples::linear_threshold_tag>::value) {
      // Linear Threshold: select at most one neighbor, chosen with
      // probability proportional to the edge weights.
      float threshold = value(generator);
      for (auto u : G.neighbors(v)) {
        threshold -= u.weight;
        if (threshold > 0) continue;

        if (!visited[u.vertex]) {
          queue.push(u.vertex);
          visited[u.vertex] = true;
          break;
        }
      }
    } else {
      // Bug fix: a bare `throw;` with no exception in flight calls
      // std::terminate(); raise a catchable, descriptive error instead.
      throw std::logic_error("WR: unsupported diffusion model tag");
    }
  }
  return wr;
}
//! \brief Estimate KPT. Algorithm 2 in the original paper.
//!
//! \tparam GraphTy The type of the graph.
//! \tparam PRNGeneratorty The type of the random number generator.
//! \tparam diff_model_tag The Type-Tag selecting the diffusion model.
//!
//! \param G The original graph.
//! \param k The size of the desired seed set.
//! \param generator The random number generator.
//! \param model_tag The diffusion model to be used.
//!
//! \return A lower bound on OPT computed with Algorithm 2 of the original paper.
template <typename GraphTy, typename PRNGeneratorTy, typename diff_model_tag>
double KptEstimation(GraphTy &G, size_t k, PRNGeneratorTy &generator,
                     diff_model_tag &&model_tag, sequential_tag &&) {
  // Compute KPT* following Algorithm 2 of the TIM+ paper: sample random
  // RRR-set widths in exponentially growing batches until the average
  // crosses the 2^-i threshold.
  double kptStar = 1;
  trng::uniform_int_dist pickRoot(0, G.num_nodes());

  for (size_t i = 1; i < log2(G.num_nodes()); ++i) {
    // Batch size for round i.
    size_t c_i =
        (6 * log(G.num_nodes()) + 6 * log(log2(G.num_nodes()))) * (1ul << i);

    double accum = 0;
    for (size_t j = 0; j < c_i; ++j) {
      // Sample one RRR set rooted at a uniformly chosen vertex and fold its
      // normalized width into the estimate (Equation (8) of the paper).
      typename GraphTy::vertex_type root = pickRoot(generator[0]);
      double width =
          WR(G, root, generator[0], std::forward<diff_model_tag>(model_tag));
      width /= G.num_edges();
      accum += 1 - pow(1.0 - width, k);
    }
    accum /= c_i;

    // First round whose average exceeds 2^-i yields the estimate.
    if (accum > (1.0 / (1ul << i))) {
      kptStar = G.num_nodes() * accum / 2;
      break;
    }
  }
  return kptStar;
}
//! \brief Estimate KPT. Parallelization of Algorithm 2 in the original paper.
//!
//! \tparam GraphTy The type of the graph.
//! \tparam PRNGeneratorty The type of the random number generator.
//! \tparam diff_model_tag The Type-Tag selecting the diffusion model.
//!
//! \param G The original graph.
//! \param k The size of the desired seed set.
//! \param generator The random number generator.
//! \param model_tag The diffusion model to use.
//!
//! \return A lower bound on OPT computed with Algorithm 2 of the original paper.
template <typename GraphTy, typename PRNGeneratorTy, typename diff_model_tag>
double KptEstimation(GraphTy &G, size_t k, PRNGeneratorTy &generator,
                     diff_model_tag &&model_tag, omp_parallel_tag &&) {
  double KPTStar = 1.0;
  // Here i plays the role of 2^i in Algorithm 2, so the loop doubles i
  // instead of incrementing an exponent.
  for (size_t i = 2; i < G.num_nodes(); i <<= 1) {
    // Number of random RRR-set samples drawn in this round.
    size_t c_i = (6 * log(G.num_nodes()) + 6 * log(log2(G.num_nodes()))) * i;
    double sum = 0;
// The per-sample contributions are combined with an OpenMP reduction.
#pragma omp parallel reduction(+ : sum)
    {
      size_t rank = omp_get_thread_num();
      // Each thread uses its own distribution instance and its own generator
      // (generator[rank]) so no RNG state is shared across threads.
      trng::uniform_int_dist root(0, G.num_nodes());
#pragma omp for schedule(guided)
      for (size_t j = 0; j < c_i; ++j) {
        // Pick a random vertex
        typename GraphTy::vertex_type v = root(generator[rank]);
        double wr =
            WR(G, v, generator[rank], std::forward<diff_model_tag>(model_tag));
        wr /= G.num_edges();
        // Equation (8) of the paper.
        sum += 1 - pow(1.0 - wr, k);
      }
    }
    sum /= c_i;
    // Stop at the first round whose average crosses the 1/i threshold.
    if (sum > (1.0 / i)) {
      KPTStar = G.num_nodes() * sum / 2;
      break;
    }
  }
  return KPTStar;
}
//! \brief Estimate the number of Random Reverse Reachability Sets to be
//! computed.
//!
//! \tparam GraphTy The graph type.
//! \tparam PRNGeneratorty The type of the Random Number Generator.
//! \tparam diff_model_tag The Type-Tag selecting the diffusion model.
//! \tparam execution_tag The Type-Tag to select the execution policy.
//!
//! \param G The original graph.
//! \param k The size of the seed set to be selected.
//! \param epsilon The approximation factor.
//! \param generator The random number generator.
//! \param R The execution record.
//! \param model_tag The diffusion model to use.
//! \param ex_tag The execution policy to use.
//!
//! \return The number of Random Reverse Reachability sets to be computed.
template <typename GraphTy, typename PRNGeneratorTy, typename diff_model_tag,
          typename execution_tag>
size_t ThetaEstimation(GraphTy &G, size_t k, double epsilon,
                       PRNGeneratorTy &generator, TIMExecutionRecord &R,
                       diff_model_tag &&model_tag, execution_tag &&ex_tag) {
  // Phase 1: estimate KPT (Algorithm 2) and record the elapsed time.
  // (The unused `vertex_type` alias of the original was removed.)
  auto start = std::chrono::high_resolution_clock::now();
  double kpt =
      KptEstimation(G, k, generator, std::forward<diff_model_tag>(model_tag),
                    std::forward<execution_tag>(ex_tag));
  auto end = std::chrono::high_resolution_clock::now();
  R.KptEstimation = end - start;

  // Phase 2: refine the KPT lower bound (Algorithm 3).
  start = std::chrono::high_resolution_clock::now();
  // epsPrime is set according to the equation at the bottom of Section 4.1.
  double epsPrime = 5 * cbrt((epsilon * epsilon) / (k + 1));

  size_t thetaPrime = (2 + epsPrime) * G.num_nodes() * log(G.num_nodes()) /
                      (epsPrime * epsPrime * kpt);

  std::vector<RRRset<GraphTy>> RR(thetaPrime);
  GenerateRRRSets(G, generator, RR.begin(), RR.end(),
                  std::forward<diff_model_tag>(model_tag),
                  std::forward<execution_tag>(ex_tag));

  auto seeds =
      FindMostInfluentialSet(G, k, RR, std::forward<execution_tag>(ex_tag));
  // NOTE(review): seeds.first is assumed to be the fraction of RRR sets
  // covered by the selected seeds -- confirm against FindMostInfluentialSet.
  double f = double(seeds.first);
  double kptPrime = (f * G.num_nodes()) / (1 + epsPrime);

  // kpt now contains the best bound we were able to find after refinement.
  kpt = std::max(kpt, kptPrime);

  end = std::chrono::high_resolution_clock::now();
  R.KptRefinement = end - start;

  // log(n choose k) via a Stirling-style approximation; assumes 0 < k < n
  // (k == 0 or k == n would evaluate log(0)).
  auto logBinomial = [](size_t n, size_t k) -> double {
    return n * log(n) - k * log(k) - (n - k) * log(n - k);
  };

  // Compute lambda from equation (4).
  double lambda = ((8 + 2 * epsilon) * G.num_nodes() *
                       (log(G.num_nodes()) + logBinomial(G.num_nodes(), k)) +
                   log(2.0)) /
                  (epsilon * epsilon);

  // Return theta according to equation (5).
  return ceil(lambda / kpt);
}
//! \brief The TIM+ algorithm for Influence Maximization.
//!
//! \tparam GraphTy The type of the graph.
//! \tparam diff_model_tag The Type-Tag selecting the diffusion model.
//! \tparam execution_tag The execution policy tag.
//!
//! \param G The original graph.
//! \param k The size of the seed set.
//! \param epsilon The approximation factor.
//! \param gen A parallel random number generator.
//! \param model_tag The diffusion model to use.
//! \param ex_tag The execution policy to use.
//!
//! \return A set of vertices in the graph.
template <typename GraphTy, typename PRNG, typename diff_model_tag,
          typename execution_tag>
auto TIM(const GraphTy &G, size_t k, double epsilon, PRNG &gen,
         diff_model_tag &&model_tag, execution_tag &&ex_tag) {
  // Bug fix: execution_tag is a forwarding reference, so it may deduce to an
  // lvalue-reference type.  Strip the reference before the tag comparisons
  // below; otherwise the parallel set-up is silently skipped when the caller
  // passes an lvalue tag.  (The unused `vertex_type` alias was removed.)
  using ex_tag_t = typename std::remove_reference<execution_tag>::type;

  TIMExecutionRecord Record;

  size_t max_num_threads(1);

  if (std::is_same<ex_tag_t, omp_parallel_tag>::value) {
#pragma omp single
    max_num_threads = omp_get_max_threads();
  }
  // Bug fix: NumThreads was previously left indeterminate.
  Record.NumThreads = max_num_threads;

  // One generator per thread; split() makes the per-thread streams disjoint.
  std::vector<trng::lcg64> generator(max_num_threads, gen);

  if (std::is_same<ex_tag_t, omp_parallel_tag>::value) {
#pragma omp parallel
    {
      generator[omp_get_thread_num()].split(omp_get_num_threads(),
                                            omp_get_thread_num());
    }
  }

  // Estimate how many RRR sets are required for the (epsilon, k) guarantee.
  auto theta = ThetaEstimation(G, k, epsilon, generator, Record,
                               std::forward<diff_model_tag>(model_tag),
                               std::forward<execution_tag>(ex_tag));
  Record.Theta = theta;

  // Generate the theta RRR sets ...
  auto start = std::chrono::high_resolution_clock::now();
  std::vector<RRRset<GraphTy>> RR(theta);
  GenerateRRRSets(G, generator, RR.begin(), RR.end(),
                  std::forward<diff_model_tag>(model_tag),
                  std::forward<execution_tag>(ex_tag));
  auto end = std::chrono::high_resolution_clock::now();
  Record.GenerateRRRSets = end - start;

  // ... then greedily extract the k most influential vertices.
  // NOTE(review): Record.Total is never populated here -- confirm the caller
  // computes it.
  start = std::chrono::high_resolution_clock::now();
  auto seeds =
      FindMostInfluentialSet(G, k, RR, std::forward<execution_tag>(ex_tag));
  end = std::chrono::high_resolution_clock::now();
  Record.FindMostInfluentialSet = end - start;

  return std::make_pair(seeds.second, Record);
}
} // namespace ripples
#endif /* RIPPLES_TIM_H */
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/gem.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
  /* One (x,y,z) weight triple; RGBTransformImage allocates per-quantum
     lookup maps (x_map, y_map, z_map) of these. */
  MagickRealType
    x,
    y,
    z;
} TransformPacket;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalized the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the RGBTransformImage method is:
%
% MagickBooleanType RGBTransformImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  /*
    Map a CIE XYZ triple into cone-response (LMS) space and scale the
    result to the quantum range.
  */
  assert(L != (double *) NULL);
  assert(M != (double *) NULL);
  assert(S != (double *) NULL);
  *L=QuantumRange*(0.7328f*x+0.4296f*y-0.1624f*z);
  *M=QuantumRange*((-0.7036f)*x+1.6975f*y+0.0415f*z);
  *S=QuantumRange*(0.0030f*x+0.0136f*y+0.9834f*z);
}
static inline void ConvertRGBToXYZ(const Quantum red,const Quantum green,
  const Quantum blue,double *X,double *Y,double *Z)
{
  double
    blue_scaled,
    green_scaled,
    red_scaled;

  /*
    Convert an RGB triple to CIE XYZ: normalize each channel to [0,1] with
    QuantumScale, then apply the RGB -> XYZ primaries matrix.
  */
  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  red_scaled=QuantumScale*red;
  green_scaled=QuantumScale*green;
  blue_scaled=QuantumScale*blue;
  *X=0.41239558896741421610*red_scaled+0.35758343076371481710*green_scaled+
    0.18049264738170157350*blue_scaled;
  *Y=0.21258623078559555160*red_scaled+0.71517030370341084990*green_scaled+
    0.07220049864333622685*blue_scaled;
  *Z=0.01929721549174694484*red_scaled+0.11918386458084853180*green_scaled+
    0.95049712513157976600*blue_scaled;
}
static inline void ConvertXYZToLab(const double X,const double Y,const double Z,
  double *L,double *a,double *b)
{
#define D65X (0.950456f)
#define D65Y (1.0f)
#define D65Z (1.08874f)
#define CIEEpsilon (216.0f/24389.0f)
#define CIEK (24389.0f/27.0f)

  double
    x,
    y,
    z;

  /*
    Convert CIE XYZ to CIE L*a*b*, with each channel scaled into [0,1]
    (a* and b* are offset by 0.5).  Each channel uses the cube root above
    the CIE epsilon and the linear CIE-K form below it.
  */
  assert(L != (double *) NULL);
  assert(a != (double *) NULL);
  assert(b != (double *) NULL);
  x=((X/D65X) > CIEEpsilon) ? pow(X/D65X,1.0/3.0) :
    (CIEK*X/D65X+16.0f)/116.0f;
  y=((Y/D65Y) > CIEEpsilon) ? pow(Y/D65Y,1.0/3.0) :
    (CIEK*Y/D65Y+16.0f)/116.0f;
  z=((Z/D65Z) > CIEEpsilon) ? pow(Z/D65Z,1.0/3.0) :
    (CIEK*Z/D65Z+16.0f)/116.0f;
  *L=((116.0f*y)-16.0f)/100.0f;
  *a=(500.0f*(x-y))/255.0f+0.5f;
  *b=(200.0f*(y-z))/255.0f+0.5f;
}
static inline void ConvertXYZToLuv(const double X,const double Y,const double Z,
  double *L,double *u,double *v)
{
  double
    alpha,
    white_point;

  /*
    Convert CIE XYZ to CIE L*u*v*, with each channel scaled into [0,1].
  */
  assert(L != (double *) NULL);
  assert(u != (double *) NULL);
  assert(v != (double *) NULL);
  *L=((Y/D65Y) > CIEEpsilon) ?
    (double) (116.0f*pow(Y/D65Y,1.0/3.0)-16.0f) : CIEK*(Y/D65Y);
  alpha=PerceptibleReciprocal(X+15.0f*Y+3.0f*Z);
  white_point=D65X+15.0f*D65Y+3.0f*D65Z;
  *u=13.0f*(*L)*((4.0f*alpha*X)-(4.0f*D65X/white_point));
  *v=13.0f*(*L)*((9.0f*alpha*Y)-(9.0f*D65Y/white_point));
  *L/=100.0f;
  *u=(*u+134.0f)/354.0f;
  *v=(*v+140.0f)/262.0f;
}
MagickExport MagickBooleanType RGBTransformImage(Image *image,
const ColorspaceType colorspace)
{
#define RGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
register ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYColorspace:
{
/*
Convert RGB to CMY colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
cyan,
magenta,
yellow;
cyan=DecodePixelGamma((MagickRealType) GetPixelCyan(q));
magenta=DecodePixelGamma((MagickRealType) GetPixelMagenta(q));
yellow=DecodePixelGamma((MagickRealType) GetPixelYellow(q));
SetPixelCyan(q,ClampToQuantum((MagickRealType) (QuantumRange-
cyan)));
SetPixelMagenta(q,ClampToQuantum((MagickRealType) (QuantumRange-
magenta)));
SetPixelYellow(q,ClampToQuantum((MagickRealType) (QuantumRange-
yellow)));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
pixel.red=DecodePixelGamma((MagickRealType) pixel.red);
pixel.green=DecodePixelGamma((MagickRealType) pixel.green);
pixel.blue=DecodePixelGamma((MagickRealType) pixel.blue);
ConvertRGBToCMYK(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
case Rec601LumaColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
gray,
green,
red;
red=DecodePixelGamma((MagickRealType) GetPixelRed(q));
green=DecodePixelGamma((MagickRealType) GetPixelGreen(q));
blue=DecodePixelGamma((MagickRealType) GetPixelBlue(q));
gray=0.298839f*red+0.586811f*green+0.114350f*blue;
SetPixelGray(q,ClampToQuantum(gray));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case HCLColorspace:
{
/*
Transform image from sRGB to HCL.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
chroma,
hue,
luma;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToHCL(red,green,blue,&hue,&chroma,&luma);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*hue));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*chroma));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*luma));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HSBColorspace:
{
/*
Transform image from sRGB to HSB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
brightness,
hue,
saturation;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToHSB(red,green,blue,&hue,&saturation,&brightness);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*
hue));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*
saturation));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*
brightness));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HSLColorspace:
{
/*
Transform image from sRGB to HSL.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
hue,
lightness,
saturation;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToHSL(red,green,blue,&hue,&saturation,&lightness);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*
hue));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*
saturation));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*
lightness));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HWBColorspace:
{
/*
Transform image from sRGB to HWB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blackness,
hue,
whiteness;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToHWB(red,green,blue,&hue,&whiteness,&blackness);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*
hue));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*
whiteness));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*
blackness));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LabColorspace:
{
/*
Transform image from sRGB to Lab.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
a,
b,
L,
X,
Y,
Z;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLab(X,Y,Z,&L,&a,&b);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*L));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*a));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*b));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LCHColorspace:
{
/*
Transform image from sRGB to LCH.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
a,
b,
C,
H,
L,
X,
Y,
Z;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLab(X,Y,Z,&L,&a,&b);
C=sqrt(a*a+b*b);
H=atan2(b,a)*180.0/MagickPI;
if (H < 0.0)
H+=1.0;
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*L));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*C));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*H));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LMSColorspace:
{
/*
Transform image from sRGB to LMS.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
L,
M,
S,
X,
Y,
Z;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLMS(X,Y,Z,&L,&M,&S);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*L));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*M));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*S));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0f/1.7f)
#define FilmGamma 0.6f
#define ReferenceBlack 95.0f
#define ReferenceWhite 685.0f
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002f/
film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002f/
film_gamma))/1024.0f));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,logmap[ScaleQuantumToMap(red)]);
SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]);
SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LuvColorspace:
{
/*
Transform image from sRGB to Luv.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
L,
u,
v,
X,
Y,
Z;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLuv(X,Y,Z,&L,&u,&v);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*L));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*u));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*v));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case Rec709LumaColorspace:
{
/*
Transform image from sRGB to Rec709Luma.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gray;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
gray=0.212600f*red+0.715200f*green+0.072200f*blue;
SetPixelGray(q,ClampToQuantum(gray));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case RGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case XYZColorspace:
{
/*
Transform image from sRGB to XYZ.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
X,
Y,
Z;
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) ResetMagickMemory(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0f)/2.0f;
primary_info.z=(double) (MaxMap+1.0f)/2.0f;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333f*(float) i);
y_map[i].x=(MagickRealType) (0.33334f*(float) i);
z_map[i].x=(MagickRealType) (0.33333f*(float) i);
x_map[i].y=(MagickRealType) (0.50000f*(float) i);
y_map[i].y=(MagickRealType) (0.00000f*(float) i);
z_map[i].y=(MagickRealType) (-0.50000f*(float) i);
x_map[i].z=(MagickRealType) (-0.25000f*(float) i);
y_map[i].z=(MagickRealType) (0.50000f*(float) i);
z_map[i].z=(MagickRealType) (-0.25000f*(float) i);
}
break;
}
case Rec601YCbCrColorspace:
case YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
      Y = 0.298839*R+0.586811*G+0.114350*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0f)/2.0f;
primary_info.z=(double) (MaxMap+1.0f)/2.0f;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839f*(float) i);
y_map[i].x=(MagickRealType) (0.586811f*(float) i);
z_map[i].x=(MagickRealType) (0.114350f*(float) i);
x_map[i].y=(MagickRealType) (-0.1687367f*(float) i);
y_map[i].y=(MagickRealType) (-0.331264f*(float) i);
z_map[i].y=(MagickRealType) (0.500000f*(float) i);
x_map[i].z=(MagickRealType) (0.500000f*(float) i);
y_map[i].z=(MagickRealType) (-0.418688f*(float) i);
z_map[i].z=(MagickRealType) (-0.081312f*(float) i);
}
break;
}
case Rec709LumaColorspace:
{
/*
Initialize Rec709 luma tables:
G = 0.21260*R+0.71520*G+0.07220*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.21260f*(float) i);
y_map[i].x=(MagickRealType) (0.71520f*(float) i);
z_map[i].x=(MagickRealType) (0.07220f*(float) i);
x_map[i].y=(MagickRealType) (0.21260f*(float) i);
y_map[i].y=(MagickRealType) (0.71520f*(float) i);
z_map[i].y=(MagickRealType) (0.07220f*(float) i);
x_map[i].z=(MagickRealType) (0.21260f*(float) i);
y_map[i].z=(MagickRealType) (0.71520f*(float) i);
z_map[i].z=(MagickRealType) (0.07220f*(float) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212600*R+0.715200*G+0.072200*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0f)/2.0f;
primary_info.z=(double) (MaxMap+1.0f)/2.0f;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212600f*(float) i);
y_map[i].x=(MagickRealType) (0.715200f*(float) i);
z_map[i].x=(MagickRealType) (0.072200f*(float) i);
x_map[i].y=(MagickRealType) (-0.114572f*(float) i);
y_map[i].y=(MagickRealType) (-0.385428f*(float) i);
z_map[i].y=(MagickRealType) (0.500000f*(float) i);
x_map[i].z=(MagickRealType) (0.500000f*(float) i);
y_map[i].z=(MagickRealType) (-0.454153f*(float) i);
z_map[i].z=(MagickRealType) (-0.045847f*(float) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839f*R+0.586811f*G+0.114350f*B
C1= -0.298839f*R-0.586811f*G+0.88600*B
C2= 0.70100*R-0.586811f*G-0.114350f*B
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.003962014134275617*i;
y_map[i].x=0.007778268551236748*i;
z_map[i].x=0.001510600706713781*i;
x_map[i].y=(-0.002426619775463276)*i;
y_map[i].y=(-0.004763965913702149)*i;
z_map[i].y=0.007190585689165425*i;
x_map[i].z=0.006927257754597858*i;
y_map[i].z=(-0.005800713697502058)*i;
z_map[i].z=(-0.0011265440570958)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.2201118963486454*(1.099f*i-0.099f);
y_map[i].x=0.4321260306242638*(1.099f*i-0.099f);
z_map[i].x=0.08392226148409894*(1.099f*i-0.099f);
x_map[i].y=(-0.1348122097479598)*(1.099f*i-0.099f);
y_map[i].y=(-0.2646647729834528)*(1.099f*i-0.099f);
z_map[i].y=0.3994769827314126*(1.099f*i-0.099f);
x_map[i].z=0.3848476530332144*(1.099f*i-0.099f);
y_map[i].z=(-0.3222618720834477)*(1.099f*i-0.099f);
z_map[i].z=(-0.06258578094976668)*(1.099f*i-0.099f);
}
break;
}
case YIQColorspace:
{
/*
Initialize YIQ tables:
Y = 0.298839f*R+0.586811f*G+0.114350f*B
I = 0.595716*R-0.274453*G-0.321263*B
Q = 0.211456*R-0.522591*G+0.311135*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0f)/2.0f;
primary_info.z=(double) (MaxMap+1.0f)/2.0f;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839f*(float) i);
y_map[i].x=(MagickRealType) (0.586811f*(float) i);
z_map[i].x=(MagickRealType) (0.114350f*(float) i);
x_map[i].y=(MagickRealType) (0.595716f*(float) i);
y_map[i].y=(MagickRealType) (-0.274453f*(float) i);
z_map[i].y=(MagickRealType) (-0.321263f*(float) i);
x_map[i].z=(MagickRealType) (0.211456f*(float) i);
y_map[i].z=(MagickRealType) (-0.522591f*(float) i);
z_map[i].z=(MagickRealType) (0.311135f*(float) i);
}
break;
}
case YPbPrColorspace:
{
/*
Initialize YPbPr tables (ITU-R BT.601):
      Y = 0.298839*R+0.586811*G+0.114350*B
Pb= -0.1687367*R-0.3312640*G+0.5000000*B
Pr= 0.5000000*R-0.4186880*G-0.0813120*B
Pb and Pr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0f)/2.0f;
primary_info.z=(double) (MaxMap+1.0f)/2.0f;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839f*(float) i);
y_map[i].x=(MagickRealType) (0.586811f*(float) i);
z_map[i].x=(MagickRealType) (0.114350f*(float) i);
x_map[i].y=(MagickRealType) (-0.1687367f*(float) i);
y_map[i].y=(MagickRealType) (-0.331264f*(float) i);
z_map[i].y=(MagickRealType) (0.500000f*(float) i);
x_map[i].z=(MagickRealType) (0.500000f*(float) i);
y_map[i].z=(MagickRealType) (-0.418688f*(float) i);
z_map[i].z=(MagickRealType) (-0.081312f*(float) i);
}
break;
}
case YUVColorspace:
{
/*
Initialize YUV tables:
Y = 0.298839f*R+0.586811f*G+0.114350f*B
U = -0.147130*R-0.288860*G+0.436000*B
V = 0.615000*R-0.514990*G-0.100010*B
U and V, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange. Note that U = 0.493*(B-Y), V = 0.877*(R-Y).
*/
primary_info.y=(double) (MaxMap+1.0f)/2.0f;
primary_info.z=(double) (MaxMap+1.0f)/2.0f;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839f*(float) i);
y_map[i].x=(MagickRealType) (0.586811f*(float) i);
z_map[i].x=(MagickRealType) (0.114350f*(float) i);
x_map[i].y=(MagickRealType) (-0.147130f*(float) i);
y_map[i].y=(MagickRealType) (-0.288860f*(float) i);
z_map[i].y=(MagickRealType) (0.436000f*(float) i);
x_map[i].z=(MagickRealType) (0.615000f*(float) i);
y_map[i].z=(MagickRealType) (-0.514990f*(float) i);
z_map[i].z=(MagickRealType) (-0.100001f*(float) i);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(float) i);
y_map[i].x=(MagickRealType) 0.0f;
z_map[i].x=(MagickRealType) 0.0f;
x_map[i].y=(MagickRealType) 0.0f;
y_map[i].y=(MagickRealType) (1.0*(float) i);
z_map[i].y=(MagickRealType) 0.0f;
x_map[i].z=(MagickRealType) 0.0f;
y_map[i].z=(MagickRealType) 0.0f;
z_map[i].z=(MagickRealType) (1.0*(float) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*restrict q;
register size_t
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma(
(MagickRealType) GetPixelRed(q))));
green=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma(
(MagickRealType) GetPixelGreen(q))));
blue=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma(
(MagickRealType) GetPixelBlue(q))));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
(MagickRealType) primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
(MagickRealType) primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
(MagickRealType) primary_info.z;
SetPixelRed(q,ScaleMapToQuantum(pixel.red));
SetPixelGreen(q,ScaleMapToQuantum(pixel.green));
SetPixelBlue(q,ScaleMapToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RGBTransformImage)
#endif
proceed=SetImageProgress(image,RGBTransformImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
register size_t
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
red=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma((MagickRealType)
image->colormap[i].red)));
green=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma((MagickRealType)
image->colormap[i].green)));
blue=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma((MagickRealType)
image->colormap[i].blue)));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=ScaleMapToQuantum(pixel.red);
image->colormap[i].green=ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /*
    Record the new colorspace member and reset the colorspace-derived image
    members (gamma, rendering intent, chromaticity) to match.  Returns the
    result of synchronizing the pixel cache with the updated image.
  */
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  image->gamma=1.000f;
  image->rendering_intent=UndefinedIntent;
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  if (IsGrayColorspace(colorspace) != MagickFalse)
    image->type=GrayscaleType;
  if (IssRGBColorspace(colorspace) != MagickFalse)
    {
      /*
        sRGB: perceptual intent, ~2.2 decoding gamma, Rec.709 primaries and
        the D65 white point.
      */
      image->rendering_intent=PerceptualIntent;
      image->gamma=1.000f/2.200f;
      image->chromaticity.white_point.x=0.3127f;
      image->chromaticity.white_point.y=0.3290f;
      image->chromaticity.white_point.z=0.3583f;
      image->chromaticity.red_primary.x=0.6400f;
      image->chromaticity.red_primary.y=0.3300f;
      image->chromaticity.red_primary.z=0.0300f;
      image->chromaticity.green_primary.x=0.3000f;
      image->chromaticity.green_primary.y=0.6000f;
      image->chromaticity.green_primary.z=0.1000f;
      image->chromaticity.blue_primary.x=0.1500f;
      image->chromaticity.blue_primary.y=0.0600f;
      image->chromaticity.blue_primary.z=0.7900f;
    }
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /*
    Transform the image pixels into the requested colorspace, routing through
    sRGB as the pivot representation.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already in the target colorspace: nothing to do */
  /*
    Embedded color profiles no longer describe the pixels once they are
    transformed, so discard them.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,colorspace));
  /*
    First decode the source colorspace back to sRGB (when it is not already
    sRGB), then encode from sRGB into the requested colorspace.
  */
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    if (TransformRGBImage(image,image->colorspace) == MagickFalse)
      return(MagickFalse);
  return(RGBTransformImage(image,colorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the TransformRGBImage method is:
%
% MagickBooleanType TransformRGBImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  double
    lms_l,
    lms_m,
    lms_s;
  /*
    Apply the inverse LMS-to-XYZ matrix; the quantum-range L/M/S inputs are
    scaled to [0,1] before the matrix multiply.
  */
  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  lms_l=QuantumScale*L;
  lms_m=QuantumScale*M;
  lms_s=QuantumScale*S;
  *X=1.096123820835514*lms_l-0.278869000218287*lms_m+0.182745179382773*lms_s;
  *Y=0.454369041975359*lms_l+0.473533154307412*lms_m+0.072097803717229*lms_s;
  *Z=(-0.009627608738429)*lms_l-0.005698031216113*lms_m+1.015325639954543*lms_s;
}
static inline void ConvertLabToXYZ(const double L,const double a,const double b,
  double *X,double *Y,double *Z)
{
  double
    fx,
    fy,
    fz;
  /*
    CIE L*a*b* to XYZ.  The inputs appear to arrive normalized to [0,1]
    (a and b offset by 0.5 — see the 255*(a-0.5) remapping); the result is
    scaled by the D65 reference white.
  */
  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  fy=(100.0f*L+16.0f)/116.0f;
  fx=fy+255.0f*(a-0.5f)/500.0f;
  fz=fy-255.0f*(b-0.5f)/200.0f;
  /*
    Undo the cube-root companding: cube above the CIE break point, linear
    ramp below it.
  */
  if ((fx*fx*fx) > CIEEpsilon)
    fx=(fx*fx*fx);
  else
    fx=(116.0f*fx-16.0f)/CIEK;
  if ((fy*fy*fy) > CIEEpsilon)
    fy=(fy*fy*fy);
  else
    fy=(100.0f*L)/CIEK;
  if ((fz*fz*fz) > CIEEpsilon)
    fz=(fz*fz*fz);
  else
    fz=(116.0f*fz-16.0f)/CIEK;
  *X=D65X*fx;
  *Y=D65Y*fy;
  *Z=D65Z*fz;
}
static inline void ConvertLuvToXYZ(const double L,const double u,const double v,
  double *X,double *Y,double *Z)
{
  /*
    Convert CIE L*u*v* to CIE XYZ against the D65 white point.  The inputs
    appear to arrive normalized to [0,1]: L is repeatedly rescaled to the
    usual [0,100] lightness range (the 100.0f*L terms), and u/v are mapped
    back via 354.0f*u-134.0f and 262.0f*v-140.0f — confirm against the
    matching ConvertXYZToLuv encoder.
  */
  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  /* Y from L*: cubic above the CIE break point (CIEK*CIEEpsilon), linear
     ramp below it. */
  if ((100.0f*L) > (CIEK*CIEEpsilon))
    *Y=(double) pow(((100.0*L)+16.0)/116.0,3.0);
  else
    *Y=(100.0f*L)/CIEK;
  /* Recover X, then Z, from the u'/v' chromaticity relations; the
     D65X/D65Y/D65Z terms supply the reference-white u'/v' denominators. */
  *X=((*Y*((39.0f*(100.0f*L)/((262.0f*v-140.0f)+13.0f*(100.0f*L)*(9.0f*D65Y/
    (D65X+15.0f*D65Y+3.0f*D65Z))))-5.0f))+5.0f*(*Y))/((((52.0f*(100.0f*L)/
    ((354.0f*u-134.0f)+13.0f*(100.0f*L)*(4.0f*D65X/(D65X+15.0f*D65Y+3.0f*
    D65Z))))-1.0f)/3.0f)-(-1.0f/3.0f));
  *Z=(*X*(((52.0f*(100.0f*L)/((354.0f*u-134.0f)+13.0f*(100.0f*L)*(4.0f*D65X/
    (D65X+15.0f*D65Y+3.0f*D65Z))))-1.0f)/3.0f))-5.0f*(*Y);
}
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  /*
    Round half-up and clamp to [0,1388], the index domain of the YCCMap
    lookup table.
  */
  if (value >= 1388.0f)
    return(1388);
  return(value <= 0.0f ? 0 : (ssize_t) (value+0.5f));
}
static inline void ConvertXYZToRGB(const double x,const double y,const double z,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    linear_b,
    linear_g,
    linear_r;
  /*
    Map CIE XYZ onto the sRGB primaries and scale each channel to quantum
    range (clamped; no gamma encoding is applied here).
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  linear_r=3.2406f*x-1.5372f*y-0.4986f*z;
  linear_g=(-0.9689f*x+1.8758f*y+0.0415f*z);
  linear_b=0.0557f*x-0.2040f*y+1.0570f*z;
  *red=ClampToQuantum((MagickRealType) QuantumRange*linear_r);
  *green=ClampToQuantum((MagickRealType) QuantumRange*linear_g);
  *blue=ClampToQuantum((MagickRealType) QuantumRange*linear_b);
}
static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel)
{
  /*
    Composite each CMY channel against the black (K) channel, which travels
    in the pixel's index member, and invert the result into RGB in place.
  */
  pixel->red=QuantumRange-(QuantumScale*pixel->red*(QuantumRange-
    pixel->index)+pixel->index);
  pixel->green=QuantumRange-(QuantumScale*pixel->green*(QuantumRange-
    pixel->index)+pixel->index);
  pixel->blue=QuantumRange-(QuantumScale*pixel->blue*(QuantumRange-
    pixel->index)+pixel->index);
}
MagickExport MagickBooleanType TransformRGBImage(Image *image,
const ColorspaceType colorspace)
{
#define TransformRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000
};
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (image->colorspace)
{
case CMYColorspace:
{
/*
Transform image from CMY to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
cyan,
magenta,
yellow;
cyan=ClampToQuantum(EncodePixelGamma((MagickRealType)
(QuantumRange-GetPixelCyan(q))));
magenta=ClampToQuantum(EncodePixelGamma((MagickRealType)
(QuantumRange-GetPixelMagenta(q))));
yellow=ClampToQuantum(EncodePixelGamma((MagickRealType)
(QuantumRange-GetPixelYellow(q))));
SetPixelCyan(q,cyan);
SetPixelMagenta(q,magenta);
SetPixelYellow(q,yellow);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
ConvertCMYKToRGB(&pixel);
pixel.red=EncodePixelGamma(pixel.red);
pixel.green=EncodePixelGamma(pixel.green);
pixel.blue=EncodePixelGamma(pixel.blue);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
case Rec601LumaColorspace:
case Rec709LumaColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
gray;
gray=EncodePixelGamma((MagickRealType) GetPixelGray(q));
SetPixelRed(q,ClampToQuantum(gray));
SetPixelGreen(q,ClampToQuantum(gray));
SetPixelBlue(q,ClampToQuantum(gray));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HCLColorspace:
{
/*
Transform image from HCL to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
chroma,
hue,
luma;
Quantum
blue,
green,
red;
hue=(double) (QuantumScale*GetPixelRed(q));
chroma=(double) (QuantumScale*GetPixelGreen(q));
luma=(double) (QuantumScale*GetPixelBlue(q));
ConvertHCLToRGB(hue,chroma,luma,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HSBColorspace:
{
/*
Transform image from HSB to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
brightness,
hue,
saturation;
Quantum
blue,
green,
red;
hue=(double) (QuantumScale*GetPixelRed(q));
saturation=(double) (QuantumScale*GetPixelGreen(q));
brightness=(double) (QuantumScale*GetPixelBlue(q));
ConvertHSBToRGB(hue,saturation,brightness,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HSLColorspace:
{
/*
Transform image from HSL to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
hue,
lightness,
saturation;
Quantum
blue,
green,
red;
hue=(double) (QuantumScale*GetPixelRed(q));
saturation=(double) (QuantumScale*GetPixelGreen(q));
lightness=(double) (QuantumScale*GetPixelBlue(q));
ConvertHSLToRGB(hue,saturation,lightness,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HWBColorspace:
{
/*
Transform image from HWB to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blackness,
hue,
whiteness;
Quantum
blue,
green,
red;
hue=(double) (QuantumScale*GetPixelRed(q));
whiteness=(double) (QuantumScale*GetPixelGreen(q));
blackness=(double) (QuantumScale*GetPixelBlue(q));
ConvertHWBToRGB(hue,whiteness,blackness,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LabColorspace:
{
/*
Transform image from Lab to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
a,
b,
L,
X,
Y,
Z;
Quantum
blue,
green,
red;
L=QuantumScale*GetPixelRed(q);
a=QuantumScale*GetPixelGreen(q);
b=QuantumScale*GetPixelBlue(q);
ConvertLabToXYZ(L,a,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LCHColorspace:
{
/*
Transform image from LCH to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
a,
b,
C,
H,
L,
X,
Y,
Z;
Quantum
blue,
green,
red;
L=QuantumScale*GetPixelRed(q);
C=QuantumScale*GetPixelGreen(q);
H=QuantumScale*GetPixelBlue(q);
a=C*cos(H*(MagickPI/180.0f));
b=C*sin(H*(MagickPI/180.0f));
ConvertLabToXYZ(L,a,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LMSColorspace:
{
/*
Transform image from LMS to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
L,
M,
S,
X,
Y,
Z;
Quantum
blue,
green,
red;
L=QuantumScale*GetPixelRed(q);
M=QuantumScale*GetPixelGreen(q);
S=QuantumScale*GetPixelBlue(q);
ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002f/
film_gamma);
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0f); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0f); i++)
logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0f-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002f/
film_gamma)-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelRed(q))]));
green=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelGreen(q))]));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelBlue(q))]));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LuvColorspace:
{
/*
Transform image from Luv to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
L,
u,
v,
X,
Y,
Z;
Quantum
blue,
green,
red;
L=QuantumScale*GetPixelRed(q);
u=QuantumScale*GetPixelGreen(q);
v=QuantumScale*GetPixelBlue(q);
ConvertLuvToXYZ(L,u,v,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case XYZColorspace:
{
/*
Transform image from XYZ to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
X,
Y,
Z;
Quantum
blue,
green,
red;
X=QuantumScale*GetPixelRed(q);
Y=QuantumScale*GetPixelGreen(q);
Z=QuantumScale*GetPixelBlue(q);
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
red=ClampToQuantum(EncodePixelGamma((MagickRealType) red));
green=ClampToQuantum(EncodePixelGamma((MagickRealType) green));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0f*(float) i);
y_map[i].x=(MagickRealType) (0.500000f*(2.0f*(float) i-MaxMap));
z_map[i].x=(MagickRealType) ((-0.333340f)*(2.0f*(float) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0f*(float) i);
y_map[i].y=(MagickRealType) (0.000000f);
z_map[i].y=(MagickRealType) (0.666665f*(2.0f*(float) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0f*(float) i);
y_map[i].z=(MagickRealType) ((-0.500000f)*(2.0f*(float) i-MaxMap));
z_map[i].z=(MagickRealType) ((-0.333340f)*(2.0f*(float) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
case YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*i;
y_map[i].x=(1.2188941887145875e-06)*i;
z_map[i].x=0.5f*1.4019995886561440468*(2.00f*i-MaxMap);
x_map[i].y=0.99999975910502514331*i;
y_map[i].y=0.5f*(-0.34413567816504303521)*(2.00f*i-MaxMap);
z_map[i].y=0.5f*(-0.71413649331646789076)*(2.00f*i-MaxMap);
x_map[i].z=1.00000124040004623180*i;
y_map[i].z=0.5f*1.77200006607230409200*(2.00f*i-MaxMap);
z_map[i].z=2.1453384174593273e-06*i;
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0f*(float) i);
y_map[i].x=(MagickRealType) (0.000000f*(2.0f*(float) i-MaxMap));
z_map[i].x=(MagickRealType) (0.5f*1.574800f*(2.0f*(float) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0f*(float) i);
y_map[i].y=(MagickRealType) (0.5f*(-0.187324f)*(2.0f*(float) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5f*(-0.468124f)*(2.0f*(float) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0f*(float) i);
y_map[i].z=(MagickRealType) (0.5f*1.855600f*(2.0f*(float) i-MaxMap));
z_map[i].z=(MagickRealType) (0.000000f*(2.0f*(float) i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000f*(float) i);
y_map[i].x=(MagickRealType) (0.0000000f);
z_map[i].x=(MagickRealType) (1.8215000f*((float) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000f*(float) i);
y_map[i].y=(MagickRealType) ((-0.4302726f)*((float) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) ((-0.9271435f)*((float) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000f*(float) i);
y_map[i].z=(MagickRealType) (2.2179000f*((float) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) (0.0000000f);
}
break;
}
case YIQColorspace:
{
/*
Initialize YIQ tables:
R = Y+0.95620*I+0.62140*Q
G = Y-0.27270*I-0.64680*Q
B = Y-1.10370*I+1.70060*Q
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=1.0f*i;
y_map[i].x=0.5f*0.9562957197589482261*(2.00000f*i-MaxMap);
z_map[i].x=0.5f*0.6210244164652610754*(2.00000f*i-MaxMap);
x_map[i].y=1.0f*i;
y_map[i].y=0.5f*(-0.2721220993185104464)*(2.00000f*i-MaxMap);
z_map[i].y=0.5f*(-0.6473805968256950427)*(2.00000f*i-MaxMap);
x_map[i].z=1.0f*i;
y_map[i].z=0.5f*(-1.1069890167364901945)*(2.00000f*i-MaxMap);
z_map[i].z=0.5f*1.7046149983646481374*(2.00000f*i-MaxMap);
}
break;
}
case YPbPrColorspace:
{
/*
Initialize YPbPr tables:
R = Y +1.402000*C2
G = Y-0.344136*C1+0.714136*C2
B = Y+1.772000*C1
Pb and Pr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*i;
y_map[i].x=(-1.2188941887145875e-06)*(2.0f*i-MaxMap);
z_map[i].x=0.5f*1.4019995886561440468*(2.0f*i-MaxMap);
x_map[i].y=0.99999975910502514331*i;
y_map[i].y=0.5f*(-0.34413567816504303521)*(2.0f*i-MaxMap);
z_map[i].y=0.5f*(-0.71413649331646789076)*(2.0f*i-MaxMap);
x_map[i].z=1.00000124040004623180*i;
y_map[i].z=0.5f*1.77200006607230409200*(2.0f*i-MaxMap);
z_map[i].z=2.1453384174593273e-06*(2.0f*i-MaxMap);
}
break;
}
case YUVColorspace:
{
/*
Initialize YUV tables:
R = Y +1.13983*V
G = Y-0.39464*U-0.58060*V
B = Y+2.03211*U
U and V, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=1.0f*i;
y_map[i].x=(-3.945707070708279e-05)*(2.0f*i-MaxMap);
z_map[i].x=0.5f*1.1398279671717170825*(2.0f*i-MaxMap);
x_map[i].y=1.0f*i;
y_map[i].y=0.5f*(-0.3946101641414141437)*(2.0f*i-MaxMap);
z_map[i].y=0.5f*(-0.5805003156565656797)*(2.0f*i-MaxMap);
x_map[i].z=1.0f*i;
y_map[i].z=0.5f*2.0319996843434342537*(2.0f*i-MaxMap);
z_map[i].z=(-4.813762626262513e-04)*(2.0f*i-MaxMap);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(float) i);
y_map[i].x=(MagickRealType) 0.0f;
z_map[i].x=(MagickRealType) 0.0f;
x_map[i].y=(MagickRealType) 0.0f;
y_map[i].y=(MagickRealType) (1.0*(float) i);
z_map[i].y=(MagickRealType) 0.0f;
x_map[i].z=(MagickRealType) 0.0f;
y_map[i].z=(MagickRealType) 0.0f;
z_map[i].z=(MagickRealType) (1.0*(float) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(q));
green=ScaleQuantumToMap(GetPixelGreen(q));
blue=ScaleQuantumToMap(GetPixelBlue(q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=EncodePixelGamma((MagickRealType)
ScaleMapToQuantum(pixel.red));
pixel.green=EncodePixelGamma((MagickRealType)
ScaleMapToQuantum(pixel.green));
pixel.blue=EncodePixelGamma((MagickRealType)
ScaleMapToQuantum(pixel.blue));
}
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransformRGBImage)
#endif
proceed=SetImageProgress(image,TransformRGBImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(image->colormap[i].red);
green=ScaleQuantumToMap(image->colormap[i].green);
blue=ScaleQuantumToMap(image->colormap[i].blue);
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=EncodePixelGamma((MagickRealType) ScaleMapToQuantum(
pixel.red));
pixel.green=EncodePixelGamma((MagickRealType) ScaleMapToQuantum(
pixel.green));
pixel.blue=EncodePixelGamma((MagickRealType) ScaleMapToQuantum(
pixel.blue));
}
image->colormap[i].red=ClampToQuantum(pixel.red);
image->colormap[i].green=ClampToQuantum(pixel.green);
image->colormap[i].blue=ClampToQuantum(pixel.blue);
}
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
rose_slowInput_47.c | #include <omp.h>
typedef double real8;

/************************************************************************
 * Function : StressZero
 *
 * Purpose : Zero the deviatoric stress components of every zone that
 *           either has no shear strength (shearMod == 0), whose stress
 *           measure fun2j fell below the cutoff, or whose relative
 *           volume vc reached the EOS upper bound eosvmax.  Afterwards
 *           flush any component of denormal scale (magnitude below
 *           stresscut * 1e-20) to exactly 0.0 to prevent underflow.
 *
 *           newSxx..newTyz : in/out per-zone stress component arrays
 *           fun2j          : per-zone stress magnitude measure
 *           shearMod       : shear modulus, indexed through zoneset[i]
 *           eosvmax        : maximum relative volume allowed by the EOS
 *           stresscut      : stress cutoff threshold
 *           zoneset        : zone-index map used to address shearMod
 *           vc             : per-zone relative volume
 *           length         : number of zones to process
 ************************************************************************/
void StressZero(real8 *newSxx,real8 *newSyy,real8 *newSzz,real8 *newTxy,real8 *newTxz,real8 *newTyz,const real8 *fun2j,const real8 *shearMod,real8 eosvmax,real8 stresscut,const int *zoneset,const real8 *vc,int length)
{
  int i;
  /* This value 1.e-20 is used to prevent underflow. It is NOT a
     cutoff. DO NOT TOUCH THIS VALUE. */
  real8 stress2 = stresscut * 1.e-20;
  real8 nstres2 = -stress2;
#pragma omp parallel for private (i) firstprivate (length,stress2,nstres2)
  for (i = 0; i <= length - 1; i += 1) {
    /* BUGFIX: the original computed zoneset[i] into 'index' and then
       never used it, re-reading zoneset[i] instead.  Use the cached
       value for the shearMod lookup. */
    const int index = zoneset[i];
    if (shearMod[index] == 0.0 || fun2j[i] < stresscut || vc[i] >= eosvmax) {
      newSxx[i] = 0.0;
      newSyy[i] = 0.0;
      newSzz[i] = 0.0;
      newTxy[i] = 0.0;
      newTxz[i] = 0.0;
      newTyz[i] = 0.0;
    }
#if 1
    /* Flush tiny (underflow-scale) values to exactly zero. */
    if (newSxx[i] < stress2 && newSxx[i] > nstres2)
      newSxx[i] = 0.0;
    if (newSyy[i] < stress2 && newSyy[i] > nstres2)
      newSyy[i] = 0.0;
    if (newSzz[i] < stress2 && newSzz[i] > nstres2)
      newSzz[i] = 0.0;
    if (newTxy[i] < stress2 && newTxy[i] > nstres2)
      newTxy[i] = 0.0;
    if (newTxz[i] < stress2 && newTxz[i] > nstres2)
      newTxz[i] = 0.0;
    if (newTyz[i] < stress2 && newTyz[i] > nstres2)
      newTyz[i] = 0.0;
#endif
  }
}
|
test.c | #include <stdio.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#define N 100

// Verifies the "aligned" clause on a combined target parallel for simd:
// the device loop writes k into b[k] (b aliases a), the host fills the
// reference array aa with the same values, and the two are compared.
// Returns the number of mismatches found (0 on success).
int test_aligned(){
  int a[N], aa[N];
  int idx, error = 0;
  // fill both arrays with a sentinel value
  for (idx = 0; idx < N; idx++) {
    aa[idx] = -1;
    a[idx] = -1;
  }
  int *b = a;
  // offload: write each index through the aligned pointer
  #pragma omp target data map(tofrom: b[0:100])
  #pragma omp target parallel for simd aligned(b: 8*sizeof(int))
  for(int k=0; k<N; k++) {
    b[k] = k;
  }
  // host reference
  for (idx = 0; idx < N; idx++)
    aa[idx] = idx;
  // compare the device result against the host reference
  for (idx = 0; idx < N; idx++) {
    if (a[idx] != aa[idx])
      printf("%d: a %d != %d (error %d)\n", idx, a[idx], aa[idx], ++error);
    if (error > 10) {
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Verifies the collapse(2) clause on a combined target parallel for simd:
// the collapsed 25x4 device loop nest writes i into a[i]; the host fills
// the reference array aa with the same values and the two are compared.
// Returns the number of mismatches found (0 on success).
int test_collapsed(){
  int a[N], aa[N];
  int i, error = 0;
  // initialize both arrays with a sentinel value
  for(i=0; i<N; i++)
    aa[i] = a[i] = -1;
  // offload: collapsed loop nest covers indices 0..N-1
  #pragma omp target data map(tofrom: a[0:100])
  {
    #pragma omp target parallel for simd collapse(2)
    for(int k=0; k<N/4; k++)
      for(int l=0; l<4; l++)
        a[k*4+l] = k*4+l;
  }
  // host reference
  for(i=0; i<N; i++)
    aa[i] = i;
  // compare device result against host reference
  for(i=0; i<N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) { // bail out after too many mismatches
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Verifies the lastprivate(n) clause: after the device loop, n must hold
// the value from the sequentially last iteration (N-1), which is then
// stored into a[0] and checked against the host reference.
// Returns the number of mismatches found (0 on success).
int test_lastprivate(){
  int a[N], aa[N];
  int i, error = 0;
  // initialize both arrays with a sentinel value
  for(i=0; i<N; i++)
    aa[i] = a[i] = -1;
  int n;
  // offload: every iteration writes n; lastprivate keeps the last value
  #pragma omp target parallel for simd map(tofrom: a[0:100], n) lastprivate(n)
  for(int k=0; k<N; k++) {
    a[k] = k;
    n = k;
  }
  a[0] = n;
  // host reference: a[0] must be N-1, the value of the last iteration
  for(i=0; i<N; i++)
    aa[i] = i;
  aa[0] = N-1;
  // compare device result against host reference
  for(i=0; i<N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) { // bail out after too many mismatches
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Verifies the linear(l: 2) clause: l advances by 2 per iteration and is
// stored into a[k]; the host reference is aa[i] = 2*i.
// Returns the number of mismatches found (0 on success).
int test_linear(){
  int a[N], aa[N];
  int i, error = 0;
  // initialize both arrays with a sentinel value
  for(i=0; i<N; i++)
    aa[i] = a[i] = -1;
  int l = 0;
  // offload
  #pragma omp target data map(tofrom: a[0:100])
  {
    #pragma omp target parallel for simd linear(l: 2)
    for(int k=0; k<N; k++) {
      l = 2*k;
      a[k] = l;
    }
  }
  // host reference
  for(i=0; i<N; i++)
    aa[i] = 2*i;
  // compare device result against host reference
  for(i=0; i<N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) { // bail out after too many mismatches
      printf("abort\n");
      return error; // fixed: original had a stray second semicolon here
    }
  }
  return error;
}
// Verifies the private(n) clause: each iteration uses its own copy of n
// as a scratch variable when writing a[k] = k; the host fills the
// reference array aa with the same values and the two are compared.
// Returns the number of mismatches found (0 on success).
int test_private(){
  int a[N], aa[N];
  int i, error = 0;
  // initialize both arrays with a sentinel value
  for(i=0; i<N; i++)
    aa[i] = a[i] = -1;
  int n;
  // offload: n is private scratch storage inside the loop
  #pragma omp target data map(tofrom: a[0:100])
  {
    #pragma omp target parallel for simd private(n)
    for(int k=0; k<N; k++) {
      n = k;
      a[k] = n;
    }
  }
  // host reference
  for(i=0; i<N; i++)
    aa[i] = i;
  // compare device result against host reference
  for(i=0; i<N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) { // bail out after too many mismatches
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Verifies the safelen(2) clause: the device loop carries a dependence of
// distance 2 (a[k] = a[k-2] + 2), which safelen(2) permits; the result
// must equal the identity sequence 0..N-1.
// Returns the number of mismatches found (0 on success).
int test_safelen(){
  int a[N], aa[N];
  int i, error = 0, k;
  // initialize both arrays with a sentinel value
  for(i=0; i<N; i++)
    aa[i] = a[i] = -1;
  // offload
  // TODO: Write a better test for safelen
  // Not really a good test for safelen in this case. This works for now.
  #pragma omp target parallel for simd map(tofrom: a[0:100]) schedule(static, 100) safelen(2)
  for(k=0; k<100; k++) {
    if (k > 1){
      a[k] = a[k-2] + 2; // distance-2 dependence: allowed by safelen(2)
    }
    else{
      a[k] = k;
    }
  }
  // host reference
  for(i=0; i<N; i++)
    aa[i] = i;
  // compare device result against host reference
  for(i=0; i<N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) { // bail out after too many mismatches
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Driver: verifies offloading is available (check_offloading from the
// shared test utilities), runs one test per clause of the
// "target parallel for simd" construct, and reports the accumulated
// error count as the process exit status (0 on success).
int main()
{
  int error = 0;
  check_offloading();
  // Clauses
  error += test_aligned();
  error += test_collapsed();
  error += test_lastprivate();
  error += test_linear();
  error += test_private();
  error += test_safelen();
  // report
  printf("done with %d errors\n", error);
  return error;
}
|
CutPursuit.h | #pragma once
#include "Graph.h"
#include <math.h>
#include <queue>
#include <iostream>
#include <fstream>
#include <boost/graph/boykov_kolmogorov_max_flow.hpp>
namespace CP {
// Tunable parameters controlling a cut-pursuit optimization run.
template <typename T>
struct CPparameter
{
    T reg_strenth;              // regularization strength; multiplies the edge weights
    uint32_t cutoff;            // minimal component size (0 disables the cutoff step)
    uint32_t flow_steps;        // number of steps in the optimal binary cut computation
    uint32_t kmeans_ite;        // number of iterations in the kmeans sampling
    uint32_t kmeans_resampling; // number of kmeans re-initializations
    uint32_t verbose;           // verbosity level (0 = silent)
    uint32_t max_ite_main;      // maximum number of iterations in the main loop
    bool backward_step;         // indicates if a backward step should be performed
    double stopping_ratio;      // stop when the relative energy decrease per iteration is too small
    fidelityType fidelity;      // the fidelity (data attachment) function
    double smoothing;           // smoothing term (for KL divergence only)
    bool parallel;              // enable/disable parallelism
    T weight_decay;             // for continued optimization of the flow steps
};
template <typename T>
class CutPursuit
{
public:
        Graph<T> main_graph;     // the Graph structure containing the full problem
        Graph<T> reduced_graph;  // the reduced graph whose vertices are the connected components
        std::vector<std::vector<VertexDescriptor<T>>> components; // the list of vertices in each component
        std::vector<VertexDescriptor<T>> root_vertex;  // the root vertex of each connected component
        std::vector<bool> saturated_components;        // is the component saturated (uncuttable)?
        std::vector<std::vector<EdgeDescriptor>> borders; // the edges forming the borders between components
        VertexDescriptor<T> source; // source vertex for the graph cut
        VertexDescriptor<T> sink;   // sink vertex for the graph cut
        uint32_t dim;     // dimension of the data
        uint32_t nVertex; // number of data points
        uint32_t nEdge;   // number of edges between vertices (not counting edges to source/sink)
        CP::VertexIterator<T> lastIterator; // iterator pointing to the last vertex which is neither sink nor source
        CPparameter<T> parameter; // algorithm parameters (see CPparameter)
public:
CutPursuit(uint32_t nbVertex = 1)
{
this->main_graph = Graph<T>(nbVertex);
this->reduced_graph = Graph<T>(1);
this->components = std::vector<std::vector<VertexDescriptor<T>>>(1);
this->root_vertex = std::vector<VertexDescriptor<T>>(1,0);
this->saturated_components = std::vector<bool>(1,false);
this->source = VertexDescriptor<T>();
this->sink = VertexDescriptor<T>();
this->dim = 1;
this->nVertex = 1;
this->nEdge = 0;
this->parameter.flow_steps = 3;
this->parameter.kmeans_ite = 5;
this->parameter.kmeans_resampling = 3;
this->parameter.verbose = 2;
this->parameter.max_ite_main = 6;
this->parameter.backward_step = true;
this->parameter.stopping_ratio = 0.0001;
this->parameter.fidelity = L2;
this->parameter.smoothing = 0.1;
this->parameter.parallel = true;
this->parameter.weight_decay = 0.7;
}
virtual ~CutPursuit(){
};
//=============================================================================================
std::pair<std::vector<T>, std::vector<T>> run()
{
//first initilialize the structure
this->initialize();
if (this->parameter.verbose > 0)
{
std::cout << "Graph " << boost::num_vertices(this->main_graph) << " vertices and "
<< boost::num_edges(this->main_graph) << " edges and observation of dimension "
<< this->dim << '\n';
}
T energy_zero = this->compute_energy().first; //energy with 1 component
T old_energy = energy_zero; //energy at the previous iteration
//vector with time and energy, useful for benchmarking
std::vector<T> energy_out(this->parameter.max_ite_main ),time_out(this->parameter.max_ite_main);
TimeStack ts; ts.tic();
//the main loop
for (uint32_t ite_main = 1; ite_main <= this->parameter.max_ite_main; ite_main++)
{
//--------those two lines are the whole iteration-------------------------
uint32_t saturation = this->split(); //compute optimal binary partition
this->reduce(); //compute the new reduced graph
//-------end of the iteration - rest is stopping check and display------
std::pair<T,T> energy = this->compute_energy();
energy_out.push_back((energy.first + energy.second));
time_out.push_back(ts.tocDouble());
if (this->parameter.verbose > 1)
{
printf("Iteration %3i - %4i components - ", ite_main, (int)this->components.size());
printf("Saturation %5.1f %% - ",100*saturation / (double) this->nVertex);
switch (this->parameter.fidelity)
{
case L2:
{
printf("Quadratic Energy %4.3f %% - ", 100 * (energy.first + energy.second) / energy_zero);
break;
}
case linear:
{
printf("Linear Energy %10.1f - ", energy.first + energy.second);
break;
}
case KL:
{
printf("KL Energy %4.3f %% - ", 100 * (energy.first + energy.second) / energy_zero);
break;
}
case SPG:
{
printf("Quadratic Energy %4.3f %% - ", 100 * (energy.first + energy.second) / energy_zero);
break;
}
}
std::cout << "Timer " << ts.toc() << std::endl;
}
//----stopping checks-----
if (saturation == (double) this->nVertex)
{ //all components are saturated
if (this->parameter.verbose > 1)
{
std::cout << "All components are saturated" << std::endl;
}
break;
}
if ((old_energy - energy.first - energy.second) / (old_energy)
< this->parameter.stopping_ratio)
{ //relative energy progress stopping criterion
if (this->parameter.verbose > 1)
{
std::cout << "Stopping criterion reached" << std::endl;
}
break;
}
if (ite_main>=this->parameter.max_ite_main)
{ //max number of iteration
if (this->parameter.verbose > 1)
{
std::cout << "Max number of iteration reached" << std::endl;
}
break;
}
old_energy = energy.first + energy.second;
}
if (this->parameter.cutoff > 0)
{
this->cutoff();
}
return std::pair<std::vector<T>, std::vector<T>>(energy_out, time_out);
}
//=============================================================================================
//=========== VIRTUAL METHODS DEPENDING ON THE CHOICE OF FIDELITY FUNCTION =====================
//=============================================================================================
//
//=============================================================================================
//============================= SPLIT ===========================================
//=============================================================================================
        // Compute the optimal binary partition of each component and return
        // the number of vertices in saturated components.  Base-class stub
        // returning 0; overridden by the fidelity-specific subclasses.
        virtual uint32_t split()
        {
            //compute the optimal binary partition
            return 0;
        }
//=============================================================================================
//================================ compute_energy_L2 ====================================
//=============================================================================================
        // Compute the current energy as a pair (fidelity term, regularizer
        // term).  Base-class stub returning (0,0); overridden by the
        // fidelity-specific subclasses.
        virtual std::pair<T,T> compute_energy()
        {
            //compute the current energy
            return std::pair<T,T>(0,0);
        }
//=============================================================================================
//================================= COMPUTE_VALUE =========================================
//=============================================================================================
        // Compute the optimal value associated with component ind_com of the
        // current partition; returns the pair (value vector, energy).
        // Base-class stub; overridden by the fidelity-specific subclasses.
        virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & ind_com)
        {
            //compute the optimal values associated with the current partition
            return std::pair<std::vector<T>, T>(std::vector<T>(0),0);
        }
//=============================================================================================
//================================= COMPUTE_MERGE_GAIN =========================================
//=============================================================================================
        // Compute the gain of merging the two connected components rooted at
        // comp1 and comp2; returns the pair (merged value vector, gain).
        // Base-class stub; overridden by the fidelity-specific subclasses.
        virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1
                                                              , const VertexDescriptor<T> & comp2)
        {
            //compute the gain of merging two connected components
            return std::pair<std::vector<T>, T>(std::vector<T>(0),0);
        }
//=============================================================================================
//========================== END OF VIRTUAL METHODS ===========================================
//=============================================================================================
//
//=============================================================================================
//============================= INITIALIZE ===========================================
//=============================================================================================
        // Build the reduced graph with a single component containing every
        // vertex, fill the first component vector, and append the source and
        // sink nodes (with zero-capacity double edges to every vertex) used
        // by the max-flow based split step.
        void initialize()
        {
            //build the reduced graph with one component, fill the first vector of components
            //and add the sink and source nodes
            VertexIterator<T> ite_ver, ite_ver_end;
            EdgeAttributeMap<T> edge_attribute_map
                    = boost::get(boost::edge_bundle, this->main_graph);
            this->components[0] = std::vector<VertexDescriptor<T>> (0);//(this->nVertex);
            this->root_vertex[0] = *boost::vertices(this->main_graph).first;
            this->nVertex = boost::num_vertices(this->main_graph);
            this->nEdge = boost::num_edges(this->main_graph);
            //--------compute the first reduced graph: all vertices in component 0----------------------
            for (boost::tie(ite_ver, ite_ver_end) = boost::vertices(this->main_graph);
                 ite_ver != ite_ver_end; ++ite_ver)
            {
                this->components[0].push_back(*ite_ver);
            }
            this->lastIterator = ite_ver;
            this->compute_value(0);
            //--------build the link to source and sink--------------------------------------------------------
            this->source = boost::add_vertex(this->main_graph);
            this->sink = boost::add_vertex(this->main_graph);
            uint32_t eIndex = boost::num_edges(this->main_graph);
            ite_ver = boost::vertices(this->main_graph).first;
            for (uint32_t ind_ver = 0; ind_ver < this->nVertex ; ind_ver++)
            {
                // note that source and sink will have many neighbors, and hence boost::edge should
                // never be called to get the in_edge; use the out_edge and then the reverse edge
                addDoubledge<T>(this->main_graph, this->source, boost::vertex(ind_ver, this->main_graph), 0.,
                                eIndex, edge_attribute_map , false);
                eIndex +=2;
                addDoubledge<T>(this->main_graph, boost::vertex(ind_ver, this->main_graph), this->sink, 0.,
                                eIndex, edge_attribute_map, false);
                eIndex +=2;
                ++ite_ver;
            }
        }
//=============================================================================================
//================================ COMPUTE_REDUCE_VALUE ====================================
//=============================================================================================
void compute_reduced_value()
{
//refresh the reduced value associated with every connected component
for (uint32_t i_comp = 0; i_comp < this->components.size(); ++i_comp)
{
this->compute_value(i_comp);
}
}
//=============================================================================================
//============================= ACTIVATE_EDGES ==========================================
//=============================================================================================
uint32_t activate_edges(bool allows_saturation = true)
{ //analyze the optimal binary partition (the max-flow coloring) to detect:
//- saturated components (i.e. components that can no longer be cut)
//- newly activated edges (edges crossing the source/sink interface)
//returns the number of vertices belonging to saturated components
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
//saturation counts the nodes contained in saturated components
uint32_t saturation = 0;
uint32_t nb_comp = this->components.size();
//---- first check if the components are saturated-------------------------
//#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
{
if (this->saturated_components[ind_com])
{ //ind_com is already saturated, we increment saturation by its size
saturation += this->components[ind_com].size();
continue;
}
//totalWeight[0]: weight on the sink side, totalWeight[1]: weight on the source side
std::vector<T> totalWeight(2,0);
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ind_ver++)
{
bool isSink
= (vertex_attribute_map(this->components[ind_com][ind_ver]).color
== vertex_attribute_map(this->sink).color);
if (isSink)
{
totalWeight[0] += vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
}
else
{
totalWeight[1] += vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
}
}
if (allows_saturation && ((totalWeight[0] == 0)||(totalWeight[1] == 0)))
{
//the cut is trivial (all weight fell on one side): the component is saturated
this->saturateComponent(ind_com);
saturation += this->components[ind_com].size();
}
}
//----check which edges have been activated----
EdgeIterator<T> ite_edg, ite_edg_end;
uint32_t color_v1, color_v2, color_combination;
for (boost::tie(ite_edg, ite_edg_end) = boost::edges(this->main_graph);
ite_edg != ite_edg_end; ++ite_edg)
{
if (!edge_attribute_map(*ite_edg).realEdge )
{ //edges to/from the source and sink are never activated
continue;
}
color_v1 = vertex_attribute_map(boost::source(*ite_edg, this->main_graph)).color;
color_v2 = vertex_attribute_map(boost::target(*ite_edg, this->main_graph)).color;
//color_source = 0, color_sink = 4, uncolored = 1
//an edge is activated when it crosses the source/sink interface (sum of 4);
//uncolored nodes are arbitrarily assigned to the source side, so
//source-uncolored pairs (sum of 1) stay inactive while uncolored-sink
//pairs (sum of 5) are activated.
//same-side sums are therefore 0 (source-source), 1 (source-uncolored),
//2 (uncolored-uncolored) and 8 (sink-sink).
//BUGFIX: the original condition tested (color_combination == 2) twice and
//never tested 1, which spuriously activated source-uncolored edges.
color_combination = color_v1 + color_v2;
if ((color_combination == 0)||(color_combination == 1)||(color_combination == 2)
||(color_combination == 8))
{ //edge between two vertices on the same side of the cut
continue;
}
//the edge is active!
edge_attribute_map(*ite_edg).isActive = true;
//an active edge no longer carries flow capacity
edge_attribute_map(*ite_edg).capacity = 0;
vertex_attribute_map(boost::source(*ite_edg, this->main_graph)).isBorder = true;
vertex_attribute_map(boost::target(*ite_edg, this->main_graph)).isBorder = true;
}
return saturation;
}
//=============================================================================================
//============================= REDUCE ===========================================
//=============================================================================================
void reduce()
{
//recompute the partition into connected components, then either rebuild the
//full reduced graph (when a backward check for beneficial merges is wanted)
//or simply refresh the value attached to each component
this->compute_connected_components();
if (!this->parameter.backward_step)
{ //no backward step: only the per-component values are needed
this->compute_reduced_value();
return;
}
//backward step: build the reduced-graph structure and look for beneficial merges
this->compute_reduced_graph();
this->merge(false);
}
//=============================================================================================
//============================== compute_connected_components=========================================
//=============================================================================================
void compute_connected_components()
{ //this function computes the connected components of the graph with active edges removed
//new components are appended at the end of this->components, which allows
//saturated components (whose content cannot change) not to be recomputed
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map =get(boost::vertex_index, this->main_graph);
//indicate which edges and nodes have been seen already by the depth-first search
std::vector<bool> edges_seen (this->nEdge, false);
//the +2 accounts for the source and sink vertices appended after the originals
std::vector<bool> vertices_seen (this->nVertex+2, false);
//source and sink never belong to any component
vertices_seen[vertex_index_map(this->source)] = true;
vertices_seen[vertex_index_map(this->sink)] = true;
//-------- start with the known roots------------------------------------------------------
//#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
for (uint32_t ind_com = 0; ind_com < this->root_vertex.size(); ind_com++)
{
VertexDescriptor<T> root = this->root_vertex[ind_com]; //the first vertex of the component
if (this->saturated_components[ind_com])
{ //this component is saturated, we don't need to recompute it
//only mark its vertices as seen so no other component claims them
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
{
vertices_seen[vertex_index_map(this->components[ind_com][ind_ver])] = true;
}
}
else
{ //recompute the content of this component by a DFS from its root;
//the old size is only a capacity hint for the result vector
this->components.at(ind_com) = connected_comp_from_root(root, this->components.at(ind_com).size()
, vertices_seen , edges_seen);
}
}
//----now look for components that did not already exist----
VertexIterator<T> ite_ver;
//lastIterator bounds the original vertices, excluding source and sink
for (ite_ver = boost::vertices(this->main_graph).first;
ite_ver != this->lastIterator; ++ite_ver)
{
if (vertices_seen[vertex_index_map(*ite_ver)])
{
continue;
} //this vertex is not currently in a connected component
VertexDescriptor<T> root = *ite_ver; //we define it as the root of a new component
//use the size of the component the vertex used to belong to as a capacity hint
uint32_t current_component_size =
this->components[vertex_attribute_map(root).in_component].size();
this->components.push_back(
connected_comp_from_root(root, current_component_size
, vertices_seen, edges_seen));
this->root_vertex.push_back(root);
this->saturated_components.push_back(false);
}
this->components.shrink_to_fit();
}
//=============================================================================================
//============================== CONNECTED_COMP_FROM_ROOT=========================================
//=============================================================================================
inline std::vector<VertexDescriptor<T>> connected_comp_from_root(const VertexDescriptor<T> & root
, const uint32_t & size_comp, std::vector<bool> & vertices_seen , std::vector<bool> & edges_seen)
{
//Depth-first search from ROOT over the graph restricted to inactive edges.
//Returns the vertices reachable from ROOT, marking every visited vertex and
//traversed edge in VERTICES_SEEN / EDGES_SEEN. SIZE_COMP is only a capacity
//hint (the expected size of the component).
EdgeAttributeMap<T> edge_map
= boost::get(boost::edge_bundle, this->main_graph);
VertexIndexMap<T> vertex_index = get(boost::vertex_index, this->main_graph);
EdgeIndexMap<T> edge_index = get(&EdgeAttribute<T>::index, this->main_graph);
//vertices confirmed to belong to the component
std::vector<VertexDescriptor<T>> component;
component.reserve(size_comp);
//explicit DFS stack of vertices still waiting to be explored
std::vector<VertexDescriptor<T>> dfs_stack;
dfs_stack.reserve(size_comp);
dfs_stack.push_back(root);
while (!dfs_stack.empty())
{
VertexDescriptor<T> vertex = dfs_stack.back();
dfs_stack.pop_back();
if (vertices_seen[vertex_index(vertex)])
{ //already handled through another path
continue;
}
//record the vertex and flag it as seen
component.push_back(vertex);
vertices_seen[vertex_index(vertex)] = true;
//push every neighbor reachable through an inactive, not-yet-traversed edge
typename boost::graph_traits<Graph<T>>::out_edge_iterator ite_out, ite_out_end;
for (boost::tie(ite_out, ite_out_end) = boost::out_edges(vertex, this->main_graph);
ite_out != ite_out_end; ++ite_out)
{
EdgeDescriptor out_edge = *ite_out;
if (edge_map(out_edge).isActive || edges_seen[edge_index(out_edge)])
{ //edge is either active (cut) or already traversed: skip it
continue;
}
//mark both directions of the double edge as traversed
EdgeDescriptor twin_edge = edge_map(out_edge).edge_reverse;
edges_seen[edge_index(out_edge)] = true;
edges_seen[edge_index(twin_edge)] = true;
dfs_stack.push_back(boost::target(out_edge, this->main_graph));
}
}
component.shrink_to_fit();
return component;
}
//=============================================================================================
//================================ COMPUTE_REDUCE_GRAPH ====================================
//=============================================================================================
void compute_reduced_graph()
{ //compute the adjacency structure between components as well as weight and value of each component
//this is stored in the reduced graph structure; the borders member records,
//for every reduced edge, the list of main-graph edges it aggregates
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
//one reduced vertex per connected component
this->reduced_graph = Graph<T>(this->components.size());
VertexAttributeMap<T> component_attribute_map = boost::get(boost::vertex_bundle, this->reduced_graph);
//----fill the values of the reduced graph----
#ifdef OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
for (uint32_t ind_com = 0; ind_com < this->components.size(); ind_com++)
{
std::pair<std::vector<T>, T> component_values_and_weight = this->compute_value(ind_com);
//----fill the value and weight field of the reduced graph-----------------------------
VertexDescriptor<T> reduced_vertex = boost::vertex(ind_com, this->reduced_graph);
component_attribute_map[reduced_vertex] = VertexAttribute<T>(this->dim);
component_attribute_map(reduced_vertex).weight
= component_values_and_weight.second;
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
component_attribute_map(reduced_vertex).value[i_dim]
= component_values_and_weight.first[i_dim];
}
}
//------compute the edges of the reduced graph
EdgeAttributeMap<T> border_edge_attribute_map = boost::get(boost::edge_bundle, this->reduced_graph);
this->borders.clear();
EdgeDescriptor edge_current, border_edge_current;
uint32_t ind_border_edge = 0, comp1, comp2, component_source, component_target;
VertexDescriptor<T> source_component, target_component;
bool reducedEdgeExists;
typename boost::graph_traits<Graph<T>>::edge_iterator ite_edg, ite_edg_end;
for (boost::tie(ite_edg,ite_edg_end) = boost::edges(this->main_graph); ite_edg != ite_edg_end; ++ite_edg)
{
if (!edge_attribute_map(*ite_edg).realEdge)
{ //edges linking the source or sink node do not take part
continue;
}
edge_current = *ite_edg;
//compute the connected components of the source and target of current_edge
comp1 = vertex_attribute_map(boost::source(edge_current, this->main_graph)).in_component;
comp2 = vertex_attribute_map(boost::target(edge_current, this->main_graph)).in_component;
if (comp1==comp2)
{ //this edge links two nodes in the same connected component
continue;
}
//by convention we note component_source the smallest index and
//component_target the largest
component_source = std::min(comp1,comp2);
component_target = std::max(comp1,comp2);
//retrieve the corresponding vertex in the reduced graph
source_component = boost::vertex(component_source, this->reduced_graph);
target_component = boost::vertex(component_target, this->reduced_graph);
//try to add the border-edge linking those components in the reduced graph
boost::tie(border_edge_current, reducedEdgeExists)
= boost::edge(source_component, target_component, this->reduced_graph);
if (!reducedEdgeExists)
{ //this border-edge did not already exist in the reduced graph
border_edge_current = boost::add_edge(source_component, target_component, this->reduced_graph).first;
border_edge_attribute_map(border_edge_current).index = ind_border_edge;
border_edge_attribute_map(border_edge_current).weight = 0;
ind_border_edge++;
//create a new entry for the borders list containing this border
this->borders.push_back(std::vector<EdgeDescriptor>(0));
}
//add the weight of the current edge to the weight of the border-edge
//the 0.5 factor compensates for each border being visited once per direction
//of its double edge — TODO confirm against addDoubledge's weight convention
border_edge_attribute_map(border_edge_current).weight += 0.5*edge_attribute_map(edge_current).weight;
this->borders[border_edge_attribute_map(border_edge_current).index].push_back(edge_current);
}
}
//=============================================================================================
//================================ MERGE ====================================
//=============================================================================================
uint32_t merge(bool is_cutoff)
{
//Merge adjacent components of the reduced graph when doing so decreases the
//energy (or, if IS_CUTOFF, when one of them is below the cutoff weight).
//Returns the number of merges performed.
// TODO: right now we only do one loop through the heap of potential merging, and only
//authorize one merging per component. We could update the gain and merge until it is no longer
//beneficial
//check whether the energy can be decreased by removing edges from the reduced graph
//----load graph structure---
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexAttributeMap<T> component_attribute_map
= boost::get(boost::vertex_bundle, this->reduced_graph);
EdgeAttributeMap<T> border_edge_attribute_map
= boost::get(boost::edge_bundle, this->reduced_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
VertexIndexMap<T> component_index_map = boost::get(boost::vertex_index, this->reduced_graph);
//-----------------------------------
EdgeDescriptor border_edge_current;
typename boost::graph_traits<Graph<T>>::edge_iterator ite_border, ite_border_end;
typename std::vector<EdgeDescriptor>::iterator ite_border_edge;
VertexDescriptor<T> source_component, target_component;
uint32_t ind_source_component, ind_target_component, border_edge_currentIndex;
//we store in merge_queue the potential merges with a priority on the potential gain
//std::vector<T> gain_current(boost::num_edges(this->reduced_graph));
std::priority_queue<ComponentsFusion<T>, std::vector<ComponentsFusion<T>>, lessComponentsFusion<T>> merge_queue;
T gain; // the gain obtained by removing the border corresponding to the edge in the reduced graph
//---- first pass: go through all the edges of the reduced graph and compute the
//gain obtained by merging the corresponding pair of components ----
for (boost::tie(ite_border,ite_border_end) = boost::edges(this->reduced_graph); ite_border != ite_border_end; ++ite_border)
{
border_edge_current = *ite_border;
border_edge_currentIndex = border_edge_attribute_map(border_edge_current).index;
//retrieve the two components corresponding to this border
source_component = boost::source(border_edge_current, this->reduced_graph);
target_component = boost::target(border_edge_current, this->reduced_graph);
if (is_cutoff && component_attribute_map(source_component).weight >= this->parameter.cutoff
&&component_attribute_map(target_component).weight >= this->parameter.cutoff)
{ //in cutoff mode only pairs involving an under-weight component are considered
continue;
}
ind_source_component = component_index_map(source_component);
ind_target_component = component_index_map(target_component);
//----now compute the gain of merging those two components-----
// compute the fidelity lost by merging the two connected components
std::pair<std::vector<T>, T> merge_gain = compute_merge_gain(source_component, target_component);
// the second part is due to the removing of the border
gain = merge_gain.second
+ border_edge_attribute_map(border_edge_current).weight * this->parameter.reg_strenth;
//mergeing_information stores the indexes of the components as well as the edge index and the gain
//in a structure ordered by the gain
ComponentsFusion<T> mergeing_information(ind_source_component, ind_target_component, border_edge_currentIndex, gain);
mergeing_information.merged_value = merge_gain.first;
if (is_cutoff || gain>0)
{ //it is beneficial to merge those two components
//we add them to the merge_queue
merge_queue.push(mergeing_information);
//gain_current.at(border_edge_currentIndex) = gain;
}
}
uint32_t n_merged = 0;
//----go through the priority queue of merges and perform them as long as it is beneficial---
//is_merged indicates which components took part in a merge (each component may merge at most once)
std::vector<bool> is_merged(this->components.size(), false);
//to_destroy indicates the components that need to be removed (absorbed into another one)
std::vector<bool> to_destroy(this->components.size(), false);
while(merge_queue.size()>0)
{ //loop through the potential merges and accept the ones that decrease the energy
ComponentsFusion<T> mergeing_information = merge_queue.top();
if (!is_cutoff && mergeing_information.merge_gain<=0)
{ //queue is sorted by gain: no remaining merge provides a gain in energy
break;
}
merge_queue.pop();
if (is_merged.at(mergeing_information.comp1) || (is_merged.at(mergeing_information.comp2)))
{
//at least one of the components has already been merged
continue;
}
n_merged++;
//---proceed with the fusion of comp1 and comp2----
//add the vertices of comp2 to comp1
this->components[mergeing_information.comp1].insert(this->components[mergeing_information.comp1].end()
,components[mergeing_information.comp2].begin(), this->components[mergeing_information.comp2].end());
//if comp1 was saturated it might not be anymore
this->saturated_components[mergeing_information.comp1] = false;
//the new weight is the sum of both weights
//NOTE(review): component indices are used directly as reduced-graph vertex
//descriptors here — assumes vecS vertex storage; confirm if storage changes
component_attribute_map(mergeing_information.comp1).weight
+= component_attribute_map(mergeing_information.comp2).weight;
//the new value is already computed in mergeing_information
component_attribute_map(mergeing_information.comp1).value = mergeing_information.merged_value;
//we deactivate the border between comp1 and comp2
for (ite_border_edge = this->borders.at(mergeing_information.border_index).begin();
ite_border_edge != this->borders.at(mergeing_information.border_index).end() ; ++ite_border_edge)
{
edge_attribute_map(*ite_border_edge).isActive = false;
}
is_merged.at(mergeing_information.comp1) = true;
is_merged.at(mergeing_information.comp2) = true;
to_destroy.at(mergeing_information.comp2) = true;
}
//we now rebuild the vectors components, root_vertex and saturated_components
//keeping only the surviving components, and renumber in_component accordingly
std::vector<std::vector<VertexDescriptor<T>>> new_components;
std::vector<VertexDescriptor<T>> new_root_vertex;
std::vector<bool> new_saturated_components;
uint32_t ind_new_component = 0;
for (uint32_t ind_com = 0; ind_com < this->components.size(); ind_com++)
{
if (to_destroy.at(ind_com))
{ //this component has been removed
continue;
}//this component is kept
new_components.push_back(this->components.at(ind_com));
new_root_vertex.push_back(this->root_vertex.at(ind_com));
new_saturated_components.push_back(saturated_components.at(ind_com));
//update the value and (renumbered) component index of every vertex
//if (is_merged.at(ind_com))
//{ //we need to update the value of the vertex in this component
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
{
vertex_attribute_map(this->components[ind_com][ind_ver]).value
= component_attribute_map(boost::vertex(ind_com, this->reduced_graph)).value;
vertex_attribute_map(this->components[ind_com][ind_ver]).in_component
= ind_new_component;//ind_com;
}
//}
ind_new_component++;
}
this->components = new_components;
this->root_vertex = new_root_vertex;
this->saturated_components = new_saturated_components;
return n_merged;
}
//=============================================================================================
//================================ CUTOFF ====================================
//=============================================================================================
void cutoff(uint32_t max_iterations = 50)
{
//Iteratively merge components that fall below the cutoff weight
//(this->parameter.cutoff) until a pass performs no merge.
//MAX_ITERATIONS (default 50, the previously hard-coded bound) guards
//against pathological non-convergence of the merge loop.
uint32_t n_merged;
uint32_t iteration = 0;
do
{
//rebuild the reduced graph so merge(true) sees up-to-date
//component adjacency, weights and values
this->compute_reduced_graph();
n_merged = this->merge(true);
++iteration;
} while (n_merged > 0 && iteration <= max_iterations);
}
// //=============================================================================================
// //================================ CUTOFF ====================================
// //=============================================================================================
// void cutoff()
// {
// // Loop through all components and merge the one smaller than the cutoff.
// // It merges components which increase he energy the least
// //----load graph structure---
// VertexAttributeMap<T> vertex_attribute_map
// = boost::get(boost::vertex_bundle, this->main_graph);
// VertexAttributeMap<T> reduced_vertex_attribute_map
// = boost::get(boost::vertex_bundle, this->reduced_graph);
// EdgeAttributeMap<T> reduced_edge_attribute_map
// = boost::get(boost::edge_bundle, this->reduced_graph);
// EdgeAttributeMap<T> edge_attribute_map
// = boost::get(boost::edge_bundle, this->main_graph);
// VertexIndexMap<T> reduced_vertex_index_map = boost::get(boost::vertex_index, this->reduced_graph);
// EdgeIndexMap<T> reduced_edge_index_map = get(&EdgeAttribute<T>::index, this->reduced_graph);
// //-----------------------------------
// typename boost::graph_traits<Graph<T>>::vertex_iterator ite_comp, ite_comp_end;
// typename boost::graph_traits<Graph<T>>::out_edge_iterator ite_edg_out, ite_edg_out_end;
// typename boost::graph_traits<Graph<T>>::in_edge_iterator ite_edg_in, ite_edg_in_end;
// typename std::vector<EdgeDescriptor>::iterator ite_border_edge;
// VertexDescriptor<T> current_vertex, neighbor_vertex;
// //gain_current is the vector of gains associated with each mergeing move
// //we store in merge_queue the potential mergeing with a priority on the potential gain
// T gain; // the gain obtained by removing the border corresponding to the edge in the reduced graph
// std::vector<bool> to_destroy(this->components.size(), false); //components merged to be removed
// while (true)
// {
// this->compute_connected_components();
// this->compute_reduced_graph();
// bool has_merged = false;
// std::cout << "CUTTING OFF : " << this->components.size() << "COMPONENTS " << std::endl;
// for (boost::tie(ite_comp,ite_comp_end) = boost::vertices(this->reduced_graph); ite_comp != ite_comp_end; ++ite_comp)
// {
// current_vertex = *ite_comp;
// if (reduced_vertex_attribute_map(current_vertex).weight > this->parameter.cutoff
// || to_destroy.at(reduced_vertex_index_map(current_vertex)))
// {//component big enough to not be cut or already removed
// continue;
// }
// std::priority_queue<ComponentsFusion<T>, std::vector<ComponentsFusion<T>>, lessComponentsFusion<T>> merge_queue;
//std::cout << "COMPONENT " << reduced_vertex_index_map(current_vertex) << " IS OF SIZE"<< reduced_vertex_attribute_map(current_vertex).weight << std::endl;
// for (boost::tie(ite_edg_out,ite_edg_out_end) = boost::out_edges(current_vertex, this->reduced_graph);
// ite_edg_out != ite_edg_out_end; ++ite_edg_out)
// { //explore all neighbors
// neighbor_vertex = boost::target(*ite_edg_out, this->reduced_graph);
// std::pair<std::vector<T>, T> merge_gain = compute_merge_gain(current_vertex, neighbor_vertex);
// gain = merge_gain.second
// + reduced_edge_attribute_map(*ite_edg_out).weight * this->parameter.reg_strenth;
// ComponentsFusion<T> mergeing_information(reduced_vertex_index_map(current_vertex), reduced_vertex_index_map(neighbor_vertex)
// , reduced_edge_index_map(*ite_edg_out), gain);
// mergeing_information.merged_value = merge_gain.first;
// merge_queue.push(mergeing_information);
//std::cout << " NEI OUT " <<reduced_vertex_index_map(neighbor_vertex) << " GAIN"<< gain << std::endl;
// }
// for (boost::tie(ite_edg_in,ite_edg_in_end) = boost::in_edges(current_vertex, this->reduced_graph);
// ite_edg_in != ite_edg_in_end; ++ite_edg_in)
// { //explore all neighbors
// neighbor_vertex = boost::source(*ite_edg_in, this->reduced_graph);
// std::pair<std::vector<T>, T> merge_gain = compute_merge_gain(current_vertex, neighbor_vertex);
// gain = merge_gain.second
// + reduced_edge_attribute_map(*ite_edg_in).weight * this->parameter.reg_strenth;
// ComponentsFusion<T> mergeing_information(reduced_vertex_index_map(current_vertex), reduced_vertex_index_map(neighbor_vertex)
// , reduced_edge_index_map(*ite_edg_in), gain);
// mergeing_information.merged_value = merge_gain.first;
// merge_queue.push(mergeing_information);
//std::cout << " NEI IN" <<reduced_vertex_index_map(neighbor_vertex) << " GAIN"<< gain << std::endl;
// }
// if (merge_queue.empty())
// {
// continue;
// }
// has_merged = true;
// //select the most advantegeous neighboring components and merge it.
// ComponentsFusion<T> mergeing_information = merge_queue.top();
//std::cout << "BEST NEIGHBORS = " << mergeing_information.comp1 << " - " << mergeing_information.comp2 << " = " << mergeing_information .merge_gain
// << " Weight " << reduced_vertex_attribute_map(mergeing_information.comp2).weight << std::endl;
// this->components[mergeing_information.comp1].insert(this->components[mergeing_information.comp1].end()
// ,components[mergeing_information.comp2].begin(), this->components[mergeing_information.comp2].end());
// //the new weight is the sum of both weights
// reduced_vertex_attribute_map(mergeing_information.comp1).weight
// += reduced_vertex_attribute_map(mergeing_information.comp2).weight;
// //the new value is already computed in mergeing_information
// reduced_vertex_attribute_map(mergeing_information.comp1).value = mergeing_information.merged_value;
// //we deactivate the border between comp1 and comp2
// for (ite_border_edge = this->borders.at(mergeing_information.border_index).begin();
// ite_border_edge != this->borders.at(mergeing_information.border_index).end() ; ++ite_border_edge)
// {
// edge_attribute_map(*ite_border_edge).isActive = false;
// }
// to_destroy.at(mergeing_information.comp2) = true;
//std::cout << "=> " << reduced_vertex_index_map(current_vertex) << " IS OF SIZE"<< reduced_vertex_attribute_map(current_vertex).weight << std::endl;
// }
// //if (!has_merged)
// //{
// break;
// //}
// }
// //we now rebuild the vectors components, rootComponents and saturated_components
// std::vector<std::vector<VertexDescriptor<T>>> new_components;
// uint32_t ind_new_component = 0;
// for (uint32_t ind_com = 0; ind_com < this->components.size(); ind_com++)
// {
// if (to_destroy.at(ind_com))
// { //this component has been removed
// continue;
// }//this components is kept
// new_components.push_back(this->components.at(ind_com));
// //if (is_merged.at(ind_com))
// //{ //we need to update the value of the vertex in this component
// for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
// {
// vertex_attribute_map(this->components[ind_com][ind_ver]).value
// = reduced_vertex_attribute_map(boost::vertex(ind_com, this->reduced_graph)).value;
// vertex_attribute_map(this->components[ind_com][ind_ver]).in_component
// = ind_new_component;//ind_com;
// }
// //}
// ind_new_component++;
// }
// this->components = new_components;
// }
//===============================================================================================
//==========================saturateComponent====================================================
//===============================================================================================
inline void saturateComponent(const uint32_t & ind_com)
{ //this component is uncuttable and needs to be removed from further graph-cuts
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
this->saturated_components[ind_com] = true;
for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
{
VertexDescriptor<T> desc_v = this->components[ind_com][i_ver];
// because of the adjacency structure NEVER access edge (source,v) directly!
EdgeDescriptor edg_ver2source = boost::edge(desc_v, this->source,this->main_graph).first;
EdgeDescriptor edg_source2ver = edge_attribute_map(edg_ver2source).edge_reverse; //use edge_reverse instead
EdgeDescriptor edg_sink2ver = boost::edge(desc_v, this->sink,this->main_graph).first;
// we set the capacities of edges to source and sink to zero
edge_attribute_map(edg_source2ver).capacity = 0.;
edge_attribute_map(edg_sink2ver).capacity = 0.;
}
}
};
}
|
GB_binop__lor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int8)
// A*D function (colscale): GB (_AxD__lor_int8)
// D*A function (rowscale): GB (_DxB__lor_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int8)
// C=scalar+B GB (_bind1st__lor_int8)
// C=scalar+B' GB (_bind1st_tran__lor_int8)
// C=A+scalar GB (_bind2nd__lor_int8)
// C=A'+scalar GB (_bind2nd_tran__lor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT8 || GxB_NO_LOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for this operator: the generator only emits a dense ewise3-accum
// kernel for the ops listed below, and LOR is not one of them, so the body
// is compiled out and the function name is "(none)".
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the LOR_INT8 operator, where C, A, and B are all dense.
// The numeric work lives in the included template; this wrapper only
// supplies the type/operator macros defined above in this file.
GrB_Info GB (_Cdense_ewise3_noaccum__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out (GxB_NO_* controls): caller falls back to the
// generic, non-specialized implementation
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using LOR_INT8
// as the accumulator operator.
GrB_Info GB (_Cdense_accumB__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
// parallel schedule: B sliced into B_ntasks tasks over B_nthreads threads
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// kernel compiled out: caller falls back to the generic implementation
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix (LOR_INT8 accumulator).
GrB_Info GB (_Cdense_accumb__lor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork, // untyped pointer to the scalar b
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above already returned (auto-generated pattern)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes results directly into C's value array
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes results directly into C's value array
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the LOR_INT8 operator.
GrB_Info GB (_AaddB__lor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct, // if true, use the mask structurally (values ignored)
const bool Mask_comp, // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces for M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper.
GrB_Info GB (_AemultB_08__lor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for LOR (commutative), so only the non-flipped
// template instantiation below is ever compiled in.
GrB_Info GB (_AemultB_02__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy, // if true apply op as f(y,x); unused when op commutes
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_04__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
// parallel schedule over the mask M
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__lor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LOR_INT8 operator with the scalar x bound to
// the first argument, over all bnz entries of B.
GrB_Info GB (_bind1st__lor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb, // bitmap of B, or NULL if B is not bitmap
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap (GBB is 1 when Bb is NULL)
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LOR_INT8 operator with the scalar y bound to
// the second argument, over all anz entries of A.
GrB_Info GB (_bind2nd__lor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab, // bitmap of A, or NULL if A is not bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap (GBB is 1 when Ab is NULL)
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
// Uses the GB_CAST_OP macro defined immediately above this function.
GrB_Info GB (_bind1st_tran__lor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code that follows (auto-generated pattern; here
// both operand types are int8_t so the redefinition is a no-op)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
// Uses the GB_CAST_OP macro defined immediately above this function.
GrB_Info GB (_bind2nd_tran__lor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GrB_Descriptor_wait.c | //------------------------------------------------------------------------------
// GrB_Descriptor_wait: wait for a user-defined GrB_Descriptor to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GrB_Descriptor has no pending
// operations to wait for. All this method does is verify that the descriptor
// is properly initialized, and then it does an OpenMP flush. Note that unlike
// other methods, passing in a NULL pointer, or a pointer to a NULL descriptor
// is valid, since a NULL descriptor results in default settings.
#include "GB.h"
GrB_Info GrB_Descriptor_wait // no work, just check if GrB_Descriptor is valid
(
// per the header above, NULL (or a pointer to NULL) is valid here and
// simply means default descriptor settings
GrB_Descriptor *desc
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// flush so this thread observes writes made by other threads
#pragma omp flush
GB_WHERE1 ("GrB_Descriptor_wait (&desc)") ;
if (desc != NULL && (*desc) != NULL)
{
// a non-NULL descriptor must be properly initialized
GB_RETURN_IF_FAULTY (*desc) ;
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
// flush again so this thread's writes are visible to other threads
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
centroid.c |
// compute the element centroid
// Compute the centroid of every element as the arithmetic mean of the
// coordinates of the points that form it.
//
//   ndim            number of spatial dimensions per point
//   nelem           number of elements
//   npointsperelem  number of points in each element
//   connectivity    [nelem * npointsperelem] point indices, row-major
//   points          point coordinates, ndim doubles per point, row-major
//   centroid        [nelem * ndim] output array, row-major
void centroid(
    long long int ndim,
    long long int nelem,
    long long int npointsperelem,
    long long int *connectivity,
    double* points,
    double* centroid){
    long long int e;
    // Each element writes a disjoint slice of the output, so the outer
    // loop parallelizes without conflicts; loop-local variables below are
    // implicitly private to each thread.
    #pragma omp parallel for
    for (e = 0; e < nelem; e++) {
        long long int *elem = &connectivity[e * npointsperelem];
        for (long long int d = 0; d < ndim; d++) {
            double sum = 0.;
            for (long long int p = 0; p < npointsperelem; p++) {
                sum += points[elem[p] * ndim + d];
            }
            centroid[e * ndim + d] = sum / npointsperelem;
        }
    }
}
|
DataTypeConversions.h | //
// Created by raver119 on 21.11.17.
//
#ifndef LIBND4J_DATATYPECONVERSIONS_H
#define LIBND4J_DATATYPECONVERSIONS_H
#include <pointercast.h>
#include <helpers/logger.h>
#include <op_boilerplate.h>
#include <array/DataType.h>
#include <types/float16.h>
#include <helpers/BitwiseUtils.h>
namespace nd4j {
template <typename T>
class DataTypeConversions {
public:
// Convert `length` elements stored in `src` (raw bytes of `dataType` in
// byte order `order`) into `buffer` of element type T, byte-swapping
// when the stored order differs from the host's endianness.
// Throws std::runtime_error for data types not handled below.
static FORCEINLINE void convertType(T* buffer, void* src, DataType dataType, ByteOrder order, Nd4jLong length) {
bool isBe = BitwiseUtils::isBE();
// true when the stored byte order already matches the host byte order
bool canKeep = (isBe && order == ByteOrder::BE) || (!isBe && order == ByteOrder::LE);
switch (dataType) {
case DataType_FLOAT: {
if (std::is_same<T, float>::value && canKeep) {
// same element type and same endianness: a raw copy suffices
memcpy(buffer, src, length * sizeof(T));
} else {
auto tmp = reinterpret_cast<float *>(src);
// NOTE(review): in both branches below, swap_bytes is applied
// AFTER the cast to T; for an endianness mismatch one would
// normally swap the source representation before interpreting
// it as float — confirm BitwiseUtils::swap_bytes semantics.
#if __GNUC__ <= 4
// old GCC: hoist the canKeep branch out of the loop
if (!canKeep)
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
else
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = static_cast<T>(tmp[e]);
#else
//#pragma omp parallel for simd schedule(guided)
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = canKeep ? static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
#endif
}
}
break;
case DataType_DOUBLE: {
if (std::is_same<T, double>::value && canKeep) {
memcpy(buffer, src, length * sizeof(T));
} else {
auto tmp = reinterpret_cast<double *>(src);
#if __GNUC__ <= 4
if (!canKeep)
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
else
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = static_cast<T>(tmp[e]);
#else
//#pragma omp parallel for simd schedule(guided)
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = canKeep ? static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
#endif
}
}
break;
case DataType_HALF: {
if (std::is_same<T, float16>::value && canKeep) {
memcpy(buffer, src, length * sizeof(T));
} else {
auto tmp = reinterpret_cast<float16 *>(src);
#if __GNUC__ <= 4
if (!canKeep)
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
else
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = static_cast<T>(tmp[e]);
#else
//#pragma omp parallel for simd schedule(guided)
for (Nd4jLong e = 0; e < length; e++)
buffer[e] = canKeep ? static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
#endif
}
}
break;
default: {
// unsupported source type: report and fail loudly
nd4j_printf("Unsupported DataType requested: [%i]\n", static_cast<int>(dataType));
throw std::runtime_error("Unsupported DataType");
}
}
}
};
}
#endif //LIBND4J_DATATYPECONVERSIONS_H
|
update_monodomain.c | //
// Created by sachetto on 13/10/17.
//
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include "../alg/grid/grid.h"
#include "../config/update_monodomain_config.h"
#include "../utils/utils.h"
#include "../single_file_libraries/stb_ds.h"
#ifdef COMPILE_CUDA
#include "../gpu_utils/gpu_utils.h"
#endif
// Assembles the right-hand side b of the monodomain linear system:
// b_i = ALPHA(beta, cm, dt, dx, dy, dz) * Vm_i for every active cell.
// NOTE(review): num_active_cells, active_cells and initial_number_of_cells
// are hidden parameters supplied by the UPDATE_MONODOMAIN macro — confirm
// against update_monodomain_config.h.
UPDATE_MONODOMAIN(update_monodomain_default) {
real_cpu alpha;
bool use_gpu = the_ode_solver->gpu;
real_cpu beta = the_solver->beta;
real_cpu cm = the_solver->cm;
real_cpu dt_pde = the_solver->dt;
int n_equations_cell_model = the_ode_solver->model_data.number_of_ode_equations;
real *sv = the_ode_solver->sv;
#ifdef COMPILE_CUDA
real *vms = NULL;
size_t mem_size = initial_number_of_cells * sizeof(real);
if(use_gpu) {
// copy the state vectors back from the device so Vm can be read on the host
vms = (real *)malloc(mem_size);
check_cuda_errors(cudaMemcpy(vms, sv, mem_size, cudaMemcpyDeviceToHost));
}
#endif
int i;
#pragma omp parallel for private(alpha)
for(i = 0; i < num_active_cells; i++) {
alpha = ALPHA(beta, cm, dt_pde, active_cells[i]->discretization.x, active_cells[i]->discretization.y, active_cells[i]->discretization.z);
if(use_gpu) {
#ifdef COMPILE_CUDA
// GPU copy is indexed by sv_position directly — presumably the GPU
// layout stores Vm contiguously per cell, while the CPU layout
// strides by n_equations_cell_model. TODO confirm.
active_cells[i]->b = vms[active_cells[i]->sv_position] * alpha;
#endif
} else {
active_cells[i]->b = sv[active_cells[i]->sv_position * n_equations_cell_model] * alpha;
}
}
#ifdef COMPILE_CUDA
// free(NULL) is a no-op when the GPU path was not taken
free(vms);
#endif
}
// Assembles the right-hand side b for the monodomain system discretized
// with the DDM scheme: the usual diagonal term b_i = alpha * Vm_i plus a
// kappa-weighted correction term for each face neighbour of the cell.
//
// NOTE(review): num_active_cells, active_cells and initial_number_of_cells
// are hidden parameters supplied by the UPDATE_MONODOMAIN macro — confirm
// against update_monodomain_config.h.
//
// Fixes vs. the original: the six copy-pasted direction branches (which
// differed only in the geometric factor and the kappa component) are
// consolidated into one switch, and the neighbour loop index no longer
// mixes a signed int with the unsigned max_elements bound.
UPDATE_MONODOMAIN(update_monodomain_ddm)
{
    real_cpu alpha;
    bool use_gpu = the_ode_solver->gpu;

    real_cpu beta = the_solver->beta;
    real_cpu cm = the_solver->cm;
    real_cpu dt_pde = the_solver->dt;

    int n_equations_cell_model = the_ode_solver->model_data.number_of_ode_equations;
    real *sv = the_ode_solver->sv;

#ifdef COMPILE_CUDA
    real *vms = NULL;
    size_t mem_size = initial_number_of_cells * sizeof(real);

    if(use_gpu)
    {
        // copy the state vectors back from the device so Vm can be read below
        vms = (real *)malloc(mem_size);
        check_cuda_errors(cudaMemcpy(vms, sv, mem_size, cudaMemcpyDeviceToHost));
    }
#endif

    int i;

#pragma omp parallel for private(alpha)
    for(i = 0; i < num_active_cells; i++)
    {
        // 1) Diagonal contribution: b = alpha * Vm
        alpha = ALPHA(beta, cm, dt_pde, active_cells[i]->discretization.x, active_cells[i]->discretization.y, active_cells[i]->discretization.z);

        if(use_gpu)
        {
#ifdef COMPILE_CUDA
            active_cells[i]->b = vms[active_cells[i]->sv_position] * alpha;
#endif
        }
        else
        {
            active_cells[i]->b = sv[active_cells[i]->sv_position * n_equations_cell_model] * alpha;
        }

        // 2) Kappa correction terms from the face neighbours of this cell.
        struct element *cell_elements = active_cells[i]->elements;
        uint32_t max_elements = arrlen(cell_elements);

        real_cpu dx = active_cells[i]->discretization.x;
        real_cpu dy = active_cells[i]->discretization.y;
        real_cpu dz = active_cells[i]->discretization.z;

        real_cpu kappa_x = active_cells[i]->kappa.x;
        real_cpu kappa_y = active_cells[i]->kappa.y;
        real_cpu kappa_z = active_cells[i]->kappa.z;

        // element 0 is the diagonal entry, so neighbours start at index 1
        for (uint32_t j = 1; j < max_elements; j++)
        {
            int k = cell_elements[j].column;

            // The update is identical for every direction; only the face
            // geometry factor and the kappa component differ.
            real_cpu multiplier;
            real_cpu kappa;
            switch (cell_elements[j].direction)
            {
                case 'n': // North
                case 's': // South
                    multiplier = (dx * dy) / dz;
                    kappa = kappa_z;
                    break;
                case 'e': // East
                case 'w': // West
                    multiplier = (dx * dz) / dy;
                    kappa = kappa_y;
                    break;
                case 'f': // Forward
                case 'b': // Backward
                    multiplier = (dy * dz) / dx;
                    kappa = kappa_x;
                    break;
                default:
                    // not a face neighbour: no correction term
                    continue;
            }

            if(use_gpu)
            {
#ifdef COMPILE_CUDA
                active_cells[i]->b -= vms[active_cells[k]->sv_position] * multiplier * kappa / dt_pde;
                active_cells[i]->b += vms[active_cells[i]->sv_position] * multiplier * kappa / dt_pde;
#endif
            }
            else
            {
                active_cells[i]->b -= sv[active_cells[k]->sv_position * n_equations_cell_model] * multiplier * kappa / dt_pde;
                active_cells[i]->b += sv[active_cells[i]->sv_position * n_equations_cell_model] * multiplier * kappa / dt_pde;
            }
        }
    }

#ifdef COMPILE_CUDA
    // free(NULL) is a no-op when the GPU path was not taken
    free(vms);
#endif
}
|
GB_unop__identity_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_uint16)
// op(A') function: GB (_unop_tran__identity_uint16_uint16)
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
1
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary op, uint16 -> uint16.
GrB_Info GB (_unop_apply__identity_uint16_uint16)
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// A is not bitmap: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a parallel memcpy is sufficient
GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity op.
// All work is done by the included template using the macros above.
GrB_Info GB (_unop_tran__identity_uint16_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lis_matvec_ell.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
/* y = A*x for a matrix in ELL (ELLPACK) storage.
   Split case (A->is_splited): y = D*x + L*x + U*x, with the diagonal D and
   the strictly lower/upper parts L and U each stored in their own ELL
   arrays. Non-split case: rows are partitioned across OpenMP threads and
   each thread accumulates only its own row range, so there are no write
   conflicts on y. */
void lis_matvec_ell(LIS_MATRIX A, LIS_SCALAR x[], LIS_SCALAR y[])
{
	LIS_INT i,j,jj,is,ie;
	LIS_INT n,maxnzr,nprocs,my_rank;
	n = A->n;
	if( A->is_splited )
	{
		/* y = D*x */
		#ifdef USE_VEC_COMP
		#pragma cdir nodep
		#endif
		for(i=0; i<n; i++)
		{
			y[i] = A->D->value[i]*x[i];
		}
		/* y += L*x */
		for(j=0;j<A->L->maxnzr;j++)
		{
			jj = j*n;
			#ifdef USE_VEC_COMP
			#pragma cdir nodep
			#endif
			for(i=0;i<n;i++)
			{
				y[i] += A->L->value[jj + i] * x[A->L->index[jj + i]];
			}
		}
		/* y += U*x */
		for(j=0;j<A->U->maxnzr;j++)
		{
			jj = j*n;
			#ifdef USE_VEC_COMP
			#pragma cdir nodep
			#endif
			for(i=0;i<n;i++)
			{
				y[i] += A->U->value[jj + i] * x[A->U->index[jj + i]];
			}
		}
	}
	else
	{
		maxnzr = A->maxnzr;
		#ifdef _OPENMP
		nprocs = omp_get_max_threads();
		#else
		nprocs = 1;
		#endif
		#ifdef _OPENMP
		#pragma omp parallel private(i,j,jj,is,ie,my_rank)
		#endif
		{
			#ifdef _OPENMP
			my_rank = omp_get_thread_num();
			#else
			my_rank = 0;
			#endif
			/* [is,ie) is this thread's contiguous range of rows */
			LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
			#ifdef USE_VEC_COMP
			#pragma cdir nodep
			#endif
			for(i=is;i<ie;i++)
			{
				y[i] = 0.0;
			}
			/* ELL stores the j-th nonzero of every row contiguously, so the
			   inner loop over rows is unit-stride in value/index */
			for(j=0;j<maxnzr;j++)
			{
				jj = j*n;
				#ifdef USE_VEC_COMP
				#pragma cdir nodep
				#endif
				for(i=is;i<ie;i++)
				{
					y[i] += A->value[jj + i] * x[A->index[jj + i]];
				}
			}
		}
	}
}
/* y = A^T * x for a matrix in ELL (ELLPACK) storage.
   Because the transpose product scatters into y[index[...]], the OpenMP
   version gives each thread a private accumulation buffer w of length np
   and reduces the nprocs buffers into y afterwards, avoiding write
   conflicts. NOTE(review): np presumably includes overlap/ghost columns
   (np >= n) — confirm against the LIS matrix structure docs. */
void lis_matvect_ell(LIS_MATRIX A, LIS_SCALAR x[], LIS_SCALAR y[])
{
	LIS_INT i,j,jj;
	LIS_INT n,np,maxnzr;
	#ifdef _OPENMP
		LIS_INT k,is,ie,nprocs;
		LIS_SCALAR t;
		LIS_SCALAR *w;
	#endif
	n = A->n;
	np = A->np;
	if( A->is_splited )
	{
		/* y = D*x (diagonal is symmetric under transpose) */
		#ifdef USE_VEC_COMP
		#pragma cdir nodep
		#endif
		for(i=0; i<n; i++)
		{
			y[i] = A->D->value[i]*x[i];
		}
		/* y += L^T*x: scatter row contributions through the index array */
		for(j=0;j<A->L->maxnzr;j++)
		{
			jj = j*n;
			#ifdef USE_VEC_COMP
			#pragma cdir nodep
			#endif
			for(i=0;i<n;i++)
			{
				y[A->L->index[jj + i]] += A->L->value[jj + i] * x[i];
			}
		}
		/* y += U^T*x */
		for(j=0;j<A->U->maxnzr;j++)
		{
			jj = j*n;
			#ifdef USE_VEC_COMP
			#pragma cdir nodep
			#endif
			for(i=0;i<n;i++)
			{
				y[A->U->index[jj + i]] += A->U->value[jj + i] * x[i];
			}
		}
	}
	else
	{
		#ifdef _OPENMP
			maxnzr = A->maxnzr;
			nprocs = omp_get_max_threads();
			/* one length-np accumulation buffer per thread */
			w = (LIS_SCALAR *)lis_malloc( nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_ell::w" );
			#pragma omp parallel private(i,j,t,jj,k,is,ie)
			{
				k = omp_get_thread_num();
				LIS_GET_ISIE(k,nprocs,n,is,ie);
				#pragma omp for
				for(j=0;j<nprocs;j++)
				{
					memset( &w[j*np], 0, np*sizeof(LIS_SCALAR) );
				}
				/* each thread scatters its rows [is,ie) into its own buffer */
				for(j=0;j<maxnzr;j++)
				{
					jj = j*n;
					#ifdef USE_VEC_COMP
					#pragma cdir nodep
					#endif
					for(i=is;i<ie;i++)
					{
						w[k*np + A->index[jj + i]] += A->value[jj + i] * x[i];
					}
				}
				#pragma omp barrier
				/* reduce the per-thread buffers into y */
				/* NOTE(review): the cdir pragma below sits between `omp for`
				   and its loop; some compilers may reject that placement —
				   confirm with the supported compilers. */
				#pragma omp for
				#ifdef USE_VEC_COMP
				#pragma cdir nodep
				#endif
				for(i=0;i<np;i++)
				{
					t = 0.0;
					for(j=0;j<nprocs;j++)
					{
						t += w[j*np+i];
					}
					y[i] = t;
				}
			}
			lis_free(w);
		#else
			maxnzr = A->maxnzr;
			#ifdef USE_VEC_COMP
			#pragma cdir nodep
			#endif
			for(i=0; i<n; i++)
			{
				y[i] = 0.0;
			}
			/* serial scatter: y[index] += value * x[row] */
			for(j=0;j<maxnzr;j++)
			{
				jj = j*n;
				#ifdef USE_VEC_COMP
				#pragma cdir nodep
				#endif
				for(i=0;i<n;i++)
				{
					y[A->index[jj + i]] += A->value[jj + i] * x[i];
				}
			}
		#endif
	}
}
|
GB_binop__bshift_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int32)
// C=scalar+B GB (_bind1st__bshift_int32)
// C=scalar+B' GB (_bind1st_tran__bshift_int32)
// C=A+scalar GB (_bind2nd__bshift_int32)
// C=A'+scalar GB (_bind2nd_tran__bshift_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_int32 (aij, bij)
// Type and operator macros for C = A bshift B: C and A are int32_t,
// B (the shift count) is int8_t.  These specialize the shared kernel
// templates included by the functions below.
#define GB_ATYPE \
    int32_t
#define GB_BTYPE \
    int8_t
#define GB_CTYPE \
    int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (no trailing backslash after the 0: a stray line continuation here would
// splice the next source line into the macro's replacement text)
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_int32 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_INT32 || GxB_NO_BSHIFT_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled stub: the dense C += A+B accumulation kernel is only generated
// for the listed accumulators; BSHIFT is not one of them, so the generator
// emitted this under "#if 0" with the placeholder name "(none)".
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
// The loop body comes from the shared template, specialized by the
// GB_* macros defined above for the BSHIFT int32 operator.
void GB (_Cdense_ewise3_noaccum__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// BSHIFT operator as the accumulator.  Returns GrB_NO_VALUE when this
// specialization is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
// B pre-sliced into B_ntasks parallel tasks for B_nthreads threads
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (type int8_t, passed type-erased through
// p_bwork) into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__bshift_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable duplicate return: generator artifact, kept so the
// non-disabled branch always ends in a return regardless of template
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled stub: C = A*D column scaling is not generated for the BSHIFT
// operator (it is not a valid semiring multiplier here), hence "#if 0"
// and the placeholder name "(none)".
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled stub: C = D*B row scaling, likewise not generated for the
// BSHIFT operator ("#if 0", placeholder name "(none)").
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B.
// For eWiseUnion, alpha/beta scalars stand in for entries missing from
// A/B respectively; for plain eWiseAdd they are left unset and unused.
GrB_Info GB (_AaddB__bshift_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B into parallel tasks
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// typed copies of the optional eWiseUnion "fill" scalars
int32_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked or complement-masked)
// where the result C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__bshift_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  Because GB_BINOP_FLIP is 1 for bshift (non-commutative,
// no flipped variant), the flipxy flag selects between two expansions of
// the same template; GB_FLIPPED must be redefined immediately before each
// include, so do not reorder these directives.
GrB_Info GB (_AemultB_02__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; iteration is driven by the entries of M.
GrB_Info GB (_AemultB_04__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C = A.*B, C<M> = A.*B, or C<!M> = A.*B.
GrB_Info GB (_AemultB_bitmap__bshift_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bshift (x, Bx [p]): apply the operator with the scalar x bound
// as the first argument, for every entry present in B.
GrB_Info GB (_bind1st__bshift_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    const int32_t x = (*((int32_t *) x_input)) ;
    const int8_t *Bx = (const int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        const int8_t shift = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_int32 (x, shift) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bshift (Ax [p], y): apply the operator with the scalar y bound
// as the second argument, for every entry present in A.
GrB_Info GB (_bind2nd__bshift_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    const int32_t *Ax = (const int32_t *) Ax_input ;
    const int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        const int32_t value = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_int32 (value, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij): x is the bound first argument; despite the macro's
// name, no typecasting occurs.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int32 (x, aij) ; \
}
// C = op (x, A'): transpose A while applying the operator with the scalar
// x bound as the first argument.
GrB_Info GB (_bind1st_tran__bshift_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file: preprocessing is textual,
// so these directives take effect even though they follow a return
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y): y is the bound second argument; despite the macro's
// name, no typecasting occurs.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int32 (aij, y) ; \
}
// C = op (A', y): transpose A while applying the operator with the scalar
// y bound as the second argument.  Here A's type matches GB_ATYPE, so no
// redefinition is needed (unlike the bind1st transpose above).
GrB_Info GB (_bind2nd_tran__bshift_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ast-dump-openmp-parallel-master-XFAIL.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -fopenmp-version=50 -ast-dump %s 2>&1 | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
// REQUIRES: broken-PR41022
// https://bugs.llvm.org/show_bug.cgi?id=41022
void test_zero() { // null-statement body; CHECK lines below match exact line/column positions, so only same-line comments are safe here
#pragma omp parallel master
  ;
}
// CHECK: {{.*}}ast-dump-openmp-parallel-master-XFAIL.c:4:22: warning: extra tokens at the end of '#pragma omp parallel' are ignored
void test_one() { // compound-statement body; CHECK lines below match exact line/column positions, so only same-line comments are safe here
#pragma omp parallel master
  { ; }
}
// CHECK: {{.*}}ast-dump-openmp-parallel-master-XFAIL.c:10:22: warning: extra tokens at the end of '#pragma omp parallel' are ignored
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel-master-XFAIL.c:3:1, line:6:1> line:3:6 test_zero 'void ()'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:18, line:6:1>
// CHECK-NEXT: | `-OMPParallelDirective {{.*}} <line:4:9, col:28>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-master-XFAIL.c:4:9) *const restrict'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:9:1, line:12:1> line:9:6 test_one 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:12:1>
// CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:10:9, col:28>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:11:3, col:7>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CompoundStmt {{.*}} <col:3, col:7> openmp_structured_block
// CHECK-NEXT: | `-NullStmt {{.*}} <col:5>
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-master-XFAIL.c:10:9) *const restrict'
|
FGT_fmt_plug.c | /*
* Fortigate (FortiOS) Password cracker
*
* This software is Copyright (c) 2012 Mat G. <mat.jtr at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Passwords are located in "config system admin" part of the configuration file :
*
* config system admin
* edit "<username>"
* set password ENC AK1wTiFOMv7mZOTvQNmKQBAY98hZZjSRLxAY8vZp8NlDWU=
*
* Password is : AK1|base64encode(salt|hashed_password)
* where hashed_password is SHA1(salt|password|fortinet_magic)
*
* salt is 12 bytes long
* hashed_password is 20 bytes long (SHA1 salt)
* encoded password is 47 bytes long (3 bytes for AK1 and 44 bytes of base64encode(salt|hashed_password))
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_FGT;
#elif FMT_REGISTERS_H
john_register_one(&fmt_FGT);
#else
#include <string.h>
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "sha.h"
#include "base64_convert.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32768 // tuned on AMD K8 dual-HT (XOP)
#endif
#endif // __MIC__
#endif
#include "memdbg.h"
// Format metadata and hash-layout constants for the Fortigate format.
#define FORMAT_LABEL            "Fortigate"
#define FORMAT_NAME             "FortiOS"
#define ALGORITHM_NAME          "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        32
// 44 base64 characters encode the 12-byte salt + 20-byte SHA-1 digest
#define CIPHERTEXT_LENGTH       44
// full hash string: "AK1" tag + base64 payload = 47 bytes.  Parenthesized
// so the macro expands safely inside larger expressions.
#define HASH_LENGTH             (CIPHERTEXT_LENGTH + 3)
#define BINARY_SIZE             20
#define BINARY_ALIGN            4
#define SALT_SIZE               12
#define SALT_ALIGN              4
// constant suffix FortiOS appends before hashing (24 bytes)
#define FORTINET_MAGIC "\xa3\x88\xba\x2e\x42\x4c\xb0\x4a\x53\x79\x30\xc1\x31\x07\xcc\x3f\xa1\x32\x90\x29\xa9\x81\x5b\x70"
#define FORTINET_MAGIC_LENGTH   24
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
// Known-good test vectors: {ciphertext, plaintext}, terminated by {NULL}.
static struct fmt_tests fgt_tests[] =
{
{"AK1wTiFOMv7mZOTvQNmKQBAY98hZZjSRLxAY8vZp8NlDWU=", "fortigate"},
{"AK1Vd1SCGVtAAT931II/U22WTppAISQkITHOlz0ukIg4nA=", "admin"},
{"AK1DZLDpqz335ElPtuiNTpguiozY7xVaHjHYnxw6sNlI6A=", "ftnt"},
{NULL}
};
// SHA-1 context pre-loaded with the current salt by set_salt()
static SHA_CTX ctx_salt;
// per-candidate key buffers, allocated in init(), indexed by key slot
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_key_len);
// one SHA-1 digest per key slot, filled by crypt_all()
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
// One-time format setup: scale the crypt batch for OpenMP and allocate
// the per-key buffers (freed again in done()).
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
int omp_t = 1;
omp_t = omp_get_max_threads();
// at least one key per thread; up to OMP_SCALE keys per thread
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
saved_key_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key_len));
}
// Release the per-key buffers allocated in init().  The three buffers are
// independent, so the release order is immaterial.
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key_len);
}
// A valid Fortigate hash is the literal "AK1" tag followed by exactly
// 44 base64 characters (47 bytes total).  Returns 1 if plausible, 0 if not.
static int valid(char *ciphertext, struct fmt_main *self)
{
	return (strncmp(ciphertext, "AK1", 3) == 0) &&
	       (strlen(ciphertext) == HASH_LENGTH);
}
// Decode the base64 payload and return the leading 12-byte salt.
// Returns a pointer to static storage; the union forces the 4-byte
// alignment that SALT_ALIGN promises.
static void * get_salt(char *ciphertext)
{
static union {
char b[SALT_SIZE];
uint32_t dummy;
} out;
char buf[SALT_SIZE+BINARY_SIZE+1];
// ciphertext+3 skips the "AK1" tag before decoding
base64_convert(ciphertext+3, e_b64_mime, CIPHERTEXT_LENGTH, buf, e_b64_raw, sizeof(buf), flg_Base64_NO_FLAGS, 0);
memcpy(out.b, buf, SALT_SIZE);
return out.b;
}
// Start a SHA-1 over the 12-byte salt; crypt_all() clones this partial
// context for every candidate key, so the salt is only hashed once.
static void set_salt(void *salt)
{
SHA1_Init(&ctx_salt);
SHA1_Update(&ctx_salt, salt, SALT_SIZE);
}
// Store a candidate plaintext in slot `index`, remembering its length so
// crypt_all() need not re-scan the string.
static void set_key(char *key, int index)
{
saved_key_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
// Return the candidate plaintext previously stored in slot `index`.
static char * get_key(int index)
{
return saved_key[index];
}
// Decode the base64 payload and return the 20-byte SHA-1 digest that
// follows the salt.  Returns a pointer to static storage; the union
// forces the 4-byte alignment that BINARY_ALIGN promises.
static void * get_binary(char *ciphertext)
{
static union {
char b[BINARY_SIZE];
uint32_t dummy;
} bin;
char buf[SALT_SIZE+BINARY_SIZE+1];
memset(buf, 0, sizeof(buf));
// ciphertext+3 skips the "AK1" tag before decoding
base64_convert(ciphertext+3, e_b64_mime, CIPHERTEXT_LENGTH, buf, e_b64_raw, sizeof(buf), flg_Base64_NO_FLAGS, 0);
// skip over the 12 bytes of salt and get only the hashed password
memcpy(bin.b, buf+SALT_SIZE, BINARY_SIZE);
return bin.b;
}
// Return 1 if any of the `count` computed digests matches `binary`.
// A cheap first-word comparison filters out almost every slot before the
// full 20-byte memcmp is attempted.
static int cmp_all(void *binary, int count)
{
	const uint32_t first_word = *(uint32_t *)binary;
	int idx;
	for (idx = 0; idx < count; idx++) {
		if (*(uint32_t *)crypt_key[idx] != first_word)
			continue;
		if (memcmp(binary, crypt_key[idx], BINARY_SIZE) == 0)
			return 1;
	}
	return 0;
}
// Full 20-byte comparison of one computed digest against the target.
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
// The full digest is already compared in cmp_one(), so nothing further
// needs checking here.
static int cmp_exact(char *source, int index)
{
return 1;
}
// Compute SHA1(salt . password . fortinet_magic) for every queued key.
// ctx_salt already holds SHA1(salt) from set_salt(), so each iteration
// clones it and appends only the key and the constant magic suffix.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i = 0;
	// string literals are read-only: point at the magic through const
	const char *cp = FORTINET_MAGIC;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_len, crypt_key, cp)
#endif
#if defined (_OPENMP) || MAX_KEYS_PER_CRYPT>1
	for (i = 0; i < count; i++)
#endif
	{
		SHA_CTX ctx;
		// clone the salt-only context rather than rehashing the salt
		memcpy(&ctx, &ctx_salt, sizeof(ctx));
		SHA1_Update(&ctx, saved_key[i], saved_key_len[i]);
		SHA1_Update(&ctx, cp, FORTINET_MAGIC_LENGTH);
		SHA1_Final((unsigned char*)crypt_key[i], &ctx);
	}
	return count;
}
#define COMMON_GET_HASH_VAR crypt_key
#include "common-get-hash.h"
// Bucket a salt by folding its first 32 bits into the salt-hash table
// range (SALT_HASH_SIZE is a power of two, so masking suffices).
static int salt_hash(void *salt)
{
	return (int)(*(uint32_t *)salt & (SALT_HASH_SIZE - 1));
}
// Format descriptor wiring the callbacks above into JtR's plugin table.
struct fmt_main fmt_FGT = {
{
// static parameters (labels, lengths, alignments, flags)
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP ,
{ NULL },
{ NULL },
fgt_tests
}, {
// lifecycle and parsing callbacks
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
// binary hash functions (defaults: first 32 bits of the digest)
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
// get_hash functions generated from crypt_key
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__abs_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_int32_int32)
// op(A') function: GB (_unop_tran__abs_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = GB_IABS (aij)
// Type and operator macros for C = |A| with int32_t in and out; these
// specialize the shared transpose/apply templates included below.
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
// (GB_IABS is a macro defined elsewhere; it may evaluate its argument
// more than once, so only plain locals are ever passed to it here)
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = |Ax [p]| for every entry of A; when A is bitmap (Ab != NULL)
// only positions flagged in Ab are touched.
GrB_Info GB (_unop_apply__abs_int32_int32)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // A is full or sparse: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            const int32_t value = Ax [k] ;
            Cx [k] = GB_IABS (value) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            const int32_t value = Ax [k] ;
            Cx [k] = GB_IABS (value) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = |A'|: transpose A while applying the absolute-value operator.
// The entire body comes from the shared transpose template, specialized
// by the GB_* macros defined above.
GrB_Info GB (_unop_tran__abs_int32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
singleModificado2.c | #include <stdio.h>
#include <omp.h>
// Demo of OpenMP `single` vs `master`: one thread reads `a` from stdin,
// then all threads fill b[] with it in a worksharing loop.
int main() {
int n = 9, i, a, b[n];
// initialize b with a sentinel so unwritten slots would be visible
for (i = 0; i < n; i++) b[i] = -1;
#pragma omp parallel
{
#pragma omp single
{
// Exactly one thread runs this block; the implicit barrier at the end
// of `single` guarantees `a` is assigned before the loop below reads it.
// NOTE(review): the scanf return value is not checked; on input failure
// `a` stays uninitialized — confirm this is acceptable for the demo.
printf("Introduce valor de inicialización a: ");
scanf("%d", &a);
printf("Single ejecutada por el thread %d\n",
omp_get_thread_num());
}
#pragma omp for
for (i = 0; i < n; i++)
b[i] = a;
}
// NOTE(review): this `master` construct sits OUTSIDE the parallel region,
// so it simply runs on the initial thread; the message printed below says
// "inside the parallel region", which is misleading — confirm whether the
// block was meant to be nested inside the `#pragma omp parallel` above.
#pragma omp master
{
printf("Dentro de la región parallel:\n");
printf("Master ejecutada por el thread %d\n", omp_get_thread_num());
for (i = 0; i < n; i++) printf("b[%d] = %d\t", i, b[i]);
printf("\n");
}
}
|
poisson_3d-a.pluto.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
/*
* N is the number of points
* T is the number of timesteps
*/
// Problem size: N grid points per dimension, T timesteps.  decls.h may
// override these when HAS_DECLS is defined by the build.
#ifdef HAS_DECLS
#include "decls.h"
#else
#define N 600L
#define T 600L
#endif
// floating-point operations per stencil update (for FLOPS reporting)
#define NUM_FP_OPS 15
/* Define our arrays */
// double A[2][N][N][N];
// global accumulators used for result checking/reporting
double total=0; double sum_err_sqr=0;
int chtotal=0;
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * NOTE: normalizes *y in place as a side effect (classic GNU libc idiom).
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) {
    /* Borrow whole seconds into y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus microseconds from the difference into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char * argv[]) {
long int t, i, j, k;
const int BASE = 1024;
long count=0;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0;
// double A[2][N][N][N];
double ****A = (double ****)malloc(2 * sizeof (double ***));
int l;
for (l = 0; l < 2; l++){
A[l] = (double ***) malloc(N * sizeof(double **));
for (i = 0; i < N; i++){
A[l][i] = (double **) malloc(N * sizeof(double *));
for (j = 0; j < N; j++)
A[l][i][j] = (double *) malloc(N * sizeof (double));
}
}
printf("Number of points = %ld\t|Number of timesteps = %ld\t", N, T);
/* Initialization */
srand(42); // seed with a constant value to verify results
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
}
}
}
#ifdef TIME
gettimeofday(&start, 0);
#endif
// #undef N
// #define N 150L
#undef T
#define T 300L
/* Copyright (C) 1991-2012 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* We do support the IEC 559 math functionality, real and complex. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((N >= 3) && (T >= 1)) {
for (t1=-1;t1<=T-1;t1++) {
lbp=ceild(t1,2);
ubp=floord(2*t1+N-1,4);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(ceild(t1-1,2),ceild(t1-1,4)),ceild(4*t2-N,4));t3<=min(min(floord(2*T+N-3,4),floord(t1+2*t2+N,4)),floord(2*t1+N+1,4));t3++) {
for (t4=max(max(max(ceild(t1-1023,1024),ceild(4*t2-N-2044,2048)),ceild(4*t3-N-2044,2048)),ceild(1023*t1-1046529,1048576));t4<=min(min(min(floord(4*t3+N,2048),floord(2*T+N-3,2048)),floord(t1+2*t2+N,2048)),floord(2*t1+N+1,2048));t4++) {
if ((t1 <= min(floord(2048*t4-N+1,2),floord(4*t2+2048*t4-N-1,4))) && (t2 <= 512*t4-1) && (t3 <= 512*t4-1) && (t4 >= ceild(N-1,2048))) {
if ((N+1)%2 == 0) {
for (t6=max(4*t2,-4*t1+4*t2+4096*t4-2*N+1);t6<=min(4*t2+3,-4*t1+4*t2+4096*t4-2*N+4);t6++) {
for (t7=max(4*t3,2048*t4-N+3);t7<=4*t3+3;t7++) {
A[0][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)][(N-2)] = 2.666*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)][(N-2)] - (0.166*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)][(N-2)] + 0.166*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)][(N-2)] + 0.166*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)-1][(N-2)] + 0.166*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)+1][(N-2)] + 0.166*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)][(N-2)+1] + 0.166*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)][(N-2)-1])- (0.0833*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)-1][(N-2)] + 0.0833*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)-1][(N-2)] + 0.0833*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)+1][(N-2)] + 0.0833*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)+1][(N-2)] + 0.0833*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)][(N-2)-1] + 0.0833*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)][(N-2)-1] + 0.0833*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)-1][(N-2)-1] + 0.0833*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)+1][(N-2)-1] + 0.0833*A[1][(-2048*t4+t6+N-2)-1][(-2048*t4+t7+N-2)][(N-2)+1] + 0.0833*A[1][(-2048*t4+t6+N-2)+1][(-2048*t4+t7+N-2)][(N-2)+1] + 0.0833*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)-1][(N-2)+1] + 0.0833*A[1][(-2048*t4+t6+N-2)][(-2048*t4+t7+N-2)+1][(N-2)+1]);;
}
}
}
}
if ((t1 <= min(floord(4*t3-N+1,2),floord(4*t2+4*t3-N-1,4))) && (t2 <= t3-1) && (t3 >= ceild(N-1,4))) {
if ((N+1)%2 == 0) {
for (t6=max(4*t2,-4*t1+4*t2+8*t3-2*N+1);t6<=min(4*t2+3,-4*t1+4*t2+8*t3-2*N+4);t6++) {
for (t8=max(2048*t4,4*t3-N+3);t8<=min(4*t3,2048*t4+2047);t8++) {
A[0][(-4*t3+t6+N-2)][(N-2)][(-4*t3+t8+N-2)] = 2.666*A[1][(-4*t3+t6+N-2)][(N-2)][(-4*t3+t8+N-2)] - (0.166*A[1][(-4*t3+t6+N-2)-1][(N-2)][(-4*t3+t8+N-2)] + 0.166*A[1][(-4*t3+t6+N-2)+1][(N-2)][(-4*t3+t8+N-2)] + 0.166*A[1][(-4*t3+t6+N-2)][(N-2)-1][(-4*t3+t8+N-2)] + 0.166*A[1][(-4*t3+t6+N-2)][(N-2)+1][(-4*t3+t8+N-2)] + 0.166*A[1][(-4*t3+t6+N-2)][(N-2)][(-4*t3+t8+N-2)+1] + 0.166*A[1][(-4*t3+t6+N-2)][(N-2)][(-4*t3+t8+N-2)-1])- (0.0833*A[1][(-4*t3+t6+N-2)-1][(N-2)-1][(-4*t3+t8+N-2)] + 0.0833*A[1][(-4*t3+t6+N-2)+1][(N-2)-1][(-4*t3+t8+N-2)] + 0.0833*A[1][(-4*t3+t6+N-2)-1][(N-2)+1][(-4*t3+t8+N-2)] + 0.0833*A[1][(-4*t3+t6+N-2)+1][(N-2)+1][(-4*t3+t8+N-2)] + 0.0833*A[1][(-4*t3+t6+N-2)-1][(N-2)][(-4*t3+t8+N-2)-1] + 0.0833*A[1][(-4*t3+t6+N-2)+1][(N-2)][(-4*t3+t8+N-2)-1] + 0.0833*A[1][(-4*t3+t6+N-2)][(N-2)-1][(-4*t3+t8+N-2)-1] + 0.0833*A[1][(-4*t3+t6+N-2)][(N-2)+1][(-4*t3+t8+N-2)-1] + 0.0833*A[1][(-4*t3+t6+N-2)-1][(N-2)][(-4*t3+t8+N-2)+1] + 0.0833*A[1][(-4*t3+t6+N-2)+1][(N-2)][(-4*t3+t8+N-2)+1] + 0.0833*A[1][(-4*t3+t6+N-2)][(N-2)-1][(-4*t3+t8+N-2)+1] + 0.0833*A[1][(-4*t3+t6+N-2)][(N-2)+1][(-4*t3+t8+N-2)+1]);;
}
}
}
}
if ((t1 >= 0) && (2*t1 == 4*t2-N+1)) {
for (t7=max(4*t3,2*t1+2);t7<=min(4*t3+3,2*t1+N-1);t7++) {
for (t8=max(2048*t4,2*t1+2);t8<=min(2048*t4+2047,2*t1+N-1);t8++) {
if ((2*t1+3*N+1)%4 == 0) {
A[0][(N-2)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(N-2)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(N-2)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(N-2)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(N-2)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(N-2)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(N-2)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(N-2)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(N-2)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(N-2)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(N-2)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(N-2)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(N-2)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 == 2*t2) && (t1 >= max(ceild(4*t3-N+1,2),ceild(2048*t4-N+1,2)))) {
for (t7=max(4*t3,2*t1+2);t7<=min(4*t3+3,2*t1+N-1);t7++) {
for (t8=max(2048*t4,2*t1+2);t8<=min(2048*t4+2047,2*t1+N-1);t8++) {
if (t1%2 == 0) {
A[0][1][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][1][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][1 -1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][1 +1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][1][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][1 -1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][1 +1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][1 -1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][1 +1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][1 -1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][1 +1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][1][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][1][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][1 -1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][1 +1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][1][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][1][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(T-2,2*t3-1),1024*t4+1021)) && (2*t1 == 4*t2-N+1)) {
for (t7=max(4*t3,2*t1+3);t7<=min(2*t1+N,4*t3+3);t7++) {
for (t8=max(2048*t4,2*t1+3);t8<=min(2*t1+N,2048*t4+2047);t8++) {
if ((2*t1+3*N+1)%4 == 0) {
A[1][(N-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(N-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(N-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(N-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(N-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(N-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(N-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(N-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(N-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(N-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(N-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(N-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(N-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(N-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(N-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(N-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(N-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(N-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(N-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(N-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
if ((t1 == 2*t2) && (t1 <= min(min(T-2,2*t3-2),1024*t4+1020))) {
for (t7=4*t3;t7<=min(2*t1+N,4*t3+3);t7++) {
for (t8=max(2048*t4,2*t1+3);t8<=min(2*t1+N,2048*t4+2047);t8++) {
if (t1%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
if ((t1 == 2*t3) && (t1 <= min(floord(2048*t4-N+2048,2),2*t2-2)) && (t1 >= max(ceild(4*t2-N+2,2),1024*t4))) {
for (t8=2*t1+1;t8<=2*t1+N-2;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][1][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)+1]);;
}
}
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
}
for (t8=2*t1+2;t8<=2*t1+N-2;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2*t1+2;t8<=2*t1+N-1;t8++) {
if (t1%2 == 0) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
}
if ((t1 == 2*t3) && (t1 <= min(min(floord(2048*t4-N+2048,2),2*t2-2),1024*t4-2)) && (t1 >= max(ceild(4*t2-N+2,2),ceild(2048*t4-N+2,2)))) {
for (t8=2048*t4;t8<=2*t1+N-2;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][1][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)+1]);;
}
}
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N-2;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N-1;t8++) {
if (t1%2 == 0) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
}
if ((t1 == 2*t3) && (t1 <= 2*t2-2) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(2048*t4-N+2049,2)),1024*t4))) {
for (t8=2*t1+1;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][1][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)+1]);;
}
}
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
}
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
}
if ((t1 == 2*t3) && (t1 <= min(2*t2-2,1024*t4-2)) && (t1 >= max(ceild(4*t2-N+2,2),ceild(2048*t4-N+2049,2)))) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][1][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][1][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 -1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][1 +1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 -1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][1 +1][(-2*t1+t8)+1]);;
}
}
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
}
if (t1%2 == 0) {
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=2*t1+2;t7<=2*t1+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if (t1%2 == 0) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
}
if ((t1 <= min(min(floord(4*t3-N+4,2),floord(2048*t4-N+2048,2)),2*t2-1)) && (t1 >= max(ceild(4*t3-N+2,2),1024*t4))) {
for (t7=4*t3;t7<=2*t1+N-2;t7++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
for (t8=2*t1+2;t8<=2*t1+N-2;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
for (t8=2*t1+2;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)+1]);;
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=2*t1+N-1;t7++) {
for (t8=2*t1+2;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(floord(2048*t4-N+2048,2),2*t2-1),2*t3-1)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+5,2)),1024*t4))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
for (t8=2*t1+2;t8<=2*t1+N-2;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+2;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(floord(4*t3-N+4,2),2*t2-1),1024*t4+1022)) && (t1 >= max(max(ceild(4*t3-N+2,2),ceild(2048*t4-N+2049,2)),1024*t4))) {
for (t7=4*t3;t7<=2*t1+N-2;t7++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)+1]);;
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=2*t1+N-1;t7++) {
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(2*t2-1,2*t3-1),1024*t4+1022)) && (t1 >= max(max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+5,2)),ceild(2048*t4-N+2049,2)),1024*t4))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][1] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 +1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][1 -1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][1 +1]);;
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+2;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(min(floord(4*t3-N+4,2),floord(2048*t4-N+2048,2)),2*t2-1),1024*t4-1)) && (t1 >= max(max(0,ceild(4*t3-N+2,2)),ceild(2048*t4-N+2,2)))) {
for (t7=4*t3;t7<=2*t1+N-2;t7++) {
for (t8=2048*t4;t8<=2*t1+N-2;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
for (t8=2048*t4;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)+1]);;
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=2*t1+N-1;t7++) {
for (t8=2048*t4;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(min(floord(2048*t4-N+2048,2),2*t2-1),2*t3-1),1024*t4-1)) && (t1 >= max(max(max(0,ceild(4*t2-N+2,2)),ceild(4*t3-N+5,2)),ceild(2048*t4-N+2,2)))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N-2;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-2)+1]);;
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N-1;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(floord(4*t3-N+4,2),2*t2-1),1024*t4-1)) && (t1 >= max(max(0,ceild(4*t3-N+2,2)),ceild(2048*t4-N+2049,2)))) {
for (t7=4*t3;t7<=2*t1+N-2;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(N-2)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(N-2)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(N-2)+1][(-2*t1+t8-1)+1]);;
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=2*t1+N-1;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((t1 <= min(min(2*t2-1,2*t3-1),1024*t4-1)) && (t1 >= max(max(max(0,ceild(4*t2-N+2,2)),ceild(4*t3-N+5,2)),ceild(2048*t4-N+2049,2)))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 2.666*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] - (0.166*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1] + 0.166*A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])- (0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)-1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)+1][(-2*t1+t8)] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)-1] + 0.0833*A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)+1] + 0.0833*A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)+1]);;
A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
for (t6=4*t2+1;t6<=min(4*t2+2,2*t1+N-1);t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 2.666*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] - (0.166*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.166*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])- (0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)-1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)+1][(-2*t1+t8-1)] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)-1] + 0.0833*A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)+1] + 0.0833*A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)+1]);;
}
}
}
}
if ((N == 4) && (t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(T-2,1024*t4+1021))) {
for (t7=2*t1+3;t7<=2*t1+4;t7++) {
for (t8=2*t1+3;t8<=2*t1+4;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+4;t7++) {
for (t8=2*t1+3;t8<=2*t1+4;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2*t1+4;t8<=2*t1+5;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((N >= 5) && (t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(floord(2048*t4-N+2046,2),T-2)) && (t1 >= 1024*t4-1)) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][1][(-2*t1+t8-2)] = 2.666*A[0][3][1][(-2*t1+t8-2)] - (0.166*A[0][3 -1][1][(-2*t1+t8-2)] + 0.166*A[0][3 +1][1][(-2*t1+t8-2)] + 0.166*A[0][3][1 -1][(-2*t1+t8-2)] + 0.166*A[0][3][1 +1][(-2*t1+t8-2)] + 0.166*A[0][3][1][(-2*t1+t8-2)+1] + 0.166*A[0][3][1][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(min(floord(2048*t4-N+2046,2),T-2),1024*t4-3)) && (t1 >= ceild(2048*t4-N,2))) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][1][(-2*t1+t8-2)] = 2.666*A[0][3][1][(-2*t1+t8-2)] - (0.166*A[0][3 -1][1][(-2*t1+t8-2)] + 0.166*A[0][3 +1][1][(-2*t1+t8-2)] + 0.166*A[0][3][1 -1][(-2*t1+t8-2)] + 0.166*A[0][3][1 +1][(-2*t1+t8-2)] + 0.166*A[0][3][1][(-2*t1+t8-2)+1] + 0.166*A[0][3][1][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(T-2,1024*t4+1021)) && (t1 >= max(ceild(2048*t4-N+2047,2),1024*t4-1))) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][1][(-2*t1+t8-2)] = 2.666*A[0][3][1][(-2*t1+t8-2)] - (0.166*A[0][3 -1][1][(-2*t1+t8-2)] + 0.166*A[0][3 +1][1][(-2*t1+t8-2)] + 0.166*A[0][3][1 -1][(-2*t1+t8-2)] + 0.166*A[0][3][1 +1][(-2*t1+t8-2)] + 0.166*A[0][3][1][(-2*t1+t8-2)+1] + 0.166*A[0][3][1][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((t1 == 2*t2-1) && (t1 == 2*t3-1) && (t1 <= min(T-2,1024*t4-3)) && (t1 >= ceild(2048*t4-N+2047,2))) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][1][(-2*t1+t8-2)] = 2.666*A[0][3][1][(-2*t1+t8-2)] - (0.166*A[0][3 -1][1][(-2*t1+t8-2)] + 0.166*A[0][3 +1][1][(-2*t1+t8-2)] + 0.166*A[0][3][1 -1][(-2*t1+t8-2)] + 0.166*A[0][3][1 +1][(-2*t1+t8-2)] + 0.166*A[0][3][1][(-2*t1+t8-2)+1] + 0.166*A[0][3][1][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((N >= 5) && (t1 == 2*t2-1) && (t1 <= min(min(floord(4*t3-N+2,2),floord(2048*t4-N+2046,2)),T-2)) && (t1 >= max(ceild(4*t3-N,2),1024*t4-1))) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
for (t8=2*t1+4;t8<=2*t1+N+1;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][2][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(min(floord(4*t3-N+2,2),floord(2048*t4-N+2046,2)),T-2),1024*t4-3)) && (t1 >= ceild(4*t3-N,2))) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
for (t8=2048*t4;t8<=2*t1+N+1;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][2][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(floord(2048*t4-N+2046,2),T-2),2*t3-3)) && (t1 >= max(ceild(4*t3-N+3,2),1024*t4-1))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(min(floord(2048*t4-N+2046,2),T-2),2*t3-3),1024*t4-3)) && (t1 >= max(ceild(2048*t4-N,2),ceild(4*t3-N+3,2)))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][2][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(floord(4*t3-N+2,2),T-2),1024*t4+1021)) && (t1 >= max(max(ceild(4*t3-N,2),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][2][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(floord(4*t3-N+2,2),T-2),1024*t4-3)) && (t1 >= max(ceild(4*t3-N,2),ceild(2048*t4-N+2047,2)))) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[0][2][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][2][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(T-2,2*t3-3),1024*t4+1021)) && (t1 >= max(max(ceild(4*t3-N+3,2),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][1] = 2.666*A[0][3][(-2*t1+t7-2)][1] - (0.166*A[0][3 -1][(-2*t1+t7-2)][1] + 0.166*A[0][3 +1][(-2*t1+t7-2)][1] + 0.166*A[0][3][(-2*t1+t7-2)-1][1] + 0.166*A[0][3][(-2*t1+t7-2)+1][1] + 0.166*A[0][3][(-2*t1+t7-2)][1 +1] + 0.166*A[0][3][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((t1 == 2*t2-1) && (t1 <= min(min(T-2,2*t3-3),1024*t4-3)) && (t1 >= max(ceild(4*t3-N+3,2),ceild(2048*t4-N+2047,2)))) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][1 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][2][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][2][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][2 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][2 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][2][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][3][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][3][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][3 -1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 +1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][3 -1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3 +1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][3][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][2][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][2][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][2 -1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 +1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][2 -1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2 +1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][2][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((N >= 6) && (t1 <= min(min(T-2,2*t3-1),1024*t4+1021)) && (2*t1 == 4*t2-N+2)) {
for (t6=2*t1+N-1;t6<=2*t1+N;t6++) {
for (t7=max(4*t3,2*t1+3);t7<=min(2*t1+N,4*t3+3);t7++) {
for (t8=max(2048*t4,2*t1+3);t8<=min(2*t1+N,2048*t4+2047);t8++) {
if ((2*t1+3*N+2)%4 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
for (t7=max(4*t3,2*t1+4);t7<=4*t3+3;t7++) {
for (t8=max(2048*t4,2*t1+4);t8<=min(2048*t4+2047,2*t1+N+1);t8++) {
if ((2*t1+3*N+2)%4 == 0) {
A[0][(N-2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(N-2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(N-2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(N-2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(N-2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(N-2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(N-2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(N-2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(N-2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(N-2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(N-2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(N-2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(N-2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(N-2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(N-2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
if ((t1 == 2*t3-1) && (t1 <= min(min(floord(2048*t4-N+2046,2),T-2),2*t2-3)) && (t1 >= max(ceild(4*t2-N+3,2),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t3-1) && (t1 <= min(min(min(floord(2048*t4-N+2046,2),T-2),2*t2-3),1024*t4-3)) && (t1 >= max(ceild(2048*t4-N,2),ceild(4*t2-N+3,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
}
if ((t1 == 2*t3-1) && (t1 <= min(min(T-2,2*t2-3),1024*t4+1021)) && (t1 >= max(max(ceild(4*t2-N+3,2),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)+1]);;
}
}
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
/* Machine-generated (polyhedral/Pluto-style) tile-boundary case of a time-tiled
 * 3-D 19-point Jacobi-like stencil. A[0]/A[1] are the two time buffers
 * (double buffering via the leading index); each update combines the centre
 * point, the 6 face neighbours (weight 0.166) and the 12 edge neighbours
 * (weight 0.0833). This guard appears to select tiles on a t3 boundary
 * (t1 == 2*t3-1) whose t8 extent is a full 2048-wide t4 tile — TODO confirm
 * against the generator's schedule. Do not hand-edit the statements: the
 * exact guard/loop-bound arithmetic encodes the legality of the tiling. */
if ((t1 == 2*t3-1) && (t1 <= min(min(T-2,2*t2-3),1024*t4-3)) && (t1 >= max(ceild(4*t2-N+3,2),ceild(2048*t4-N+2047,2)))) {
/* Bulk interior points of this tile slice (t6 spans two planes). */
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=2*t1+3;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
/* Parity guard: only odd t1+1 steps write the A[1] buffer here —
 * presumably the generator's way of alternating buffers per time step. */
if ((t1+1)%2 == 0) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
}
/* Peeled boundary plane: second spatial index pinned to 1 (domain edge). */
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][1][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 -1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1 +1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 -1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][1 +1][(-2*t1+t8-2)+1]);;
}
}
/* Remaining two t7 planes: the forward update (A[0] -> A[1]) is fused with
 * the next time step's update (A[1] -> A[0], offsets shifted by -1). */
for (t7=2*t1+4;t7<=2*t1+5;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
if ((t1+1)%2 == 0) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
if ((t1+1)%2 == 0) {
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
}
/* Generated tile-boundary case: upper bounds 2*t1+N on both t7 and t8 suggest
 * this handles tiles truncated by the right domain edge in both of those
 * dimensions — TODO confirm against the generator. No parity guard here:
 * both buffer directions (A[0]->A[1] and A[1]->A[0]) are emitted fused. */
if ((t1 <= min(min(min(floord(4*t3-N+2,2),floord(2048*t4-N+2046,2)),T-2),2*t2-2)) && (t1 >= max(max(ceild(4*t3-N,2),ceild(4*t2-N+3,2)),1024*t4-1))) {
/* Bulk update of the two t6 planes, clipped at the domain edge. */
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
/* Forward step at the t8==1 column, then the fused forward/next-step pair,
 * then the trailing (N-2) column of the next time step. */
for (t7=4*t3;t7<=2*t1+N;t7++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
/* Trailing next-step row at the (N-2) plane of the t7 dimension. */
for (t8=2*t1+4;t8<=2*t1+N+1;t8++) {
A[0][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
/* Generated tile-boundary case: like the previous guard but the t8 loops
 * start at the t4 tile origin (2048*t4) rather than the wavefront offset,
 * and the extra 1024*t4-2 upper bound on t1 appears to select an interior
 * t4 tile — TODO confirm against the generator's schedule. */
if ((t1 <= min(min(min(min(floord(4*t3-N+2,2),floord(2048*t4-N+2046,2)),T-2),2*t2-2),1024*t4-2)) && (t1 >= max(ceild(4*t3-N,2),ceild(4*t2-N+3,2)))) {
/* Bulk forward update (A[0] -> A[1]) over both t6 planes. */
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
/* Fused forward/next-step pair, plus the trailing (N-2) column. */
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
/* Trailing next-step row at the (N-2) plane of the t7 dimension. */
for (t8=2048*t4;t8<=2*t1+N+1;t8++) {
A[0][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
/* Generated tile-boundary case: t7 spans only the first four rows of the t3
 * tile (4*t3 .. 4*t3+3) while t8 runs to the right domain edge (2*t1+N) —
 * presumably the t3-tile leading edge combined with a t8/t4 right boundary;
 * TODO confirm against the generator. */
if ((t1 <= min(min(min(floord(2048*t4-N+2046,2),T-2),2*t2-2),2*t3-2)) && (t1 >= max(max(ceild(4*t2-N+3,2),ceild(4*t3-N+3,2)),1024*t4-1))) {
/* Bulk forward update (A[0] -> A[1]) over both t6 planes. */
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2*t1+N;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
/* Peeled t8==1 column, then the fused forward/next-step pair, then the
 * trailing (N-2) column of the next time step. */
for (t7=4*t3;t7<=4*t3+3;t7++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
for (t8=2*t1+4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
/* Generated tile-boundary case: t7 covers the first four rows of the t3 tile
 * and t8 starts at the t4 tile origin (2048*t4) — appears to be the variant
 * of the previous case for an interior t4 tile (extra 1024*t4-2 bound on t1);
 * TODO confirm against the generator. */
if ((t1 <= min(min(min(min(floord(2048*t4-N+2046,2),T-2),2*t2-2),2*t3-2),1024*t4-2)) && (t1 >= max(max(ceild(2048*t4-N,2),ceild(4*t2-N+3,2)),ceild(4*t3-N+3,2)))) {
/* Bulk forward update (A[0] -> A[1]) over both t6 planes. */
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
/* Fused forward/next-step pair, plus the trailing (N-2) column. */
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2*t1+N;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-2)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(N-2)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-2)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-2)+1]);;
}
}
if ((t1 <= min(min(min(floord(4*t3-N+2,2),T-2),2*t2-2),1024*t4+1021)) && (t1 >= max(max(max(ceild(4*t3-N,2),ceild(4*t2-N+3,2)),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1 <= min(min(min(floord(4*t3-N+2,2),T-2),2*t2-2),1024*t4-2)) && (t1 >= max(max(ceild(4*t3-N,2),ceild(4*t2-N+3,2)),ceild(2048*t4-N+2047,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=2*t1+N;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[0][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(N-2)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(N-2)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(N-2)+1][(-2*t1+t8-3)+1]);;
}
}
if ((t1 <= min(min(min(T-2,2*t2-2),2*t3-2),1024*t4+1021)) && (t1 >= max(max(max(ceild(4*t2-N+3,2),ceild(4*t3-N+3,2)),ceild(2048*t4-N+2047,2)),1024*t4-1))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2*t1+3;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 +1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][1 -1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 -1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][1 +1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][1 +1]);;
for (t8=2*t1+4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 <= min(min(min(T-2,2*t2-2),2*t3-2),1024*t4-2)) && (t1 >= max(max(ceild(4*t2-N+3,2),ceild(4*t3-N+3,2)),ceild(2048*t4-N+2047,2)))) {
for (t6=4*t2+1;t6<=4*t2+2;t6++) {
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
}
}
}
for (t7=4*t3;t7<=4*t3+3;t7++) {
for (t8=2048*t4;t8<=2048*t4+2047;t8++) {
A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 2.666*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] - (0.166*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.166*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])- (0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)-1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)+1][(-2*t1+t8-2)] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)-1] + 0.0833*A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)+1] + 0.0833*A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)+1]);;
A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 2.666*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] - (0.166*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.166*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])- (0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)-1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)+1][(-2*t1+t8-3)] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)-1] + 0.0833*A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)+1] + 0.0833*A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)+1]);;
}
}
}
if ((t1 >= 2*t3) && (t3 <= min(floord(T-2,2),512*t4+510))) {
for (t6=max(max(4*t2,4*t3+3),-4*t1+4*t2+8*t3+1);t6<=min(min(4*t2+3,4*t3+N),-4*t1+4*t2+8*t3+4);t6++) {
for (t8=max(2048*t4,4*t3+3);t8<=min(4*t3+N,2048*t4+2047);t8++) {
A[1][(-4*t3+t6-2)][1][(-4*t3+t8-2)] = 2.666*A[0][(-4*t3+t6-2)][1][(-4*t3+t8-2)] - (0.166*A[0][(-4*t3+t6-2)-1][1][(-4*t3+t8-2)] + 0.166*A[0][(-4*t3+t6-2)+1][1][(-4*t3+t8-2)] + 0.166*A[0][(-4*t3+t6-2)][1 -1][(-4*t3+t8-2)] + 0.166*A[0][(-4*t3+t6-2)][1 +1][(-4*t3+t8-2)] + 0.166*A[0][(-4*t3+t6-2)][1][(-4*t3+t8-2)+1] + 0.166*A[0][(-4*t3+t6-2)][1][(-4*t3+t8-2)-1])- (0.0833*A[0][(-4*t3+t6-2)-1][1 -1][(-4*t3+t8-2)] + 0.0833*A[0][(-4*t3+t6-2)+1][1 -1][(-4*t3+t8-2)] + 0.0833*A[0][(-4*t3+t6-2)-1][1 +1][(-4*t3+t8-2)] + 0.0833*A[0][(-4*t3+t6-2)+1][1 +1][(-4*t3+t8-2)] + 0.0833*A[0][(-4*t3+t6-2)-1][1][(-4*t3+t8-2)-1] + 0.0833*A[0][(-4*t3+t6-2)+1][1][(-4*t3+t8-2)-1] + 0.0833*A[0][(-4*t3+t6-2)][1 -1][(-4*t3+t8-2)-1] + 0.0833*A[0][(-4*t3+t6-2)][1 +1][(-4*t3+t8-2)-1] + 0.0833*A[0][(-4*t3+t6-2)-1][1][(-4*t3+t8-2)+1] + 0.0833*A[0][(-4*t3+t6-2)+1][1][(-4*t3+t8-2)+1] + 0.0833*A[0][(-4*t3+t6-2)][1 -1][(-4*t3+t8-2)+1] + 0.0833*A[0][(-4*t3+t6-2)][1 +1][(-4*t3+t8-2)+1]);;
}
}
}
if ((t1 >= 1024*t4+1022) && (t4 <= floord(T-1024,1024))) {
for (t6=max(max(4*t2,2048*t4+2047),-4*t1+4*t2+4096*t4+4089);t6<=min(min(4*t2+3,2048*t4+N+2044),-4*t1+4*t2+4096*t4+4092);t6++) {
for (t7=max(4*t3,2048*t4+2047);t7<=min(4*t3+3,2048*t4+N+2044);t7++) {
A[1][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)][1] = 2.666*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)][1] - (0.166*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)][1] + 0.166*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)][1] + 0.166*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)-1][1] + 0.166*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)+1][1] + 0.166*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)][1 +1] + 0.166*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)][1 -1])- (0.0833*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)-1][1] + 0.0833*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)-1][1] + 0.0833*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)+1][1] + 0.0833*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)+1][1] + 0.0833*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)][1 -1] + 0.0833*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)][1 -1] + 0.0833*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)-1][1 -1] + 0.0833*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)+1][1 -1] + 0.0833*A[0][(-2048*t4+t6-2046)-1][(-2048*t4+t7-2046)][1 +1] + 0.0833*A[0][(-2048*t4+t6-2046)+1][(-2048*t4+t7-2046)][1 +1] + 0.0833*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)-1][1 +1] + 0.0833*A[0][(-2048*t4+t6-2046)][(-2048*t4+t7-2046)+1][1 +1]);;
}
}
}
}
}
}
}
}
/* End of CLooG code */
// #undef N
// #define N 300L
#undef T
#define T 600L
#ifdef TIME
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);
printf("|Time taken: %7.5lfs\t", (tdiff * 0.001) * 1.0e3);
printf("|MFLOPS: %f\n", ((((double)NUM_FP_OPS * N *N * N * (T-1)) / tdiff) / 1000000L));
#endif
#ifdef VERIFY
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
total+= A[T%2][i][j][k] ;
}
}
}
printf("|sum: %e\t", total);
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
sum_err_sqr += (A[T%2][i][j][k] - (total/N))*(A[T%2][i][j][k] - (total/N));
}
}
}
printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr));
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
chtotal += ((char *)A[T%2][i][j])[k];
}
}
}
printf("|sum(rep(A)) = %d\n", chtotal);
#endif
for (l = 0; l < 2; l++){
for (i = 0; i < N; i++){
for (j = 0; j < N; j++)
free(A[l][i][j]); // = (double *) malloc(N * sizeof (double));
free(A[l][i]); // = (double **) malloc(N * sizeof(double *));
}
free(A[l]); // = (double ***) malloc(N * sizeof(double **));
}
return 0;
}
// icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm
// /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/
// /* @ begin PrimeRegTile (scalar_replacement=0; T1t5=4; T1t6=4; T1t7=4; T1t8=4; ) @*/
// /* @ end @*/
|
convolution_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Generic convolution kernel for pack-16 layout (16 input channels and
// 16 output channels interleaved per element) using AVX-512.
// For every output pixel it accumulates, over all input channel groups and
// kernel taps, a 16x16 dense product: each of the 16 input lanes is
// broadcast and multiplied against its packed 16-wide weight vector.
static void convolution_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int w = bottom_blob.w;
    const int channels = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // Precompute, for every kernel tap, its flattened offset (in pack-16
    // elements) relative to the top-left element of the sliding window.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int idx = 0;
        int ofs = 0;
        const int gap = w * dilation_h - kernel_w * dilation_w;
        for (int ky = 0; ky < kernel_h; ky++)
        {
            for (int kx = 0; kx < kernel_w; kx++)
            {
                space_ofs[idx] = ofs;
                idx++;
                ofs += dilation_w;
            }
            ofs += gap; // skip to the start of the next kernel row
        }
    }

    const float* bias_data_ptr = bias_data;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Seed the accumulator with the bias for this group of 16
                // output channels, or zero when no bias is present.
                __m512 _sum = bias_data_ptr ? _mm512_loadu_ps(bias_data_ptr + p * 16) : _mm512_setzero_ps();

                const float* kptr = weight_data_packed.channel(p);

                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 16;

                    for (int k = 0; k < maxk; k++)
                    {
                        const float* slptr = sptr + space_ofs[k] * 16;

                        // 16x16 inner product: broadcast input lane l and
                        // fuse-multiply-add against its weight vector.
                        // Lane order (0..15) matches the packed weight layout,
                        // so the accumulation order is well defined.
                        for (int l = 0; l < 16; l++)
                        {
                            __m512 _val = _mm512_set1_ps(slptr[l]);
                            __m512 _w = _mm512_load_ps(kptr + 16 * l);
                            _sum = _mm512_fmadd_ps(_val, _w, _sum);
                        }

                        kptr += 256; // 16 lanes x 16 channels consumed per tap
                    }
                }

                _sum = activation_avx512(_sum, activation_type, activation_params);

                _mm512_store_ps(outptr, _sum);
                outptr += 16;
            }
        }
    }
}
|
piMP.c | #include <omp.h>
static long num_steps=100000;
/*
 * Computes pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * parallelized with an OpenMP reduction.
 *
 * Fixes over the original:
 *  - `void int main` was an invalid signature -> `int main`.
 *  - `step` was used but never declared.
 *  - `pi step*sum;` was missing the assignment operator.
 *  - the loop ran i = 0..num_steps with x = (i-0.5)*step, evaluating a
 *    negative abscissa and one extra slice; the midpoint rule needs
 *    i = 1..num_steps with x = (i-0.5)*step.
 */
int main(int argc, char const *argv[]) {
  int i;
  double x, pi, sum = 0.;
  double step = 1.0 / (double) num_steps; /* width of each slice */
  #pragma omp parallel for reduction(+:sum) private(x)
  for (i = 1; i <= num_steps; i++) {
    x = (i - 0.5) * step;
    sum += 4.0 / (1.0 + x * x);
  }
  pi = step * sum;
  (void) pi; /* result kept local, as in the original (nothing was printed) */
  return 0;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 * Y is modified in place during normalization (as in the classic
 * GNU libc example this follows).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize Y so that x->tv_usec - y->tv_usec lands in [0, 1000000). */
  if (x->tv_usec < y->tv_usec)
  {
    /* Borrow whole seconds into Y's microsecond field. */
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    /* Carry excess microseconds back into Y's second field. */
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* After normalization tv_usec is certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative iff X's (normalized) seconds fall below Y's. */
  return x->tv_sec < y->tv_sec;
}
/* 25-point (order-8) 3D wave-equation stencil benchmark.
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; 8 ghost layers are added.
 * Fixes vs. previous version:
 *  - Nx/Ny/Nz/Nt now have defaults instead of being read uninitialized
 *    when command-line arguments are absent.
 *  - roc2 was malloc'd twice, leaking the first allocation.
 *  - initialization now covers index 0 and time level A[1]; the stencil
 *    reads A[t%2][0][..][..] (i-4 with i==4) and A[(t+1)%2] at t==0, both
 *    of which were previously uninitialized (undefined behavior).
 *  - tile_size is now freed. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* grid extents including 2*4 ghost layers, and number of timesteps */
    int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 8;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* two time levels of the field A, plus the (squared) wave speed roc2 */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 8;
    tile_size[3] = 256;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    /* Initialize every cell (index 0 included) of both time levels so the
       stencil never touches uninitialized memory. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* 8th-order central finite-difference coefficients */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        /* leapfrog update: A(t+1) = 2*A(t) - A(t-1) + roc2*laplacian8(A(t)) */
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    free(roc2);
    free(tile_size);
    return 0;
}
|
curvilinear_parity_and_outer_boundary_conditions.h |
// First we define the struct that will be used to store the 10 parity conditions at all gridpoints:
// We store the 10 parity conditions in a struct consisting of 10 integers, one for each condition.
// Note that these conditions can only take one of two values: +1 or -1.
// Per-gridpoint parity data: each of the 10 tensor components can flip
// sign when mapped through a coordinate singularity, so each entry is
// strictly +1 or -1 (stored compactly as int8_t).
typedef struct parity_conditions {
    int8_t parity[10];
} parity_condition;
// For each ghost-zone point, the interior gridpoint (i0,i1,i2) it maps to.
// A value of (-1,-1,-1) marks an outer-boundary point that has no interior
// image (see set_up_bc_gz_map_and_parity_conditions()).
typedef struct ghostzone_map {
    short i0,i1,i2;
} gz_map;
// Evaluate the 10 boundary parity conditions at a ghost point (xx0,xx1,xx2)
// relative to its in-bounds image (xx0_inbounds,...). The actual formulas
// are code-generated into set_parity_conditions.h, which writes into
// parity[0..9]; each result should evaluate to +/-1 (checked by the caller).
void set_bc_parity_conditions(REAL parity[10], const REAL xx0,const REAL xx1,const REAL xx2,
                              const REAL xx0_inbounds,const REAL xx1_inbounds,const REAL xx2_inbounds) {
#include "set_parity_conditions.h"
}
// Build, for every gridpoint, (a) the ghost-zone map bc_gz_map (which interior
// point, if any, this point maps to through the curvilinear coordinate
// identification) and (b) the 10 parity signs bc_parity_conditions.
// Interior points and outer-boundary points get map (-1,-1,-1) and parity +1.
void set_up_bc_gz_map_and_parity_conditions(const int Nxx_plus_2NGHOSTS[3], REAL *xx[3],
                                            const REAL dxx[3], const REAL xxmin[3], const REAL xxmax[3],
                                            gz_map *bc_gz_map, parity_condition *bc_parity_conditions) {
    LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],0,Nxx_plus_2NGHOSTS[1],0,Nxx_plus_2NGHOSTS[2]) {
        // First find Cartesian coordinate corresponding to (x_0,x_1,x_2)=(xx[0][i0],xx[1][i1],xx[2][i2]):
        REAL xCart[3];
        xxCart(xx, i0,i1,i2, xCart);
        REAL Cartx = xCart[0];
        REAL Carty = xCart[1];
        REAL Cartz = xCart[2];
        // Next find the (i0_inbounds,i1_inbounds,i2_inbounds) corresponding to the above Cartesian coordinate.
        // If (i0_inbounds,i1_inbounds,i2_inbounds) is in a ghost zone, then it must equal (i0,i1,i2), and
        // the point is an outer boundary point.
        // Otherwise (i0_inbounds,i1_inbounds,i2_inbounds) is in the grid interior, and data at (i0,i1,i2)
        // must be replaced with data at (i0_inbounds,i1_inbounds,i2_inbounds), but multiplied by the
        // appropriate parity condition (+/- 1).
        REAL Cart_to_xx0_inbounds,Cart_to_xx1_inbounds,Cart_to_xx2_inbounds;
#include "Cart_to_xx.h"
        // Cell-centered grid: invert xx[i] = xxmin + (i - NGHOSTS + 1/2)*dxx, rounding to nearest index.
        int i0_inbounds = (int)( (Cart_to_xx0_inbounds - xxmin[0] - (1.0/2.0)*dxx[0] + ((REAL)NGHOSTS)*dxx[0])/dxx[0] + 0.5 );
        int i1_inbounds = (int)( (Cart_to_xx1_inbounds - xxmin[1] - (1.0/2.0)*dxx[1] + ((REAL)NGHOSTS)*dxx[1])/dxx[1] + 0.5 );
        int i2_inbounds = (int)( (Cart_to_xx2_inbounds - xxmin[2] - (1.0/2.0)*dxx[2] + ((REAL)NGHOSTS)*dxx[2])/dxx[2] + 0.5 );
        // Sanity check: the mapped index must land on the *same* Cartesian point
        // (to within EPS_ABS); otherwise the coordinate inversion is broken.
        REAL xCart_orig[3]; for(int ii=0;ii<3;ii++) xCart_orig[ii] = xCart[ii];
        xxCart(xx, i0_inbounds,i1_inbounds,i2_inbounds, xCart);
#define EPS_ABS 1e-8
        if(fabs( (double)(xCart_orig[0] - xCart[0]) ) > EPS_ABS ||
           fabs( (double)(xCart_orig[1] - xCart[1]) ) > EPS_ABS ||
           fabs( (double)(xCart_orig[2] - xCart[2]) ) > EPS_ABS) {
            printf("Error. Cartesian disagreement: ( %.15e %.15e %.15e ) != ( %.15e %.15e %.15e )\n",
                   (double)xCart_orig[0],(double)xCart_orig[1],(double)xCart_orig[2],
                   (double)xCart[0],(double)xCart[1],(double)xCart[2]);
            exit(1);
        }
        if(i0_inbounds-i0 == 0 && i1_inbounds-i1 == 0 && i2_inbounds-i2 == 0) {
            // Point maps to itself: interior or outer-boundary point. Mark it
            // with sentinel (-1,-1,-1) and trivial parity (+1 for all 10).
            bc_gz_map[IDX3(i0,i1,i2)].i0=-1;
            bc_gz_map[IDX3(i0,i1,i2)].i1=-1;
            bc_gz_map[IDX3(i0,i1,i2)].i2=-1;
            for(int which_parity=0; which_parity<10; which_parity++) {
                bc_parity_conditions[IDX3(i0,i1,i2)].parity[which_parity] = 1;
            }
        } else {
            // Inner-boundary point: record the interior image and evaluate
            // the 10 parity signs for data copied from that image.
            bc_gz_map[IDX3(i0,i1,i2)].i0=i0_inbounds;
            bc_gz_map[IDX3(i0,i1,i2)].i1=i1_inbounds;
            bc_gz_map[IDX3(i0,i1,i2)].i2=i2_inbounds;
            const REAL xx0 = xx[0][i0];
            const REAL xx1 = xx[1][i1];
            const REAL xx2 = xx[2][i2];
            const REAL xx0_inbounds = xx[0][i0_inbounds];
            const REAL xx1_inbounds = xx[1][i1_inbounds];
            const REAL xx2_inbounds = xx[2][i2_inbounds];
            REAL REAL_parity_array[10];
            set_bc_parity_conditions(REAL_parity_array, xx0,xx1,xx2, xx0_inbounds,xx1_inbounds,xx2_inbounds);
            for(int whichparity=0;whichparity<10;whichparity++) {
                //printf("Good? Parity %d evaluated to %e\n",whichparity,REAL_parity_array[whichparity]);
                // Perform sanity check on parity array output: should be +1 or -1 to within 8 significant digits:
                if( (REAL_parity_array[whichparity] > 0 && fabs(REAL_parity_array[whichparity] - (+1)) > 1e-8) ||
                    (REAL_parity_array[whichparity] <= 0 && fabs(REAL_parity_array[whichparity] - (-1)) > 1e-8) ) {
                    printf("Error. Parity evaluated to %e , which is not within 8 significant digits of +1 or -1.",REAL_parity_array[whichparity]);
                    exit(1);
                }
                // Quantize the floating-point parity to an exact +/-1.
                if(REAL_parity_array[whichparity] < 0.0) bc_parity_conditions[IDX3(i0,i1,i2)].parity[whichparity] = -1;
                if(REAL_parity_array[whichparity] > 0.0) bc_parity_conditions[IDX3(i0,i1,i2)].parity[whichparity] = +1;
            }
        }
    }
}
// Part P6: Declare boundary condition OB_UPDATE macro,
//          which updates a single face of the 3D grid cube with
//          1. quadratic polynomial extrapolation, if the face
//             corresponds to an outer boundary, or
//          2. a parity condition, if the face maps to a point
//             in the grid interior.
// Face-direction constants. The apparently inverted signs are intentional:
// the stencil offsets i0+n*FACEX0 must step *into* the grid, so a MAX face
// uses offset -1 and a MIN face uses offset +1.
const int MAXFACE = -1;
const int NUL = +0;
const int MINFACE = +1;
// OB_UPDATE: loop over one face region.
//  - inner==0 handles outer-boundary points (gz map sentinel i0==-1):
//    3-point quadratic extrapolation 3*f1 - 3*f2 + f3 along the face normal.
//  - inner==1 handles inner-boundary points (i0 != -1): copy the mapped
//    interior value times the stored parity sign for this gridfunction.
#define OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) { \
const int idx3 = IDX3(i0,i1,i2); \
if(bc_gz_map[idx3].i0 == -1 && inner==0) { \
gfs[IDX4(which_gf,i0,i1,i2)] = \
+3.0*gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-3.0*gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
+1.0*gfs[IDX4(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
} else if(bc_gz_map[idx3].i0 != -1 && inner==1) { \
gfs[IDX4(which_gf,i0,i1,i2)] = \
( (REAL)bc_parity_conditions[idx3].parity[gfs_parity[which_gf]] )* \
gfs[IDX4(which_gf, \
bc_gz_map[idx3].i0, \
bc_gz_map[idx3].i1, \
bc_gz_map[idx3].i2)]; \
} \
}
// Part P7: Boundary condition driver routine: Apply BCs to all six
// boundary faces of the cube, filling in the innermost
// ghost zone first, and moving outward.
// Part P7: Boundary condition driver routine: apply BCs to all six boundary
// faces of the cube for every gridfunction, filling in the innermost ghost
// zone first and moving outward. For each ghost layer the outer-boundary
// pass (inner==0) runs before the inner-boundary/parity pass (inner==1).
void apply_bcs(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],
               gz_map *bc_gz_map,parity_condition *bc_parity_conditions,int num_gfs,const int8_t *gfs_parity, REAL *gfs) {
    // Parallelize over gridfunctions: each iteration touches only its own
    // which_gf slab of gfs, so iterations are independent.
#pragma omp parallel for
    for(int which_gf=0;which_gf<num_gfs;which_gf++) {
        // imin/imax bracket the not-yet-updated region; they start at the
        // interior and are widened by one after each face is filled.
        int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
        int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
        for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
            for(int inner=0;inner<2;inner++) {
                // After updating each face, adjust imin[] and imax[]
                // to reflect the newly-updated face extents.
                OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
                OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
                OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
                OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
                OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
                OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
                // The inner==0 pass widened the brackets; restore them so the
                // inner==1 pass covers the same ghost layer before moving out.
                if(inner==0) { for(int ii=0;ii<3;ii++) {imin[ii]++; imax[ii]--;} }
            }
        }
    }
} |
linear_algebra.c | #include "../include/linear_algebra.h"
#include <omp.h>
/* a = (b * c) mod n, using GMP integers. GMP allows a to alias b or c. */
void modular_multiplication(mpz_t a, mpz_t b, mpz_t c, mpz_t n) {
    mpz_mul (a, b, c);
    mpz_mod (a, a, n);
}
/* Return bit i of packed row k of matrix M (bits are stored MSB-first
 * inside each N_BITS-wide word). */
unsigned get_k_i(word ** M, unsigned long k,
                 unsigned long i) {
    const unsigned long block = i / N_BITS;           /* word holding bit i */
    const unsigned long shift = (N_BITS - 1) - (i % N_BITS);
    return (get_matrix_l(M, k, block) >> shift) & 1;
}
/* Write bit i of packed row k of M to (value % 2).
 * Fix: the previous implementation only OR-ed the new bit in, so a bit
 * already set to 1 could never be cleared by passing value==0; the target
 * bit is now masked out before the new value is written. */
void set_k_i(word ** M, unsigned long k,
             unsigned long i, unsigned int value) {
    unsigned long I = i / N_BITS;
    unsigned long n_shift = N_BITS - ((i % N_BITS ) + 1);
    word b = get_matrix_l(M, k, I);
    b &= ~(((word) 1) << n_shift);                 /* clear the target bit */
    b |= ((word) (value & 1U)) << n_shift;         /* then store value % 2 */
    set_matrix_l(M, k, I, b);
}
/* Row update over Z/2: XOR packed row j into packed row k, word by word,
 * across n_blocks machine words. Equivalent to v(k) += v(j) mod 2. */
void add_vector_z2(word ** M, unsigned long k,
                   unsigned long j, unsigned long n_blocks) {
    unsigned long blk = 0;
    while(blk < n_blocks) {
        set_matrix_l(M, k, blk, get_matrix_l(M, k, blk) ^ get_matrix_l(M, j, blk));
        ++blk;
    }
}
/* Integer row update: M[k][col] += M[j][col] for every column (GMP ints). */
void add_vector_z(mpz_t ** M, unsigned long k,
                  unsigned long j, unsigned long n_col) {
    mpz_t acc, lhs, rhs;
    mpz_init(acc);
    mpz_init(lhs);
    mpz_init(rhs);
    unsigned long col;
    for(col = 0; col < n_col; ++col) {
        get_matrix_mpz(lhs, M, k, col);
        get_matrix_mpz(rhs, M, j, col);
        mpz_add(acc, lhs, rhs);       /* M[k][col] = M[k][col] + M[j][col] */
        set_matrix_mpz(M, k, col, acc);
    }
    mpz_clear(acc);
    mpz_clear(lhs);
    mpz_clear(rhs);
}
/* Recompute the stats of packed row k over n_col bits:
 *   wt->b_dx  = column of the leftmost 1-bit (n_col if the row is all zero)
 *   wt->n_bit = total number of 1-bits.
 * Fix: the scan condition previously evaluated get_k_i(M,k,i) BEFORE the
 * bounds check, so an all-zero row read one bit past the end (i == n_col);
 * the operands are now ordered so the bounds check short-circuits first. */
void get_wt_k(word ** M, unsigned long k, unsigned long n_col,
              struct row_stats * wt) {
    /* Initialize: leftmost bit "one past the end", popcount zero. */
    wt->b_dx = n_col;
    wt->n_bit = 0;
    /* Scan from the start until the first 1-bit. */
    unsigned long i = 0;
    while(i < n_col && get_k_i(M, k, i) == 0)
        ++i;
    /* No 1-bit found: the row is zero, keep the sentinel values. */
    if(i >= n_col)
        return;
    wt->b_dx = i;
    /* Count the remaining 1-bits. */
    for(; i < n_col; ++i)
        if(get_k_i(M, k, i))
            wt->n_bit++;
}
/* Gaussian elimination over Z/2 on the packed exponent matrix M_z2,
 * mirroring every row operation on the integer exponent matrix M_z and on
 * the accumulated (A_i + s) products in As (multiplied mod N).
 * For each column i, the pivot is the row j whose leftmost 1-bit is i;
 * every later row with a 1 in column i gets the pivot row added to it.
 * Rows below the pivot are independent, so the elimination loop is
 * parallelized with OpenMP.
 * Cleanup: removed unused timing locals and an omp_get_num_threads() call
 * made outside any parallel region (always 1); the dynamic chunk size is
 * now clamped to >= 1 (n_row/4 could be 0 for tiny systems, which is an
 * invalid OpenMP chunk size). */
void gaussian_elimination(mpz_t ** M_z,
                          word ** M_z2,
                          mpz_t * As,
                          mpz_t N,
                          unsigned long n_row,
                          unsigned long n_col,
                          unsigned long n_blocks,
                          struct row_stats wt[]) {
    long chunk = (long)(n_row / 4);
    if(chunk < 1)
        chunk = 1;
    for(unsigned long i = 0; i < n_col; ++i) {
        /* Locate the pivot row for column i. */
        unsigned long j;
        for(j = 0; j < n_row && wt[j].b_dx != i; ++j)
            ; /* just advance j */
        if(j >= n_row)
            continue; /* no pivot for this column: nothing to eliminate */
#pragma omp parallel for schedule(dynamic, chunk)
        for(unsigned long k = j + 1; k < n_row; ++k) {
            if(get_k_i(M_z2, k, i)) {                    /* bit v(k)(i) must be 1 */
                add_vector_z2(M_z2, k, j, n_blocks);     /* v(k) = v(k) + v(j) mod 2 */
                add_vector_z(M_z, k, j, n_col);          /* v(k) = v(k) + v(j) */
                /* (A_k + s) = (A_k + s) * (A_j + s) mod N */
                modular_multiplication(As[k], As[k], As[j], N);
                get_wt_k(M_z2, k, n_col, & wt[k]);       /* refresh row stats */
            }
        }
    }
}
/* Search the eliminated matrix for a linear dependency (an all-zero Z/2
 * row) and try to turn it into a non-trivial factor of N via
 * gcd(X + Y, N). Returns 1 and stores the factor in m on success, 0
 * otherwise.
 * Fixes: the function previously fell off the end without returning a
 * value (undefined behavior for a non-void function); the early success
 * return leaked all six mpz_t temporaries; mpz_temp was never cleared on
 * any path; the inner loop compared a signed int against the unsigned
 * long n_primes. */
unsigned factorization(mpz_t N,                       // number to factor
                       unsigned int * factor_base,
                       word ** M_z2,                  // exponents mod 2
                       mpz_t ** M_z,                  // integer exponents
                       mpz_t * As,                    // products of (Ai + s)
                       struct row_stats * wt,         // per-row bit stats
                       unsigned long n_row,           // #complete factorizations
                       unsigned long n_primes,        // factor base size
                       mpz_t m) {                     // out: non-trivial factor of N
    mpz_t mpz_temp;
    mpz_init(mpz_temp);
    mpz_t mpz_prime;
    mpz_init(mpz_prime);
    mpz_t X;
    mpz_init(X);
    mpz_t Y;
    mpz_init(Y);
    mpz_t q;
    mpz_init(q);
    mpz_t exp;
    mpz_init(exp);
    unsigned found = 0;
    for(unsigned long i = 0; i < n_row && !found; ++i)
        if(wt[i].n_bit == 0) { // dependency found
            /* Y = product over the factor base of p^(e/2) mod N, where e is
               the (even) summed exponent recorded in M_z. */
            mpz_set_ui(Y, 1);
            for(unsigned long j = 0; j < n_primes; ++j) {
                mpz_set_ui(mpz_prime, factor_base[j]);
                get_matrix_mpz(exp, M_z, i, j);
                mpz_divexact_ui(exp, exp, 2); // exp = exp / 2
                // temp = (factor_base[j])^(M_z[i][j]/2) mod N
                mpz_powm(mpz_temp, mpz_prime, exp, N);
                // Y = Y * temp mod N
                modular_multiplication(Y, Y, mpz_temp, N);
            }
            mpz_set(X, As[i]);
            mpz_add(X, X, Y);                 // X = X + Y
            mpz_gcd(m, X, N);                 // m = gcd(X + Y, N)
            mpz_divexact(q, N, m);            // q = N / m
            /* Accept only a non-trivial factor: 1 < m < N. */
            if(mpz_cmp(m, N) < 0 && mpz_cmp_ui(m, 1) > 0)
                found = 1;
        }
    mpz_clear(mpz_temp);
    mpz_clear(exp);
    mpz_clear(q);
    mpz_clear(mpz_prime);
    mpz_clear(X);
    mpz_clear(Y);
    return found;
}
|
cycle_share.c | // SPDX-License-Identifier: BSD-2-Clause
/*
Copyright 1998-2018 Bernard Parent
Copyright 2020 Minindu Weerakoon
Copyright 2001 Giovanni Fusina
Copyright 2002 Thomas E. Schwartzentruber
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cycle/share/cycle_share.h>
#include <src/data.h>
#include <src/common.h>
#include <src/bdry.h>
#include <src/init.h>
#include <cycle/ts/_ts.h>
#include <cycle/tsemf/_tsemf.h>
#include <cycle/_cycle.h>
#include <cycle/res/_res.h>
#include <cycle/resconv/_resconv.h>
#include <cycle/restime/_restime.h>
#include <model/fluid/_fluid.h>
#include <model/emfield/_emfield.h>
#include <model/metrics/_metrics.h>
#include <model/fluid/_fluid.h>
#ifdef OPENMPTHREADS
#define maxloopthread LONG_MAX
#define maxzonethread LONG_MAX
#else
#define maxloopthread 256
#define maxzonethread 256
#endif
#define MAXRATIO_DTAUMAX_DTAUMIN 100.0
// A 1D run of nodes [ls,le] along grid dimension theta.
typedef struct {
    np_t *np;
    gl_t *gl;
    long theta,ls,le;
} segment_t;
// Same as segment_t plus the worker function applied to the segment
// (one instance per thread in the segment sweeps).
typedef struct {
    np_t *np;
    gl_t *gl;
    long theta,ls,le;
    void (*funct)(np_t *, gl_t *, long, long, long);
} segmentarg_t;
// Arguments for running a worker function over a whole 3D zone.
typedef struct {
    np_t *np;
    gl_t *gl;
    zone_t zone;
    void (*funct)(np_t *, gl_t *, zone_t);
} threadzone_t;
void *segmentfunct(void *segmentarg){
(((segmentarg_t *) segmentarg)->funct)(
((segmentarg_t *) segmentarg)->np,
((segmentarg_t *) segmentarg)->gl,
((segmentarg_t *) segmentarg)->theta,
((segmentarg_t *) segmentarg)->ls,
((segmentarg_t *) segmentarg)->le);
return(NULL);
}
// Fill musclvars with the MUSCL variables of node np. When trapezoidal
// time storage is compiled in, the second half [nf..2*nf) of the array is
// additionally filled from the node's stored previous-stage values
// (bs->trapezoidalm1) rather than duplicating the current ones.
void find_musclvarscycle(np_t np, gl_t *gl, musclvarscycle_t musclvars){
    find_musclvars(np,gl,musclvars);
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL_MUSCLVARS
    long flux;
//  for (flux=0; flux<nf; flux++) musclvars[nf+flux]=musclvars[flux];
    for (flux=0; flux<nf; flux++) musclvars[nf+flux]=np.bs->trapezoidalm1[flux];
#endif
}
// Run the worker stored in each segmentarg over its segment, either
// serially (no threading compiled in, or light work with short threads
// disabled) or in parallel via POSIX threads / OpenMP.
static void execute_function_on_all_segments(segmentarg_t *segmentarg, long numsegment, int SEGMENTWORK){
    if (
        // Serial path: always when no threading backend is compiled in;
        // otherwise only for light work when short threads are disabled.
#if !defined(POSIXTHREADS) && !defined(OPENMPTHREADS)
        TRUE
#else
        (SEGMENTWORK==SEGMENTWORK_LIGHT && segmentarg[0].gl->NOSHORTTHREADS)
#endif
    ){
        long cnt;
        for (cnt=0; cnt<numsegment; cnt++){
            segmentarg[cnt].funct(segmentarg[cnt].np,segmentarg[cnt].gl,segmentarg[cnt].theta,segmentarg[cnt].ls,segmentarg[cnt].le);
        }
    } else {
#ifdef POSIXTHREADS
        // One pthread per segment, then join them all.
        long cnt;
        void *retval;
        pthread_t *pthread;
        pthread=(pthread_t *)malloc((numsegment+3)*sizeof(pthread_t));
        for (cnt=0; cnt<numsegment; cnt++){
            if (pthread_create(&((pthread)[cnt]), NULL, segmentfunct, (void *)(&(segmentarg[cnt]))))
                fatal_error("Cannot create thread.");
        }
        for (cnt=0; cnt<numsegment; cnt++){
            if (pthread_join(pthread[cnt],&retval))
                fatal_error("Cannot join thread %ld.",cnt);
        }
        free(pthread);
#endif
#ifdef OPENMPTHREADS
        // OpenMP path: dynamic scheduling, one iteration per segment.
        long cnt;
#pragma omp parallel for private(cnt) schedule(dynamic)
        for (cnt=0; cnt<numsegment; cnt++){
            segmentarg[cnt].funct(segmentarg[cnt].np,segmentarg[cnt].gl,segmentarg[cnt].theta,segmentarg[cnt].ls,segmentarg[cnt].le);
        }
#endif
    }
}
// Walk the 1D node run (ls..le] along dimension theta and cut it into
// maximal contiguous runs of valid nodes. For each run, either just count
// it (COUNTFLAG) or append a segmentarg entry describing it; *cntsegment
// is advanced either way. Note ls is expected to sit one node *before*
// the sweep range, so the first valid node is found via its predecessor.
static void create_segments(np_t *np, gl_t *gl, long theta, long ls, long le,
                            void funct(np_t *, gl_t *, long, long, long),
                            segmentarg_t *segmentarg, long *cntsegment, bool COUNTFLAG, int TYPELEVEL,
                            bool is_node_valid_local(np_t, int)){
    long l,lm1,ls_local,le_local;
    bool INSIDE;   // TRUE while we are inside a run of valid nodes
    l=ls;
    ls_local=ls; /* only needed to avoid compiler warning */
    INSIDE=FALSE;
    do {
        lm1=l;
        l=_l_plus_one(l,gl,theta);
        // Entering a run: remember the node *before* its first valid node.
        if ((!INSIDE) && (is_node_valid_local(np[l],TYPELEVEL))) {
            ls_local=lm1;
            INSIDE=TRUE;
        }
        // Leaving a run (invalid node or end of range): emit/count it.
        if ((INSIDE) && ((!is_node_valid_local(np[l],TYPELEVEL)) || (l==le))){
            le_local=l;
            if (!COUNTFLAG) {
                segmentarg[*cntsegment].np=np;
                segmentarg[*cntsegment].gl=gl;
                segmentarg[*cntsegment].theta=theta;
                // Shift back inside the run: ls_local/le_local bracket it.
                segmentarg[*cntsegment].ls=_l_plus_one(ls_local,gl,theta);
                segmentarg[*cntsegment].le=_l_minus_one(le_local,gl,theta);
                segmentarg[*cntsegment].funct=funct;
            }
            (*cntsegment)++;
            INSIDE=FALSE;
        }
    } while (l!=le);
    if (INSIDE) fatal_error("Problem setting up segments.");
}
// Sweep the zone with 1D segments along the dimension(s) selected by
// sweeptype (I, J, K, or all three), running funct on each segment.
// Two passes are made: the first only counts segments so segmentarg can
// be sized once, the second fills and executes them. Batches are flushed
// whenever the segment count reaches maxloopthread. GRIDLEVEL selects a
// coarsened subset of lines (every GRIDLEVEL-th line).
void sweep_with_1D_segments(np_t *np, gl_t *gl, zone_t zone,
                            void funct(np_t *, gl_t *, long, long, long),
                            int sweeptype, int TYPELEVEL, bool is_node_valid_local(np_t, int),
                            int SEGMENTWORK, int GRIDLEVEL){
    long j,k,cntsegment,numthread;
    ifn1D( long i; )
    segmentarg_t *segmentarg;
    int cnt;
    bool COUNTFLAG;
    numthread=0;
    assert(is_zone_in_zone(zone,gl->domain_all));
    segmentarg=(segmentarg_t *)malloc(sizeof(segmentarg_t));
    /* do this loop twice: the first time just to count.. */
    for (cnt=0; cnt<2; cnt++){
        if (cnt==0) COUNTFLAG=TRUE; else COUNTFLAG=FALSE;
        // Second pass: grow segmentarg to the maximum batch size observed.
        if (!COUNTFLAG) segmentarg=(segmentarg_t *)realloc(segmentarg,numthread*sizeof(segmentarg_t));
        /* the first dimension loop */
        if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_I) {
            cntsegment=0;
            for_2DL(j,zone.js,zone.je){
                if (mod(j-gl->domain_all.js,GRIDLEVEL)==0){
                    for_3DL(k,zone.ks,zone.ke){
                        if (mod(k-gl->domain_all.ks,GRIDLEVEL)==0){
                            // Segment bounds start one node outside the zone (see create_segments).
                            create_segments(np,gl,0,_ai(gl,zone.is-1,j,k),_ai(gl,zone.ie+1,j,k),
                                            funct, segmentarg,&cntsegment, (bool)COUNTFLAG, TYPELEVEL,is_node_valid_local);
                            if (cntsegment>=maxloopthread) {
                                numthread=max(numthread,cntsegment);
                                if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
                                cntsegment=0;
                            }
                        }
                    }
                }
            }
            // Flush the final partial batch.
            if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
            numthread=max(numthread,cntsegment);
        }
        /* the second dimension loop */
#ifdef _2DL
        if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_J) {
            cntsegment=0;
            for_1DL(i,zone.is,zone.ie){
                if (mod(i-gl->domain_all.is,GRIDLEVEL)==0){
                    for_3DL(k,zone.ks,zone.ke){
                        if (mod(k-gl->domain_all.ks,GRIDLEVEL)==0){
                            create_segments(np,gl,1,_ai(gl,i,zone.js-1,k),_ai(gl,i,zone.je+1,k),
                                            funct, segmentarg,&cntsegment,(bool)COUNTFLAG, TYPELEVEL,is_node_valid_local);
                            if (cntsegment>=maxloopthread) {
                                numthread=max(numthread,cntsegment);
                                if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
                                cntsegment=0;
                            }
                        }
                    }
                }
            }
            if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
            numthread=max(numthread,cntsegment);
        }
#endif
        /* the third dimension loop */
#ifdef _3DL
        if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_K) {
            cntsegment=0;
            for_1DL(i,zone.is,zone.ie){
                if (mod(i-gl->domain_all.is,GRIDLEVEL)==0){
                    for_2DL(j,zone.js,zone.je){
                        if (mod(j-gl->domain_all.js,GRIDLEVEL)==0){
                            create_segments(np,gl,2,_ai(gl,i,j,zone.ks-1),_ai(gl,i,j,zone.ke+1),
                                            funct, segmentarg, &cntsegment,(bool)COUNTFLAG, TYPELEVEL,is_node_valid_local);
                            if (cntsegment>=maxloopthread) {
                                numthread=max(numthread,cntsegment);
                                if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
                                cntsegment=0;
                            }
                        }
                    }
                }
            }
            if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK);
            numthread=max(numthread,cntsegment);
        }
#endif
    }
    free(segmentarg);
}
/* the following first sets the offset to 0, then 1, then -1 */
/* Map an iteration counter to a node offset, visiting 0, then +1, then -1
 * (any other counter value yields offset 0). */
static long _node_offset_from_cnt(long cnt){
    switch (cnt) {
        case 1:  return(1);
        case 2:  return(-1);
        default: return(0);
    }
}
// Update one boundary node l. Strategy, in order:
//  1. linked node: U was already transferred, just refresh primitives;
//  2. a boundary direction was found: apply the regular face update using
//     the three inner nodes B, C, D along that direction;
//  3. otherwise (corner node): search all +/-1 offset combinations for a
//     direction whose 1x and 2x neighbors are both inner, and use those.
// Fatal error if no update strategy applies.
void update_bdry_node(np_t *np, gl_t *gl, long l){
    long dim,dimsgn,l_C,l_B,l_A,l_D;
    bool BDRYDIRECFOUND;
#ifdef _2DL
    long offset1,offset2,cnt1,cnt2;
#endif
#ifdef _3D
    long offset3,cnt3;
#endif
    bool UPDATED;
    assert(is_node_bdry(np[l],TYPELEVEL_FLUID_WORK));
    UPDATED=FALSE;
    BDRYDIRECFOUND=find_bdry_direc(np, gl, l, TYPELEVEL_FLUID_WORK, &dim, &dimsgn);
    if (is_node_link(np[l],TYPELEVEL_FLUID_WORK)) {
        // in case the boundary node is a link, U has already been updated: simply update the prim variables
        find_prim_fluid(np, l, gl);
        UPDATED=TRUE;
    }
    if (BDRYDIRECFOUND && !UPDATED){
        // Regular face update: A is the boundary node, B/C (and D if inner)
        // are the 1st/2nd/3rd nodes into the domain along (dim,dimsgn).
        l_A=l;
        l_B=_al(gl,l,dim,dimsgn);
        l_C=_al(gl,l,dim,dimsgn*2);
        if (is_node_inner(np[_al(gl,l,dim,dimsgn*3)],TYPELEVEL_FLUID_WORK)) l_D=_al(gl,l,dim,dimsgn*3);
        else l_D=l_C;
        assert(is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK));
        assert(is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK));
        update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK);
        UPDATED=TRUE;
    }
    /* now, do the corners */
    if (!UPDATED) {
#ifdef _2D
        // Try offsets (0,+1,-1) x (0,+1,-1) until both the 1x and 2x
        // diagonal neighbors are inner nodes.
        for (cnt1=0; cnt1<=2; cnt1++){
            for (cnt2=0; cnt2<=2; cnt2++){
                offset1=_node_offset_from_cnt(cnt1);
                offset2=_node_offset_from_cnt(cnt2);
                l_C=_all(gl,l,0,offset1*2,1,offset2*2);
                l_B=_all(gl,l,0,offset1,1,offset2);
                l_A=l;
                l_D=l_C;
                if ( is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK)
                  && is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK) && !UPDATED){
                    update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK);
                    UPDATED=TRUE;
                }
            }
        }
#endif
#ifdef _3D
        // Same search over the 27 offset combinations in 3D.
        for (cnt1=0; cnt1<=2; cnt1++){
            for (cnt2=0; cnt2<=2; cnt2++){
                for (cnt3=0; cnt3<=2; cnt3++){
                    offset1=_node_offset_from_cnt(cnt1);
                    offset2=_node_offset_from_cnt(cnt2);
                    offset3=_node_offset_from_cnt(cnt3);
                    l_C=_al(gl,
                            _al(gl,
                                _al(gl,l,0,offset1*2),
                                1,offset2*2),
                            2,offset3*2);
                    l_B=_al(gl,
                            _al(gl,
                                _al(gl,l,0,offset1),
                                1,offset2),
                            2,offset3);
                    l_A=l;
                    l_D=l_C;
                    if ( is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK)
                      && is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK) && !UPDATED){
                        update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK);
                        UPDATED=TRUE;
                    }
                }
            }
        }
#endif
    }
    if (!UPDATED) {
        fatal_error("Problem updating boundary node in update_bdry_node() function.");
    }
}
/* Walk the 1D segment [ls,le] along dimension theta and apply the
 * boundary-node update (under a per-node zone lock) to every boundary
 * node encountered. */
void update_bdry_nodes_on_segment(np_t *np, gl_t *gl, long theta, long ls, long le){
    long node;
    for (node=ls; node!=_l_plus_one(le,gl,theta); node=_l_plus_one(node,gl,theta)){
        if (!is_node_bdry(np[node],TYPELEVEL_FLUID_WORK)) continue;
        thread_lock_node_set(np,node,THREADTYPE_ZONE);
        update_bdry_node(np, gl, node);
        thread_lock_node_unset(np,node,THREADTYPE_ZONE);
    }
}
// Apply boundary conditions to every boundary node in zone by sweeping 1D
// segments along i only (light segment work, fluid work type level).
void update_bdry_nodes(np_t *np, gl_t *gl, zone_t zone){
    sweep_with_1D_segments(np, gl, zone, &update_bdry_nodes_on_segment, SWEEPTYPE_I, TYPELEVEL_FLUID_WORK,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
}
#ifdef DISTMPI
// Doubles exchanged per fluid node: nf conserved vars, one count field,
// plus (hbw_resconv_fluid-1) sets of nmc MUSCL variables.
#define numfluidvars (nf+1+max(0,hbw_resconv_fluid-1)*nmc)
#define numlinkvars ((hbw_resconv_fluid-1)*nmc)
// Modulus used to checksum node indices in debug sends (fits exactly in a double).
#define DOUBLE_INT_MAX 100000000000000
// Payload buffer wide enough for either fluid or emfield transfers.
typedef double sendvars_t[max(nfe,numfluidvars)];
// One pending MPI transfer: payload, destination rank, global node index,
// and whether it has been flushed yet.
typedef struct {
    sendvars_t vars;
    int proc;
    long l;
    bool SENT;
} sendnode_t;
// Second MPI pass: exchange the linkmusclvars of linked boundary nodes
// between each pair of ranks whose domains overlap.
// NOTE(review): this pairs blocking MPI_Send/MPI_Recv node-by-node inside
// the same doubly-nested rank loop; it appears to rely on both sides
// walking the intersection zone in identical order (and on eager sends
// for small messages) -- confirm against the MPI usage elsewhere.
void update_linked_nodes_2(np_t *np, gl_t *gl, int TYPELEVEL){
    int rankrecv,numproc,ranksend,thisrank;
    long i,j,k;
    zone_t zonesend,zonerecv,zone;
    MPI_Status MPI_Status1;
    MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
    MPI_Comm_size(MPI_COMM_WORLD, &numproc);
    /* here we need to mpi the linkmusclvars */
    for (ranksend=0; ranksend<numproc; ranksend++){
        zonesend=_domain_from_rank(ranksend,gl);
        for (rankrecv=0; rankrecv<numproc; rankrecv++){
            // Only pairs involving this rank, and only distinct ranks.
            if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){
                zonerecv=_domain_lim_from_rank(rankrecv,gl);
                if (is_zone_intersecting_zone(zonesend,zonerecv)){
                    zone=_zone_intersection(zonesend,zonerecv);
                    for_ijk(zone,is,js,ks,ie,je,ke){
                        if (ranksend==thisrank) {
                            //  if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) printf("x");
                            if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)){
                                assert(np[_ai(gl,i,j,k)].numlinkmusclvars!=0);
                                assert(np[_ai(gl,i,j,k)].linkmusclvars!=NULL);
                                MPI_Send(&np[_ai(gl,i,j,k)].numlinkmusclvars,1,MPI_INT,rankrecv,0,MPI_COMM_WORLD);
                                MPI_Send(np[_ai(gl,i,j,k)].linkmusclvars,numlinkvars,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD);
                            }
                        }
                        if (rankrecv==thisrank) {
                            if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)){
                                MPI_Recv(&np[_ai(gl,i,j,k)].numlinkmusclvars,1,MPI_INT,ranksend,0,MPI_COMM_WORLD,&MPI_Status1);
                                assert(np[_ai(gl,i,j,k)].linkmusclvars!=NULL);
                                MPI_Recv(np[_ai(gl,i,j,k)].linkmusclvars,numlinkvars,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1);
                            }
                        }
                    }
                }
            }
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
}
// DISTMPI variant: propagate conserved variables (and MUSCL halo data for
// fluid levels) from each inner linked node to its linked boundary nodes,
// across ranks where necessary. Phases:
//   1. gather outgoing per-node payloads (same-rank links copied directly);
//   2. flush payloads with buffered MPI_Bsend, one destination rank at a time;
//   3. determine which ranks this process must receive from;
//   4. receive and scatter payloads in the same deterministic node order
//      the sender used (a debug checksum of the node index guards this);
//   5. run update_linked_nodes_2() for the linkmusclvars exchange.
void update_linked_nodes(np_t *np, gl_t *gl, int TYPELEVEL){
    long i,j,k,l1,l2,flux,offset,l,cntlink;
    MPI_Status MPI_Status1;
    musclvarscycle_t musclvars;
    sendvars_t mpivars;
    int thisrank,numproc,rank2,rank1,thisproc;
    int packsize,buffersize,bbuffersize;
    double *buffer,*bbuffer;
    sendnode_t *sendnode;
    long numsendvars,numvars,numsend,cntsend,cnt;
    double *sendvars;
    int *recvproc;
    int cntproc;
    zone_t zone;
    zone=gl->domain;
    // Payload width depends on the type level being synchronized.
    switch (TYPELEVEL){
        case TYPELEVEL_FLUID:
            numvars=numfluidvars;
            break;
        case TYPELEVEL_FLUID_WORK:
            numvars=numfluidvars;
            break;
#ifdef EMFIELD
        case TYPELEVEL_EMFIELD:
            numvars=nfe;
            break;
#endif
        default:
            fatal_error("TYPELEVEL can not be set to %d.\n",TYPELEVEL);
            numvars=0;
    }
    sendnode=(sendnode_t *)malloc(sizeof(sendnode_t));
    sendvars=(double *)malloc(sizeof(double));
    cntsend=0;
    MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
    MPI_Comm_size(MPI_COMM_WORLD, &numproc);
    MPI_Pack_size( 1, MPI_DOUBLE, MPI_COMM_WORLD, &packsize );
    recvproc=(int *)malloc((numproc+2)*sizeof(int));
    // Attach a buffer large enough for all buffered sends from this zone.
    buffersize = min(INT_MAX,nmc*(zone.ie-zone.is)*(zone.je-zone.js)if3DL(*(zone.ke-zone.ks)) * (MPI_BSEND_OVERHEAD + packsize));
    buffer = (double *)malloc( buffersize );
    MPI_Buffer_attach( buffer, buffersize );
    // Reset the per-node MUSCL-var counters before the exchange.
    // NOTE(review): this uses _al(gl,i,j,k) while every other node lookup in
    // this function uses _ai(gl,i,j,k) -- looks like a typo; confirm the
    // definitions of _al/_ai before changing.
    for_ijk(zone,is,js,ks,ie,je,ke){
        np[_al(gl,i,j,k)].numlinkmusclvars=0;
    }
    /* first send the packets */
    cntsend=0;
    for_ijk(zone,is,js,ks,ie,je,ke){
        if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL)){
#ifdef _CYCLE_MULTIZONE
            fatal_error("Linked nodes can not be used with Multizone cycle yet. Need to update update_linked_nodes() function.");
#endif
#ifdef _CYCLE_MULTIZONE_MARCHING
            fatal_error("Linked nodes can not be used with MultizoneMarching cycle yet. Need to update update_linked_nodes() function.");
#endif
            if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL)){
                // One payload per link target of this inner node.
                for (cntlink=0; cntlink<_num_node_link(np[_ai(gl,i,j,k)],TYPELEVEL); cntlink++){
                    l1=_ai_all(gl,i,j,k);
                    l2=_node_link(np[_ai(gl,i,j,k)],cntlink,TYPELEVEL);
                    rank1=_node_rank(gl, i, j, k);
                    rank2=_node_rank(gl, _i_all(l2,gl,0), _i_all(l2,gl,1), _i_all(l2,gl,2));
                    if (rank1==thisrank) {
                        if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){
                            // Payload layout: U[0..nf), node count, then MUSCL vars per offset.
                            for (flux=0; flux<nf; flux++) mpivars[flux]=np[_l_from_l_all(gl,l1)].bs->U[flux];
                            mpivars[nf]=(double)_nodes_between_link_and_bdry_limited(np, gl, _l_from_l_all(gl,l1), l2, TYPELEVEL, max(0,hbw_resconv_fluid-1));
                            for (offset=1; offset<hbw_resconv_fluid; offset++) {
                                //  find_prim_fluid(np, _al_link(np, gl, _l_from_l_all(gl,l1), offset, TYPELEVEL), gl);
                                find_musclvarscycle(np[_al_link(np, gl, _l_from_l_all(gl,l1), l2, offset, TYPELEVEL)], gl, musclvars);
                                for (flux=0; flux<nmc; flux++) mpivars[1+flux+nf+(offset-1)*nmc]=musclvars[flux];
                            }
                            if (rank1!=rank2){
                                // Cross-rank: queue for the buffered-send phase.
                                for (flux=0; flux<numvars; flux++) sendnode[cntsend].vars[flux]=mpivars[flux];
                                sendnode[cntsend].proc=(int)rank2;
                                sendnode[cntsend].l=l2;
                                sendnode[cntsend].SENT=FALSE;
                                cntsend++;
                                sendnode=(sendnode_t *)realloc(sendnode,(cntsend+1)*sizeof(sendnode_t));
                            } else {
                                /* no need to send with MPI*/
                                //printf("\n --(%ld,%ld,%ld)  %d",i,j,k,thisrank);
                                l=_l_from_l_all(gl,l2);
                                for (flux=0; flux<nf; flux++) np[l].bs->U[flux]=mpivars[flux];
                                assert(np[l].linkmusclvars!=NULL);
                                assert(is_node_bdry(np[l],TYPELEVEL));
                                assert(is_node_link(np[l],TYPELEVEL));
                                np[l].numlinkmusclvars=(short)round(mpivars[nf]);
                                for (offset=1; offset<hbw_resconv_fluid; offset++) {
                                    for (flux=0; flux<nmc; flux++) np[l].linkmusclvars[flux+(offset-1)*nmc]=mpivars[1+flux+nf+(offset-1)*nmc];
                                }
                            }
                        }
#ifdef EMFIELD
                        if (TYPELEVEL==TYPELEVEL_EMFIELD){
                            for (flux=0; flux<numvars; flux++) mpivars[flux]=np[_l_from_l_all(gl,l1)].bs->Uemfield[flux];
                            if (rank1!=rank2) {
                                for (flux=0; flux<numvars; flux++) sendnode[cntsend].vars[flux]=mpivars[flux];
                                sendnode[cntsend].proc=(int)rank2;
                                sendnode[cntsend].l=l2;
                                sendnode[cntsend].SENT=FALSE;
                                cntsend++;
                                sendnode=(sendnode_t *)realloc(sendnode,(cntsend+1)*sizeof(sendnode_t));
                            } else {
                                /* no need to send with MPI */
                                for (flux=0; flux<nfe; flux++) np[_l_from_l_all(gl,l2)].bs->Uemfield[flux]=mpivars[flux];
                            }
                        }
#endif
                    }
                }
            }
        }
    }
    numsend=cntsend;
    /* send nodes in block one proc at a time */
    do {
        // Pick the first unsent destination rank, pack every payload bound
        // for it into one contiguous array, and Bsend count + data.
        thisproc=-1;
        numsendvars=0;
        for (cntsend=0; cntsend<numsend; cntsend++){
            if (thisproc==-1 && !sendnode[cntsend].SENT) thisproc=sendnode[cntsend].proc;
            if (sendnode[cntsend].proc==thisproc){
                assert(!sendnode[cntsend].SENT);
                sendvars=(double *)realloc(sendvars,(numsendvars+2*numvars)*sizeof(double));
                for (flux=0; flux<numvars; flux++) sendvars[numsendvars+flux]=sendnode[cntsend].vars[flux];
                numsendvars+=numvars;
#ifndef NDEBUG
                // Debug builds append a checksum of the destination node index.
                sendvars[numsendvars]=(double)mod(sendnode[cntsend].l,DOUBLE_INT_MAX);
                numsendvars++;
#endif
                sendnode[cntsend].SENT=TRUE;
            }
        }
        if (thisproc!=-1){
            if (MPI_Bsend(&numsendvars,1,MPI_LONG,thisproc,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("Problem with MPI_Bsend in update_linked_nodes().");
            if (MPI_Bsend(sendvars,numsendvars,MPI_DOUBLE,thisproc,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("Problem with MPI_Bsend in update_linked_nodes().");
        }
    } while (thisproc!=-1);
    // Build the list of ranks this process must receive from.
    for (cnt=0; cnt<(numproc+2); cnt++){
        recvproc[cnt]=-1;
    }
    for_ijk(zone,is,js,ks,ie,je,ke){
        if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)){
            l1=_node_link(np[_ai(gl,i,j,k)],0,TYPELEVEL);
            rank2=_node_rank(gl, i, j, k);
            rank1=_node_rank(gl, _i_all(l1,gl,0), _i_all(l1,gl,1), _i_all(l1,gl,2));
            if (rank1!=rank2 && rank2==thisrank){
                /* rank1 is one process that we will need to get data from; store it in recvproc */
                cntproc=0;
                while(recvproc[cntproc]!=-1 && recvproc[cntproc]!=rank1 ) {
                    cntproc++;
                }
                assert(cntproc<numproc);
                recvproc[cntproc]=rank1;
            }
        }
    }
    // Receive one batch per source rank and scatter it in node order.
    cntproc=0;
    while (recvproc[cntproc]!=-1) {
        thisproc=recvproc[cntproc];
        MPI_Recv(&numsendvars,1,MPI_LONG,thisproc,0,MPI_COMM_WORLD,&MPI_Status1);
        sendvars=(double *)realloc(sendvars,numsendvars*sizeof(double));
        MPI_Recv(sendvars,numsendvars,MPI_DOUBLE,thisproc,0,MPI_COMM_WORLD,&MPI_Status1);
        cntsend=0;
        for_ijk(zone,is,js,ks,ie,je,ke){
            if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)){
                l2=_ai_all(gl,i,j,k);
                assert(is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL));
                l1=_node_link(np[_ai(gl,i,j,k)],0,TYPELEVEL);
                rank2=_node_rank(gl, i, j, k);
                rank1=_node_rank(gl, _i_all(l1,gl,0), _i_all(l1,gl,1), _i_all(l1,gl,2));
                if (rank1!=rank2 && rank2==thisrank){
                    if (thisproc==rank1){
                        for (flux=0; flux<numvars; flux++) mpivars[flux]=sendvars[cntsend+flux];
                        cntsend+=numvars;
#ifndef NDEBUG
                        // Verify sender and receiver walked nodes in the same order.
                        assert(mod(l2,DOUBLE_INT_MAX)==(long)sendvars[cntsend]);
                        cntsend++;
#endif
                        l=_l_from_l_all(gl,l2);
                        assert(is_node_bdry(np[l],TYPELEVEL));
                        assert(is_node_link(np[l],TYPELEVEL));
                        if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){
                            for (flux=0; flux<nf; flux++) np[l].bs->U[flux]=mpivars[flux];
                            assert(np[l].linkmusclvars!=NULL);
                            np[l].numlinkmusclvars=(short)round(mpivars[nf]);
                            for (offset=1; offset<hbw_resconv_fluid; offset++) {
                                for (flux=0; flux<nmc; flux++)
                                    np[l].linkmusclvars[flux+(offset-1)*nmc]=mpivars[1+flux+nf+(offset-1)*nmc];
                            }
                        }
#ifdef EMFIELD
                        if (TYPELEVEL==TYPELEVEL_EMFIELD){
                            for (flux=0; flux<nfe; flux++) np[l].bs->Uemfield[flux]=mpivars[flux];
                        }
#endif
                    }
                }
            }
        }
        cntproc++;
    }
    MPI_Buffer_detach( &bbuffer, &bbuffersize );
    free(buffer);
    MPI_Barrier(MPI_COMM_WORLD);
    free(sendnode);
    free(recvproc);
    free(sendvars);
    update_linked_nodes_2(np, gl, TYPELEVEL);
}
#else//DISTMPI
/* Serial (non-DISTMPI) synchronization of linked nodes: for every boundary
   node of the given TYPELEVEL that carries a link, copy the conserved
   variables from its first linked node into the boundary node. */
void update_linked_nodes(np_t *np, gl_t *gl, int TYPELEVEL){
  long i,j,k,lbdry,lsrc,cntflux;
  for_ijk(gl->domain,is,js,ks,ie,je,ke){
    lbdry=_ai(gl,i,j,k);
    if (is_node_bdry(np[lbdry],TYPELEVEL) && is_node_link(np[lbdry],TYPELEVEL)){
#ifdef _CYCLE_MULTIZONE
      fatal_error("Linked nodes can not be used with Multizone cycle yet. Need to update update_linked_nodes() function.");
#endif
#ifdef _CYCLE_MULTIZONE_MARCHING
      fatal_error("Linked nodes can not be used with MultizoneMarching cycle yet. Need to update update_linked_nodes() function.");
#endif
      assert(is_node_bdry(np[lbdry],TYPELEVEL));
      /* the source of the copy is the first link of the boundary node */
      lsrc=_node_link(np[lbdry],0,TYPELEVEL);
      if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){
        for (cntflux=0; cntflux<nf; cntflux++) np[lbdry].bs->U[cntflux]=np[lsrc].bs->U[cntflux];
      }
#ifdef EMFIELD
      if (TYPELEVEL==TYPELEVEL_EMFIELD){
        for (cntflux=0; cntflux<nfe; cntflux++) np[lbdry].bs->Uemfield[cntflux]=np[lsrc].bs->Uemfield[cntflux];
      }
#endif
    }
  }
}
#endif//DISTMPI
/* Thin predicate wrapper: TRUE when the user-supplied region function FUNCT
   holds at node (i,j,k). */
static bool is_node_in_region(bool(*FUNCT)(gl_t *, long, long, long),
gl_t *gl, long i, long j, long k){
  return(FUNCT(gl,i,j,k));
}
/* TRUE when any node within a box of half-width hbw_bdry_fluid centered on
   (i,j,k) satisfies the region predicate FUNCT; i.e. (i,j,k) lies in the
   region extended by the boundary bandwidth.
   Improvement over the original: return as soon as a hit is found instead of
   scanning the remainder of the box (FUNCT is a pure region predicate, so
   skipping the extra calls does not change the result). */
static bool is_node_in_region_extended_by_bb(bool(*FUNCT)(gl_t *, long, long, long),
gl_t *gl, long i, long j, long k){
  long cnti,cntj,cntk;
  for_1DL(cnti,i-hbw_bdry_fluid,i+hbw_bdry_fluid){
    for_2DL(cntj,j-hbw_bdry_fluid,j+hbw_bdry_fluid){
      for_3DL(cntk,k-hbw_bdry_fluid,k+hbw_bdry_fluid){
        if (FUNCT(gl,cnti,cntj,cntk)) return(TRUE);
      }
    }
  }
  return(FALSE);
}
/* Resume (un-suspend) every valid node for which FUNCT(gl,i,j,k) holds,
   together with the surrounding band of nodes needed to update the region's
   boundary nodes; rebuild the working variables of the resumed nodes; then
   suspend every node that is not both valid and inside the region. */
void resume_nodes_specified_in_function(np_t *np, gl_t *gl,
bool(*FUNCT)(gl_t *, long, long, long)){
long i,j,k;
long *noderes;  /* offsets of all resumed nodes */
long *bdryres;  /* offsets of resumed nodes lying strictly inside the FUNCT region */
long numnoderes,numbdryres,cnt;
copy_base_to_work_node_type(np,gl,gl->domain_lim);
/* +4 per active dimension gives room for the is-1..ie+1 scan below;
   the if2DL/if3DL macros drop the extra factors in lower-dimensional builds */
noderes=(long *)malloc((gl->domain.ie-gl->domain.is+4)if2DL(*(gl->domain.je-gl->domain.js+4))
if3DL(*(gl->domain.ke-gl->domain.ks+4))*sizeof(long));
bdryres=(long *)malloc((gl->domain.ie-gl->domain.is+4)if2DL(*(gl->domain.je-gl->domain.js+4))
if3DL(*(gl->domain.ke-gl->domain.ks+4))*sizeof(long));
numnoderes=0;
numbdryres=0;
/* pass 1: resume nodes in the region or within hbw_bdry_fluid of it; suspend the rest */
for_ijk(gl->domain,is-1,js-1,ks-1,ie+1,je+1,ke+1){
if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)
&& is_node_in_region_extended_by_bb(FUNCT,gl,i,j,k)) {
if (resume_node(&(np[_ai(gl,i,j,k)])) ) {
if (is_node_in_region(FUNCT,gl,i,j,k) ){
bdryres[numbdryres]=_ai(gl,i,j,k);
numbdryres++;
}
noderes[numnoderes]=_ai(gl,i,j,k);
numnoderes++;
}
} else {
suspend_node(&(np[_ai(gl,i,j,k)]));
}
}
/* rebuild the working variables of the inner nodes of the nodes resumed*/
for (cnt=0; cnt<numnoderes; cnt++){
if (is_node_resumed(np[noderes[cnt]]) && is_node_inner(np[noderes[cnt]],TYPELEVEL_FLUID)){
find_prim_fluid(np,noderes[cnt],gl);
}
}
/* rebuild the working variables of the boundary nodes of the nodes resumed*/
for (cnt=0; cnt<numbdryres; cnt++){
if (is_node_resumed(np[bdryres[cnt]]) && is_node_bdry(np[bdryres[cnt]],TYPELEVEL_FLUID)) {
update_bdry_node(np,gl,bdryres[cnt]);
}
}
/* suspend all nodes needed only to compute the boundary nodes.
this is necessary to ensure that all non-suspended nodes are properly updated.*/
for_ijk(gl->domain,is-1,js-1,ks-1,ie+1,je+1,ke+1){
if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_region(FUNCT,gl,i,j,k)))
suspend_node(&(np[_ai(gl,i,j,k)]));
}
free(noderes);
free(bdryres);
}
/* Resume the valid nodes within zone extended by hbw_bdry_fluid, rebuild the
   working variables of the resumed inner nodes (in parallel when
   OPENMPTHREADS is enabled), update the resumed boundary nodes, then suspend
   every node that is not both valid and strictly inside zone. */
void resume_nodes_only_in_zone_and_update_bdry_nodes(np_t *np, gl_t *gl, zone_t zone){
long i,j,k;
long *noderes;  /* offsets of all resumed nodes */
long *bdryres;  /* offsets of resumed nodes lying strictly inside zone */
long numnoderes,numbdryres,cnt;
copy_base_to_work_node_type(np,gl,gl->domain_lim);
/* worst case: every node of domain_lim is resumed */
noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1)
if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1))
if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long));
bdryres=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1)
if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1))
if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long));
numnoderes=0;
numbdryres=0;
/* pass 1: resume nodes within zone extended by the boundary bandwidth; suspend the rest */
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)
&& (i>=zone.is-hbw_bdry_fluid) && (i<=zone.ie+hbw_bdry_fluid)
if2DL(&& (j>=zone.js-hbw_bdry_fluid) && (j<=zone.je+hbw_bdry_fluid))
if3DL(&& (k>=zone.ks-hbw_bdry_fluid) && (k<=zone.ke+hbw_bdry_fluid))) {
if (resume_node(&(np[_ai(gl,i,j,k)])) ) {
if (is_node_in_zone(i,j,k,zone)){
bdryres[numbdryres]=_ai(gl,i,j,k);
numbdryres++;
}
noderes[numnoderes]=_ai(gl,i,j,k);
numnoderes++;
}
} else {
suspend_node(&(np[_ai(gl,i,j,k)]));
}
}
/* rebuild the working variables of the inner nodes of the nodes resumed*/
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
for (cnt=0; cnt<numnoderes; cnt++){
if (is_node_resumed(np[noderes[cnt]]) && is_node_inner(np[noderes[cnt]],TYPELEVEL_FLUID)){
find_prim_fluid(np,noderes[cnt],gl);
}
}
free(noderes);
/* rebuild the working variables of the boundary nodes of the nodes resumed*/
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
for (cnt=0; cnt<numbdryres; cnt++){
if (is_node_resumed(np[bdryres[cnt]]) && is_node_bdry(np[bdryres[cnt]],TYPELEVEL_FLUID)) {
find_ijk_from_l(gl, bdryres[cnt], &i, &j, &k);
/* boundary conditions can only be applied within the local domain;
   outside of it only the primitive variables are rebuilt */
if (is_node_in_zone(i, j, k, gl->domain)){
update_bdry_node(np,gl,bdryres[cnt]);
} else {
find_prim_fluid(np,bdryres[cnt],gl);
}
}
}
free(bdryres);
/* suspend all nodes needed only to compute the boundary nodes.
this is necessary to ensure that all non-suspended nodes are properly updated.*/
for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)
&& is_node_in_zone(i,j,k,zone)))
suspend_node(&(np[_ai(gl,i,j,k)]));
}
}
/* Resume every valid node within the intersection of zone and the local
   domain limits, then rebuild the fluid working variables of the nodes that
   were resumed. Nodes outside the intersection are left untouched. */
void resume_nodes_in_zone(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  long *resumed;
  long numresumed,cntres;
  zone_t zonecommon;
  copy_base_to_work_node_type(np,gl,gl->domain_lim);
  /* worst case: every node of domain_lim is resumed */
  resumed=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1)
                 if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1))
                 if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long));
  numresumed=0;
  zonecommon=_zone_intersection(gl->domain_lim,zone);
  for_ijk(zonecommon,is,js,ks,ie,je,ke){
    if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)) {
      if (resume_node(&(np[_ai(gl,i,j,k)])) ) {
        resumed[numresumed]=_ai(gl,i,j,k);
        numresumed++;
      }
    }
  }
  /* rebuild the working variables of the nodes just resumed */
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cntres) schedule(dynamic)
#endif
  for (cntres=0; cntres<numresumed; cntres++){
    if (is_node_resumed(np[resumed[cntres]]) && is_node_valid(np[resumed[cntres]],TYPELEVEL_FLUID)){
      find_prim_fluid(np,resumed[cntres],gl);
    }
  }
  free(resumed);
}
/* Resume the valid nodes lying within zone and suspend every other node of
   domain_lim; then rebuild the fluid working variables of the resumed nodes
   and finally suspend any node that is not both valid and inside zone. */
void resume_nodes_only_in_zone(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  long *resumed;
  long numresumed,cntres;
  copy_base_to_work_node_type(np,gl,gl->domain_lim);
  /* worst case: every node of domain_lim is resumed */
  resumed=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1)
                 if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1))
                 if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long));
  numresumed=0;
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)
        && (i>=zone.is) && (i<=zone.ie)
        if2DL(&& (j>=zone.js) && (j<=zone.je))
        if3DL(&& (k>=zone.ks) && (k<=zone.ke))) {
      if (resume_node(&(np[_ai(gl,i,j,k)])) ) {
        resumed[numresumed]=_ai(gl,i,j,k);
        numresumed++;
      }
    } else {
      suspend_node(&(np[_ai(gl,i,j,k)]));
    }
  }
  /* rebuild the working variables of the nodes just resumed */
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cntres) schedule(dynamic)
#endif
  for (cntres=0; cntres<numresumed; cntres++){
    if (is_node_resumed(np[resumed[cntres]]) && is_node_valid(np[resumed[cntres]],TYPELEVEL_FLUID)){
      find_prim_fluid(np,resumed[cntres],gl);
    }
  }
  free(resumed);
  /* suspend the nodes that were resumed only as support for the boundary
     nodes, so that every non-suspended node is left properly updated */
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)
          && is_node_in_zone(i,j,k,zone)))
      suspend_node(&(np[_ai(gl,i,j,k)]));
  }
}
#ifdef UNSTEADY
/* Advance the physical time level: increment gl->time by gl->dt, reset the
   iteration counter, and shift the stored conserved-variable time levels
   (U -> Um1 -> Um2 -> Um3, as far back as _RESTIME_BW requires) on every
   valid node. Improvement: the node offset l is reused in the validity checks
   instead of recomputing _ai(gl,i,j,k) a second and third time. */
void increase_time_level(np_t *np, gl_t *gl){
  long i,j,k,flux,l;
  gl->time+=gl->dt;
  gl->iter=0;
  add_double_to_codex(&(gl->cycle.codex),"time",gl->time);
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    l=_ai(gl,i,j,k);
    if (is_node_valid(np[l],TYPELEVEL_FLUID)){
      for (flux=0; flux<nf; flux++){
#if _RESTIME_BW > 3
        np[l].bs->Um3[flux]=np[l].bs->Um2[flux];
#endif
#if _RESTIME_BW > 2
        np[l].bs->Um2[flux]=np[l].bs->Um1[flux];
#endif
        np[l].bs->Um1[flux]=np[l].bs->U[flux];
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL_RESIDUAL
        np[l].bs->trapezoidalm1[flux]=np[l].bs->trapezoidalm1_next[flux];
#endif
      }
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL_MUSCLVARS
      find_musclvars(np[l],gl,np[l].bs->trapezoidalm1);
#endif
    }
#ifdef EMFIELD
    if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){
      for (flux=0; flux<nfe; flux++){
        np[l].bs->Uemfieldm1[flux]=np[l].bs->Uemfield[flux];
      }
    }
#endif
  }
}
#endif//UNSTEADY
/* SOAP action handler invoked while processing the runtime module code:
   dispatches WriteDataFile(), Init(), Model(), Disc() and Cycle(), then
   forwards any remaining action to the cycle-specific handler. */
void runtime_actions(char *actionname, char **argum, SOAP_codex_t *codex){
char *oldfilename;
/* save the current output file name so that a WriteDataFile("name") call can
   temporarily override it; +5 leaves headroom beyond the terminating NUL */
oldfilename=(char *)malloc(sizeof(char)*(5+strlen((((readcontrolarg_t *)codex->action_args)->gl->output_filename))));
strcpy(oldfilename,(((readcontrolarg_t *)codex->action_args)->gl->output_filename));
if (strcmp(actionname,"WriteDataFile")==0) {
if (SOAP_number_argums(*argum)==1){
SOAP_substitute_all_argums(argum, codex);
SOAP_get_argum_string(codex,&(((readcontrolarg_t *)codex->action_args)->gl->output_filename),*argum,0);
}
if (SOAP_number_argums(*argum)>1){
SOAP_fatal_error(codex,"Action WriteDataFile() can not be called with more than 1 argument. Either it is called with one argument (a string containing the data file name) or with no argument. If no argument is given, the default data file name as specified on the command line will be used.");
}
write_data_file(*((readcontrolarg_t *)codex->action_args)->np,
((readcontrolarg_t *)codex->action_args)->gl);
codex->ACTIONPROCESSED=TRUE;
}
/* NOTE(review): restoring with strcpy assumes the output_filename buffer is
   still at least as large as the saved name after SOAP_get_argum_string()
   possibly replaced it — confirm against SOAP_get_argum_string() semantics */
strcpy((((readcontrolarg_t *)codex->action_args)->gl->output_filename),oldfilename);
free(oldfilename);
/* re-reading Init/Model/Disc at runtime invalidates the residual */
if (strcmp(actionname,"Init")==0) {
read_init(*argum, codex);
codex->action=&runtime_actions;
((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
codex->ACTIONPROCESSED=TRUE;
}
if (strcmp(actionname,"Model")==0) {
read_model(*argum, codex);
codex->action=&runtime_actions;
((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
codex->ACTIONPROCESSED=TRUE;
}
if (strcmp(actionname,"Disc")==0) {
read_disc(*argum, codex);
codex->action=&runtime_actions;
((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE;
#ifdef EMFIELD
((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE;
#endif
codex->ACTIONPROCESSED=TRUE;
}
if (strcmp(actionname,"Cycle")==0) {
read_cycle(*argum, codex);
codex->action=&runtime_actions;
codex->ACTIONPROCESSED=TRUE;
}
runtime_actions_cycle_specific(actionname,argum,codex);
}
/* Write a skeleton Cycle( ... ); section to the control file, composed of
   the fluid, (optionally) emfield, and runtime sub-templates. */
void write_cycle_template(FILE **controlfile){
  wfprintf(*controlfile,"\n\nCycle(\n");
  write_cycle_fluid_template(controlfile);
#ifdef EMFIELD
  write_cycle_emfield_template(controlfile);
#endif
  write_runtime_template(controlfile);
  wfprintf(*controlfile,");\n");
}
/* SOAP action handler for the contents of Cycle(): on the first pass
   (gl->CONTROL_READ false) keep a private copy of the runtime module code,
   then forward the action to the fluid and emfield cycle handlers. */
void read_cycle_actions(char *actionname, char **argum, SOAP_codex_t *codex){
  readcontrolarg_t *arg;
  gl_t *gl;
  arg=(readcontrolarg_t *)codex->action_args;
  gl=arg->gl;
  if (strcmp(actionname,_CYCLE_ACTIONNAME)==0 && !gl->CONTROL_READ) {
    if (arg->VERBOSE) wfprintf(stdout,"%s..",_CYCLE_ACTIONNAME);
    /* store the runtime module code so it can be re-executed each iteration */
    gl->cycle.code_runtime=(char *)malloc((strlen(*argum)+2)*sizeof(char));
    strcpy(gl->cycle.code_runtime,*argum);
    gl->cycle.RUNTIMEMODULEFOUND=TRUE;
    codex->ACTIONPROCESSED=TRUE;
  }
  read_cycle_fluid_actions(actionname, argum, codex);
  read_cycle_emfield_actions(actionname, argum, codex);
}
/* Process the argument of Cycle(): dispatch its contents through
   read_cycle_actions() and, on the first read of the control file, verify
   that the mandatory fluid, emfield and runtime modules were all present
   before initializing the cycle. */
void read_cycle(char *argum, SOAP_codex_t *codexcontrol){
  gl_t *glref;
  glref=((readcontrolarg_t *)codexcontrol->action_args)->gl;
  if (!glref->CONTROL_READ){
    glref->cycle.RUNTIMEMODULEFOUND=FALSE;
  }
  codexcontrol->action=&read_cycle_actions;
  SOAP_process_code(argum, codexcontrol, SOAP_VARS_KEEP_ALL);
  if (!glref->CONTROL_READ){
    if (!glref->CYCLE_FLUID_READ)
      fatal_error("The fluid module %s() was not found within Cycle().",_FLUID_ACTIONNAME);
    if (!glref->CYCLE_EMFIELD_READ)
      fatal_error("The emfield module %s() was not found within Cycle().",_EMFIELD_ACTIONNAME);
    if (!glref->cycle.RUNTIMEMODULEFOUND)
      fatal_error("The module %s() was not found within Cycle().",_CYCLE_ACTIONNAME);
    init_cycle(argum,codexcontrol);
  }
}
/* Write a skeleton Disc( ... ); section to the control file, composed of the
   fluid, (optionally) emfield, residual-convection and residual-time
   discretization sub-templates. */
void write_disc_template(FILE **controlfile){
  wfprintf(*controlfile,"\n\nDisc(\n");
  write_disc_fluid_template(controlfile);
#ifdef EMFIELD
  write_disc_emfield_template(controlfile);
#endif
  write_disc_resconv_template(controlfile);
  write_disc_restime_template(controlfile);
  wfprintf(*controlfile,");\n");
}
/* SOAP action handler for the contents of Disc(): forward each action to the
   fluid, emfield, residual-convection and residual-time discretization
   handlers in turn. */
void read_disc_actions(char *actionname, char **argum, SOAP_codex_t *codex){
  read_disc_fluid_actions(actionname, argum, codex);
  read_disc_emfield_actions(actionname, argum, codex);
  read_disc_resconv_actions(actionname, argum, codex);
  read_disc_restime_actions(actionname, argum, codex);
}
/* Process the argument of Disc(): reset the per-module read flags, dispatch
   the contents through read_disc_actions(), and on the first read of the
   control file verify that all four discretization modules were present. */
void read_disc(char *argum, SOAP_codex_t *codexcontrol){
  gl_t *glref;
  glref=((readcontrolarg_t *)codexcontrol->action_args)->gl;
  codexcontrol->action=&read_disc_actions;
  glref->DISC_FLUID_READ=FALSE;
  glref->DISC_EMFIELD_READ=FALSE;
  glref->DISC_RESCONV_READ=FALSE;
  glref->DISC_RESTIME_READ=FALSE;
  SOAP_process_code(argum, codexcontrol, SOAP_VARS_KEEP_ALL);
  if (!glref->CONTROL_READ){
    if (!glref->DISC_FLUID_READ)
      fatal_error("The fluid module %s() was not found within Disc().",_FLUID_ACTIONNAME);
    if (!glref->DISC_EMFIELD_READ)
      fatal_error("The emfield module %s() was not found within Disc().",_EMFIELD_ACTIONNAME);
    if (!glref->DISC_RESCONV_READ)
      fatal_error("The residual convection module %s() was not found within Disc().",_RESCONV_ACTIONNAME);
    if (!glref->DISC_RESTIME_READ)
      fatal_error("The residual time module %s() was not found within Disc().",_RESTIME_ACTIONNAME);
  }
}
#ifdef DISTMPI
/* not used anymore */
/* Concatenate the clip-list strings of all MPI processes into *cliplist_str,
   in rank order; every process ends up with the same concatenated string.
   NOTE(review): assumes *cliplist_str is heap-allocated by the caller (it is
   realloc'd here) — confirm against the callers. */
void MPI_Allreduce_Sum_Cliplist(char **cliplist_str){
int rank,numproc,proc,thiscliplist_len;
char *cliplistmem_str,*thiscliplist_str;
/* keep a private copy of this process's list; *cliplist_str is rebuilt from scratch */
cliplistmem_str=(char *)malloc((strlen(*cliplist_str)+10)*sizeof(char));
strcpy(cliplistmem_str,*cliplist_str);
thiscliplist_str=(char *)malloc(sizeof(char));
strcpy(*cliplist_str,"");
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
/* one broadcast round per rank: the owner sends its list length, then the list */
for (proc=0; proc<numproc; proc++){
if (proc==rank) {
thiscliplist_len=strlen(cliplistmem_str);
thiscliplist_str=(char *)realloc(thiscliplist_str,sizeof(char)*(thiscliplist_len+1));
strcpy(thiscliplist_str,cliplistmem_str);
}
MPI_Bcast(&thiscliplist_len,1,MPI_INT,proc,MPI_COMM_WORLD);
thiscliplist_str=(char *)realloc(thiscliplist_str,sizeof(char)*(thiscliplist_len+1));
MPI_Bcast(thiscliplist_str,thiscliplist_len+1,MPI_CHAR,proc,MPI_COMM_WORLD);
*cliplist_str=(char *)realloc(*cliplist_str,sizeof(char)*(strlen(*cliplist_str)+thiscliplist_len+1));
strcat(*cliplist_str,thiscliplist_str);
}
free(cliplistmem_str);
free(thiscliplist_str);
}
/* Gather the clipped-variable statistics of all MPI processes: each rank in
   turn broadcasts its clip names and counts, and every process accumulates
   them into its own "all" totals via add_to_clipped_variables_all(). */
void find_clipped_variables_all(gl_t *gl){
int rank,numproc,proc,cnt;
int thisclipnamenum,thisclipname_len;
char *thisclipname;
long thisclipnum;
reset_clipped_variables_all(gl);
thisclipname=(char *)malloc(sizeof(char));
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
/* one round per rank; all ranks participate in every broadcast */
for (proc=0; proc<numproc; proc++){
if (proc==rank) {
thisclipnamenum=gl->model.clipnamenum;
}
/* number of clipped-variable entries owned by rank proc */
MPI_Bcast(&thisclipnamenum,1,MPI_INT,proc,MPI_COMM_WORLD);
for (cnt=0; cnt<thisclipnamenum; cnt++){
if (proc==rank) {
thisclipname_len=strlen(gl->model.clipname[cnt]);
}
MPI_Bcast(&thisclipname_len,1,MPI_INT,proc,MPI_COMM_WORLD);
thisclipname=(char *)realloc(thisclipname,sizeof(char)*(thisclipname_len+1));
if (proc==rank) {
strcpy(thisclipname,gl->model.clipname[cnt]);
thisclipnum=gl->model.clipnum[cnt];
}
MPI_Bcast(thisclipname,thisclipname_len+1,MPI_CHAR,proc,MPI_COMM_WORLD);
MPI_Bcast(&thisclipnum,1,MPI_LONG,proc,MPI_COMM_WORLD);
add_to_clipped_variables_all(gl, thisclipname, thisclipnum);
// if (rank==0) printf("\n_%s(%ld)%d_",thisclipname,thisclipnum,proc);
}
}
free(thisclipname);
}
#endif
/* Publish the convergence information (clip lists, ximax and its i/j/k
   location, and the emfield equivalents when compiled in) as SOAP variables
   of the runtime codex. Under DISTMPI the values are first reduced/broadcast
   so that every rank publishes the same global values. */
void update_runtime_codex_xi_from_gl(gl_t *gl, SOAP_codex_t *codex){
char *cliplist_str;
#ifdef DISTMPI
int rank,proc;
long ijk_ximax;
/* value+rank pair for the MPI_MAXLOC reduction */
struct {
double ximax;
int rank;
} ximaxrank,ximaxrank_max;
#ifdef EMFIELD
long ijk_ximax_emfield;
struct {
double ximax;
int rank;
} ximaxrank_emfield,ximaxrank_max_emfield;
#endif
#endif//DISTMPI
#ifdef DISTMPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &proc);
/* only rank 0 writes to the screen */
if (rank!=0) codex->SCREENOUTPUT=FALSE;
#endif
/* clip lists: the find_* helpers realloc cliplist_str as needed */
cliplist_str=(char *)malloc(sizeof(char));
#ifdef DISTMPI
find_clipped_variables_all(gl);
find_clipped_variables_list_all(gl,&cliplist_str);
add_string_to_codex(codex,"clipinfo",cliplist_str);
find_clipped_muscl_variables_list_all(gl,&cliplist_str);
add_string_to_codex(codex,"clipinfo_muscl",cliplist_str);
find_clipped_bdry_variables_list_all(gl,&cliplist_str);
add_string_to_codex(codex,"clipinfo_bdry",cliplist_str);
#else
find_clipped_variables_list(gl,&cliplist_str);
add_string_to_codex(codex,"clipinfo",cliplist_str);
find_clipped_muscl_variables_list(gl,&cliplist_str);
add_string_to_codex(codex,"clipinfo_muscl",cliplist_str);
find_clipped_bdry_variables_list(gl,&cliplist_str);
add_string_to_codex(codex,"clipinfo_bdry",cliplist_str);
//MPI_Allreduce_Sum_Cliplist(&cliplist_str);
#endif
free(cliplist_str);
#ifdef DISTMPI
/* global maximum of ximax; MPI_MAXLOC also yields the owning rank, which then
   broadcasts the i/j/k location of the maximum */
ximaxrank.ximax=gl->ximax;
ximaxrank.rank=rank;
MPI_Allreduce(&ximaxrank, &ximaxrank_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);
add_double_to_codex(codex,"ximax",ximaxrank_max.ximax);
ijk_ximax=gl->i_ximax;
MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
add_int_to_codex(codex,"i_ximax",ijk_ximax);
#ifdef EMFIELD
ximaxrank_emfield.ximax=gl->ximax_emfield;
ximaxrank_emfield.rank=rank;
MPI_Allreduce(&ximaxrank_emfield, &ximaxrank_max_emfield, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);
add_double_to_codex(codex,"ximax_emfield",ximaxrank_max_emfield.ximax);
ijk_ximax_emfield=gl->i_ximax_emfield;
MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
add_int_to_codex(codex,"i_ximax_emfield",ijk_ximax_emfield);
#endif
#ifdef _2DL
ijk_ximax=gl->j_ximax;
MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
add_int_to_codex(codex,"j_ximax",ijk_ximax);
#ifdef EMFIELD
ijk_ximax_emfield=gl->j_ximax_emfield;
MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
add_int_to_codex(codex,"j_ximax_emfield",ijk_ximax_emfield);
#endif
#endif//_2DL
#ifdef _3DL
ijk_ximax=gl->k_ximax;
MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
add_int_to_codex(codex,"k_ximax",ijk_ximax);
#ifdef EMFIELD
ijk_ximax_emfield=gl->k_ximax_emfield;
MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
add_int_to_codex(codex,"k_ximax_emfield",ijk_ximax_emfield);
#endif
#endif//_3DL
#else//DISTMPI
/* serial build: publish the local values directly */
add_double_to_codex(codex,"ximax",gl->ximax);
add_int_to_codex(codex,"i_ximax",gl->i_ximax);
#ifdef EMFIELD
add_double_to_codex(codex,"ximax_emfield",gl->ximax_emfield);
add_int_to_codex(codex,"i_ximax_emfield",gl->i_ximax_emfield);
#endif
#ifdef _2DL
add_int_to_codex(codex,"j_ximax",gl->j_ximax);
#ifdef EMFIELD
add_int_to_codex(codex,"j_ximax_emfield",gl->j_ximax_emfield);
#endif
#endif//_2DL
#ifdef _3DL
add_int_to_codex(codex,"k_ximax",gl->k_ximax);
#ifdef EMFIELD
add_int_to_codex(codex,"k_ximax_emfield",gl->k_ximax_emfield);
#endif
#endif//_3DL
#endif//DISTMPI
}
/* Publish the remaining runtime variables (iteration count, tolerances,
   output file name, time step, CFL, effective iteration counters, etc.) as
   SOAP variables of the runtime codex. Under DISTMPI the effective iteration
   counters are summed over all ranks before being published. */
void update_runtime_codex_vars_except_xi_from_gl(gl_t *gl, SOAP_codex_t *codex){
#ifdef DISTMPI
double effiter_U_sum,effiter_R_sum;
int rank,proc;
#ifdef EMFIELD
double effiter_U_sum_emfield,effiter_R_sum_emfield;
#endif
#endif//DISTMPI
#ifdef DISTMPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &proc);
/* only rank 0 writes to the screen */
if (rank!=0) codex->SCREENOUTPUT=FALSE;
#endif
add_int_to_codex(codex,"iter", gl->iter);
add_double_to_codex(codex,"xiverge",gl->cycle.fluid.xiverge);
add_string_to_codex(codex,"outputfilename", gl->output_filename);
#ifdef EMFIELD
add_double_to_codex(codex,"xiverge_emfield",gl->cycle.emfield.xiverge);
#endif
#if defined(UNSTEADY)
add_double_to_codex(codex,"time",gl->time);
#endif
add_double_to_codex(codex,"CFL",gl->CFL);
#ifdef UNSTEADY
add_double_to_codex(codex,"dt",gl->dt);
#endif
#ifdef _CYCLE_MULTIZONE_MARCHING
add_double_to_codex(codex,"window.is",gl->window.is);
add_double_to_codex(codex,"window.ie",gl->window.ie);
/* zone counters are reset here and filled in by the multizone cycle */
add_int_to_codex(&(gl->cycle.codex), "numzones_updated", 0);
add_int_to_codex(&(gl->cycle.codex), "numzones_total", 0);
#endif
#ifdef _CYCLE_MULTIZONE
add_int_to_codex(&(gl->cycle.codex), "numzones_updated", 0);
add_int_to_codex(&(gl->cycle.codex), "numzones_total", 0);
#endif
#ifdef DISTMPI
/* effective iteration counts: sum the per-rank contributions */
MPI_Allreduce(&gl->effiter_U, &effiter_U_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
add_double_to_codex(codex,"effiter_U",effiter_U_sum);
MPI_Allreduce(&gl->effiter_R, &effiter_R_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
add_double_to_codex(codex,"effiter_R",effiter_R_sum);
#ifdef EMFIELD
MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_sum_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
add_double_to_codex(codex,"effiter_U_emfield",effiter_U_sum_emfield);
MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_sum_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
add_double_to_codex(codex,"effiter_R_emfield",effiter_R_sum_emfield);
#endif
#else//DISTMPI
add_double_to_codex(codex,"effiter_U",gl->effiter_U);
add_double_to_codex(codex,"effiter_R",gl->effiter_R);
#ifdef EMFIELD
add_double_to_codex(codex,"Lc",gl->Lc);
// add_double_to_codex(codex,"relaxEMF",gl->relaxEMF);
add_double_to_codex(codex,"effiter_U_emfield",gl->effiter_U_emfield);
add_double_to_codex(codex,"effiter_R_emfield",gl->effiter_R_emfield);
#endif
#endif//DISTMPI
}
/* Register the TSEMF_* time-stepping scheme constants and the PRECON_*
   preconditioner constants as SOAP variables so the control file code can
   refer to them by name. */
void add_constants_to_codex(gl_t *gl, SOAP_codex_t *codex){
  struct {
    char *name;
    int value;
  } cst[]={
    {"TSEMF_DEFAULT", TSEMF_DEFAULT},
    {"TSEMF_ADI", TSEMF_ADI},
    {"TSEMF_DDADI", TSEMF_DDADI},
    {"TSEMF_IMAF", TSEMF_IMAF},
    {"TSEMF_ADIIMAF", TSEMF_ADIIMAF},
    {"TSEMF_NEWTON", TSEMF_NEWTON},
    {"TSEMF_ADIi", TSEMF_ADIi},
    {"TSEMF_ADIk", TSEMF_ADIk},
    {"TSEMF_IMAFk", TSEMF_IMAFk},
    {"TSEMF_IMAFi", TSEMF_IMAFi},
    {"TSEMF_SOR", TSEMF_SOR},
    {"TSEMF_SOR2", TSEMF_SOR2},
    {"PRECON_CONSTANTTIMESTEP", PRECON_CONSTANTTIMESTEP},
    {"PRECON_LOCALTIMESTEP", PRECON_LOCALTIMESTEP},
    {"PRECON_LOCALTIMESTEP2", PRECON_LOCALTIMESTEP2},
    {"PRECON_LOCALEIGENVALUE", PRECON_LOCALEIGENVALUE},
    {"PRECON_LOCALEIGENVALUE2", PRECON_LOCALEIGENVALUE2}
  };
  long cnt;
  char str[100];
  for (cnt=0; cnt<(long)(sizeof(cst)/sizeof(cst[0])); cnt++){
    sprintf(str, "%d", cst[cnt].value);
    SOAP_add_to_vars(codex, cst[cnt].name, str);
  }
}
/* Execute the runtime module code stored in code_runtime through the SOAP
   interpreter: publish the runtime variables, run the code with the
   runtime_actions() dispatcher, read back CFL/dt/ximax, and (optionally)
   restore the SOAP variables to their pre-runtime state. */
void process_code_runtime(np_t *np, gl_t *gl, char *code_runtime, SOAP_codex_t *codex){
char *code;
SOAP_vars_t *varsmem;
readcontrolarg_t Runtimearg;
/* snapshot of the SOAP variables taken before the runtime code runs,
   so they can be restored afterwards if gl->RESETRUNTIMEVARS is set */
varsmem=(SOAP_vars_t *)malloc(sizeof(SOAP_vars_t));
SOAP_copy_all_vars(codex->vars, &varsmem);
Runtimearg.np=&np;
Runtimearg.gl=gl;
Runtimearg.input=(input_t *)malloc(sizeof(input_t));
Runtimearg.input->READDATAFILE=FALSE;
Runtimearg.TYPELEVEL=TYPELEVEL_FLUID;
Runtimearg.module_level=0;
Runtimearg.POSTMODULE=FALSE;
Runtimearg.CYCLEMODULE=FALSE;
Runtimearg.RESETITERCOUNT=FALSE;
Runtimearg.VERBOSE=FALSE;
Runtimearg.gl_post=*gl;
Runtimearg.domain_post=gl->domain;
Runtimearg.np_post=np;
if (!gl->cycle.RUNTIMEMODULEFOUND)
fatal_error("The %s() module was not found within Cycle().",_CYCLE_ACTIONNAME);
/* SOAP_process_code may modify the code string; work on a private copy */
code=(char *)malloc((strlen(code_runtime)+2)*sizeof(char));
strcpy(code,code_runtime);
codex->ACTION=TRUE;
codex->action=&runtime_actions;
codex->action_args=(void *)&Runtimearg;
((readcontrolarg_t *)codex->action_args)->np=&np;
((readcontrolarg_t *)codex->action_args)->gl=gl;
/* if (codex->action_being_processed==NULL){
codex->action_being_processed=(char *)malloc((strlen(_CYCLE_ACTIONNAME)+2)*sizeof(char));
strcpy(codex->action_being_processed,_CYCLE_ACTIONNAME);
}*/
codex->VERBOSE=FALSE;
codex->SCREENOUTPUT=TRUE;
add_constants_to_codex(gl, codex);
update_runtime_codex_xi_from_gl(gl, codex);
update_runtime_codex_vars_except_xi_from_gl(gl,codex);
SOAP_process_code(code, codex, SOAP_VARS_KEEP_ALL);
/* the runtime code is allowed to alter CFL, dt and ximax: read them back */
gl->CFL=SOAP_var_value(codex,"CFL");
#ifdef UNSTEADY
gl->dt=SOAP_var_value(codex,"dt");
#endif
gl->ximax=SOAP_var_value(codex,"ximax");
assert(gl->CFL>=0.0e0);
/* here, make sure that all changes to vars within runtime module are erased, because those
will not be written to datafile
-> CFL and ximax and dt are exception to this, and this is why they are probed
through SOAP_var_value above */
if (gl->RESETRUNTIMEVARS){
SOAP_free_all_vars(codex->vars);
SOAP_copy_all_vars(varsmem,&(codex->vars));
}
free(Runtimearg.input);
SOAP_free_all_vars(varsmem);
free(varsmem);
free(code);
reset_clipped_variables(gl);
}
/* Find the maximum convergence parameter xi over all inner fluid nodes of
   zone and store it in gl->ximax; when IJK_UPDATE is IJK_UPDATE_YES also
   record the (i,j,k) indices of the maximum in gl->{i,j,k}_ximax.
   Improvement: the node offset _ai(gl,i,j,k) is computed once per node and
   reused instead of being recomputed four times. */
void find_ximax(np_t *np, gl_t *gl, zone_t zone, int IJK_UPDATE){
  long i,j,k,l;
  double xi;
  gl->ximax=0.0e0;
  for_ijk(zone,is,js,ks,ie,je,ke){
    l=_ai(gl,i,j,k);
    if (is_node_inner(np[l],TYPELEVEL_FLUID_WORK)) {
      assert(is_node_resumed(np[l]));
      xi=np[l].wk->xi;
      /* guard against a corrupted or never-set residual */
      if (xi<-1.0e99 || isnan(xi)) {
        fatal_error("problem with xi (xi=%E) at i=%ld, j=%ld, k=%ld.",xi,i,j,k);
      }
      if (xi>=gl->ximax) {
        gl->ximax=xi;
        if (IJK_UPDATE==IJK_UPDATE_YES) {
          gl->i_ximax=i;
          gl->j_ximax=j;
          gl->k_ximax=k;
        }
      }
    }
  }
}
/*
static void PrintZones(zone_t *zones, long numzone){
long cnt;
for (cnt=0; cnt<numzone; cnt++){
printf("%ld is=%ld js=%ld ie=%ld je=%ld\n",cnt,zones[cnt].is,zones[cnt].js,
zones[cnt].ie,zones[cnt].je);
}
printf("\n");
}
*/
/* Resolve pairwise overlaps between zones that share all bounds except along
   one direction: the overlap is split at its midpoint so the two zones abut
   without intersecting. Done first along i, then j (2D+), then k (3D). */
static void rearrange_overlapping_zones(zone_t *zones, long numzone){
long cnt1,cnt2;
/* PrintZones(zones,numzone); */
for (cnt1=0; cnt1<numzone; cnt1++){
for (cnt2=0; cnt2<numzone; cnt2++){
if (cnt2!=cnt1){
/* do overlap along i :
make ie of zones[cnt1] smaller and is of zones[cnt2] bigger */
if (if3DL( zones[cnt1].ks==zones[cnt2].ks && )
if2DL( zones[cnt1].js==zones[cnt2].js && )
if3DL( zones[cnt1].ke==zones[cnt2].ke && )
if2DL( zones[cnt1].je==zones[cnt2].je && )
zones[cnt1].ie< zones[cnt2].ie &&
zones[cnt1].ie>=zones[cnt2].is) {
zones[cnt1].ie=(zones[cnt1].ie+zones[cnt2].is)/2;
zones[cnt2].is=zones[cnt1].ie+1;
/* sanity check: neither zone may end up empty after the split */
if ( zones[cnt1].is>zones[cnt1].ie
|| zones[cnt2].is>zones[cnt2].ie )
fatal_error("Problem modifying zones along i.");
}
}
}
}
#ifdef _2DL
for (cnt1=0; cnt1<numzone; cnt1++){
for (cnt2=0; cnt2<numzone; cnt2++){
if (cnt2!=cnt1){
/* do overlap along j :
make je of zones[cnt1] smaller and js of zones[cnt2] bigger*/
if (if3DL( zones[cnt1].ks==zones[cnt2].ks && )
zones[cnt1].is==zones[cnt2].is &&
if3DL( zones[cnt1].ke==zones[cnt2].ke && )
zones[cnt1].ie==zones[cnt2].ie &&
zones[cnt1].je< zones[cnt2].je &&
zones[cnt1].je>=zones[cnt2].js) {
zones[cnt1].je=(zones[cnt1].je+zones[cnt2].js)/2;
zones[cnt2].js=zones[cnt1].je+1;
if ( zones[cnt1].js>zones[cnt1].je
|| zones[cnt2].js>zones[cnt2].je )
fatal_error("Problem modifying zones along j.");
}
}
}
}
#endif
#ifdef _3DL
for (cnt1=0; cnt1<numzone; cnt1++){
for (cnt2=0; cnt2<numzone; cnt2++){
if (cnt2!=cnt1){
/* do overlap along k :
make ke of zones[cnt1] smaller and ks of zones[cnt2] bigger*/
if (zones[cnt1].is==zones[cnt2].is &&
zones[cnt1].js==zones[cnt2].js &&
zones[cnt1].ie==zones[cnt2].ie &&
zones[cnt1].je==zones[cnt2].je &&
zones[cnt1].ke< zones[cnt2].ke &&
zones[cnt1].ke>=zones[cnt2].ks) {
zones[cnt1].ke=(zones[cnt1].ke+zones[cnt2].ks)/2;
zones[cnt2].ks=zones[cnt1].ke+1;
if ( zones[cnt1].ks>zones[cnt1].ke
|| zones[cnt2].ks>zones[cnt2].ke )
fatal_error("Problem modifying zones along k.");
}
}
}
}
#endif
/* PrintZones(zones,numzone); */
}
/* Set up the multizone data inside zone: split zone into subzones of the
   given zonelength, keep only those whose maximum xi exceeds xiverge (or all
   of them when UPDATE_ALL_ZONES), and derive for each kept subzone the
   boundary-update (bdry) and residual (res) zones, extended by the boundary
   and residual bandwidths and clipped to lim. Overlapping res zones are then
   rearranged so that no node's residual is computed twice. */
void setup_multizone(np_t *np, gl_t *gl, zone_t zone, zone_t lim, double xiverge,
long zonelength, bool UPDATE_ALL_ZONES, multizone_t *multizone){
long cnt;
long numsubzones;
zone_t *subzones;
double ximax;
long i,j,k;
/* find the zones for the ts process */
subzones=(zone_t *)malloc(sizeof(zone_t));
find_subzones_in_zone_given_zonelength(zonelength, zone, &numsubzones, &subzones);
/* find out which zones need to be updated */
multizone->numzones_ts=0;
multizone->ts=(zone_t *)malloc(numsubzones*sizeof(zone_t));
for (cnt=0; cnt<numsubzones; cnt++){
/* ximax is the maximum xi over the inner nodes of this subzone */
ximax=0.0e0;
for_ijk(subzones[cnt],is,js,ks,ie,je,ke){
if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) {
ximax=max(ximax,np[_ai(gl,i,j,k)].wk->xi);
}
}
if (ximax>xiverge || UPDATE_ALL_ZONES) {
multizone->ts[multizone->numzones_ts]=subzones[cnt];
(multizone->numzones_ts)++;
}
}
/* setup res and bdry, limited by lim_is,lim_js, etc*/
multizone->bdry=(zone_t *)malloc(multizone->numzones_ts*sizeof(zone_t));
multizone->res=(zone_t *)malloc(multizone->numzones_ts*sizeof(zone_t));
for (cnt=0; cnt<multizone->numzones_ts; cnt++){
/* bdry zone: ts zone grown by the boundary bandwidth, clipped to lim */
multizone->bdry[cnt].is=max(lim.is,multizone->ts[cnt].is-hbw_bdry_fluid);
multizone->bdry[cnt].ie=min(lim.ie,multizone->ts[cnt].ie+hbw_bdry_fluid);
#ifdef _2DL
multizone->bdry[cnt].js=max(lim.js,multizone->ts[cnt].js-hbw_bdry_fluid);
multizone->bdry[cnt].je=min(lim.je,multizone->ts[cnt].je+hbw_bdry_fluid);
#endif
#ifdef _3DL
multizone->bdry[cnt].ks=max(lim.ks,multizone->ts[cnt].ks-hbw_bdry_fluid);
multizone->bdry[cnt].ke=min(lim.ke,multizone->ts[cnt].ke+hbw_bdry_fluid);
#endif
/* res zone: ts zone grown by the boundary + residual bandwidths, clipped to lim */
multizone->res[cnt].is=max(lim.is,multizone->ts[cnt].is-hbw_bdry_fluid-hbw_res_fluid);
multizone->res[cnt].ie=min(lim.ie,multizone->ts[cnt].ie+hbw_bdry_fluid+hbw_res_fluid);
#ifdef _2DL
multizone->res[cnt].js=max(lim.js,multizone->ts[cnt].js-hbw_bdry_fluid-hbw_res_fluid);
multizone->res[cnt].je=min(lim.je,multizone->ts[cnt].je+hbw_bdry_fluid+hbw_res_fluid);
#endif
#ifdef _3DL
multizone->res[cnt].ks=max(lim.ks,multizone->ts[cnt].ks-hbw_bdry_fluid-hbw_res_fluid);
multizone->res[cnt].ke=min(lim.ke,multizone->ts[cnt].ke+hbw_bdry_fluid+hbw_res_fluid);
#endif
}
multizone->numzones_total=numsubzones;
multizone->numzones_res=multizone->numzones_ts;
multizone->numzones_bdry=multizone->numzones_ts;
free(subzones);
rearrange_overlapping_zones(multizone->res,multizone->numzones_res);
}
void *thread_zone(void *threadzone){
np_t * np = ((threadzone_t *) threadzone)->np;
gl_t * gl = ((threadzone_t *) threadzone)->gl;
zone_t zone = ((threadzone_t *) threadzone)->zone;
((threadzone_t *) threadzone)->funct(np,gl,zone);
return(NULL);
}
/* Run funct(np,gl,zone) either in a freshly created pthread (ZONETHREADS)
   or as a direct synchronous call. threadzone is handed to the thread by
   address and must remain alive until the thread is joined. */
void create_thread_zone(np_t *np, gl_t * gl, zone_t zone, void (*funct)(np_t *, gl_t *, zone_t zone),
pthread_t *pthread, threadzone_t *threadzone){
  threadzone->np=np;
  threadzone->gl=gl;
  threadzone->zone=zone;
  threadzone->funct=funct;
#ifdef ZONETHREADS
  if (pthread_create(pthread, NULL, thread_zone, threadzone))
    fatal_error("Cannot create thread.");
#else
  thread_zone(threadzone);
#endif
}
/* Join the numthread zone threads created with create_thread_zone(); a no-op
   when COUNTFLAG is set or when ZONETHREADS is disabled (in the latter case
   the work was already performed synchronously). */
void join_all_threads_zone(long numthread, pthread_t *pthread, bool COUNTFLAG){
#ifdef ZONETHREADS
  long thread;
  void *retval;
  if (COUNTFLAG) return;
  for (thread=0; thread<numthread; thread++){
    if (pthread_join(pthread[thread],&retval))
      fatal_error("Cannot join thread %ld.",thread);
  }
#endif
}
/* 1D-segment worker: add the stored dUstar increment to U on every node of
   the segment ls..le along direction theta, and account for the work done in
   the effective iteration counter gl->effiter_U. */
static void update_U_from_dUstar_1(np_t *np, gl_t *gl, long theta, long ls, long le){
long l;
for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
/* per-node lock: other zone threads may touch neighboring nodes */
thread_lock_node_set(np,l,THREADTYPE_ZONE);
add_dUstar_to_U(np,l,gl,np[l].wk->dUstar);
thread_lock_node_unset(np,l,THREADTYPE_ZONE);
/* - if not using SMALLTHREADS, only need to lock for the loop threads,
since gl is local for the zone thread
- if using SMALLTHREADS, then need to lock for both the loop and zone threads
For now, lock for both the loop and zone threads */
thread_lock_global_set(gl,THREADTYPE_ALL);
gl->effiter_U+=1.0/(double)(gl->nn);
thread_lock_global_unset(gl,THREADTYPE_ALL);
}
}
/* Apply update_U_from_dUstar_1 over the whole zone, one 1D I-sweep segment
   at a time, restricted to inner fluid-work nodes. */
static void update_U_from_dUstar(np_t *np, gl_t *gl, zone_t zone){
  sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_1,SWEEPTYPE_I,TYPELEVEL_FLUID_WORK,
                         &is_node_inner,SEGMENTWORK_HEAVY,GRIDLEVEL_ONE);
}
/* Choose how many zone threads to run for numzone zones.
   If numzone fits within maxzonethread, use one thread per zone.
   Otherwise pick the thread count (<= maxzonethread) whose final,
   partially-filled batch of zones is as large as possible, so the last
   round of threads is maximally busy. Ties keep the smallest count. */
long _numthread_optimized(long numzone){
  long candidate,remainder,best_remainder,numthread;
  numthread=numzone;
  if (numzone>maxzonethread) {
    best_remainder=0;
    for (candidate=1; candidate<=maxzonethread; candidate++){
      remainder=mod(numzone,candidate);
      /* a zero remainder means the final batch is completely full */
      if (remainder==0) remainder=candidate;
      if (remainder>best_remainder) {
        numthread=candidate;
        best_remainder=remainder;
      }
    }
  }
  return(numthread);
}
#ifdef DISTMPI
/* Exchange the conserved fluid variables U between MPI processes.
   Each process sends the U of the nodes it owns (its domain) that fall
   inside another process's extended domain (domain_lim), and receives the
   nodes it needs from the other processes. Transfers are aggregated per
   destination rank and shipped with buffered MPI sends to avoid per-node
   messages. After the exchange, the primitive variables are rebuilt at
   every received node that is resumed. */
void exchange_U(np_t *np, gl_t *gl){
  int bl,rankrecv,numproc,ranksend,thisrank,pack_size_Ulocal,pack_size_cnt;
  long i,j,k,flux,iterator,cnt,prevcnt,total;
  long primcnt=0;
  long bufsize=0;
  long *recvcnt,*sendcnt,*processcnt;
  long *primnodenums=NULL;
  long *processnodenums;
  double *buf,*bufptr;
  zone_t zonesend,zonerecv,zone;
  flux_t *recvUlocal;
  flux_t *sendUlocal=NULL;
  MPI_Status MPI_Status1;
  MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  recvcnt=(long *)malloc(numproc*sizeof(long));
  sendcnt=(long *)malloc(numproc*sizeof(long));
  processcnt=(long *)malloc(numproc*sizeof(long));
  processnodenums=(long *)malloc(numproc*sizeof(long));
  for (i=0; i<numproc; i++){
    sendcnt[i]=0;
    processcnt[i]=0;
  }
  /* first pass: for every (sender,receiver) pair involving this rank, walk
     the overlap between the sender's domain and the receiver's extended
     domain; gather the U values to send (sendUlocal, grouped by destination
     rank) and the local node indices that will receive data
     (processnodenums, grouped by source rank) */
  for (ranksend=0; ranksend<numproc; ranksend++){
    zonesend=_domain_from_rank(ranksend,gl);
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){
        zonerecv=_domain_lim_from_rank(rankrecv,gl);
        if (is_zone_intersecting_zone(zonesend,zonerecv)){
          zone=_zone_intersection(zonesend,zonerecv);
          for_ijk(zone,is,js,ks,ie,je,ke){
            if (ranksend==thisrank) {
              /* insert this node's U at the end of destination rankrecv's
                 group within sendUlocal.
                 NOTE(review): since rankrecv increases monotonically here,
                 prevcnt always equals total and the shift loop below never
                 runs; if it ever did run, its ascending a[i]=a[i-1] copy
                 would replicate one element rather than shift — confirm
                 before reusing this insertion elsewhere. */
              for (total=0,iterator=0; iterator<numproc; iterator++) total+=sendcnt[iterator];
              for (prevcnt=0,iterator=0; iterator<=rankrecv; iterator++) prevcnt+=sendcnt[iterator];
              sendUlocal=(flux_t *)realloc(sendUlocal,(total+1)*sizeof(flux_t));
              for (iterator=prevcnt+1;iterator<total+1;iterator++){
                for (flux=0; flux<nf; flux++) *(*(sendUlocal + iterator) + flux)=*(*(sendUlocal + iterator-1) + flux);
              }
              for (flux=0; flux<nf; flux++) *(*(sendUlocal + prevcnt) + flux)=np[_ai(gl,i,j,k)].bs->U[flux];
              sendcnt[rankrecv]++;
            }
            if (rankrecv==thisrank){
              /* record, grouped by sending rank, which local node each
                 incoming U value belongs to */
              for (prevcnt=0,iterator=0; iterator<=ranksend; iterator++) prevcnt+=processcnt[iterator];
              processnodenums=(long *)realloc(processnodenums,(prevcnt+1)*sizeof(long));
              processnodenums[prevcnt]=_ai(gl,i,j,k);
              processcnt[ranksend]++;
              if (is_node_resumed(np[_ai(gl,i,j,k)])){
                /* remember resumed nodes so their primitive variables can be
                   recomputed after the exchange */
                primnodenums=(long *)realloc(primnodenums,(primcnt+1)*sizeof(long));
                primnodenums[primcnt]=_ai(gl,i,j,k);
                primcnt++;
              }
            }
          }
        }
      }
    }
  }
  if(numproc != 1){
    /* size and attach a buffer big enough for one buffered send of counts
       and data to every other rank */
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if (thisrank!=rankrecv){
        MPI_Pack_size(nf*sendcnt[rankrecv],MPI_DOUBLE,MPI_COMM_WORLD,&pack_size_Ulocal);
        MPI_Pack_size(1,MPI_LONG,MPI_COMM_WORLD,&pack_size_cnt);
        bufsize+=(2*MPI_BSEND_OVERHEAD)+pack_size_Ulocal+pack_size_cnt;
      }
    }
    buf=(double *)malloc(bufsize);
    MPI_Buffer_attach(buf, bufsize);
    /* tag 1 carries the count, tag 0 carries the aggregated U data */
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if (thisrank!=rankrecv){
        for (prevcnt=0,iterator=0; iterator<rankrecv; iterator++) prevcnt+=sendcnt[iterator];
        MPI_Bsend(&sendcnt[rankrecv],1,MPI_LONG,rankrecv,1,MPI_COMM_WORLD);
        MPI_Bsend(&sendUlocal[prevcnt],nf*sendcnt[rankrecv],MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD);
      }
    }
    free(sendUlocal);
    for (ranksend=0; ranksend<numproc; ranksend++){
      if (thisrank!=ranksend){
        MPI_Recv(&recvcnt[ranksend],1,MPI_LONG,ranksend,1,MPI_COMM_WORLD,&MPI_Status1);
        recvUlocal=(flux_t *)malloc(recvcnt[ranksend]*sizeof(flux_t));
        MPI_Recv(recvUlocal,recvcnt[ranksend]*nf,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1);
        /* scatter the received block into the node slots recorded for this
           source rank during the first pass */
        for (cnt=0; cnt<recvcnt[ranksend]; cnt++){
          for (prevcnt=0,iterator=0; iterator<ranksend; iterator++) prevcnt+=processcnt[iterator];
          for (flux=0; flux<nf; flux++) np[processnodenums[prevcnt+cnt]].bs->U[flux]=*(*(recvUlocal + cnt) + flux);
        }
        free(recvUlocal);
      }
    }
#ifdef OPENMPTHREADS
#pragma omp parallel for private(cnt) schedule(dynamic)
#endif
    for (cnt=0; cnt<primcnt; cnt++) find_prim_fluid(np,primnodenums[cnt],gl);
    MPI_Buffer_detach(&bufptr,&bl);
    free(buf);
  }
  free(processnodenums);
  free(primnodenums);
  free(processcnt);
  free(recvcnt);
  free(sendcnt);
  MPI_Barrier(MPI_COMM_WORLD);
}
/* Same exchange as exchange_U but with one blocking MPI_Send/MPI_Recv pair
   per node instead of aggregated buffered messages — simpler but far more
   traffic; kept for reference/debugging. After receiving, the primitive
   variables are rebuilt at the resumed nodes (OpenMP-parallel when
   available). */
void exchange_U_old(np_t *np, gl_t *gl){ //same as above but without the MPI_Buffer
  int rankrecv,numproc,ranksend,thisrank;
  long i,j,k,flux;
  long cnt = 0;
  long *nodenums = NULL;
  zone_t zonesend,zonerecv,zone;
  flux_t Ulocal;
  MPI_Status MPI_Status1;
  MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  for (ranksend=0; ranksend<numproc; ranksend++){
    zonesend=_domain_from_rank(ranksend,gl);
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      /* exactly one of the two branches below runs on this rank for a
         given (sender, receiver) pair, in matching ijk order on both
         sides */
      if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){
        zonerecv=_domain_lim_from_rank(rankrecv,gl);
        if (is_zone_intersecting_zone(zonesend,zonerecv)){
          zone=_zone_intersection(zonesend,zonerecv);
          for_ijk(zone,is,js,ks,ie,je,ke){
            if (ranksend==thisrank) {
              for (flux=0; flux<nf; flux++) Ulocal[flux]=np[_ai(gl,i,j,k)].bs->U[flux];
              MPI_Send(Ulocal,nf,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD);
            }
            if (rankrecv==thisrank) {
              MPI_Recv(Ulocal,nf,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1);
              for (flux=0; flux<nf; flux++) np[_ai(gl,i,j,k)].bs->U[flux]=Ulocal[flux];
              if (is_node_resumed(np[_ai(gl,i,j,k)])){
                /* remember this node so its primitive variables can be
                   rebuilt after the exchange */
                nodenums=(long *)realloc(nodenums,(cnt+1)*sizeof(long));
                nodenums[cnt]=_ai(gl,i,j,k);
                cnt++;
              }
            }
          }
        }
      }
    }
  }
#ifdef OPENMPTHREADS
#pragma omp parallel for private(i) schedule(dynamic)
#endif
  for (i=0; i<cnt; i++) find_prim_fluid(np,nodenums[i],gl);
  free(nodenums);
  MPI_Barrier(MPI_COMM_WORLD);
}
#endif
/* Advance the fluid state over all time-stepped (ts) zones of the
   multizone: first compute dUstar (find_dU) for every ts zone, running the
   zones in batches of up to _numthread_optimized() parallel zone threads,
   then fold the increments into U zone by zone. */
void update_U_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){
  long cnt,numzonethread,cntthread;
  pthread_t *pthread;
  threadzone_t *threadzone;
  /* Find dUstar for inner nodes*/
  numzonethread=_numthread_optimized(multizone.numzones_ts);
  pthread=(pthread_t *)malloc(numzonethread*sizeof(pthread_t));
  threadzone=(threadzone_t *)malloc(numzonethread*sizeof(threadzone_t));
  cntthread=0;
  for (cnt=0; cnt<multizone.numzones_ts; cnt++) {
    create_thread_zone(np, gl, multizone.ts[cnt], &find_dU, &(pthread[cntthread]), &(threadzone[cntthread]));
    cntthread++;
    if (cntthread==numzonethread) {
      /* batch full: wait for all of its threads before launching more */
      join_all_threads_zone(cntthread, pthread, FALSE);
      cntthread=0;
    }
  }
  if (cntthread>0) join_all_threads_zone(cntthread, pthread, FALSE);
  /* all dUstar known: apply the increments to U, zone by zone */
  for (cnt=0; cnt<multizone.numzones_ts; cnt++) update_U_from_dUstar(np, gl, multizone.ts[cnt]);
  free(pthread);
  free(threadzone);
}
/* Apply the boundary-node update to every boundary (bdry) zone of the
   multizone, in order. */
void update_bdry_nodes_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){
  long izone;
  for (izone=0; izone<multizone.numzones_bdry; izone++) {
    update_bdry_nodes(np, gl, multizone.bdry[izone]);
  }
}
/* Recompute the residual over all residual (res) zones of the multizone,
   running the zones in batches of up to _numthread_optimized() parallel
   zone threads (same batching scheme as update_U_with_multizone). */
void find_residual_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){
  long cnt,numzonethread,cntthread;
  pthread_t *pthread;
  threadzone_t *threadzone;
  numzonethread=_numthread_optimized(multizone.numzones_res);
  pthread=(pthread_t *)malloc(numzonethread*sizeof(pthread_t));
  threadzone=(threadzone_t *)malloc(numzonethread*sizeof(threadzone_t));
  cntthread=0;
  for (cnt=0; cnt<multizone.numzones_res; cnt++) {
    create_thread_zone(np, gl, multizone.res[cnt], &find_residual, &(pthread[cntthread]), &(threadzone[cntthread]));
    cntthread++;
    if (cntthread==numzonethread) {
      /* batch full: join before launching the next batch */
      join_all_threads_zone(cntthread, pthread, FALSE);
      cntthread=0;
    }
  }
  if (cntthread>0) {
    join_all_threads_zone(cntthread, pthread, FALSE);
  }
  free(pthread);
  free(threadzone);
}
/* One multizone solution step: update U in the time-stepped zones, refresh
   the boundary nodes, then recompute the residual zones. */
void solve_multizone(np_t *np, gl_t *gl, multizone_t multizone){
  update_U_with_multizone(np,gl,multizone);
  update_bdry_nodes_with_multizone(np,gl,multizone);
  find_residual_with_multizone(np,gl,multizone);
}
/* Release the three zone arrays owned by a multizone_t. The struct itself
   belongs to the caller and is not freed here. */
void free_multizone(multizone_t *multizone){
  free(multizone->ts);
  free(multizone->bdry);
  free(multizone->res);
}
/* Recompute and publish the convergence measure over zone: resume the
   nodes, rebuild the fluid (and, with EMFIELD, emfield) residual and the
   per-node xi, then locate the maximum xi. With DISTMPI the maximum is
   reduced across ranks with MPI_MAXLOC and the winning rank broadcasts
   the (i,j,k) location, so every rank ends up holding the same
   gl->ximax / gl->i_ximax (and emfield counterparts). */
void check_residual(np_t *np, gl_t *gl, zone_t zone){
  resume_nodes_in_zone(np, gl, zone);
#ifdef EMFIELD
  update_prim_emfield_mem_in_zone(np, gl, zone);
#endif
  find_residual(np, gl, zone);
  find_ximax(np,gl,zone,IJK_UPDATE_YES);
#ifdef EMFIELD
  find_residual_emfield(np,gl,zone);
  find_ximax_emfield(np, gl, zone);
#endif
#ifdef DISTMPI
  int rank,proc;
  /* (value, rank) pair matching the MPI_DOUBLE_INT layout that MPI_MAXLOC
     requires */
  struct {
    double ximax;
    int rank;
  } ximaxrank,ximaxrank_max;
#ifdef EMFIELD
  struct {
    double ximax;
    int rank;
  } ximaxrank_emfield,ximaxrank_max_emfield;
#endif
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &proc);
  ximaxrank.ximax=gl->ximax;
  ximaxrank.rank=rank;
  MPI_Allreduce(&ximaxrank, &ximaxrank_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);
  gl->ximax=ximaxrank_max.ximax;
  /* the rank owning the maximum broadcasts the location of its maximum */
  MPI_Bcast(&(gl->i_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
  ximaxrank_emfield.ximax=gl->ximax_emfield;
  ximaxrank_emfield.rank=rank;
  MPI_Allreduce(&ximaxrank_emfield, &ximaxrank_max_emfield, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);
  gl->ximax_emfield=ximaxrank_max_emfield.ximax;
  MPI_Bcast(&(gl->i_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#ifdef _2DL
  MPI_Bcast(&(gl->j_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
  MPI_Bcast(&(gl->j_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#endif //_2DL
#ifdef _3DL
  MPI_Bcast(&(gl->k_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD);
#ifdef EMFIELD
  MPI_Bcast(&(gl->k_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD);
#endif
#endif //_3DL
#endif //DISTMPI
}
/* Convergence measure xi for one (resumed) node: the largest value of
   |Res[flux]/Omega/Uref[flux]| over all fluxes, i.e. the residual scaled by
   _Omega (presumably the cell volume/metric factor — confirm) and by the
   user-set reference values Uref. Aborts with a diagnostic if any
   component evaluates to NaN. */
double _xi(np_t np, gl_t *gl, flux_t Res){
  long flux;
  double xi,xitmp;
  assert_np(np,is_node_resumed(np));
  xi=0.0;
  for (flux=0; flux<nf; flux++) {
    xitmp=fabs(Res[flux]/_Omega(np,gl)/gl->cycle.fluid.Uref[flux]);
    xi=max(xi,xitmp);
    if (isnan(xitmp)){
      fatal_error("problem computing xitmp in function _xi() in cycle_share.c;\n xitmp=%E\n Res[%ld]=%E\n Omega=%E\n Uref[%ld]=%E\n",xitmp,flux,Res[flux],_Omega(np,gl),flux,gl->cycle.fluid.Uref[flux]);
    }
  }
  return(xi);
}
/* Eigenvalue-spread estimate Delta_Lambda used to build the local
   pseudotime step at node l along dimension dim. For the
   PRECON_LOCALTIMESTEP2 preconditioner, the node's own estimate is
   additionally maxed componentwise with the estimates at the -offset/+offset
   neighbour nodes along dim (when those are inner fluid-work nodes).
   NOTE(review): the dim2 loop runs exactly once (dim2==dim); it looks like a
   remnant of a version that scanned all dimensions — confirm before
   simplifying. */
static void find_Delta_Lambda_for_dtau_local(np_t *np, gl_t *gl, long l, long dim, flux_t Delta_Lambda){
  long offset,maxoffset,flux,dim2;
  flux_t Delta_Lambda_tmp;
  find_Delta_Lambda_for_dtau(np, gl, l, dim, Delta_Lambda);
  if (gl->PRECONDITIONER==PRECON_LOCALTIMESTEP2){
    maxoffset=1;
    for (dim2=dim; dim2<=dim; dim2++){
      for (offset=1; offset<=maxoffset; offset++){
        if (is_node_inner(np[_al(gl,l,dim2,-offset)],TYPELEVEL_FLUID_WORK)){
          find_Delta_Lambda_for_dtau(np, gl, _al(gl,l,dim2,-offset), dim, Delta_Lambda_tmp);
          for (flux=0; flux<nf; flux++)
            Delta_Lambda[flux]=max(Delta_Lambda[flux],Delta_Lambda_tmp[flux]);
        }
        if (is_node_inner(np[_al(gl,l,dim2,+offset)],TYPELEVEL_FLUID_WORK)){
          find_Delta_Lambda_for_dtau(np, gl, _al(gl,l,dim2,+offset), dim, Delta_Lambda_tmp);
          for (flux=0; flux<nf; flux++)
            Delta_Lambda[flux]=max(Delta_Lambda[flux],Delta_Lambda_tmp[flux]);
        }
      }
    }
  }
}
/* Per-flux pseudotime step dtau at inner node l.
   For preconditioned runs the step is built from the directional
   eigenvalue estimates Delta_Lambda: per-direction local steps are reduced
   to a min/max pair per flux, the max is clipped to
   MAXRATIO_DTAUMAX_DTAUMIN times the min, and the final step is
   CFL * dtaumin^(1-sigma1) * dtaumax^sigma1.
   With PRECON_CONSTANTTIMESTEP every flux simply receives gl->dtau. */
void find_dtau(np_t *np, gl_t *gl, long l, flux_t dtau){
  double dtaumin,dtaumax;
  long dim,flux;
  double dtaulocal[nf][nd];
#ifdef UNSTEADY
  sqmat_t LambdaZ;
#endif
  flux_t Delta_Lambda;
  assert_np(np[l],is_node_inner(np[l],TYPELEVEL_FLUID_WORK));
  if (gl->PRECONDITIONER!=PRECON_CONSTANTTIMESTEP){
#ifdef UNSTEADY
    find_LambdaZ(np,gl,l,LambdaZ);
    set_matrix_to_identity(LambdaZ); //turn off effect of LambdaZ -> seems to be detrimental not beneficial
    for (dim=0; dim<nd; dim++){
      find_Delta_Lambda_for_dtau_local(np, gl, l, dim, Delta_Lambda);
      for (flux=0; flux<nf; flux++){
        assert(LambdaZ[flux][flux]>0.0);
        /* notzero guards against division by a vanishing denominator */
        dtaulocal[flux][dim]=gl->dt/LambdaZ[flux][flux]/notzero(Delta_Lambda[flux]*gl->dt/LambdaZ[flux][flux]+1.0,1e-39);
      }
    }
#else
    for (dim=0; dim<nd; dim++){
      find_Delta_Lambda_for_dtau_local(np, gl, l, dim, Delta_Lambda);
      for (flux=0; flux<nf; flux++){
        dtaulocal[flux][dim]=1.0/notzero(Delta_Lambda[flux],1e-39);
      }
    }
#endif
    /* find optimal dtaus for each flux */
    for (flux=0; flux<nf; flux++){
      dtaumin=1.0e99;
      dtaumax=0.0e0;
      for (dim=0; dim<nd; dim++){
        dtaumin=min(dtaulocal[flux][dim],dtaumin);
        dtaumax=max(dtaulocal[flux][dim],dtaumax);
      }
      dtaumax=min(dtaumin*MAXRATIO_DTAUMAX_DTAUMIN,dtaumax);
      dtau[flux]=gl->CFL*pow(dtaumin,1.0e0-gl->sigma1)*pow(dtaumax,gl->sigma1);
    }
  } else {
    for (flux=0; flux<nf; flux++){
      dtau[flux]=gl->dtau;
    }
  }
}
/* Collapse the per-flux pseudotime steps at node l into one scalar dtau:
   a geometric blend (weighted by gl->sigma2) of the smallest and largest
   per-flux step, with the largest clipped to MAXRATIO_DTAUMAX_DTAUMIN
   times the smallest. */
void find_constant_dtau(np_t *np, gl_t *gl, long l, double *dtau){
  long flux;
  flux_t dtau_per_flux;
  double dtau_lo,dtau_hi;
  find_dtau(np,gl,l,dtau_per_flux);
  /* track min and max of the per-flux steps in a single pass */
  dtau_lo=1.0e99;
  dtau_hi=-1.0e99;
  for (flux=0; flux<nf; flux++){
    dtau_lo=min(dtau_lo,dtau_per_flux[flux]);
    dtau_hi=max(dtau_hi,dtau_per_flux[flux]);
  }
  dtau_hi=min(dtau_lo*MAXRATIO_DTAUMAX_DTAUMIN,dtau_hi);
  *dtau=pow(dtau_lo,1.0-gl->sigma2)*pow(dtau_hi,gl->sigma2);
}
#ifdef EMFIELD
#ifdef DISTMPI
/* Exchange the emfield conserved variables Uemfield between MPI processes:
   for every (sender, receiver) pair involving this rank, walk the overlap
   between the sender's domain and the receiver's extended domain
   (domain_lim) and transfer the nodes one by one with blocking
   send/receive, in matching ijk order on both sides. */
void exchange_U_emfield(np_t *np, gl_t *gl){
  int rankrecv,numproc,ranksend,thisrank;
  long i,j,k,flux;
  zone_t zonesend,zonerecv,zone;
  fluxemfield_t Ulocal;
  MPI_Status MPI_Status1;
  MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  for (ranksend=0; ranksend<numproc; ranksend++){
    zonesend=_domain_from_rank(ranksend,gl);
    for (rankrecv=0; rankrecv<numproc; rankrecv++){
      if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){
        zonerecv=_domain_lim_from_rank(rankrecv,gl);
        if (is_zone_intersecting_zone(zonesend,zonerecv)){
          zone=_zone_intersection(zonesend,zonerecv);
          for_ijk(zone,is,js,ks,ie,je,ke){
            if (ranksend==thisrank) {
              for (flux=0; flux<nfe; flux++) Ulocal[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
              MPI_Send(Ulocal,nfe,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD);
            }
            if (rankrecv==thisrank) {
              MPI_Recv(Ulocal,nfe,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1);
              for (flux=0; flux<nfe; flux++) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Ulocal[flux];
            }
          }
        }
      }
    }
  }
  MPI_Barrier(MPI_COMM_WORLD);
}
/* Older variant: broadcast Uemfield node by node over the whole grid.
   For each node of domain_all the owning rank loads Uemfield into Ulocal,
   MPI_Bcast_Node distributes it, and every rank whose extended domain
   (domain_lim) contains the node stores the received values. Far more
   traffic than exchange_U_emfield, which only transfers overlap regions. */
void exchange_U_emfield_old(np_t *np, gl_t *gl){
  int rank;
  long i,j,k,flux;
  fluxemfield_t Ulocal;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  for_ijk (gl->domain_all,is,js,ks,ie,je,ke){
    if (rank==_node_rank(gl, i, j, k) && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
      for (flux=0; flux<nfe; flux++) {
        Ulocal[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
      }
    }
    MPI_Bcast_Node(Ulocal,nfe,MPI_DOUBLE,_node_rank(gl,i,j,k),MPI_COMM_WORLD,i,j,k,gl);
    if (is_node_in_zone(i,j,k,gl->domain_lim) && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
      for (flux=0; flux<nfe; flux++) {
        np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Ulocal[flux];
      }
    }
  }
  MPI_Barrier(MPI_COMM_WORLD);
}
#endif
/* Sweep one 1D segment [ls..le] along direction theta and run the stage-1
   emfield primitive-memory refresh at every valid emfield node. */
void update_prim_emfield_mem_in_zone_1(np_t *np, gl_t *gl, long theta, long ls, long le){
  long node;
  for (node=ls; node!=_l_plus_one(le,gl,theta); node=_l_plus_one(node,gl,theta)){
    if (!is_node_valid(np[node],TYPELEVEL_EMFIELD)) continue;
    find_prim_emfield_mem_1(np, gl, node);
  }
}
/* Sweep one 1D segment [ls..le] along direction theta and run the stage-2
   emfield primitive-memory refresh at every valid emfield node. */
void update_prim_emfield_mem_in_zone_2(np_t *np, gl_t *gl, long theta, long ls, long le){
  long node;
  for (node=ls; node!=_l_plus_one(le,gl,theta); node=_l_plus_one(node,gl,theta)){
    if (!is_node_valid(np[node],TYPELEVEL_EMFIELD)) continue;
    find_prim_emfield_mem_2(np, gl, node);
  }
}
/* Sweep one 1D segment [ls..le] along direction theta and run the stage-3
   emfield primitive-memory refresh at every valid emfield node. */
void update_prim_emfield_mem_in_zone_3(np_t *np, gl_t *gl, long theta, long ls, long le){
  long node;
  for (node=ls; node!=_l_plus_one(le,gl,theta); node=_l_plus_one(node,gl,theta)){
    if (!is_node_valid(np[node],TYPELEVEL_EMFIELD)) continue;
    find_prim_emfield_mem_3(np, gl, node);
  }
}
#ifdef _TSEMF_STORE_COEFFICIENTS
/* Precompute and cache, for every inner emfield node on the segment, the
   per-flux pseudotime step dtauemfield and the linearization coefficients
   (coeffm1, coeffp0, coeffp1) used by the implicit emfield solver, plus
   the sum of coeffp0 over dimensions. Only compiled when the coefficients
   are stored (_TSEMF_STORE_COEFFICIENTS). */
void update_prim_emfield_mem_in_zone_4(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l,dim,flux;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    if (is_node_inner(np[l],TYPELEVEL_EMFIELD)){
      for (flux=0; flux<nfe; flux++){
        find_dtau_emfield(np,gl,l,flux,&(np[l].bs->dtauemfield[flux]));
        np[l].bs->coeffp0sum[flux]=0.0;
        for (dim=0; dim<nd; dim++){
          find_linearization_coefficients_inner_node_emfield(np, gl, l, dim, flux, &(np[l].bs->coeffm1[dim][flux]), &(np[l].bs->coeffp0[dim][flux]), &(np[l].bs->coeffp1[dim][flux]));
          np[l].bs->coeffp0sum[flux]+=np[l].bs->coeffp0[dim][flux];
        }
      }
    }
  }
}
#endif
/* Refresh all cached emfield primitive quantities in zone by running the
   three (four, when coefficients are stored) per-node passes as 1D
   I-direction sweeps over valid emfield nodes. */
void update_prim_emfield_mem_in_zone(np_t *np, gl_t *gl, zone_t zone){
  sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_1,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_2,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_3,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
#ifdef _TSEMF_STORE_COEFFICIENTS
  sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_4,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
#endif
}
/* Add the convective flux balance to the emfield residual along one 1D
   segment: for each interface between nodes l-1 and l, the interface flux
   Fm1h is subtracted from the right node and added to the left node.
   The loop deliberately runs one node past le so the interface on the far
   side of le is included; the two guards keep the residual updates
   confined to [ls..le]. */
void add_convection_residual_emfield(long theta, long ls, long le, np_t *np, gl_t *gl){
  long l,flux;
  fluxemfield_t Fm1h;
  for (l=ls; l!=_l_plus_one(_l_plus_one(le,gl,theta),gl,theta); l=_l_plus_one(l,gl,theta)){
    find_Fstar_interface_emfield(np,gl,_al(gl,l,theta,-1),_al(gl,l,theta,+0),theta,Fm1h);
    for (flux=0; flux<nfe; flux++){
      if (l!=_l_plus_one(le,gl,theta)) np[l].bs->Resemfield[flux]-=Fm1h[flux];
      if (l!=ls) np[_al(gl,l,theta,-1)].bs->Resemfield[flux]+=Fm1h[flux];
    }
  }
}
/* Subtract the emfield source terms S* from the residual on one 1D
   segment. Sources are only accumulated during the theta==0 sweep so a
   multidimensional sweep does not count them more than once. */
void add_source_residual_emfield(long theta, long ls, long le, np_t *np, gl_t *gl){
  long node,flux;
  fluxemfield_t Sstar;
  if (theta!=0) return;
  for (node=ls; node!=_l_plus_one(le,gl,theta); node=_l_plus_one(node,gl,theta)){
    find_Sstar_emfield(np,gl,node,Sstar);
    for (flux=0; flux<nfe; flux++) np[node].bs->Resemfield[flux]-=Sstar[flux];
  }
}
/* Accumulate the emfield residual on one 1D segment: convective fluxes for
   every sweep direction, plus the source terms (added once, on theta==0). */
void update_residual_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){
  add_convection_residual_emfield(theta,ls,le,np,gl);
  add_source_residual_emfield(theta,ls,le,np,gl);
}
/* Zero the emfield residual on one 1D segment before accumulation, and
   advance the effective-iteration counter by 1/nn per node. */
void initialize_residual_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l,flux;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    for (flux=0; flux<nfe; flux++) np[l].bs->Resemfield[flux]=0.0e0;
    gl->effiter_R_emfield+=1.0e0/(double)gl->nn;
  }
}
/* Apply the emfield boundary condition at boundary node l.
   Regular case: find the boundary direction and update the node from its
   two inner neighbours l_B (one node in) and l_C (two nodes in) along that
   direction. Link nodes are skipped (their Uemfield was already updated).
   If no usable straight direction exists (corner nodes), scan the diagonal
   neighbours for an inner (l_B, l_C) pair and update from the first pair
   found. */
void update_bdry_node_emfield(np_t *np, gl_t *gl, long l){
  long dim,l_C,l_B,l_A;
  long dimsgn;
  bool BDRYDIRECFOUND;
#ifdef _2DL
  long dim1; long dim2;
#endif
#ifdef _3D
  long dim3;
#endif
  bool UPDATED;
  assert(is_node_bdry(np[l],TYPELEVEL_EMFIELD));
  UPDATED=FALSE;
  BDRYDIRECFOUND=find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &dim, &dimsgn);
  if (is_node_link(np[l],TYPELEVEL_EMFIELD)) {
    // in case the boundary node is a link, Uemf has already been updated
    UPDATED=TRUE;
  }
  if (!UPDATED && BDRYDIRECFOUND){
    /* straight boundary: use the two nodes immediately inside along dim */
    l_A=l;
    l_B=_al(gl,l,dim,dimsgn);
    l_C=_al(gl,l,dim,dimsgn*2);
    assert(is_node_inner(np[l_C],TYPELEVEL_EMFIELD));
    assert(is_node_inner(np[l_B],TYPELEVEL_EMFIELD));
    update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD);
    UPDATED=TRUE;
  }
  /* now, do the corners */
  if (!UPDATED) {
#ifdef _2D
    for (dim1=-1; dim1<=1; dim1++){
      for (dim2=-1; dim2<=1; dim2++){
        /* diagonal candidates: l_B one step away, l_C two steps away in the
           same diagonal direction */
        l_C=_all(gl,l,0,dim1*2,1,dim2*2);
        l_B=_all(gl,l,0,dim1,1,dim2);
        l_A=l;
        if ( is_node_inner(np[l_B],TYPELEVEL_EMFIELD)
          && is_node_inner(np[l_C],TYPELEVEL_EMFIELD) && !UPDATED){
          update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD);
          UPDATED=TRUE;
        }
      }
    }
#endif
#ifdef _3D
    for (dim1=-1; dim1<=1; dim1++){
      for (dim2=-1; dim2<=1; dim2++){
        for (dim3=-1; dim3<=1; dim3++){
          l_C=_al(gl,
                  _al(gl,
                      _al(gl,l,0,dim1*2),
                      1,dim2*2),
                  2,dim3*2);
          l_B=_al(gl,
                  _al(gl,
                      _al(gl,l,0,dim1),
                      1,dim2),
                  2,dim3);
          l_A=l;
          if ( is_node_inner(np[l_B],TYPELEVEL_EMFIELD)
            && is_node_inner(np[l_C],TYPELEVEL_EMFIELD) && !UPDATED){
            update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD);
            UPDATED=TRUE;
          }
        }
      }
    }
#endif
  }
}
/* Walk one 1D segment [ls..le] and refresh every emfield boundary node on
   it. Each node is locked against the other zone threads while its
   boundary condition is applied. */
void update_bdry_nodes_on_segment_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){
  long node;
  for (node=ls; node!=_l_plus_one(le,gl,theta); node=_l_plus_one(node,gl,theta)){
    if (!is_node_bdry(np[node],TYPELEVEL_EMFIELD)) continue;
    thread_lock_node_set(np,node,THREADTYPE_ZONE);
    update_bdry_node_emfield(np, gl, node);
    thread_lock_node_unset(np,node,THREADTYPE_ZONE);
  }
}
/* Update every emfield boundary node in zone via 1D I-direction sweeps. */
void update_bdry_nodes_emfield(np_t *np, gl_t *gl, zone_t zone){
  sweep_with_1D_segments(np, gl, zone, &update_bdry_nodes_on_segment_emfield, SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
}
/* Rebuild the emfield residual over zone: zero it, accumulate the
   convective and source contributions (IJK sweeps), then compute the
   per-node convergence measure at every inner emfield node. */
void find_residual_emfield(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  /* accumulate the residual into bs->Resemfield */
  sweep_with_1D_segments(np,gl,zone,&initialize_residual_emfield, SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE);
  sweep_with_1D_segments(np,gl,zone,&update_residual_emfield, SWEEPTYPE_IJK, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_HEAVY,GRIDLEVEL_ONE);
  /* store the per-node measure _xi_emfield; the zone maximum is found
     later by find_ximax_emfield */
  for_ijk(zone,is,js,ks,ie,je,ke){
    if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) {
      np[_ai(gl,i,j,k)].bs->_xi_emfield=_xi_emfield(np[_ai(gl,i,j,k)],gl,np[_ai(gl,i,j,k)].bs->Resemfield);
    }
  }
}
/* Scan zone for the largest per-node emfield convergence measure
   _xi_emfield among inner emfield nodes; store the maximum and its
   (i,j,k) location in gl. The >= comparison means the last node visited
   with the maximal value wins, exactly as before. */
void find_ximax_emfield(np_t *np, gl_t *gl, zone_t zone){
  long i,j,k;
  double xi_node;
  gl->ximax_emfield=0.0e0;
  for_ijk(zone,is,js,ks,ie,je,ke){
    if (!is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) continue;
    xi_node=np[_ai(gl,i,j,k)].bs->_xi_emfield;
    if (xi_node>=gl->ximax_emfield) {
      gl->ximax_emfield=xi_node;
      gl->i_ximax_emfield=i;
      gl->j_ximax_emfield=j;
      gl->k_ximax_emfield=k;
    }
  }
}
/* Parse the arguments of UpdateEMField(Lc, relaxEMF [,dt] [,tsemfmethod
   [,numsubiter]]): read the length scale Lc and relaxation factor
   relaxEMF, then (UNSTEADY builds only) the physical time step dt, and
   optionally the emfield time-stepping method plus — for the
   relaxation-type methods — the number of subiterations (default 4).
   Every value is validated and fatal_error is raised on invalid input. */
void read_UpdateEMField_arguments(char **argum, SOAP_codex_t *codex, gl_t *gl){
  SOAP_substitute_all_argums(argum, codex);
  gl->Lc=SOAP_get_argum_double(codex,*argum,0);
  gl->relaxEMF=SOAP_get_argum_double(codex,*argum,1);
  gl->numsubiter_tsemf=4; /* make the default number of subiterations equal to 4 */
  gl->tsemfmethod=TSEMF_DEFAULT;
  if (gl->Lc<=0.0) fatal_error("The length scale Lc must be positive when calling UpdateEMField().");
  if (gl->relaxEMF<=0.0) fatal_error("The relaxation factor relaxEMF must be positive when calling UpdateEMField().");
  if (gl->relaxEMF>2.0) fatal_error("The relaxation factor relaxEMF must be less than 2 when calling UpdateEMField().");
  /* numsubiter_tsemf is integral: compare against integer zero (was 0.0) */
  if (gl->numsubiter_tsemf<=0) fatal_error("The number of subiterations subiter_tsemf must be positive when calling UpdateEMField().");
#ifdef UNSTEADY
  gl->dt=SOAP_get_argum_double(codex,*argum,2);
  if (gl->dt<=0.0) fatal_error("The time step dt must be positive when calling UpdateEMField().");
  if (SOAP_number_argums(*argum)>3) gl->tsemfmethod=SOAP_get_argum_long(codex,*argum,3);
  if (SOAP_number_argums(*argum)>4){
    if (gl->tsemfmethod==TSEMF_SOR || gl->tsemfmethod==TSEMF_SOR2 || gl->tsemfmethod==TSEMF_ADIIMAF || gl->tsemfmethod==TSEMF_IMAF || gl->tsemfmethod==TSEMF_IMAFk || gl->tsemfmethod==TSEMF_IMAFi)
      gl->numsubiter_tsemf=SOAP_get_argum_long(codex,*argum,4);
    /* FIX: error message previously misspelled TSEMF_IMAFk and TSEMF_IMAFi
       as TSMEF_* */
    else fatal_error("UpdateEMField accepts the number of subiterations as a 5th argument only if TSEMF_SOR, TSEMF_SOR2, TSEMF_ADIIMAF, TSEMF_IMAF, TSEMF_IMAFk, TSEMF_IMAFi is specified.");
  }
#else
  if (SOAP_number_argums(*argum)>2) gl->tsemfmethod=SOAP_get_argum_long(codex,*argum,2);
  if (SOAP_number_argums(*argum)>3) {
    if (gl->tsemfmethod==TSEMF_SOR || gl->tsemfmethod==TSEMF_SOR2 || gl->tsemfmethod==TSEMF_ADIIMAF || gl->tsemfmethod==TSEMF_IMAF || gl->tsemfmethod==TSEMF_IMAFk || gl->tsemfmethod==TSEMF_IMAFi)
      gl->numsubiter_tsemf=SOAP_get_argum_long(codex,*argum,3);
    /* FIX: error message previously misspelled TSEMF_IMAFi as TSMEF_IMAFi */
    else fatal_error("UpdateEMField accepts the number of subiterations as a 4th argument only if TSEMF_SOR, TSEMF_SOR2, TSEMF_ADIIMAF, TSEMF_IMAF, TSEMF_IMAFk, TSEMF_IMAFi is specified.");
  }
#endif
}
/* Solve the tridiagonal system assembled in tdma[0..numlines-1] for one
   emfield line along direction theta (tdma[i].val holds {sub, diag,
   super, rhs}). Without DISTMPI, or when MPI boundaries are treated
   explicitly, the serial solver EXM_solve_TDMA is used. Otherwise the
   elimination is pipelined across processes: if the first node ls is an
   inner node (i.e. owned upstream), the already-reduced boundary row is
   received before the forward elimination; the last reduced row is passed
   downstream, and rows are exchanged symmetrically for the backward
   substitution. Message tags encode the global node index (_ai_all) of
   the row being transferred. */
void solve_TDMA_emfield(np_t *np, gl_t *gl, long theta, long ls, long le, int TYPELEVEL, EXM_tdmaline_t *tdma, long numlines){
#ifdef DISTMPI
  long line,cnt,i,j,k,i_s,j_s,k_s;
  double tmp;
  MPI_Status MPI_Status1;
  if (gl->EM_MPIBDRY_EXPLICIT){
    EXM_solve_TDMA(tdma, numlines);
  } else {
    /* if ls node is inner node, need to obtain the tdma[0] from another process that owns ls */
    if (is_node_inner(np[ls],TYPELEVEL)){
      find_ijk_from_l(gl, ls, &i, &j, &k);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Recv(tdma[0].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i,j,k),MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in solve_TDMA_emfield");
      assert(tdma[0].val[0]==0.0);
    }
    /* forward elimination: zero the subdiagonal row by row */
    for (line=0; line<numlines-1; line++){
      assert(tdma[line].val[1]!=0.0);
      tmp = -(tdma[line+1].val[0] / tdma[line].val[1]);
      for (cnt = 1; cnt <= 2; cnt++)
        tdma[line+1].val[cnt - 1] += tdma[line].val[cnt] * tmp;
      tdma[line+1].val[3] += tdma[line].val[3] * tmp;
      tdma[line+1].val[0] = 0.0;
    }
    /* if le node is inner node, need to send the tdma[numlines-2] to another process that owns le */
    if (is_node_inner(np[le],TYPELEVEL)){
      find_ijk_from_l(gl, le, &i, &j, &k);
      find_ijk_from_l(gl, _l_minus_one(le,gl,theta), &i_s, &j_s, &k_s);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Send(tdma[numlines-2].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i_s,j_s,k_s),MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in solve_TDMA_emfield");
    }
    /* if le node is inner node, need to obtain the tdma[numlines-1] from another process that owns le */
    if (is_node_inner(np[le],TYPELEVEL)){
      find_ijk_from_l(gl, le, &i, &j, &k);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Recv(tdma[numlines-1].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i,j,k),MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS) fatal_error("MPI_Recv problem in solve_TDMA_emfield");
      assert(tdma[numlines-1].val[2]==0.0);
    }
    /* backward substitution: normalize the diagonal and clear the
       superdiagonal, leaving the solution in val[3] */
    for (line=numlines-1; line>0; line--){
      assert(tdma[line].val[1]!=0.0);
      tdma[line].val[3] /= tdma[line].val[1];
      tdma[line].val[1] = 1.0;
      tdma[line-1].val[3] -= tdma[line].val[3] * tdma[line-1].val[2];
      tdma[line-1].val[2] = 0.0;
    }
    assert(tdma[0].val[1]!=0.0);
    tdma[0].val[3] /= tdma[0].val[1];
    tdma[0].val[1] = 1.0;
    /* if ls node is inner node, need to send the tdma[1] to another process that owns ls */
    if (is_node_inner(np[ls],TYPELEVEL)){
      find_ijk_from_l(gl, ls, &i, &j, &k);
      find_ijk_from_l(gl, _l_plus_one(ls,gl,theta), &i_s, &j_s, &k_s);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Send(tdma[1].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i_s,j_s,k_s),MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in solve_TDMA_emfield");
    }
  }
#else
  EXM_solve_TDMA(tdma, numlines);
#endif
}
#endif//EMFIELD
|
dense_wt.c | /* Copyright (c) 2016 Drew Schmidt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Functions for computing covariance, (pearson) correlation, and cosine similarity
#include <stdlib.h>
#include <string.h>
#include "coop.h"
#include "utils/fill.h"
#include "utils/mmult.h"
#include "utils/safeomp.h"
#define WT_UNBIASED 1
#define WT_ML 2
#define BADWT -1
/* Validate a length-m weight vector: every weight must be non-negative and
   the weights must sum to 1. Returns BADWT on failure, 0 on success.
   NOTE(review): the sum test uses exact floating-point equality; weights
   built by division (e.g. 1/m repeated m times) can fail it through
   rounding alone. A tolerance-based comparison is probably intended —
   confirm against the callers before changing. */
static inline int wtchecks(const int m, const double *wt)
{
  double sum = 0;
  PLEASE_VECTORIZE
  for (int i=0; i<m; i++)
  {
    if (wt[i] < 0)
      return BADWT;
    sum += wt[i];
  }
  if (sum != 1.0)
    return BADWT;
  /* FIX: the original fell off the end without returning a value — that is
     undefined behavior for a function returning int */
  return 0;
}
/* Compute the scaling factor alpha for the weighted crossproduct.
   For the unbiased estimator alpha = 1/(1 - sum_i wt[i]^2); when wtlen==1
   the single weight stands for m equal weights, so the sum collapses to
   m*wt[0]^2. For the ML estimator alpha is simply 1.
   The crossproduct itself is still pending (see FIXME below), so alpha is
   currently computed but unused. */
static void wtcp(const int method, const int m, const int n, const double * const restrict x, const int wtlen, const double * const restrict wt)
{
  double alpha;
  if (method == WT_UNBIASED)
  {
    if (wtlen == 1)
      alpha = 1. / (1. - ((double)m)*wt[0]*wt[0]);
    else
    {
      alpha = 0.;
      PLEASE_VECTORIZE
      for (int i=0; i<m; i++)
        alpha += wt[i]*wt[i];   /* FIX: accumulate (was overwritten each iteration) */
      alpha = 1. / (1. - alpha); /* FIX: take the reciprocal, matching the wtlen==1 branch */
    }
  }
  else
    alpha = 1.;
  // FIXME
  // crossprod(m, n, alpha, x, c);
}
/* Weighted column means of the m-by-n column-major matrix x:
   colmeans[j] = sum_i wt[i]*x[i,j], where wt has one weight per row
   (length m). The column loop is OpenMP-parallel for large inputs. */
static inline void center_wt(const int m, const int n, const double * const restrict x, const double * const restrict wt, double * restrict colmeans)
{
  #pragma omp parallel for default(none) shared(colmeans) if(m*n>OMP_MIN_SIZE)
  for (int j=0; j<n; j++)
  {
    const int mj = m*j;
    colmeans[j] = 0.;
    SAFE_SIMD
    for (int i=0; i<m; i++)
      colmeans[j] += wt[i] * x[i + mj]; /* FIX: weight indexed by row i (was wt[j],
                                           which mixes up the per-row weights and
                                           reads out of bounds when n > m) */
  }
}
// TODO just operate on x in place, leave the copy to the user
/* Weighted covariance of the columns of the m-by-n column-major matrix x.
   method selects the unbiased or ML scaling (see wtcp). wt is the per-row
   weight vector of length m, or NULL for uniform weights 1/m. On return,
   colmeans holds the weighted column means and cov the symmetrized
   covariance matrix. Returns COOP_OK on success. */
int coop_covar_wt_mat(const int method, const int m, const int n, const double * const restrict x, int wtlen, const double * const restrict wt, double * restrict colmeans, double *restrict cov)
{
  double *x_cp = malloc(m*n*sizeof(*x));
  CHECKMALLOC(x_cp);
  double *wt_alloc = NULL;
  const double *wt_pt; /* FIX: const-qualified — wt is const and was assigned
                          to a plain double*, discarding the qualifier */
  if (wt == NULL)
  {
    /* no weights supplied: build uniform weights 1/m, one per row, since
       center_wt reads one weight per observation */
    wt_alloc = malloc(m*sizeof(*wt_alloc));
    if (wt_alloc == NULL)
      free(x_cp); /* don't leak x_cp if the malloc check bails out */
    CHECKMALLOC(wt_alloc);
    for (int i=0; i<m; i++)
      wt_alloc[i] = 1./((double) m);
    wt_pt = wt_alloc;
    wtlen = 1; /* all weights equal: wtcp may use its scalar shortcut */
  }
  else
    wt_pt = wt;
  memcpy(x_cp, x, m*n*sizeof(*x));
  center_wt(m, n, x, wt_pt, colmeans);      /* FIX: pass wt_pt — wt may be NULL */
  coop_scale(true, false, m, n, x_cp, colmeans, NULL);
  wtcp(method, m, n, x_cp, wtlen, wt_pt);   /* FIX: crossproduct must use the
                                               centered copy x_cp, not x, and
                                               the non-NULL weight pointer */
  free(x_cp);                               /* FIX: x_cp was leaked */
  free(wt_alloc);
  symmetrize(n, cov);
  return COOP_OK;
}
|
bli_dotv_bgq_int.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of The University of Texas at Austin nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
/* Double-precision dot product rho = x.y for the IBM Blue Gene/Q QPX unit.
   conjx/conjy are accepted for interface compatibility but ignored (no-ops
   for real vectors). Falls back to the reference kernel when either vector
   is strided or not 32-byte aligned, since the main loop relies on aligned
   4-wide vector loads (vec_lda). The vectorized loop is split across
   OpenMP threads round-robin; each thread keeps a private 4-lane
   accumulator whose lanes are summed into the reduction variable rhos, and
   the n%4 tail is finished in scalar code. */
void bli_ddotv_bgq_int
     (
       conj_t           conjx,
       conj_t           conjy,
       dim_t            n,
       double* restrict x, inc_t incx,
       double* restrict y, inc_t incy,
       double* restrict rho,
       cntx_t* restrict cntx
     )
{
  bool_t use_ref = FALSE;
  // If the vector lengths are zero, set rho to zero and return.
  if ( bli_zero_dim1( n ) ) {
    PASTEMAC(d,set0s)( rho );
    return;
  }
  // If there is anything that would interfere with our use of aligned
  // vector loads/stores, call the reference implementation.
  if ( incx != 1 || incy != 1 || bli_is_unaligned_to( ( siz_t )x, 32 ) || bli_is_unaligned_to( ( siz_t )y, 32 ) )
    use_ref = TRUE;
  // Call the reference implementation if needed.
  if ( use_ref ) {
    BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho, cntx );
    return;
  }
  dim_t n_run  = n / 4;
  dim_t n_left = n % 4;
  double rhos = 0.0;
  #pragma omp parallel reduction(+:rhos)
  {
    dim_t n_threads;
    dim_t t_id = omp_get_thread_num();
    n_threads = omp_get_num_threads();
    /* per-thread 4-lane accumulator */
    vector4double rhov = vec_splats( 0.0 );
    vector4double xv, yv;
    for ( dim_t i = t_id; i < n_run; i += n_threads )
    {
      xv = vec_lda( 0 * sizeof(double), &x[i*4] );
      yv = vec_lda( 0 * sizeof(double), &y[i*4] );
      rhov = vec_madd( xv, yv, rhov );
    }
    /* horizontal sum of the accumulator lanes into the OpenMP reduction */
    rhos += vec_extract( rhov, 0 );
    rhos += vec_extract( rhov, 1 );
    rhos += vec_extract( rhov, 2 );
    rhos += vec_extract( rhov, 3 );
  }
  /* scalar cleanup for the final n%4 elements */
  for ( dim_t i = 0; i < n_left; i++ )
  {
    rhos += x[4*n_run + i] * y[4*n_run + i];
  }
  *rho = rhos;
}
|
DRB057-jacobiinitialize-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Use of private() clause
*/
#include <stdio.h>
#include <math.h>
#define MSIZE 200
int n=MSIZE, m=MSIZE;
double alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;
/* initialize: set up the initial condition u and the right-hand side f
   for the Jacobi solver on an n x m grid over [-1,1] x [-1,1].
   Reads globals n, m, alpha; writes globals dx, dy, u, f.
   DataRaceBench DRB057 ("orig-no"): the private() clauses make this
   loop nest free of data races. */
void
initialize ()
{
int i, j, xx, yy;
/* Grid spacing in each direction */
dx = 2.0 / (n - 1);
dy = 2.0 / (m - 1);
/* Initialize initial condition and RHS */
#pragma omp parallel for private(i,j,xx,yy)
for (i = 0; i < n; i++)
/* NOTE(review): this inner "parallel for" opens a nested parallel
   region inside the outer one; with nested parallelism disabled (the
   OpenMP default) it runs with one thread per outer thread, so it is
   redundant overhead but not a race (j, xx, yy are private either
   way). Confirm whether the benchmark intends nesting here. */
#pragma omp parallel for private(j,xx,yy)
for (j = 0; j < m; j++)
{
xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */
yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */
u[i][j] = 0.0;
f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
- 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
}
}
/* Driver: build the Jacobi initial condition/RHS via initialize(),
   then print u and f side by side, one grid point per line. */
int main()
{
initialize();
for (int row = 0; row < n; row++)
for (int col = 0; col < m; col++)
printf("%lf %lf\n", u[row][col], f[row][col]);
return 0;
}
|
GB_unop__identity_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// NOTE: this entire function is compiled out by "#if 0".  The generator
// emits no apply kernel for this operator (the op(A) function is listed
// as "(none)" above); the disabled body is kept for reference only.
#if 0
// Apply the identity operator element-wise: Cx [p] = (double) Ax [p].
// Cx/Ax may alias; Ab is A->b when A is bitmap (NULL otherwise);
// anz is the number of entries; nthreads sets the OpenMP team size.
GrB_Info GB (_unop_apply__(none))
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// non-bitmap case: every entry 0..anz-1 is present; copy them all
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, cast each entry to double, and apply the identity
// operator.  The actual loops live in the included template
// GB_unop_transpose.c, which expands the GB_* macros defined above.
// Workspaces, A_slice, nworkspaces, nthreads: per-thread workspace and
// slicing information consumed by the template, not used directly here
// -- see GB_unop_transpose.c for their exact semantics.
GrB_Info GB (_unop_tran__identity_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nco_s1d.c | /* $Header$ */
/* Purpose: NCO utilities for Sparse-1D (S1D) datasets */
/* Copyright (C) 2020--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License with exceptions described in the LICENSE file */
#include "nco_s1d.h" /* Sparse-1D datasets */
const char * /* O [sng] String describing sparse-type */
nco_s1d_sng /* [fnc] Convert sparse-1D type enum to string */
(const nco_s1d_typ_enm nco_s1d_typ) /* I [enm] Sparse-1D type enum */
{
  /* Purpose: Map a sparse-1D type enum to its human-readable description.
     Unknown enum values are routed through the generic default-case
     handler, after which a NULL pointer is returned. */
  const char *sng=(char *)NULL;
  switch(nco_s1d_typ){
  case nco_s1d_clm: sng="Sparse Column (cols1d) format"; break;
  case nco_s1d_grd: sng="Sparse Gridcell (grid1d) format"; break;
  case nco_s1d_lnd: sng="Sparse Landunit (land1d) format"; break;
  case nco_s1d_pft: sng="Sparse PFT (pfts1d) format"; break;
  default: nco_dfl_case_generic_err(); break;
  } /* !nco_s1d_typ_enm */
  /* Single exit point keeps compilers happy that need a return statement
     to end a non-void function */
  return sng;
} /* !nco_s1d_sng() */
int /* O [rcd] Return code */
nco_s1d_unpack /* [fnc] Unpack sparse-1D CLM/ELM variables into full file */
(rgr_sct * const rgr, /* I/O [sct] Regridding structure */
trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */
{
/* Purpose: Read sparse CLM/ELM input file, inflate and write into output file */
/* Usage:
ncks -D 1 -O -C --s1d ~/data/bm/elm_mali_bg_hst.nc ~/foo.nc
ncks -D 1 -O -C --s1d -v cols1d_topoglc --hrz=${DATA}/bm/elm_mali_ig_hst.nc ${DATA}/bm/elm_mali_rst.nc ~/foo.nc
ncks -D 1 -O -C --s1d -v GPP,pfts1d_wtgcell ~/beth_in.nc ~/foo.nc
ncremap --dbg=1 --vrb=3 --devnull=No --nco='--dbg=1' -P elm -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc ~/foo.nc ~/foo_rgr.nc */
const char fnc_nm[]="nco_s1d_unpack()"; /* [sng] Function name */
char *fl_in;
char *fl_out;
char *fl_tpl; /* [sng] Template file (contains horizontal grid) */
char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */
char *grd_nm_in=(char *)strdup("gridcell");
char *lnd_nm_in=(char *)strdup("landunit");
char *clm_nm_in=(char *)strdup("column");
char *pft_nm_in=(char *)strdup("pft");
char *mec_nm_out=(char *)strdup("mec");
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int in_id; /* I [id] Input netCDF file ID */
int md_open; /* [enm] Mode flag for nc_open() call */
int out_id; /* I [id] Output netCDF file ID */
int rcd=NC_NOERR;
int tpl_id; /* [id] Input netCDF file ID (for horizontal grid template) */
long int clm_idx;
long int grd_idx_out;
long int idx_out;
//long int lat_idx;
//long int lon_idx;
long int pft_idx;
int dmn_idx; /* [idx] Dimension index */
/* Initialize local copies of command-line values */
dfl_lvl=rgr->dfl_lvl;
fl_in=rgr->fl_in;
fl_out=rgr->fl_out;
in_id=rgr->in_id;
out_id=rgr->out_id;
/* Search for horizontal grid */
char *bnd_nm_in=rgr->bnd_nm; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */
char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */
char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */
char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */
int dmn_id_bnd_in=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_col_in=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lat_in=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lon_in=NC_MIN_INT; /* [id] Dimension ID */
nco_bool FL_RTR_RMT_LCN;
nco_bool flg_grd_1D=False; /* [flg] Unpacked data are on unstructured (1D) grid */
nco_bool flg_grd_2D=False; /* [flg] Unpacked data are on rectangular (2D) grid */
nco_bool flg_grd_dat=False; /* [flg] Use horizontal grid from required input data file */
nco_bool flg_grd_tpl=False; /* [flg] Use horizontal grid from optional horizontal grid template file */
nco_bool flg_nm_hst=False; /* [flg] Names in data file are as in history files ("ltype_"...) */
nco_bool flg_nm_rst=False; /* [flg] Names in data file are as in restart files ("ilun_"...) */
/* Does data file have unstructured grid?
MB: Routine must handle two semantically distinct meanings of "column":
1. The horizontal dimension in an unstructured grid
2. A fraction of a landunit, which is a fraction of a CTSM/ELM gridcell
In particular, a column is a fraction of a vegetated, urban, glacier, or crop landunit
This routine distinguishes these meanings by abbreviating (1) as "col" and (2) as "clm"
This usage maintains the precedent that "col" is the horizontal unstructured dimension in nco_rgr.c
It is necessary though unintuitive that "cols1d" variable metadata will use the "clm" abbreviation */
if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col_in)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */
if(dmn_id_col_in != NC_MIN_INT) flg_grd_1D=True;
/* Does data file have RLL grid? */
if(!flg_grd_1D){
if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat_in)) == NC_NOERR) lat_nm_in=strdup("lndgrid"); /* CF */
if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon_in)) == NC_NOERR) lon_nm_in=strdup("lndgrid"); /* CF */
} /* !flg_grd_1D */
if(dmn_id_lat_in != NC_MIN_INT && dmn_id_lon_in != NC_MIN_INT) flg_grd_2D=True;
/* Set where to obtain horizontal grid */
if(flg_grd_1D || flg_grd_2D) flg_grd_dat=True; else flg_grd_tpl=True;
if(flg_grd_tpl && !rgr->fl_hrz){
(void)fprintf(stderr,"%s: ERROR %s did not locate horizontal grid in input data file and no optional horizontal gridfile was provided.\nHINT: Use option --hrz to specify file with horizontal grid used by input data.\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !flg_grd_tpl */
/* Open grid template file iff necessary */
if(flg_grd_tpl && rgr->fl_hrz){
char *fl_pth_lcl=NULL;
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
/* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */
fl_tpl=(char *)strdup(rgr->fl_hrz);
/* Make sure file is on local system and is readable or die trying */
fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
if(SHARE_OPEN) md_open=md_open|NC_SHARE;
rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id);
/* Same logic used to search for grid in data file and to search for grid in template file...
Does template file have unstructured grid? */
if(col_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,col_nm_in,&dmn_id_col_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(tpl_id,"lndgrid",&dmn_id_col_in)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */
if(dmn_id_col_in != NC_MIN_INT) flg_grd_1D=True;
/* Does template file have RLL grid? */
if(!flg_grd_1D){
if(lat_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,lat_nm_in,&dmn_id_lat_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(tpl_id,"latitude",&dmn_id_lat_in)) == NC_NOERR) lat_nm_in=strdup("lndgrid"); /* CF */
if(lon_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,lon_nm_in,&dmn_id_lon_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(tpl_id,"longitude",&dmn_id_lon_in)) == NC_NOERR) lon_nm_in=strdup("lndgrid"); /* CF */
} /* !flg_grd_1D */
if(dmn_id_lat_in != NC_MIN_INT && dmn_id_lon_in != NC_MIN_INT) flg_grd_2D=True;
/* Set where to obtain horizontal grid */
if(!flg_grd_1D && !flg_grd_2D){
(void)fprintf(stderr,"%s: ERROR %s did not locate horizontal grid in input data file %s or in template file %s.\nHINT: One of those files must contain the grid dimensions and coordinates used by the packed data in the input data file.\n",nco_prg_nm_get(),fnc_nm,fl_in,fl_tpl);
nco_exit(EXIT_FAILURE);
} /* !flg_grd_1D */
} /* !flg_grd_tpl */
int cols1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of column */
int cols1d_ixy_id=NC_MIN_INT; /* [id] Column 2D longitude index */
int cols1d_jxy_id=NC_MIN_INT; /* [id] Column 2D latitude index */
int cols1d_lat_id=NC_MIN_INT; /* [id] Column latitude */
int cols1d_lon_id=NC_MIN_INT; /* [id] Column longitude */
int cols1d_ityp_id=NC_MIN_INT; /* [id] Column type */
int cols1d_ityplun_id=NC_MIN_INT; /* [id] Column landunit type */
int grid1d_ixy_id=NC_MIN_INT; /* [id] Gridcell 2D longitude index */
int grid1d_jxy_id=NC_MIN_INT; /* [id] Gridcell 2D latitude index */
int grid1d_lat_id=NC_MIN_INT; /* [id] Gridcell latitude */
int grid1d_lon_id=NC_MIN_INT; /* [id] Gridcell longitude */
int land1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of landunit */
int land1d_ixy_id=NC_MIN_INT; /* [id] Landunit 2D longitude index */
int land1d_jxy_id=NC_MIN_INT; /* [id] Landunit 2D latitude index */
int land1d_lat_id=NC_MIN_INT; /* [id] Landunit latitude */
int land1d_lon_id=NC_MIN_INT; /* [id] Landunit longitude */
int pfts1d_column_index_id=NC_MIN_INT; /* [id] Column index of PFT */
int pfts1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of PFT */
int pfts1d_ityp_veg_id=NC_MIN_INT; /* [id] PFT vegetation type */
int pfts1d_ityplun_id=NC_MIN_INT; /* [id] PFT landunit type */
int pfts1d_ixy_id=NC_MIN_INT; /* [id] PFT 2D longitude index */
int pfts1d_jxy_id=NC_MIN_INT; /* [id] PFT 2D latitude index */
int pfts1d_lat_id=NC_MIN_INT; /* [id] PFT latitude */
int pfts1d_lon_id=NC_MIN_INT; /* [id] PFT longitude */
//int pfts1d_wtgcell_id=NC_MIN_INT; /* [id] PFT weight relative to corresponding gridcell */
int dmn_id_clm_in=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_grd_in=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lnd_in=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_pft_in=NC_MIN_INT; /* [id] Dimension ID */
nco_bool flg_s1d_clm=False; /* [flg] Dataset contains sparse variables for columns */
nco_bool flg_s1d_grd=False; /* [flg] Dataset contains sparse variables for gridcells */
nco_bool flg_s1d_lnd=False; /* [flg] Dataset contains sparse variables for landunits */
nco_bool flg_s1d_pft=False; /* [flg] Dataset contains sparse variables for PFTs */
rcd=nco_inq_att_flg(in_id,NC_GLOBAL,"ilun_vegetated_or_bare_soil",(nc_type *)NULL,(long *)NULL);
if(rcd == NC_NOERR) flg_nm_rst=True;
rcd=nco_inq_att_flg(in_id,NC_GLOBAL,"ltype_vegetated_or_bare_soil",(nc_type *)NULL,(long *)NULL);
if(rcd == NC_NOERR) flg_nm_hst=True;
assert(!(flg_nm_hst && flg_nm_rst));
if(!flg_nm_hst && !flg_nm_rst){
(void)fprintf(stderr,"%s: ERROR %s reports input data file lacks expected global attributes\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !flg_nm_hst */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s will assume input attributes and variables use CLM/ELM %s naming conventions like %s\n",nco_prg_nm_get(),fnc_nm,flg_nm_hst ? "history file" : "restart file",flg_nm_hst ? "\"ltype_...\"" : "\"ilun_...\"");
rcd=nco_inq_varid_flg(in_id,"cols1d_lat",&cols1d_lat_id);
if(cols1d_lat_id != NC_MIN_INT) flg_s1d_clm=True;
if(flg_s1d_clm){
rcd=nco_inq_varid(in_id,"cols1d_ixy",&cols1d_ixy_id);
rcd=nco_inq_varid(in_id,"cols1d_jxy",&cols1d_jxy_id);
rcd=nco_inq_varid(in_id,"cols1d_lon",&cols1d_lon_id);
rcd=nco_inq_varid_flg(in_id,"cols1d_gridcell_index",&cols1d_gridcell_index_id); /* ELM/MALI restart */
rcd=nco_inq_varid_flg(in_id,"cols1d_ityp",&cols1d_ityp_id); /* ELM/MALI restart */
if(flg_nm_hst) rcd=nco_inq_varid(in_id,"cols1d_itype_lunit",&cols1d_ityplun_id); else rcd=nco_inq_varid(in_id,"cols1d_ityplun",&cols1d_ityplun_id);
} /* !flg_s1d_clm */
rcd=nco_inq_varid_flg(in_id,"grid1d_lat",&grid1d_lat_id);
if(grid1d_lat_id != NC_MIN_INT) flg_s1d_grd=True;
if(flg_s1d_grd){
rcd=nco_inq_varid(in_id,"grid1d_ixy",&grid1d_ixy_id);
rcd=nco_inq_varid(in_id,"grid1d_jxy",&grid1d_jxy_id);
rcd=nco_inq_varid(in_id,"grid1d_lon",&grid1d_lon_id);
} /* !flg_s1d_grd */
rcd=nco_inq_varid_flg(in_id,"land1d_lat",&land1d_lat_id);
if(land1d_lat_id != NC_MIN_INT) flg_s1d_lnd=True;
if(flg_s1d_lnd){
rcd=nco_inq_varid_flg(in_id,"land1d_gridcell_index",&land1d_gridcell_index_id);
rcd=nco_inq_varid(in_id,"land1d_ixy",&land1d_ixy_id);
rcd=nco_inq_varid(in_id,"land1d_jxy",&land1d_jxy_id);
rcd=nco_inq_varid(in_id,"land1d_lon",&land1d_lon_id);
} /* !flg_s1d_lnd */
rcd=nco_inq_varid_flg(in_id,"pfts1d_lat",&pfts1d_lat_id);
if(pfts1d_lat_id != NC_MIN_INT) flg_s1d_pft=True;
if(flg_s1d_pft){
rcd=nco_inq_varid(in_id,"pfts1d_ixy",&pfts1d_ixy_id);
rcd=nco_inq_varid(in_id,"pfts1d_jxy",&pfts1d_jxy_id);
rcd=nco_inq_varid(in_id,"pfts1d_lon",&pfts1d_lon_id);
rcd=nco_inq_varid_flg(in_id,"pfts1d_column_index",&pfts1d_column_index_id);
rcd=nco_inq_varid_flg(in_id,"pfts1d_gridcell_index",&pfts1d_gridcell_index_id);
//if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_wtgcell",&pfts1d_wtgcell_id); else rcd=nco_inq_varid(in_id,"pfts1d_wtxy",&pfts1d_wtgcell_id);
if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_itype_lunit",&pfts1d_ityplun_id); else rcd=nco_inq_varid(in_id,"pfts1d_ityplun",&pfts1d_ityplun_id);
if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_itype_veg",&pfts1d_ityp_veg_id); else rcd=nco_inq_varid(in_id,"pfts1d_itypveg",&pfts1d_ityp_veg_id);
} /* !flg_s1d_pft */
if(!(flg_s1d_clm || flg_s1d_lnd || flg_s1d_pft)){
(void)fprintf(stderr,"%s: ERROR %s does not detect any of the key variables (currently cols1d_lat, land1d_lat, pfts1d_lat) used to indicate presence of sparse-packed (S1D) variables\nHINT: Be sure the target dataset (file) contains S1D variables---not all CLM/ELM history (as opposed to restart) files do\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !flg_s1d_clm... */
if(flg_s1d_clm) rcd=nco_inq_dimid(in_id,clm_nm_in,&dmn_id_clm_in);
if(flg_s1d_grd) rcd=nco_inq_dimid(in_id,grd_nm_in,&dmn_id_grd_in);
if(flg_s1d_lnd) rcd=nco_inq_dimid(in_id,lnd_nm_in,&dmn_id_lnd_in);
if(flg_s1d_pft) rcd=nco_inq_dimid(in_id,pft_nm_in,&dmn_id_pft_in);
if(nco_dbg_lvl_get() >= nco_dbg_std){
(void)fprintf(stderr,"%s: INFO %s necessary information to unpack cols1d variables\n",nco_prg_nm_get(),flg_s1d_clm ? "Found all" : "Could not find");
(void)fprintf(stderr,"%s: INFO %s necessary information to unpack land1d variables\n",nco_prg_nm_get(),flg_s1d_lnd ? "Found all" : "Could not find");
(void)fprintf(stderr,"%s: INFO %s necessary information to unpack pfts1d variables\n",nco_prg_nm_get(),flg_s1d_pft ? "Found all" : "Could not find");
} /* !dbg */
/* Collect other information from data and template files */
int dmn_nbr_in; /* [nbr] Number of dimensions in input file */
int dmn_nbr_out; /* [nbr] Number of dimensions in output file */
int var_nbr; /* [nbr] Number of variables in file */
rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL);
const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */
int var_cpy_nbr=0; /* [nbr] Number of copied variables */
int var_rgr_nbr=0; /* [nbr] Number of unpacked variables */
int var_xcl_nbr=0; /* [nbr] Number of deleted variables */
int var_crt_nbr=0; /* [nbr] Number of created variables */
//long idx; /* [idx] Generic index */
unsigned int idx_tbl; /* [idx] Counter for traversal table */
char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */
nco_bool has_clm; /* [flg] Contains column dimension */
nco_bool has_grd; /* [flg] Contains gridcell dimension */
nco_bool has_lnd; /* [flg] Contains landunit dimension */
nco_bool has_pft; /* [flg] Contains PFT dimension */
nco_bool need_clm=False; /* [flg] At least one variable to unpack needs column dimension */
nco_bool need_grd=False; /* [flg] At least one variable to unpack needs gridcell dimension */
nco_bool need_lnd=False; /* [flg] At least one variable to unpack needs landunit dimension */
nco_bool need_mec=False; /* [flg] At least one variable to unpack needs MEC dimension */
nco_bool need_pft=False; /* [flg] At least one variable to unpack needs PFT dimension */
trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */
/* Define unpacking flag for each variable */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn;
has_clm=has_grd=has_lnd=has_pft=False;
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
/* Pre-determine flags necessary during next loop */
dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm;
if(!has_clm && clm_nm_in) has_clm=!strcmp(dmn_nm_cp,clm_nm_in);
if(!has_grd && grd_nm_in) has_grd=!strcmp(dmn_nm_cp,grd_nm_in);
if(!has_lnd && lnd_nm_in) has_lnd=!strcmp(dmn_nm_cp,lnd_nm_in);
if(!has_pft && pft_nm_in) has_pft=!strcmp(dmn_nm_cp,pft_nm_in);
} /* !dmn_idx */
/* Unpack variables that contain a sparse-1D dimension */
if(has_clm || has_grd || has_lnd || has_pft){
trv_tbl->lst[idx_tbl].flg_rgr=True;
var_rgr_nbr++;
if(has_clm) need_clm=True;
if(has_grd) need_grd=True;
if(has_lnd) need_lnd=True;
if(has_pft) need_pft=True;
} /* endif */
/* Copy all variables that are not regridded or omitted */
if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++;
} /* end nco_obj_typ_var */
} /* end idx_tbl */
if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit unpacking criteria. The sparse data unpacker expects at least one variable to unpack, and variables not unpacked are copied straight to output. HINT: If the name(s) of the input sparse-1D dimensions (e.g., \"column\", \"landunit\", and \"pft\") do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"column\", \"landunit\", and/or \"pft\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#sparse. For CTSM/ELM sparse-1D coordinate grids, the \"column\", \"landunit\", and \"pft\" variable names can be set with, e.g., \"ncks --rgr column_nm=clm#landunit_nm=lnd#pft_nm=pft\" or \"ncremap -R '--rgr clm=clm#lnd=lnd#pft=pft'\".\n",nco_prg_nm_get(),fnc_nm);
if(nco_dbg_lvl_get() >= nco_dbg_fl){
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Unpack %s? %s\n",trv.nm,trv.flg_rgr ? "Yes" : "No");
} /* end idx_tbl */
} /* end dbg */
long clm_nbr_in=NC_MIN_INT; /* [nbr] Number of columns in input data */
long grd_nbr_in=NC_MIN_INT; /* [nbr] Number of gridcells in input data */
long lnd_nbr_in=NC_MIN_INT; /* [nbr] Number of landunits in input data */
long pft_nbr_in=NC_MIN_INT; /* [nbr] Number of PFTs in input data */
long clm_nbr_out=NC_MIN_INT; /* [nbr] Number of columns in output data */
long grd_nbr_out=NC_MIN_INT; /* [nbr] Number of gridcells in output data */
long lnd_nbr_out=NC_MIN_INT; /* [nbr] Number of landunits in output data */
long mec_nbr_out=NC_MIN_INT; /* [nbr] Number of MECs in output data */
long pft_nbr_out=NC_MIN_INT; /* [nbr] Number of PFTs in output data */
if(need_clm) rcd=nco_inq_dimlen(in_id,dmn_id_clm_in,&clm_nbr_in);
if(need_grd) rcd=nco_inq_dimlen(in_id,dmn_id_grd_in,&grd_nbr_in);
if(need_lnd) rcd=nco_inq_dimlen(in_id,dmn_id_lnd_in,&lnd_nbr_in);
if(need_pft) rcd=nco_inq_dimlen(in_id,dmn_id_pft_in,&pft_nbr_in);
int hrz_id; /* [id] Horizontal grid netCDF file ID */
long bnd_nbr=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */
long col_nbr; /* [nbr] Number of columns */
long lon_nbr; /* [nbr] Number of longitudes */
long lat_nbr; /* [nbr] Number of latitudes */
size_t grd_sz_in; /* [nbr] Number of elements in single layer of input grid */
size_t grd_sz_out; /* [nbr] Number of elements in single layer of output grid */
if(flg_grd_dat) hrz_id=in_id; else hrz_id=tpl_id;
/* Locate bounds dimension, if any, in file containing horizontal grid */
if(bnd_nm_in && (rcd=nco_inq_dimid_flg(hrz_id,bnd_nm_in,&dmn_id_bnd_in)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(hrz_id,"nv",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("nv"); /* fxm */
else if((rcd=nco_inq_dimid_flg(hrz_id,"nvertices",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("nvertices"); /* CICE */
else if((rcd=nco_inq_dimid_flg(hrz_id,"maxEdges",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("maxEdges"); /* MPAS */
if(flg_grd_1D) rcd=nco_inq_dimlen(hrz_id,dmn_id_col_in,&col_nbr);
if(flg_grd_2D){
rcd=nco_inq_dimlen(hrz_id,dmn_id_lat_in,&lat_nbr);
rcd=nco_inq_dimlen(hrz_id,dmn_id_lon_in,&lon_nbr);
} /* !flg_grd_2D */
if(dmn_id_bnd_in != NC_MIN_INT) rcd=nco_inq_dimlen(hrz_id,dmn_id_bnd_in,&bnd_nbr);
if(grd_nbr_in != NC_MIN_INT){
grd_sz_in=grd_nbr_in;
}else{
grd_sz_in= flg_grd_1D ? col_nbr : lat_nbr*lon_nbr;
} /* !grd_nbr_in */
grd_sz_out= flg_grd_1D ? col_nbr : lat_nbr*lon_nbr;
/* Lay-out unpacked file */
char *bnd_nm_out=NULL;
char *col_nm_out=NULL;
char *lat_nm_out=NULL;
char *lon_nm_out=NULL;
char *lat_dmn_nm_out;
char *lon_dmn_nm_out;
int dmn_id_bnd_out=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_col_out=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lat_out=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lon_out=NC_MIN_INT; /* [id] Dimension ID */
if(rgr->bnd_nm) bnd_nm_out=rgr->bnd_nm; else bnd_nm_out=bnd_nm_in;
if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in;
if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in;
if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in;
if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in;
if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in;
/* Define horizontal dimensions before all else */
if(flg_grd_1D){
rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col_out);
} /* !flg_grd_1D */
if(flg_grd_2D){
rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat_out);
rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon_out);
} /* !flg_grd_2D */
if(dmn_id_bnd_in != NC_MIN_INT) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr,&dmn_id_bnd_out);
char *clm_nm_out=NULL;
char *grd_nm_out=NULL;
char *lnd_nm_out=NULL;
char *pft_nm_out=NULL;
if(need_clm) clm_nm_out=(char *)strdup(clm_nm_in);
if(need_grd) grd_nm_out=(char *)strdup(grd_nm_in);
if(need_lnd) lnd_nm_out=(char *)strdup(lnd_nm_in);
if(need_pft) pft_nm_out=(char *)strdup(pft_nm_in);
int dmn_id_clm_out=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lnd_out=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_mec_out=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_pft_out=NC_MIN_INT; /* [id] Dimension ID */
/* fxm: make an ilun enumerated type? */
int ilun_vegetated_or_bare_soil; /* 1 [enm] */
int ilun_crop; /* 2 [enm] */
int ilun_landice; /* 3 [enm] */
int ilun_landice_multiple_elevation_classes; /* 4 [enm] */
int ilun_deep_lake; /* 5 [enm] */
int ilun_wetland; /* 6 [enm] */
int ilun_urban_tbd; /* 7 [enm] */
int ilun_urban_hd; /* 8 [enm] */
int ilun_urban_md; /* 9 [enm] */
if(flg_nm_hst){
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_vegetated_or_bare_soil",&ilun_vegetated_or_bare_soil,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_crop",&ilun_crop,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_landice",&ilun_landice,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_landice_multiple_elevation_classes",&ilun_landice_multiple_elevation_classes,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_deep_lake",&ilun_deep_lake,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_wetland",&ilun_wetland,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_tbd",&ilun_urban_tbd,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_hd",&ilun_urban_hd,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_md",&ilun_urban_md,NC_INT);
}else{ /* !flg_nm_hst */
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_vegetated_or_bare_soil",&ilun_vegetated_or_bare_soil,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_crop",&ilun_crop,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_landice",&ilun_landice,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_landice_multiple_elevation_classes",&ilun_landice_multiple_elevation_classes,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_deep_lake",&ilun_deep_lake,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_wetland",&ilun_wetland,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_tbd",&ilun_urban_tbd,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_hd",&ilun_urban_hd,NC_INT);
rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_md",&ilun_urban_md,NC_INT);
} /* !flg_nm_hst */
/* Determine output Column dimension if needed */
int *cols1d_ityp=NULL; /* [id] Column type */
int *cols1d_ityplun=NULL; /* [id] Column landunit type */
if(need_clm){
if(cols1d_ityp_id != NC_MIN_INT) cols1d_ityp=(int *)nco_malloc(clm_nbr_in*sizeof(int));
cols1d_ityplun=(int *)nco_malloc(clm_nbr_in*sizeof(int));
if(cols1d_ityp_id != NC_MIN_INT) rcd=nco_get_var(in_id,cols1d_ityp_id,cols1d_ityp,NC_INT);
rcd=nco_get_var(in_id,cols1d_ityplun_id,cols1d_ityplun,NC_INT);
mec_nbr_out=0;
for(clm_idx=0;clm_idx<clm_nbr_in;clm_idx++){
if(cols1d_ityplun[clm_idx] != ilun_landice_multiple_elevation_classes) continue;
while(cols1d_ityplun[clm_idx++] == ilun_landice_multiple_elevation_classes) mec_nbr_out++;
break;
} /* !clm_idx */
/* NB: landice_MEC (ilun=4, usually) landunits have 10 (always, AFAICT) glacier elevation classes */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO mec_nbr_out = %ld\n",nco_prg_nm_get(),mec_nbr_out);
} /* !need_clm */
/* Determine output Grid dimension if needed:
CLM/ELM 'gridcell' dimension counts each gridcell that contains land
Replace this dimension by horizontal dimension(s) in input data file */
if(need_clm){
if(flg_grd_1D) grd_nbr_out=col_nbr;
if(flg_grd_2D) grd_nbr_out=lat_nbr*lon_nbr;
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO grd_nbr_out = %ld\n",nco_prg_nm_get(),grd_nbr_out);
} /* !need_grd */
/* Determine output Landunit dimension if needed */
if(need_lnd){
lnd_nbr_out=3; /* fxm: Based on TBUILD variable for 3 urban landunit types */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO lnd_nbr_out = %ld\n",nco_prg_nm_get(),lnd_nbr_out);
} /* !need_lnd */
/* Determine output PFT dimension if needed */
//double *pfts1d_wtgcell=NULL; /* [id] PFT weight relative to corresponding gridcell */
int *pfts1d_ityp_veg=NULL; /* [id] PFT vegetation type */
int *pfts1d_ityplun=NULL; /* [id] PFT landunit type */
int *pfts1d_ixy=NULL; /* [id] PFT 2D longitude index */
int *pfts1d_jxy=NULL; /* [id] PFT 2D latitude index */
int pft_typ; /* [enm] PFT type */
if(need_pft){
//pfts1d_wtgcell=(double *)nco_malloc(pft_nbr_in*sizeof(double));
pfts1d_ityp_veg=(int *)nco_malloc(pft_nbr_in*sizeof(int));
pfts1d_ityplun=(int *)nco_malloc(pft_nbr_in*sizeof(int));
//rcd=nco_get_var(in_id,pfts1d_wtgcell_id,pfts1d_wtgcell,NC_DOUBLE);
rcd=nco_get_var(in_id,pfts1d_ityp_veg_id,pfts1d_ityp_veg,NC_INT);
rcd=nco_get_var(in_id,pfts1d_ityplun_id,pfts1d_ityplun,NC_INT);
pft_nbr_out=0;
for(pft_idx=0;pft_idx<pft_nbr_in;pft_idx++){
if((pfts1d_ityplun[pft_idx] != ilun_vegetated_or_bare_soil) && (pfts1d_ityplun[pft_idx] != ilun_crop)) continue;
/* Skip bare ground */
while(pfts1d_ityp_veg[++pft_idx] != 0) pft_nbr_out++;
break;
} /* !pft_idx */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO pft_nbr_out = %ld\n",nco_prg_nm_get(),pft_nbr_out);
pfts1d_ixy=(int *)nco_malloc(pft_nbr_in*sizeof(int));
rcd=nco_get_var(in_id,pfts1d_ixy_id,pfts1d_ixy,NC_INT);
if(flg_grd_2D){
pfts1d_jxy=(int *)nco_malloc(pft_nbr_in*sizeof(int));
rcd=nco_get_var(in_id,pfts1d_jxy_id,pfts1d_jxy,NC_INT);
} /* !flg_grd_2D */
} /* !need_pft */
/* Define unpacked versions of needed dimensions before all else */
(void)fprintf(stdout,"%s: DEBUG quark1\n",nco_prg_nm_get());
if(need_clm && clm_nbr_out > 0L) rcd=nco_def_dim(out_id,clm_nm_out,clm_nbr_out,&dmn_id_clm_out);
if(need_lnd && lnd_nbr_out > 0L) rcd=nco_def_dim(out_id,lnd_nm_out,lnd_nbr_out,&dmn_id_lnd_out);
if(need_pft && pft_nbr_out > 0L) rcd=nco_def_dim(out_id,pft_nm_out,pft_nbr_out,&dmn_id_pft_out);
/* Assume MECs are new output dimension if they are enumerated in input */
if(mec_nbr_out > 0L) rcd=nco_def_dim(out_id,mec_nm_out,mec_nbr_out,&dmn_id_mec_out);
/* Pre-allocate dimension ID and cnt/srt space */
char *var_nm; /* [sng] Variable name */
int *dmn_ids_in=NULL; /* [id] Dimension IDs */
int *dmn_ids_out=NULL; /* [id] Dimension IDs */
int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */
int var_id_in; /* [id] Variable ID */
int var_id_out; /* [id] Variable ID */
long *dmn_cnt_in=NULL;
long *dmn_cnt_out=NULL;
long *dmn_srt=NULL;
nc_type var_typ; /* [enm] Variable type (same for input and output variable) */
nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */
int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */
int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */
int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */
rcd+=nco_inq_ndims(in_id,&dmn_nbr_max);
dmn_ids_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
dmn_ids_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
/* Obtain record dimension information from data file (restart files have no time dimension) */
rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL);
if(dmn_nbr_rec > 0){
dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int));
rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec);
} /* !dmn_nbr_rec */
int shuffle; /* [flg] Turn-on shuffle filter */
int deflate; /* [flg] Turn-on deflate filter */
deflate=(int)True;
shuffle=NC_SHUFFLE;
dfl_lvl=rgr->dfl_lvl;
fl_out_fmt=rgr->fl_out_fmt;
//const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */
const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
nc_type crd_typ_in;
nc_type crd_typ_out;
/* Required grid variables */
int lat_in_id; /* [id] Variable ID for latitude */
int lat_out_id; /* [id] Variable ID for latitude */
int lon_in_id; /* [id] Variable ID for longitude */
int lon_out_id; /* [id] Variable ID for longitude */
rcd=nco_inq_varid(hrz_id,lat_nm_in,&lat_in_id);
rcd=nco_inq_varid(hrz_id,lon_nm_in,&lon_in_id);
rcd=nco_inq_vartype(hrz_id,lat_in_id,&crd_typ_in);
/* NB: ELM/CLM history files default to NC_FLOAT for most grid variables
To convert to NC_DOUBLE on output, also convert _FillValue attribute type consistently */
crd_typ_out=crd_typ_in;
/* Optional grid variables */
char *area_nm;
char *sgs_frc_nm;
char *lat_bnd_nm;
char *lon_bnd_nm;
char *sgs_msk_nm;
int area_in_id=NC_MIN_INT; /* [id] Variable ID for area */
int area_out_id=NC_MIN_INT; /* [id] Variable ID for area */
int sgs_frc_in_id=NC_MIN_INT; /* [id] Variable ID for fraction */
int sgs_frc_out_id=NC_MIN_INT; /* [id] Variable ID for fraction */
int lat_bnd_in_id=NC_MIN_INT; /* [id] Variable ID for latitude bounds */
int lat_bnd_out_id=NC_MIN_INT; /* [id] Variable ID for latitude bounds */
int lon_bnd_in_id=NC_MIN_INT; /* [id] Variable ID for longitude bounds */
int lon_bnd_out_id=NC_MIN_INT; /* [id] Variable ID for longitude bounds */
int sgs_msk_in_id=NC_MIN_INT; /* [id] Variable ID for mask */
int sgs_msk_out_id=NC_MIN_INT; /* [id] Variable ID for mask */
nco_bool flg_area_out=False; /* [flg] Add area to output */
nco_bool flg_lat_bnd_out=False; /* [flg] Add latitude bounds to output */
nco_bool flg_lon_bnd_out=False; /* [flg] Add longitude bounds to output */
nco_bool flg_sgs_frc_out=False; /* [flg] Add fraction to output */
nco_bool flg_sgs_msk_out=False; /* [flg] Add mask to output */
area_nm=rgr->area_nm ? rgr->area_nm : strdup("area");
lat_bnd_nm=rgr->lat_bnd_nm ? rgr->lat_bnd_nm : strdup("lat_bnd");
lon_bnd_nm=rgr->lon_bnd_nm ? rgr->lon_bnd_nm : strdup("lon_bnd");
sgs_frc_nm=rgr->sgs_frc_nm ? rgr->sgs_frc_nm : strdup("landfrac");
sgs_msk_nm=rgr->sgs_msk_nm ? rgr->sgs_msk_nm : strdup("landmask");
if((rcd=nco_inq_varid_flg(hrz_id,area_nm,&area_in_id)) == NC_NOERR) flg_area_out=True;
if((rcd=nco_inq_varid_flg(hrz_id,lat_bnd_nm,&lat_bnd_in_id)) == NC_NOERR) flg_lat_bnd_out=True;
if((rcd=nco_inq_varid_flg(hrz_id,lon_bnd_nm,&lon_bnd_in_id)) == NC_NOERR) flg_lon_bnd_out=True;
if((rcd=nco_inq_varid_flg(hrz_id,sgs_frc_nm,&sgs_frc_in_id)) == NC_NOERR) flg_sgs_frc_out=True;
if((rcd=nco_inq_varid_flg(hrz_id,sgs_msk_nm,&sgs_msk_in_id)) == NC_NOERR) flg_sgs_msk_out=True;
(void)fprintf(stdout,"%s: DEBUG quark2\n",nco_prg_nm_get());
if(flg_grd_1D){
rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&lat_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,lat_in_id,lat_out_id,PCK_ATT_CPY);
var_crt_nbr++;
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&lon_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,lon_in_id,lon_out_id,PCK_ATT_CPY);
var_crt_nbr++;
if(flg_lat_bnd_out){
dmn_ids_out[0]=dmn_id_col_out;
dmn_ids_out[1]=dmn_id_bnd_out;
rcd+=nco_def_var(out_id,lat_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,lat_bnd_in_id,lat_bnd_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_lat_bnd_out */
if(flg_lon_bnd_out){
dmn_ids_out[0]=dmn_id_col_out;
dmn_ids_out[1]=dmn_id_bnd_out;
rcd+=nco_def_var(out_id,lon_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,lon_bnd_in_id,lon_bnd_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_lon_bnd_out */
if(flg_area_out){
rcd+=nco_def_var(out_id,area_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&area_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,area_in_id,area_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_area_out */
if(flg_sgs_frc_out){
rcd+=nco_def_var(out_id,sgs_frc_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&sgs_frc_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,sgs_frc_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,sgs_frc_in_id,sgs_frc_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_sgs_frc_out */
if(flg_sgs_msk_out){
rcd+=nco_def_var(out_id,sgs_msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col_out,&sgs_msk_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,sgs_msk_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,sgs_msk_in_id,sgs_msk_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_sgs_msk_out */
} /* !flg_grd_1D */
if(flg_grd_2D){
rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat_out,&lat_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,lat_in_id,lat_out_id,PCK_ATT_CPY);
var_crt_nbr++;
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon_out,&lon_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,lon_in_id,lon_out_id,PCK_ATT_CPY);
var_crt_nbr++;
if(flg_lat_bnd_out){
dmn_ids_out[0]=dmn_id_lat_out;
dmn_ids_out[1]=dmn_id_bnd_out;
rcd+=nco_def_var(out_id,lat_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,lat_bnd_in_id,lat_bnd_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_lat_bnd_out */
if(flg_lon_bnd_out){
dmn_ids_out[0]=dmn_id_lon_out;
dmn_ids_out[1]=dmn_id_bnd_out;
rcd+=nco_def_var(out_id,lon_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,lon_bnd_in_id,lon_bnd_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_lon_bnd_out */
dmn_ids_out[0]=dmn_id_lat_out;
dmn_ids_out[1]=dmn_id_lon_out;
if(flg_area_out){
rcd+=nco_def_var(out_id,area_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,area_in_id,area_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_area_out */
if(flg_sgs_frc_out){
rcd+=nco_def_var(out_id,sgs_frc_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&sgs_frc_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,sgs_frc_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,sgs_frc_in_id,sgs_frc_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_sgs_frc_out */
if(flg_sgs_msk_out){
rcd+=nco_def_var(out_id,sgs_msk_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&sgs_msk_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,sgs_msk_out_id,shuffle,deflate,dfl_lvl);
(void)nco_att_cpy(hrz_id,out_id,sgs_msk_in_id,sgs_msk_out_id,PCK_ATT_CPY);
var_crt_nbr++;
} /* !flg_sgs_msk_out */
} /* !flg_grd_2D */
int flg_pck; /* [flg] Variable is packed on disk */
nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */
nco_bool flg_add_spc_crd; /* [flg] Add spatial coordinates to S1D variable */
float mss_val_flt;
double mss_val_dbl;
nco_s1d_typ_enm nco_s1d_typ; /* [enm] Sparse-1D type of input variable */
aed_sct aed_mtd_fll_val;
(void)fprintf(stdout,"%s: DEBUG quark3\n",nco_prg_nm_get());
/* Define unpacked S1D and copied variables in output file */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
var_nm=trv.nm;
/* Preserve input type in output type */
var_typ=trv.var_typ;
dmn_nbr_in=trv.nbr_dmn;
dmn_nbr_out=trv.nbr_dmn;
rcd=nco_inq_varid(in_id,var_nm,&var_id_in);
rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out);
/* If variable has not been defined, define it */
if(rcd != NC_NOERR){
if(trv.flg_rgr){
/* Unpack */
(void)fprintf(stdout,"%s: DEBUG quark4\n",nco_prg_nm_get());
rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in);
dmn_in_fst=0;
flg_add_spc_crd=False;
rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck);
if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports S1D variable \"%s\" is packed so results unpredictable. HINT: If regridded values seems weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm);
if(clm_nm_in && !strcmp(dmn_nm,clm_nm_in)){
if(mec_nbr_out > 0L){
/* Change input column dimension to MEC if present */
dmn_ids_out[dmn_idx]=dmn_id_mec_out;
dmn_cnt_out[dmn_idx]=mec_nbr_out;
dmn_in_fst++;
dmn_nbr_out++;
} /* !mec_nbr_out */
flg_add_spc_crd=True;
}else if(!strcmp(dmn_nm,grd_nm_in)){
/* Gridcell dimension disappears to become spatial dimension in output */
flg_add_spc_crd=True;
}else if(!strcmp(dmn_nm,lnd_nm_in)){
/* Change landunit dimension */
dmn_ids_out[dmn_idx]=dmn_id_lnd_out;
dmn_cnt_out[dmn_idx]=lnd_nbr_out;
flg_add_spc_crd=True;
}else if(!strcmp(dmn_nm,pft_nm_in)){
if(pft_nbr_out > 0L){
/* Change input PFT dimension to PFT if present */
dmn_ids_out[dmn_idx]=dmn_id_pft_out;
dmn_cnt_out[dmn_idx]=pft_nbr_out;
dmn_in_fst++;
dmn_nbr_out++;
} /* !pft_nbr_out */
flg_add_spc_crd=True;
}else{
/* Dimensions [clm/lnd/pft]_nm_in were pre-defined above as [clm/lnd/pft]_nm_out, replicate all other dimensions */
rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_ids_out+dmn_idx);
} /* !clm */
if(rcd != NC_NOERR){
/* Current input dimension is not yet in output file */
(void)fprintf(stdout,"%s: DEBUG var_nm = %s, dmn_nm = %s\n",nco_prg_nm_get(),var_nm,dmn_nm);
rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx);
/* Check-for and, if found, retain record dimension property */
for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
if(dmn_ids_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx])
dmn_cnt_out[dmn_idx]=NC_UNLIMITED;
rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx);
} /* !rcd */
if(flg_add_spc_crd){
/* Follow by spatial dimension(s) */
if(flg_grd_1D){
dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_col_out;
dmn_cnt_out[dmn_idx+dmn_in_fst]=col_nbr;
} /* !flg_grd_1D */
if(flg_grd_2D){
dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_lat_out;
dmn_cnt_out[dmn_idx+dmn_in_fst]=lat_nbr;
dmn_in_fst++;
dmn_nbr_out++;
dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_lon_out;
dmn_cnt_out[dmn_idx+dmn_in_fst]=lon_nbr;
} /* !flg_grd_2D */
} /* !flg_add_spc_crd */
} /* !dmn_idx */
}else{ /* !flg_rgr */
/* Replicate non-S1D variables */
(void)fprintf(stdout,"%s: DEBUG quark5\n",nco_prg_nm_get());
rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm);
rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_ids_out+dmn_idx);
if(rcd != NC_NOERR){
rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx);
/* Check-for and, if found, retain record dimension property */
for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
if(dmn_ids_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx])
dmn_cnt_out[dmn_idx]=NC_UNLIMITED;
rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx);
} /* !rcd */
} /* !dmn_idx */
} /* !flg_rgr */
(void)fprintf(stdout,"%s: DEBUG quark6 defining %s...\n",nco_prg_nm_get(),var_nm);
rcd=nco_def_var(out_id,var_nm,var_typ,dmn_nbr_out,dmn_ids_out,&var_id_out);
(void)fprintf(stdout,"%s: DEBUG quark7 defined %s\n",nco_prg_nm_get(),var_nm);
/* Duplicate netCDF4 settings when possible */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){
/* Deflation */
if(dmn_nbr_out > 0){
int dfl_lvl_in; /* [enm] Deflate level [0..9] */
rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in);
/* Copy original deflation settings */
if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in);
/* Overwrite HDF Lempel-Ziv compression level, if requested */
if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True;
/* Turn-off shuffle when uncompressing otherwise chunking requests may fail */
if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE;
/* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */
if(dfl_lvl >= 0) shuffle=NC_SHUFFLE;
if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl);
} /* !dmn_nbr_out */
} /* !NC_FORMAT_NETCDF4 */
(void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY);
/* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */
(void)fprintf(stdout,"%s: DEBUG quark8\n",nco_prg_nm_get());
nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */
flg_add_msv_att=False;
if(flg_add_msv_att && trv.flg_rgr){
has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl);
if(!has_mss_val){
nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */
aed_mtd_fll_val.var_nm=var_nm;
aed_mtd_fll_val.id=var_id_out;
aed_mtd_fll_val.type=var_typ;
if(var_typ == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt;
else if(var_typ == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl;
flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val);
if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING %s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm);
} /* !has_mss_val */
} /* !flg_add_msv_att */
} /* !rcd */
} /* !var */
} /* end idx_tbl */
(void)fprintf(stdout,"%s: DEBUG quark9\n",nco_prg_nm_get());
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Begin data mode */
(void)nco_enddef(out_id);
/* Copy coordinate system before closing template file
NB: nco_cpy_var_val() cannot be used here when coordinates are in fl_tpl not fl_in */
(void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lat_nm_in,(lmt_sct *)NULL,(int)0);
(void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lon_nm_in,(lmt_sct *)NULL,(int)0);
if(flg_lat_bnd_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lat_bnd_nm,(lmt_sct *)NULL,(int)0);
if(flg_lon_bnd_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lon_bnd_nm,(lmt_sct *)NULL,(int)0);
if(flg_sgs_frc_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,sgs_frc_nm,(lmt_sct *)NULL,(int)0);
if(flg_sgs_msk_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,sgs_msk_nm,(lmt_sct *)NULL,(int)0);
if(flg_grd_tpl){
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
/* No further access to template file, close it */
nco_close(tpl_id);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl);
} /* !flg_grd_tpl */
/* Free pre-allocated array space */
if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in);
if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out);
if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
/* Unpack and copy data from input file */
//int dmn_idx_col=int_CEWI; /* [idx] Index of column dimension */
//int dmn_idx_lat=int_CEWI; /* [idx] Index of latitude dimension */
//int dmn_idx_lon=int_CEWI; /* [idx] Index of longitude dimension */
int thr_idx; /* [idx] Thread index */
//int var_id; /* [id] Current variable ID */
size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */
size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */
ptr_unn var_val_in;
ptr_unn var_val_out;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped as shared in parallel clause */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
#ifdef __GNUG__
# pragma omp parallel for firstprivate(var_val_in,var_val_out) private(dmn_cnt_in,dmn_cnt_out,dmn_ids_in,dmn_ids_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,has_clm,has_grd,has_lnd,has_pft,has_mss_val,idx_out,idx_tbl,in_id,mss_val_dbl,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ) shared(dmn_id_clm_in,dmn_id_clm_out,dmn_id_col_in,dmn_id_col_out,dmn_id_lat_in,dmn_id_lat_out,dmn_id_lnd_in,dmn_id_lnd_out,dmn_id_lon_in,dmn_id_lon_out,dmn_id_pft_in,dmn_id_pft_out,flg_s1d_clm,flg_s1d_pft,clm_nbr_in,clm_nbr_out,col_nbr,lat_nbr,lnd_nbr_in,lnd_nbr_out,lon_nbr,pft_nbr_in,pft_nbr_out,out_id,pfts1d_ixy,pfts1d_jxy)
#endif /* !__GNUG__ */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
thr_idx=omp_get_thread_num();
in_id=trv_tbl->in_id_arr[thr_idx];
#ifdef _OPENMP
if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : "");
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm);
#endif /* !_OPENMP */
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm);
if(trv.flg_rgr){
/* Unpack variable */
var_nm=trv.nm;
var_typ=trv.var_typ; /* NB: Output type in file is same as input type */
var_sz_in=1L;
var_sz_out=1L;
rcd=nco_inq_varid(in_id,var_nm,&var_id_in);
rcd=nco_inq_varid(out_id,var_nm,&var_id_out);
rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in);
rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out);
dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out;
dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int));
dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int));
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in);
rcd=nco_inq_vardimid(out_id,var_id_out,dmn_ids_out);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx);
var_sz_in*=dmn_cnt_in[dmn_idx];
dmn_srt[dmn_idx]=0L;
} /* !dmn_idx */
for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){
rcd=nco_inq_dimlen(out_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx);
if(dmn_cnt_out[dmn_idx] == 0L){
/* No records have been written, so overwrite zero output record size with input record size */
char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */
int dmn_rec_id_in;
rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_rec_nm);
rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in);
rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx);
} /* !dmn_cnt_out */
var_sz_out*=dmn_cnt_out[dmn_idx];
dmn_srt[dmn_idx]=0L;
} /* !dmn_idx */
var_val_in.vp=(void *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ),fnc_nm,"Unable to malloc() input value buffer");
var_val_out.vp=(void *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ),fnc_nm,"Unable to malloc() output value buffer");
/* Initialize output */
(void)memset(var_val_out.vp,0,var_sz_out*nco_typ_lng(var_typ));
/* Obtain input variable */
rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_in.vp,var_typ);
has_clm=has_grd=has_lnd=has_pft=False;
nco_s1d_typ=nco_s1d_nil;
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm;
if(!has_clm && clm_nm_in) has_clm=!strcmp(dmn_nm_cp,clm_nm_in);
if(!has_grd && grd_nm_in) has_grd=!strcmp(dmn_nm_cp,grd_nm_in);
if(!has_lnd && lnd_nm_in) has_lnd=!strcmp(dmn_nm_cp,lnd_nm_in);
if(!has_pft && pft_nm_in) has_pft=!strcmp(dmn_nm_cp,pft_nm_in);
} /* !dmn_idx */
if(has_clm) nco_s1d_typ=nco_s1d_clm;
else if(has_grd) nco_s1d_typ=nco_s1d_grd;
else if(has_lnd) nco_s1d_typ=nco_s1d_lnd;
else if(has_pft) nco_s1d_typ=nco_s1d_pft;
else{
(void)fprintf(stderr,"%s: ERROR %s reports variable %s does not appear to be sparse\n",nco_prg_nm_get(),fnc_nm,var_nm);
nco_exit(EXIT_FAILURE);
} /* !strstr() */
if(nco_dbg_lvl_get() >= nco_dbg_std){
(void)fprintf(stderr,"%s: INFO %s reports variable %s is sparse type %s\n",nco_prg_nm_get(),fnc_nm,var_nm,nco_s1d_sng(nco_s1d_typ));
} /* !dbg */
/* The Hard Work */
if(nco_s1d_typ == nco_s1d_pft){
/* Turn GPP(time,pft) into GPP(time,pft,lndgrid) */
for(pft_idx=0;pft_idx<pft_nbr_in;pft_idx++){
pft_typ=pfts1d_ityp_veg[pft_idx]; /* [1 <= pft_typ <= pft_nbr_out] */
/* Skip bare ground, output array contains only vegetated types */
if(!pft_typ) continue;
/* grd_idx is the index relative to the origin of the horizontal grid for a given level
[0 <= grd_idx_out <= col_nbr_out-1L], [1 <= pfts1d_ixy <= col_nbr_out] */
grd_idx_out= flg_grd_1D ? pfts1d_ixy[pft_idx]-1L : (pfts1d_ixy[pft_idx]-1L)*lat_nbr+(pfts1d_jxy[pft_idx]-1L);
idx_out=(pft_typ-1)*grd_sz_out+grd_idx_out;
/* memcpy() would allow next statement to work for generic types
However, memcpy() is a system call and could be expensive in an innermost loop */
switch(var_typ){
case NC_FLOAT: var_val_out.fp[idx_out]=var_val_in.fp[pft_idx]; break;
case NC_DOUBLE: var_val_out.dp[idx_out]=var_val_in.dp[pft_idx]; break;
case NC_INT: var_val_out.ip[idx_out]=var_val_in.ip[pft_idx]; break;
default:
(void)fprintf(fp_stdout,"%s: ERROR %s reports unsupported type\n",nco_prg_nm_get(),fnc_nm);
nco_dfl_case_nc_type_err();
break;
} /* !var_typ */
} /* !idx */
} /* !nco_s1d_typ */
#pragma omp critical
{ /* begin OpenMP critical */
rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_out.vp,var_typ);
} /* end OpenMP critical */
if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in);
if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
if(var_val_in.vp) var_val_in.vp=(void *)nco_free(var_val_in.vp);
if(var_val_out.vp) var_val_out.vp=(void *)nco_free(var_val_out.vp);
}else{ /* !trv.flg_rgr */
/* Use standard NCO copy routine for variables that are not regridded
20190511: Copy them only once */
#pragma omp critical
{ /* begin OpenMP critical */
(void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl);
} /* end OpenMP critical */
} /* !flg_rgr */
} /* !xtr */
} /* end (OpenMP parallel for) loop over idx_tbl */
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n");
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr);
/* Free output data memory */
if(cols1d_ityp) cols1d_ityp=(int *)nco_free(cols1d_ityp);
if(cols1d_ityplun) cols1d_ityplun=(int *)nco_free(cols1d_ityplun);
if(pfts1d_ityp_veg) pfts1d_ityp_veg=(int *)nco_free(pfts1d_ityp_veg);
if(pfts1d_ityplun) pfts1d_ityplun=(int *)nco_free(pfts1d_ityplun);
if(pfts1d_ixy) pfts1d_ixy=(int *)nco_free(pfts1d_ixy);
if(pfts1d_jxy) pfts1d_jxy=(int *)nco_free(pfts1d_jxy);
//if(pfts1d_wtgcell) pfts1d_wtgcell=(double *)nco_free(pfts1d_wtgcell);
if(clm_nm_in) clm_nm_in=(char *)nco_free(clm_nm_in);
if(grd_nm_in) grd_nm_in=(char *)nco_free(grd_nm_in);
if(lnd_nm_in) lnd_nm_in=(char *)nco_free(lnd_nm_in);
if(pft_nm_in) pft_nm_in=(char *)nco_free(pft_nm_in);
if(clm_nm_out) clm_nm_out=(char *)nco_free(clm_nm_out);
if(grd_nm_out) grd_nm_out=(char *)nco_free(grd_nm_out);
if(lnd_nm_out) lnd_nm_out=(char *)nco_free(lnd_nm_out);
if(pft_nm_out) pft_nm_out=(char *)nco_free(pft_nm_out);
return rcd;
} /* !nco_s1d_unpack() */
|
main-parallel.c | #include "tour.h"
#include "rdwt.h"
#include "def.h"
#include "timer.h"
#include <string.h>
#include <omp.h>
void traveling_salesman2(tour path, int k, int ncities, distance_table t_distance,
distance aggregate, tour better_path, distance *low_distance);
/* Parallel top level of the branch-and-bound TSP search.
 * Each OpenMP thread explores a disjoint set of first-level branches using
 * its own working tour path[rank], best tour better_path[rank], and bound
 * low_distance[rank]; results are reduced into slot 0 afterwards.
 *
 * path         per-thread working tours (each a permutation of the cities)
 * k            depth at which to branch (callers start at 1)
 * ncities      number of cities in the tour
 * t_distance   city-to-city distance table
 * aggregate    length of the fixed prefix path[*][0..k-1]
 * better_path  per-thread best tours found so far (slot 0 holds the answer)
 * low_distance per-thread best tour lengths (slot 0 holds the answer)
 * nthreads     number of OpenMP threads / per-thread slots
 *
 * Fixes vs. the previous version:
 * - aggregate_local was shared across threads (declared outside the
 *   parallel for), causing a data race; it is now per-iteration.
 * - Pruning compared against *low_distance (thread 0's bound) via an
 *   unsynchronized cross-thread read; each thread now prunes against its
 *   own low_distance[rank].
 * - The reduction copied path[best] (already restored to its original
 *   order by the undo-swaps) instead of better_path[best], and never
 *   stored the global minimum into low_distance[0]. */
void traveling_salesman(tour* path, int k, int ncities, distance_table t_distance,
    distance aggregate, tour* better_path, distance* low_distance, int nthreads)
{
    int i;
    if (k == ncities) {
        /* Degenerate case: nothing left to permute; the tour is 0 -> 1 -> 0. */
        low_distance[0] = t_distance[0][1] + t_distance[1][0];
        memcpy(better_path[0], path[0], sizeof(city)*ncities);
    } else {
#pragma omp parallel for num_threads(nthreads)
        for (i = k; i < ncities; i++) {
            int rank = omp_get_thread_num();
            /* Per-iteration (thus per-thread) extension length; declaring it
             * inside the loop body avoids the data race on a shared scalar. */
            distance aggregate_local;
            swap_cities(path[rank], k, i); /* place city i at position k */
            aggregate_local = aggregate + get_distance(t_distance, path[rank][k-1], path[rank][k]);
            /* Prune against this thread's own bound only. */
            if (aggregate_local < low_distance[rank]) {
                traveling_salesman2(path[rank], k + 1, ncities, t_distance, aggregate_local, better_path[rank], &low_distance[rank]);
            }
            swap_cities(path[rank], i, k); /* undo the branch */
        }
        /* Reduce per-thread results: the shortest tour ends up in slot 0. */
        int best = 0;
        for (i = 1; i < nthreads; i++)
            if (low_distance[i] < low_distance[best]) best = i;
        if (best != 0) {
            low_distance[0] = low_distance[best];
            memcpy(better_path[0], better_path[best], sizeof(city)*ncities);
        }
    }
}
/* Serial recursive branch-and-bound over all tours that share the fixed
 * prefix path[0..k-1].
 *
 * path         working tour; restored to its entry state before returning
 * k            next position to fill
 * ncities      number of cities in the tour
 * t_distance   city-to-city distance table
 * aggregate    length of the fixed prefix path[0..k-1]
 * better_path  out: overwritten whenever a shorter complete tour is found
 * low_distance in/out: length of the best complete tour seen so far */
void traveling_salesman2(tour path, int k, int ncities, distance_table t_distance,
    distance aggregate, tour *low_path_better, distance *low_distance)
{
    int idx;
    if (k < ncities) {
        /* Branch: try every remaining city at position k, recurse, undo. */
        for (idx = k; idx < ncities; idx++) {
            distance extended;
            swap_cities(path, k, idx);
            extended = aggregate + get_distance(t_distance, path[k-1], path[k]);
            /* Bound: descend only if the partial tour can still beat the best. */
            if (extended < *low_distance)
                traveling_salesman2(path, k + 1, ncities, t_distance, extended, low_path_better, low_distance);
            swap_cities(path, idx, k);
        }
        return;
    }
    /* Complete tour: close the cycle back to the starting city and
     * record it if it improves on the best tour found so far. */
    aggregate += t_distance[path[k-1]][path[0]];
    if (aggregate < *low_distance) {
        *low_distance = aggregate;
        memcpy(low_path_better, path, sizeof(city)*ncities);
    }
}
/* Driver: parse arguments, build per-thread tour/bound arrays, run the
 * parallel branch-and-bound search, then print the best tour and timing.
 * Returns 0 on success, 1 on allocation failure.
 *
 * Fix vs. the previous version: the three malloc() results were used
 * without being checked, so a failed allocation crashed inside
 * create_tour()/the search instead of failing cleanly. */
int main(int argc, char* argv[]){
    int i;
    city start_city;
    int ncities = 0, nthreads = 0;
    distance* low_distance;
    distance_table t_distance;
    tour* path, *better_path;
    get_input(argc, argv, &ncities, &start_city, &nthreads);
    /* One working tour, one best tour, and one bound per thread. */
    path = (tour*)malloc(sizeof(tour)*nthreads);
    better_path = (tour*)malloc(sizeof(tour)*nthreads);
    low_distance = (distance*)malloc(sizeof(distance)*nthreads);
    if (path == NULL || better_path == NULL || low_distance == NULL) {
        printf("memory allocation failed\n");
        free(path);
        free(better_path);
        free(low_distance);
        return 1;
    }
    for(i = 0; i < nthreads; i++) {
        path[i] = create_tour(ncities);
        populate_tour(path[i],ncities);
        better_path[i] = create_tour(ncities);
        start_point(path[i], start_city);
        low_distance[i] = INF; /* no tour found yet: prune against infinity */
    }
    t_distance = rd_distance(ncities);
    double time_ini;
    GET_TIME(time_ini);
    /* Depth starts at 1: position 0 is pinned to start_city. */
    traveling_salesman(path, 1, ncities, t_distance, 0, better_path, low_distance, nthreads);
    double time_end;
    GET_TIME(time_end);
    /* Best tour and bound were reduced into slot 0 by traveling_salesman(). */
    pt_betterpath(better_path[0], start_city, ncities, low_distance[0]);
    printf("tempo de execução: %0.12lf\n", time_end - time_ini);
    free(path);
    free(better_path);
    free(low_distance);
    return 0;
}
ParallelClusterCreator.h | #include <memory>
//#ifdef PARALLEL_EXECUTION
//#include <omp.h>
//#endif
// class for running clustering algorithm on Charts
struct ParallelClusterCreator
{
// Run the chart-clustering pipeline on polyhedron P and return the final
// chart count. An empty chart_id_map means "start from one chart per face";
// a populated map is taken as a grid-based pre-clustering to refine.
// chart_id_map is rewritten to map each face id to its final chart id.
static uint32_t create_charts(std::map<uint32_t, uint32_t>& chart_id_map, Polyhedron& P, const double cost_threshold, const uint32_t chart_threshold, CLUSTER_SETTINGS cluster_settings)
{
  std::vector<Chart> chart_vec;
  if(!chart_id_map.empty())
  {
    // Seed charts from the grid-based initial splitting carried by chart_id_map.
    (void)ClusterCreator::initialise_charts_from_grid_clusters(P, chart_id_map, chart_vec, cluster_settings, chart_threshold);
    // Grid-seeded charts carry stale geometry/adjacency info: rebuild the
    // perimeter and neighbour set of every chart before clustering.
    for(auto& seeded_chart : chart_vec)
    {
      seeded_chart.recalculate_perimeter_from_scratch();
      seeded_chart.create_neighbour_set(chart_id_map);
    }
  }
  else
  {
    // No pre-clustering supplied: every face becomes its own chart, and the
    // lookup table is filled so neighbour checks during joins are fast.
    create_initial_charts(chart_vec, P);
    populate_chart_LUT(chart_vec, chart_id_map);
  }
  // Build candidate joins between neighbouring charts, then cluster.
  std::vector<std::shared_ptr<JoinOperation>> pending_joins;
  create_joins_from_chart_vector(chart_vec, pending_joins, cluster_settings, chart_id_map);
  cluster_faces(chart_vec, pending_joins, cost_threshold, chart_threshold, cluster_settings, chart_id_map);
  // Refresh the face-to-chart lookup table and report the final chart count.
  return populate_chart_LUT(chart_vec, chart_id_map);
}
// builds a chart list where each face of a polyhedron is 1 chart
static void create_initial_charts(std::vector<Chart>& charts, Polyhedron& P)
{
// calculate areas of each face
// std::cout << "Calculating face areas...\n";
std::map<face_descriptor, double> fareas;
for(face_descriptor fd : faces(P))
{
fareas[fd] = CGAL::Polygon_mesh_processing::face_area(fd, P);
// std::cout << "Area " << fareas[fd] << std::endl;
}
// calculate normals of each faces
// std::cout << "Calculating face normals...\n";
std::map<face_descriptor, Vector> fnormals;
CGAL::Polygon_mesh_processing::compute_face_normals(P, boost::make_assoc_property_map(fnormals));
// get boost face iterator
face_iterator fb_boost, fe_boost;
boost::tie(fb_boost, fe_boost) = faces(P);
// each face begins as its own chart
// std::cout << "Creating initial charts...";
for(Facet_iterator fb = P.facets_begin(); fb != P.facets_end(); ++fb)
{
// init chart instance for face
Chart c(charts.size(), std::make_shared<Facet>(*fb), fnormals[*fb_boost], fareas[*fb_boost]);
charts.push_back(c);
fb_boost++;
}
// std::cout << "..." << charts.size() << " charts.\n";
}
static void create_joins_from_chart_vector(std::vector<Chart>& charts,
// std::vector<std::shared_ptr<JoinOperation> > &joins,
std::vector<std::shared_ptr<JoinOperation>>& join_queue,
CLUSTER_SETTINGS cluster_settings,
std::map<uint32_t, uint32_t>& chart_id_map)
{
// std::cout << "Creating joins from chart list...";
std::set<uint32_t> processed_charts;
std::set<uint32_t> chart_neighbours;
// for each chart
for(auto& chart : charts)
{
chart_neighbours.clear();
// for each face in chart, find neighbours, add to chart_neighbours set
for(auto& face : chart.facets)
{
// for each edge
Halfedge_facet_circulator fc = face->facet_begin();
do
{
if(!fc->is_border() && !(fc->opposite()->is_border())) // guard against no neighbour at this edge
{
// get chart id of neighbour, add to set if it is not this chart
uint32_t nbr_face_id = fc->opposite()->facet()->id();
uint32_t nbr_chart_id = chart_id_map[nbr_face_id];
if(nbr_chart_id != chart.id)
{
chart_neighbours.insert(nbr_chart_id);
}
}
} while(++fc != face->facet_begin());
}
// create joins...
// if neighbours have not already been processed, create join between this and neighbour
for(auto& nbr_chart_id : chart_neighbours)
{
// make sure it hasnt been processed already
if(processed_charts.find(nbr_chart_id) == processed_charts.end())
{
// chart ids should be equal to their index in the vector at this point
JoinOperation join(chart.id, nbr_chart_id, JoinOperation::cost_of_join(charts[chart.id], charts[nbr_chart_id], cluster_settings));
join_queue.push_back(std::make_shared<JoinOperation>(join));
}
}
// add this chart to set of processed charts, so that it is not considered for new joins
processed_charts.insert(chart.id);
} // end for each chart
// std::cout << join_queue.size() << " joins\n";
}
// takes a list of joins and charts, and executes joins until target number of charts/cost threshold is reached
static void cluster_faces(std::vector<Chart>& charts,
// std::vector<std::shared_ptr<JoinOperation> > &joins,
std::vector<std::shared_ptr<JoinOperation>>& join_queue,
const double cost_threshold,
const uint32_t chart_threshold,
CLUSTER_SETTINGS& cluster_settings,
std::map<uint32_t, uint32_t>& chart_id_map)
{
if(join_queue.empty())
{
std::cout << "ERROR: join_queue is empty - no joins possible" << std::endl;
}
// std::cout << "Clustering faces...." << std::endl;
// std::stringstream report;
// report << "--------------------\nReport:\n----------------------\n";
std::vector<std::shared_ptr<JoinOperation>>::iterator it;
// for reporting and calculating when to stop merging
const uint32_t initial_charts = charts.size();
const uint32_t desired_merges = initial_charts - chart_threshold;
uint32_t chart_merges = 0;
int prev_cost_percent = -1;
int prev_charts_percent = -1;
int overall_percent = -1;
// key chart position (in chart vector) :: value - list of pointers to join operations that reference this chart
// std::map<uint32_t, std::vector<std::shared_ptr<JoinOperation> > > chart_to_join_inverse_index;
// populate_inverse_index(chart_to_join_inverse_index, charts, joins);
// join_queue.sort(JoinOperation::sort_join_ptrs);
std::sort(join_queue.begin(), join_queue.end(), JoinOperation::sort_join_ptrs);
const double lowest_cost = join_queue.front()->cost;
// execute lowest join cost and update affected joins. re-sort.
// std::cout << "Processing join queue...\n";
while(join_queue.front()->cost < cost_threshold)
{
//&& !join_queue.empty()) {
//&& (charts.size() - chart_merges) > chart_threshold){
if(join_queue.empty())
{
break;
}
// if (join_queue.front()->cost > 1.f) {
// std::cout << "joining cost: " << join_queue.front()->cost << " ... remaining joins: " << join_queue.size() << std::endl;
//}
// reporting-------------
if(chart_merges % 1000 == 0)
{
// std::cout << "joining cost: " << join_queue.front()->cost << " ... remaining joins: " << join_queue.size() << std::endl;
int percent = (int)(((join_queue.front()->cost - lowest_cost) / (cost_threshold - lowest_cost)) * 100);
if(percent != prev_cost_percent && percent > overall_percent)
{
prev_cost_percent = percent;
overall_percent = percent;
std::cout << percent << " percent to cost threshold (" << chart_merges << " merges done)\n";
}
}
JoinOperation join_todo = *(join_queue.front());
join_queue.erase(join_queue.begin());
// guard against inactive joins
if(!join_todo.active)
{
continue;
}
// check amount of neighbours resulting chart would have. if too few, skip to next one
if(join_todo.results_in_chart_with_neighbours(charts, chart_id_map) < 3)
{
continue;
}
// merge faces from chart2 into chart 1
// std::cout << "merging charts " << join_todo.chart1_id << " and " << join_todo.chart2_id << std::endl;
// charts[join_todo.get_chart1_id()].merge_with(charts[join_todo.get_chart2_id()], join_todo.cost);
charts[join_todo.get_chart1_id()].merge_with(charts[join_todo.get_chart2_id()]);
// DEactivate chart 2
if(charts[join_todo.get_chart2_id()].active == false)
{
// report << "chart " << join_todo.chart2_id << " was already inactive at merge " << chart_merges << std::endl; // should not happen
continue;
}
// DEactivate chart 2
charts[join_todo.get_chart2_id()].active = false;
//--------------------------------------------------------------
// update remaining joins that include either of the merged charts
//--------------------------------------------------------------
#if 0
// use inverse index to retrieve the joins that need to be updated
//merge affected join lists from 2 charts involved (from chart 2 to 1)
std::vector<std::shared_ptr<JoinOperation>>& affected_joins = chart_to_join_inverse_index[join_todo.get_chart1_id()];
affected_joins.insert(
affected_joins.end() ,
chart_to_join_inverse_index[join_todo.get_chart2_id()].begin(),
chart_to_join_inverse_index[join_todo.get_chart2_id()].end());
// std::cout << "merged: " << affected_joins.size() << "\n";
std::list<uint32_t> indices_to_remove;
//for each affected join, update or add to list for removal
for (uint32_t i = 0; i < affected_joins.size(); i++){
// std::shared_ptr<JoinOperation> join_op = affected_joins[i];
std::shared_ptr<JoinOperation> join_op ( affected_joins[i] );
//replace expired chart and sorts chart ids
join_op->replace_id_with(join_todo.get_chart2_id(), join_todo.get_chart1_id());
//check if this join is within a chart now - add to removal list
if (join_op->get_chart1_id() == join_op-> get_chart2_id())
{
indices_to_remove.push_back(i);
join_op->active = false;
}
}
// std::cout << "to remove: " << indices_to_remove.size() << "\n";
//remove those not needed any more
indices_to_remove.sort();
int num_removed = 0;
for (auto id : indices_to_remove) {
std::vector< std::shared_ptr<JoinOperation> >::iterator it2 = affected_joins.begin();
// adjust ID to be deleted to account for previously deleted items
std::advance(it2, id - num_removed);
affected_joins.erase(it2);
num_removed++;
}
// std::cout << "after removing: " << affected_joins.size() << "\n";
//remove duplicates in affected joins
auto new_end_of_array = std::unique(affected_joins.begin(), affected_joins.end(), JoinOperation::compare);
affected_joins.resize( std::distance(affected_joins.begin(),new_end_of_array) );
// std::cout << "after removing duplicates: " << affected_joins.size() << "\n";
//recalculate costs for what is left
for (uint32_t i = 0; i < affected_joins.size(); i++){
// std::cout << "join " << i << std::endl;
std::shared_ptr<JoinOperation> join_op ( affected_joins[i] );
// std::cout << "got join " << i << std::endl;
join_op->cost = JoinOperation::cost_of_join(charts[join_op->get_chart1_id()], charts[join_op->get_chart2_id()], cluster_settings);
// std::cout << "costed join " << i << std::endl;
}
// std::cout << "updated\n";
//resort join queue
std::sort(join_queue.begin(),join_queue.end(), JoinOperation::sort_join_ptrs);
// std::cout << "sorted\n";
#else
// old method of updating join list
std::vector<int> to_erase_merged;
std::vector<std::shared_ptr<JoinOperation>> to_recalculate_error_merged;
//#ifdef PARALLEL_EXECUTION
//#pragma omp declare reduction(merge : std::vector <int> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
//#pragma omp declare reduction(merge : std::vector <std::shared_ptr <JoinOperation>> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
//
//// find affected joins and add to list for erase/update
//#pragma omp parallel for reduction(merge : to_erase_merged, to_recalculate_error_merged)
//#endif
for(uint32_t j = 0; j < join_queue.size(); j++)
{
// if join is affected, update references and cost
if(join_queue[j]->get_chart1_id() == join_todo.get_chart1_id() || join_queue[j]->get_chart1_id() == join_todo.get_chart2_id() ||
join_queue[j]->get_chart2_id() == join_todo.get_chart1_id() || join_queue[j]->get_chart2_id() == join_todo.get_chart2_id())
{
// eliminate references to joined chart 2 (it is no longer active)
// by pointing them to chart 1
if(join_queue[j]->get_chart1_id() == join_todo.get_chart2_id())
{
join_queue[j]->set_chart1_id(join_todo.get_chart1_id());
}
if(join_queue[j]->get_chart2_id() == join_todo.get_chart2_id())
{
join_queue[j]->set_chart2_id(join_todo.get_chart1_id());
}
// save this join to be deleted (and replaced in queue if necessary)
// to_erase[omp_get_thread_num()].push_back(j);
to_erase_merged.push_back(j);
// search for duplicates
if(join_queue[j]->get_chart1_id() == join_todo.get_chart1_id() && join_queue[j]->get_chart2_id() == join_todo.get_chart2_id())
{
// report << "duplicate found : c1 = " << it->chart1_id << ", c2 = " << it->chart2_id << std::endl;
// set inactive
join_queue[j]->active = false;
}
// check for joins within a chart
else if(join_queue[j]->get_chart1_id() == join_queue[j]->get_chart2_id())
{
// report << "Join found within a chart: " << join_queue[j]->chart1_id << std::endl;
// set inactive
join_queue[j]->active = false;
}
else
{
// add (pointer of JO) to vector to be updated
// to_recalculate_error[omp_get_thread_num()].push_back(join_queue[j]);
to_recalculate_error_merged.push_back(join_queue[j]);
}
}
}
// erase all elements that need to be erased (either no longer needed or will be recalculated)
std::sort(to_erase_merged.begin(), to_erase_merged.end());
int num_erased = 0;
for(auto id : to_erase_merged)
{
std::vector<std::shared_ptr<JoinOperation>>::iterator it2 = join_queue.begin();
// adjust ID to be deleted to account for previously deleted items
std::advance(it2, id - num_erased);
join_queue.erase(it2);
num_erased++;
}
#ifdef PARALLEL_EXECUTION
// recalculate error for joins that need to be updated
#pragma omp parallel for
#endif
for(uint32_t j = 0; j < to_recalculate_error_merged.size(); j++)
{
std::shared_ptr<JoinOperation> join_ptr(to_recalculate_error_merged[j]);
join_ptr->cost = JoinOperation::cost_of_join(charts[join_ptr->get_chart1_id()], charts[join_ptr->get_chart2_id()], cluster_settings);
}
// replace joins that were filtered out to be sorted
if(to_recalculate_error_merged.size() > 0)
{
std::sort(to_recalculate_error_merged.begin(), to_recalculate_error_merged.end(), JoinOperation::sort_join_ptrs);
std::vector<std::shared_ptr<JoinOperation>>::iterator it2;
uint32_t insert_item = 0;
for(it2 = join_queue.begin(); it2 != join_queue.end(); ++it2)
{
// insert items while join list item has bigger cost than element to be inserted
while(insert_item < to_recalculate_error_merged.size() && (*it2)->cost > to_recalculate_error_merged[insert_item]->cost)
{
join_queue.insert(it2, to_recalculate_error_merged[insert_item]);
insert_item++;
}
// if all items are in place, we are done
if(insert_item >= to_recalculate_error_merged.size())
{
break;
}
}
// add any remaining items to end of queue
for(uint32_t i = insert_item; i < to_recalculate_error_merged.size(); i++)
{
join_queue.push_back(to_recalculate_error_merged[i]);
}
}
#endif
chart_merges++;
}
// std::cout << "--------------------\nCharts:\n----------------------\n";
uint32_t total_faces = 0;
uint32_t total_active_charts = 0;
for(uint32_t i = 0; i < charts.size(); ++i)
{
if(charts[i].active)
{
uint32_t num_faces = charts[i].facets.size();
total_faces += num_faces;
total_active_charts++;
}
}
if(!join_queue.empty())
{
// std::cout << "joins remaining: " << join_queue.size() << std::endl;
std::cout << "Maximum error exceeded. Cost of cheapest join operation: " << join_queue.front()->cost << std::endl;
}
else
{
std::cout << "Join list empty" << std::endl;
}
// std::cout << "Total number of faces in charts = " << total_faces << std::endl;
// std::cout << "Initial charts = " << charts.size() << std::endl;
// std::cout << "Total number merges = " << chart_merges << std::endl;
std::cout << "Num charts after chartification = " << total_active_charts << std::endl;
// std::cout << report.str();
}
// fill chart_id_map from chart vector
static uint32_t populate_chart_LUT(std::vector<Chart>& charts, std::map<uint32_t, uint32_t>& chart_id_map)
{
chart_id_map.clear();
// populate LUT for face to chart mapping
// count charts on the way to apply new chart ids
uint32_t active_charts = 0;
for(uint32_t id = 0; id < charts.size(); ++id)
{
auto& chart = charts[id];
if(chart.active)
{
for(auto& f : chart.facets)
{
chart_id_map[f->id()] = active_charts;
}
active_charts++;
}
}
return active_charts;
}
// fills inverse index linking each chart with joins that reference it
static void
populate_inverse_index(std::map<uint32_t, std::vector<std::shared_ptr<JoinOperation>>>& chart_to_join_inverse_index, std::vector<Chart>& charts, std::vector<std::shared_ptr<JoinOperation>>& joins)
{
if(charts.size() == 0)
{
std::cout << "WARNING: no charts received in populate_inverse_index() \n";
return;
}
if(joins.size() == 0)
{
std::cout << "WARNING: no joins received in populate_inverse_index() \n";
return;
}
if(chart_to_join_inverse_index.size() == 0)
{
std::cout << "building inverse index from scratch...";
// initialise map?
// for (int i = 0; i < charts.size())
}
// for each join, add a pointer to the list for each relevant chart
for(uint32_t i = 0; i < joins.size(); i++)
{
// chart_to_join_inverse_index[joins[i].get_chart1_id()].push_back( &(joins[i]) );
// chart_to_join_inverse_index[joins[i].get_chart2_id()].push_back( &(joins[i]) );
chart_to_join_inverse_index[joins[i]->get_chart1_id()].push_back(std::shared_ptr<JoinOperation>(joins[i]));
chart_to_join_inverse_index[joins[i]->get_chart2_id()].push_back(std::shared_ptr<JoinOperation>(joins[i]));
}
std::cout << "Inverse index populated with " << chart_to_join_inverse_index.size() << " entries\n";
// debug only - checking inverse index was created correctly
// for(auto& entry : chart_to_join_inverse_index){
// if(entry.second.size() == 0)
// std::cout << "Chart with no joins: " << entry.first << std::endl;
// }
}
};
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.